Add API 'Clone' to tim_vx ops and support default layout inference
Signed-off-by: yuenan.li <yuenan.li@verisilicon.com>
This commit is contained in: parent 21ecf5262e, commit 29f1efc492
@@ -37,6 +37,7 @@ class Operation {
   Operation(Graph* graph, uint32_t operation_id,
             int input_cnt = 0, int ouput_cnt = 0, DataLayout layout = DataLayout::ANY);
   virtual ~Operation();
+  virtual std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const = 0;
   Operation& BindInput(const std::shared_ptr<Tensor>& tensor);
   Operation& BindOutput(const std::shared_ptr<Tensor>& tensor);
   Operation& BindInputs(const std::vector<std::shared_ptr<Tensor>>& tensors);
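For orientation before the rest of the diff: a minimal sketch of how the new pure-virtual Clone is meant to be called, replicating an op (with its attributes) into a second graph. This follows the existing tim::vx Context/Graph/CreateOperation API; the graph names and the choice of Relu are illustrative only.

#include "tim/vx/context.h"
#include "tim/vx/graph.h"
#include "tim/vx/ops/activations.h"

void CloneSketch() {
  auto ctx = tim::vx::Context::Create();
  auto src_graph = ctx->CreateGraph();    // graph the op was built in
  auto infer_graph = ctx->CreateGraph();  // graph it will be copied into

  auto relu = src_graph->CreateOperation<tim::vx::ops::Relu>();

  // Clone() re-creates the op with identical attributes inside infer_graph;
  // binding input/output tensors remains the caller's job.
  auto cloned = relu->Clone(infer_graph);
  // cloned->BindInput(...); cloned->BindOutput(...);
}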
@@ -64,10 +64,12 @@ namespace ops {
 * ```
 */

-#define DECLARE_NO_PARAMETER_ACTIVATION(NAME) \
-  class NAME : public Operation {             \
-   public:                                    \
-    NAME(Graph* graph);                       \
+#define DECLARE_NO_PARAMETER_ACTIVATION(NAME)          \
+  class NAME : public Operation {                      \
+   public:                                             \
+    NAME(Graph* graph);                                \
+    std::shared_ptr<Operation> Clone(                  \
+        std::shared_ptr<Graph>& graph) const override; \
   };

 DECLARE_NO_PARAMETER_ACTIVATION(Relu)

@@ -86,6 +88,8 @@ DECLARE_NO_PARAMETER_ACTIVATION(SoftRelu)
 class Prelu : public Operation {
  public:
   Prelu(Graph* graph, int axis);
+  std::shared_ptr<Operation> Clone(
+      std::shared_ptr<Graph>& graph) const override;

  protected:
   int axis_;

@@ -94,6 +98,8 @@ class Prelu : public Operation {
 class LeakyRelu : public Operation {
  public:
   LeakyRelu(Graph* graph, float alpha);
+  std::shared_ptr<Operation> Clone(
+      std::shared_ptr<Graph>& graph) const override;

  protected:
   float alpha_;

@@ -101,7 +107,10 @@ class LeakyRelu : public Operation {

 class Linear : public Operation {
  public:
-  Linear(Graph* graph, float a, float b=0.0);
+  Linear(Graph* graph, float a, float b = 0.0);
+  std::shared_ptr<Operation> Clone(
+      std::shared_ptr<Graph>& graph) const override;

  protected:
   float a_;
   float b_;
@@ -40,6 +40,8 @@ namespace ops {
 class AddN : public Operation {
  public:
   AddN(Graph* graph, uint32_t num_inputs);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;
 };

 }  // namespace ops
@@ -36,13 +36,15 @@ namespace ops {
 * along the provided **axis**. The type of the output tensor is integer.
 */

-#define DECLARE_ARG_OP(NAME)                            \
-  class Arg##NAME : public Operation {                  \
-   public:                                              \
-    Arg##NAME(Graph* graph, int32_t axis);              \
-                                                        \
-   protected:                                           \
-    int32_t axis_;                                      \
+#define DECLARE_ARG_OP(NAME)                            \
+  class Arg##NAME : public Operation {                  \
+   public:                                              \
+    Arg##NAME(Graph* graph, int32_t axis);              \
+    std::shared_ptr<Operation> Clone(                   \
+        std::shared_ptr<Graph>& graph) const override;  \
+                                                        \
+   protected:                                           \
+    int32_t axis_;                                      \
   };

 DECLARE_ARG_OP(Min);
@@ -49,6 +49,8 @@ class Batch2Space : public Operation {
               const std::vector<int>& crop,
               DataLayout layout = DataLayout::WHCN);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   std::vector<int> block_size_;
   std::vector<int> crop_;

@@ -44,7 +44,9 @@ class BatchNorm : public Operation {
  public:
   BatchNorm(Graph* graph, float eps);

+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;
+
  protected:
   float eps_;
 };

@@ -39,7 +39,10 @@ namespace ops {
 class Clip : public Operation {
  public:
   Clip(Graph* graph, float min, float max);
-  protected:
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;
+
+ protected:
   float min_;
   float max_;
 };

@@ -41,6 +41,8 @@ class Concat : public Operation {
  public:
   Concat(Graph* graph, uint32_t axis, int input_cnt);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   uint32_t axis_;
 };

@@ -55,6 +55,8 @@ class Conv1d : public Operation {

   DataLayout KernelDataLayout() { return kernel_layout_; }
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   const uint32_t weights_;
   const PadType padding_;

@@ -83,6 +83,8 @@ class Conv2d : public Operation {

   DataLayout KernelDataLayout() { return kernel_layout_; }
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   const uint32_t weights_;
   const PadType padding_;

@@ -71,6 +71,9 @@ class DeConv2d : public Operation {
            DataLayout kernel_layout = DataLayout::WHIcOc);

   DataLayout KernelDataLayout() { return kernel_layout_; }
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;
+
  protected:
   const uint32_t oc_count_;
   const PadType pad_type_;

@@ -74,6 +74,8 @@ class DeConv1d : public Operation {
            const std::array<uint32_t, 2>& pad, uint32_t group,
            DataLayout input_layout, DataLayout kernel_layout);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   const uint32_t oc_count_;  // output channel count
   const PadType pad_type_;

@@ -50,6 +50,8 @@ class DepthToSpace : public Operation {
   DepthToSpace(Graph* Graph, int block_size,
                DataLayout layout = DataLayout::WHCN);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   int block_size_;
 };

@@ -41,11 +41,13 @@ namespace ops {
 */

 class Dropout : public Operation {
  public:
   Dropout(Graph* graph, float ratio);

- protected:
-  float ratio_;
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;
+
+ protected:
+  float ratio_;
 };

 }  // namespace ops
@@ -66,10 +66,12 @@ namespace ops {
 * FloorDiv(x, y): floor( x / y ). This operation supports broadcasting.
 */

-#define DECLARE_ELEMENTWISE_OP(NAME) \
-  class NAME : public Operation {    \
-   public:                           \
-    NAME(Graph* graph);              \
+#define DECLARE_ELEMENTWISE_OP(NAME)                   \
+  class NAME : public Operation {                      \
+   public:                                             \
+    NAME(Graph* graph);                                \
+    std::shared_ptr<Operation> Clone(                  \
+        std::shared_ptr<Graph>& graph) const override; \
   };

 DECLARE_ELEMENTWISE_OP(Minimum)

@@ -81,8 +83,10 @@ DECLARE_ELEMENTWISE_OP(Pow)
 DECLARE_ELEMENTWISE_OP(FloorDiv)

 class Multiply : public Operation {
  public:
   Multiply(Graph* graph, float scale = 1.0f);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;
 };

 #undef DECLARE_ELEMENTWISE_OP
@@ -44,6 +44,8 @@ class FullyConnected : public Operation {
   FullyConnected(Graph* graph, uint32_t axis);
   FullyConnected(Graph* graph, uint32_t axis, uint32_t weights);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   uint32_t axis_;
   uint32_t weights_;

@@ -39,6 +39,8 @@ class Gather : public Operation {
  public:
   Gather(Graph* Graph, int axis);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   int axis_;
 };

@@ -38,6 +38,9 @@ namespace ops {
 class GatherNd : public Operation {
  public:
   GatherNd(Graph* Graph);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;
+
 };

 }  // namespace ops

@@ -71,6 +71,8 @@ class GroupedConv2d : public Operation {

   DataLayout KernelDataLayout() { return kernel_layout_; }
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   const PadType padding_;
   const std::array<uint32_t, 2> strides_;

@@ -32,6 +32,8 @@ class InstanceNormalization : public Operation {
  public:
   InstanceNormalization(Graph* graph, float eps = 1e-5f);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   float eps_;
 };

@@ -44,6 +44,8 @@ class L2Normalization : public Operation {
  public:
   L2Normalization(Graph* graph, int32_t axis);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   int32_t axis_;
 };

@@ -34,6 +34,8 @@ class LayerNormalization : public Operation {
  public:
   LayerNormalization(Graph* graph, int32_t axis = 0, float eps = 1e-5f);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   int32_t axis_;
   int32_t eps_;

@@ -45,6 +45,8 @@ class LocalResponseNormalization : public Operation {
   LocalResponseNormalization(Graph* graph, uint32_t size, float alpha,
                              float beta, float bias, int32_t axis);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   uint32_t size_;
   float alpha_;

@@ -52,6 +54,8 @@ class LocalResponseNormalization : public Operation {
   float bias_;
   int32_t axis_;
 };
+
+using LRN = LocalResponseNormalization;
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim
@@ -39,10 +39,12 @@ namespace ops {
 * Returns the truth value of x OR y element-wise. This operation supports broadcasting.
 */

-#define DECLARE_LOGICAL_OP(NAME)            \
-  class Logical##NAME : public Operation {  \
-   public:                                  \
-    Logical##NAME(Graph* graph);            \
+#define DECLARE_LOGICAL_OP(NAME)                       \
+  class Logical##NAME : public Operation {             \
+   public:                                             \
+    Logical##NAME(Graph* graph);                       \
+    std::shared_ptr<Operation> Clone(                  \
+        std::shared_ptr<Graph>& graph) const override; \
   };

 DECLARE_LOGICAL_OP(And);
@@ -43,6 +43,8 @@ class LogSoftmax : public Operation {
  public:
   LogSoftmax(Graph* graph, int32_t axis, float beta = 1.f);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   int32_t axis_;
   float beta_;

@@ -45,6 +45,8 @@ class Matmul : public Operation {
   Matmul(Graph* graph, bool transpose_a = false, bool transpose_b = false,
          bool adjoint_a = false, bool adjoint_b = false);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   bool transpose_a_;
   bool transpose_b_;

@@ -52,6 +52,8 @@ class MaxpoolWithArgmax : public Operation {
                     RoundType round_type = RoundType::FLOOR,
                     DataLayout layout = DataLayout::WHCN);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   const PadType padding_;
   const std::array<uint32_t, 2> ksize_;

@@ -47,6 +47,8 @@ class MaxUnpool2d : public Operation {
   MaxUnpool2d(Graph* graph, const std::array<uint32_t, 2>& ksize,
               const std::array<uint32_t, 2>& stride, DataLayout layout = DataLayout::WHCN);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   const std::array<uint32_t, 2> ksize_;
   const std::array<uint32_t, 2> stride_;

@@ -40,13 +40,15 @@ namespace ops {
 */

 class Moments : public Operation {
  public:
   Moments(Graph* graph, const std::vector<int32_t>& axes,
           bool keep_dims = false);

- protected:
-  const std::vector<int32_t> axes_;
-  const bool keep_dims_;
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;
+
+ protected:
+  const std::vector<int32_t> axes_;
+  const bool keep_dims_;
 };

 }  // namespace ops

@@ -40,6 +40,8 @@ class NBG : public Operation {
  public:
   NBG(Graph* graph, const char* binary, size_t input_count, size_t output_count);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

 };

 }  // namespace ops

@@ -42,6 +42,8 @@ class Pad : public Operation {
   Pad(Graph* graph, const std::vector<uint32_t>& front_size,
       const std::vector<uint32_t>& back_size, int32_t const_val);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   std::vector<uint32_t> front_size_;
   std::vector<uint32_t> back_size_;

@@ -59,6 +59,8 @@ class Pool2d : public Operation {
          RoundType round_type = RoundType::FLOOR,
          DataLayout layout = DataLayout::WHCN);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   const PoolType type_;
   const PadType padding_;
@@ -99,6 +99,8 @@ namespace ops {
   public:                                                        \
    Reduce##NAME(Graph* graph, const std::vector<int32_t>& axis,  \
                 bool keep_dims);                                 \
+   std::shared_ptr<Operation>                                    \
+       Clone(std::shared_ptr<Graph>& graph) const override;      \
                                                                  \
   protected:                                                     \
    std::vector<int32_t> axis_;                                   \
@@ -55,10 +55,12 @@ namespace ops {
 * For input tensors x and y, computes x == y elementwise.
 */

-#define DECLARE_RELATIONAL_OP(NAME) \
-  class NAME : public Operation {   \
-   public:                          \
-    NAME(Graph* graph);             \
+#define DECLARE_RELATIONAL_OP(NAME)                    \
+  class NAME : public Operation {                      \
+   public:                                             \
+    NAME(Graph* graph);                                \
+    std::shared_ptr<Operation> Clone(                  \
+        std::shared_ptr<Graph>& graph) const override; \
   };

 DECLARE_RELATIONAL_OP(Greater)
@@ -39,6 +39,8 @@ class Reorg : public Operation {
  public:
   Reorg(Graph* graph, const uint32_t stride);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   uint32_t stride_;
 };

@@ -39,7 +39,9 @@ namespace ops {

 class Reshape : public Operation {
  public:
-  Reshape(Graph* graph, const std::vector<uint32_t>& perm);
+  Reshape(Graph* graph, const std::vector<uint32_t>& size);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   std::vector<uint32_t> size_;

@@ -50,6 +50,8 @@ class Resize : public Operation {
          bool half_pixel_centers, int target_height, int target_width,
          DataLayout layout = DataLayout::WHCN);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   const ResizeType type_;
   const float factor_;

@@ -50,6 +50,8 @@ class Resize1d : public Operation {
          bool half_pixel_centers, int target_size,
          DataLayout layout = DataLayout::WHCN);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   const ResizeType type_;
   const float factor_;

@@ -41,6 +41,8 @@ class Reverse : public Operation {
  public:
   Reverse(Graph* graph, const std::vector<int32_t>& axis);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   const std::vector<int32_t> axis_;
 };

@@ -41,6 +41,8 @@ class ScatterND : public Operation {
  public:
   ScatterND(Graph* graph, const std::vector<uint32_t>& shape);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   const std::vector<uint32_t> shape_;
 };

@@ -39,6 +39,8 @@ namespace ops {
 class Select : public Operation {
  public:
   Select(Graph* graph);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;
 };

 }  // namespace ops
@@ -29,10 +29,12 @@ namespace tim {
 namespace vx {
 namespace ops {

-#define DECLARE_SIMPLE_OP(NAME)   \
-  class NAME : public Operation { \
-   public:                        \
-    NAME(Graph* graph);           \
+#define DECLARE_SIMPLE_OP(NAME)                        \
+  class NAME : public Operation {                      \
+   public:                                             \
+    NAME(Graph* graph);                                \
+    std::shared_ptr<Operation> Clone(                  \
+        std::shared_ptr<Graph>& graph) const override; \
   };

 /**
@@ -45,6 +45,8 @@ class Slice : public Operation {
         const std::vector<int32_t>& start,
         const std::vector<int32_t>& length);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   uint32_t dims_;
   const std::vector<int32_t> start_;

@@ -46,6 +46,8 @@ class Softmax : public Operation {
  public:
   Softmax(Graph* graph, float beta, int32_t axis);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   float beta_;
   int32_t axis_;

@@ -52,6 +52,8 @@ class Space2Batch : public Operation {
               const std::vector<int>& pad,
               DataLayout layout = DataLayout::WHCN);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   std::vector<int> block_size_;
   std::vector<int> pad_;

@@ -43,6 +43,8 @@ class SpaceToDepth : public Operation {
   SpaceToDepth(Graph* graph, std::vector<int> block_size,
                DataLayout layout = DataLayout::WHCN);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   std::vector<int> block_size_;
 };

@@ -44,6 +44,8 @@ class Split : public Operation {
  public:
   Split(Graph* graph, uint32_t axis, std::vector<uint32_t> slices);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   uint32_t axis_;
   std::vector<uint32_t> slices_;

@@ -42,6 +42,8 @@ class Squeeze : public Operation {
  public:
   Squeeze(Graph* graph, std::vector<uint32_t> axis);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   std::vector<uint32_t> axis_;
 };

@@ -40,6 +40,8 @@ class Stack : public Operation {
  public:
   Stack(Graph* graph, uint32_t axis, int input_cnt);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   uint32_t axis_;
 };

@@ -59,6 +59,8 @@ class StridedSlice : public Operation {
                const std::vector<int32_t> stride_dims, int32_t begin_mask,
                int32_t end_mask, int32_t shrink_axis_mask);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   std::vector<int32_t> begin_dims_;
   std::vector<int32_t> end_dims_;

@@ -40,6 +40,9 @@ namespace ops {
 class Tile : public Operation {
  public:
   Tile(Graph* graph, const std::vector<int32_t>& multiples);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;
+
  protected:
   const std::vector<int32_t> multiples_;
 };

@@ -45,6 +45,8 @@ class Transpose : public Operation {
  public:
   Transpose(Graph* graph, const std::vector<uint32_t>& perm);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   std::vector<uint32_t> perm_;
 };

@@ -41,6 +41,8 @@ class Unstack : public Operation {
  public:
   Unstack(Graph* graph, int32_t axis, uint32_t output_num);
+
+  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;

  protected:
   int32_t axis_;
 };
@@ -31,7 +31,6 @@
 #include "ops/elementwise_layout_inference.h"
 #include "ops/activation_layout_inference.h"
 #include "ops/concat_layout_inferene.h"
-#include "ops/reshape_layout_inference.h"
 #include "ops/simple_ops_layout_inference.h"
 #include "ops/pool2d_layout_inference.h"
 #include "ops/softmax_layout_inference.h"

@@ -58,7 +57,7 @@
 #include "ops/logical_layout_inference.h"
 #include "ops/arg_layout_inference.h"
 #include "ops/deconv2d_layout_inference.h"
-#include "ops/nbg_layout_inference.h"
+#include "ops/default_layout_inference.h"

 #include <algorithm>
 #include <deque>
@@ -216,7 +215,6 @@ std::vector<std::shared_ptr<vx::Tensor>> HandleLayoutInfer(
     REGIST_LAYOUT_INFERENCE(VSI_NN_OP_POW, Pow);
     REGIST_LAYOUT_INFERENCE(VSI_NN_OP_MINIMUM, Minimum);
     REGIST_LAYOUT_INFERENCE(VSI_NN_OP_MAXIMUM, Maximum);
-    REGIST_LAYOUT_INFERENCE(VSI_NN_OP_RESHAPE, Reshape);
     REGIST_LAYOUT_INFERENCE(VSI_NN_OP_DATACONVERT, DataConvert);
     REGIST_LAYOUT_INFERENCE(VSI_NN_OP_NEG, Neg);
     REGIST_LAYOUT_INFERENCE(VSI_NN_OP_ABS, Abs);

@@ -233,8 +231,8 @@ std::vector<std::shared_ptr<vx::Tensor>> HandleLayoutInfer(
     REGIST_LAYOUT_INFERENCE(VSI_NN_OP_STACK, Stack);
     REGIST_LAYOUT_INFERENCE(VSI_NN_OP_SPACE2DEPTH, SpaceToDepth);
     REGIST_LAYOUT_INFERENCE(VSI_NN_OP_DEPTH2SPACE, DepthToSpace);
-    REGIST_LAYOUT_INFERENCE(VSI_NN_OP_SPACE2BATCH, SpaceToBatch);
-    REGIST_LAYOUT_INFERENCE(VSI_NN_OP_BATCH2SPACE, BatchToSpace);
+    REGIST_LAYOUT_INFERENCE(VSI_NN_OP_SPACE2BATCH, Space2Batch);
+    REGIST_LAYOUT_INFERENCE(VSI_NN_OP_BATCH2SPACE, Batch2Space);
     REGIST_LAYOUT_INFERENCE(VSI_NN_OP_PAD, Pad);
     REGIST_LAYOUT_INFERENCE(VSI_NN_OP_FCL2, FullyConnected);
     REGIST_LAYOUT_INFERENCE(VSI_NN_OP_RESIZE, Resize);

@@ -249,15 +247,18 @@ std::vector<std::shared_ptr<vx::Tensor>> HandleLayoutInfer(
     REGIST_LAYOUT_INFERENCE(VSI_NN_OP_REVERSE, Reverse);
     REGIST_LAYOUT_INFERENCE(VSI_NN_OP_SLICE, Slice);
     REGIST_LAYOUT_INFERENCE(VSI_NN_OP_SELECT, Select);
-    REGIST_LAYOUT_INFERENCE(VSI_NN_OP_ARGMAX, ArgMax);
-    REGIST_LAYOUT_INFERENCE(VSI_NN_OP_ARGMIN, ArgMin);
+    REGIST_LAYOUT_INFERENCE(VSI_NN_OP_ARGMAX, Arg);
+    REGIST_LAYOUT_INFERENCE(VSI_NN_OP_ARGMIN, Arg);
     REGIST_LAYOUT_INFERENCE(VSI_NN_OP_DECONVOLUTION, DeConv2d);
-    REGIST_LAYOUT_INFERENCE(VSI_NN_OP_NBG, Nbg);
     REGIST_LOGICAL_LAYOUT_INFERENCE(VSI_NN_OP_LOGICAL_OPS);
     REGIST_REDUCE_LAYOUT_INFERENCE(VSI_NN_OP_REDUCE);
-    default:
-      VSILOGW("Op %d: not support layout inference.", op_id);
-      assert(false);
+    // use default layout inference
+    default: {
+      VSILOGW("Op %d: default layout inference pass.", op_id);
+      auto op_infer = std::make_shared<DefaultLayoutInfer>(op, ctx);
+      op_infer->OnInputs(next_tensors);
+      op_infer->OnOutputs(next_tensors);
+    }
   }
   return next_tensors;
 }
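The replaced default case is the behavioral core of this commit: an op with no specialized handler is no longer a hard failure (warning plus assert) but is routed through DefaultLayoutInfer, which clones it into the inferred graph. From the caller's side the transform entry point is unchanged; a hedged usage sketch, assuming the LayoutInference entry point declared in tim/transform/layout_inference.h (variable names illustrative):

#include "tim/transform/layout_inference.h"

// src_graph is a std::shared_ptr<tim::vx::Graph>, ctx a std::shared_ptr<tim::vx::Context>.
auto result = tim::transform::LayoutInference(src_graph, ctx);
auto infer_graph = result.first;   // layout-inferred copy of the graph
auto tensor_map = result.second;   // source tensor -> inferred-graph tensor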
@@ -36,13 +36,12 @@ class AddNLayoutInfer : public OpLayoutInfer {
       const std::shared_ptr<vx::Operation>& op,
       std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
       : OpLayoutInfer(op, context) {}

   void OnInputs(
       std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
     auto required_pv = AlignPermuteVectorForMutilInputs();
-    uint32_t num_inputs = op_->impl()->input_cnt_;
-
-    auto addn =
-        context_->infer_graph_->CreateOperation<vx::ops::AddN>(num_inputs);
+    auto addn = op_->Clone(context_->infer_graph_);

     for (const auto& i_src : op_->impl()->InputsTensor()) {
       (*addn).BindInput(context_->GetMapedTensor(i_src));
@@ -29,9 +29,9 @@
 #include "tim/vx/ops/arg.h"
 namespace tim {
 namespace transform {
-class ArgMaxLayoutInfer : public OpLayoutInfer {
+class ArgLayoutInfer : public OpLayoutInfer {
  public:
-  ArgMaxLayoutInfer(
+  ArgLayoutInfer(
       const std::shared_ptr<vx::Operation> op,
       std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
       : OpLayoutInfer(op, context) {}

@@ -43,40 +43,10 @@ class ArgMaxLayoutInfer : public OpLayoutInfer {
     auto src_input = op_->impl()->InputsTensor()[0];
     auto input_pv = context_->GetPermuteVector(src_input);

-    uint32_t axis = op_->impl()->node()->nn_param.argmax.axis;
-
-    auto argmax =
-        context_->infer_graph_->CreateOperation<vx::ops::ArgMax>(axis);
+    auto arg = op_->Clone(context_->infer_graph_);
     auto infer_out = CreateOutputsTensor(input_pv);
-    (*argmax).BindInput(context_->GetMapedTensor(src_input));
-    (*argmax).BindOutput(infer_out[0]);
-
-    context_->SetPermuteVector(op_->impl()->OutputsTensor()[0], input_pv);
-    next_tensors.push_back(op_->impl()->OutputsTensor()[0]);
-  }
-};
-
-class ArgMinLayoutInfer : public OpLayoutInfer {
- public:
-  ArgMinLayoutInfer(
-      const std::shared_ptr<vx::Operation> op,
-      std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
-      : OpLayoutInfer(op, context) {}
-
-  void OnInputs(
-      std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
-    ReverseInputsPermuteVector();
-    assert(1 == op_->impl()->InputsTensor().size());
-    auto src_input = op_->impl()->InputsTensor()[0];
-    auto input_pv = context_->GetPermuteVector(src_input);
-
-    uint32_t axis = op_->impl()->node()->nn_param.argmin.axis;
-
-    auto argmin =
-        context_->infer_graph_->CreateOperation<vx::ops::ArgMin>(axis);
-    auto infer_out = CreateOutputsTensor(input_pv);
-    (*argmin).BindInput(context_->GetMapedTensor(src_input));
-    (*argmin).BindOutput(infer_out[0]);
+    (*arg).BindInput(context_->GetMapedTensor(src_input));
+    (*arg).BindOutput(infer_out[0]);

     context_->SetPermuteVector(op_->impl()->OutputsTensor()[0], input_pv);
     next_tensors.push_back(op_->impl()->OutputsTensor()[0]);
@@ -31,9 +31,9 @@
 #include "src/tim/vx/operation_private.h"
 namespace tim {
 namespace transform {
-class BatchToSpaceLayoutInfer : public OpLayoutInfer {
+class Batch2SpaceLayoutInfer : public OpLayoutInfer {
  public:
-  BatchToSpaceLayoutInfer(
+  Batch2SpaceLayoutInfer(
       const std::shared_ptr<vx::Operation> op,
       std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
       : OpLayoutInfer(op, context) {}

@@ -32,7 +32,6 @@

 namespace tim {
 namespace transform {
-
 class Conv2dLayoutInfer : public OpLayoutInfer {
  public:
   Conv2dLayoutInfer(
@@ -21,10 +21,15 @@
  * DEALINGS IN THE SOFTWARE.
  *
  *****************************************************************************/
-#ifndef TIM_LAYOUT_INFER_RESHAPE_LAYOUT_INFERENCE_H_
-#define TIM_LAYOUT_INFER_RESHAPE_LAYOUT_INFERENCE_H_
+#ifndef TIM_LAYOUT_INFER_DEFAULT_LAYOUT_INFERENCE_H_
+#define TIM_LAYOUT_INFER_DEFAULT_LAYOUT_INFERENCE_H_

-#include "tim/vx/ops/reshape.h"
+#include "tim/vx/ops/nbg.h"
+#include "tim/vx/ops/transpose.h"
+#include "tim/vx/ops/batchnorm.h"
+#include "tim/vx/ops/clip.h"
+

 #include "src/tim/transform/ops/op_layout_inference.h"
 #include "src/tim/transform/permute_vector.h"

@@ -32,30 +37,30 @@

 namespace tim {
 namespace transform {
-class ReshapeLayoutInfer : public OpLayoutInfer {
+
+class DefaultLayoutInfer : public OpLayoutInfer {
  public:
-  ReshapeLayoutInfer(
+  DefaultLayoutInfer(
       const std::shared_ptr<vx::Operation> op,
       std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
       : OpLayoutInfer(op, context) {}

-  // reverse any applied permute on it's input tensor
   void OnInputs(
       std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
     ReverseInputsPermuteVector();
-    std::vector<uint32_t> perm;
-    for (uint32_t i = 0; i < op_->impl()->node()->nn_param.reshape.dim_num;
-         i++) {
-      perm.push_back(op_->impl()->node()->nn_param.reshape.size[i]);
-    }
-    auto reshape =
-        context_->infer_graph_->CreateOperation<vx::ops::Reshape>(perm);
-    (*reshape).BindInput(
-        context_->GetMapedTensor(op_->impl()->InputsTensor()[0]));
+
+    auto cloned_op = op_->Clone(context_->infer_graph_);
+
+    for (const auto& i_src : op_->impl()->InputsTensor()) {
+      (*cloned_op).BindInput(context_->GetMapedTensor(i_src));
+    }
     auto required_pv =
         MakeShared(op_->impl()->OutputsTensor()[0]->GetShape().size());
     auto out_infer = CreateOutputsTensor(required_pv);
-    (*reshape).BindOutput(out_infer[0]);
+
+    // TODO: bind all output
+    (*cloned_op).BindOutputs(out_infer);
     context_->SetPermuteVector(op_->impl()->OutputsTensor()[0], required_pv);
     next_tensors.push_back(op_->impl()->OutputsTensor()[0]);
   }
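The fallback above only works for ops that implement Clone faithfully. A rough sketch of the contract an op author satisfies so the default path can replicate the op; MyOp, param_, and the reuse of VSI_NN_OP_RELU as a stand-in op code are hypothetical:

class MyOp : public Operation {
 public:
  MyOp(Graph* graph, int param)
      : Operation(graph, VSI_NN_OP_RELU /* stand-in op code */), param_(param) {}

  std::shared_ptr<Operation> Clone(
      std::shared_ptr<Graph>& graph) const override {
    // Re-create this op with identical attributes inside `graph`;
    // DefaultLayoutInfer then rebinds the mapped input/output tensors.
    return graph->CreateOperation<MyOp>(param_);
  }

 protected:
  int param_;
};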
@@ -41,7 +41,7 @@ class FullyConnectedLayoutInfer : public OpLayoutInfer {

   void OnInputs(
       std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {

     auto input_tensors = op_->impl()->InputsTensor();
     for (const auto& in : input_tensors) {
       if (in->IsConstTensor()) {
@@ -1,68 +0,0 @@
-/****************************************************************************
-*
-* Copyright (c) 2020 Vivante Corporation
-*
-* Permission is hereby granted, free of charge, to any person obtaining a
-* copy of this software and associated documentation files (the "Software"),
-* to deal in the Software without restriction, including without limitation
-* the rights to use, copy, modify, merge, publish, distribute, sublicense,
-* and/or sell copies of the Software, and to permit persons to whom the
-* Software is furnished to do so, subject to the following conditions:
-*
-* The above copyright notice and this permission notice shall be included in
-* all copies or substantial portions of the Software.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-* DEALINGS IN THE SOFTWARE.
-*
-*****************************************************************************/
-#ifndef TIM_LAYOUT_INFER_DEFAULT_LAYOUT_INFERENCE_H_
-#define TIM_LAYOUT_INFER_DEFAULT_LAYOUT_INFERENCE_H_
-
-#include "tim/vx/ops/nbg.h"
-
-#include "src/tim/transform/ops/op_layout_inference.h"
-#include "src/tim/vx/operation_private.h"
-
-namespace tim {
-namespace transform {
-class NbgLayoutInfer : public OpLayoutInfer {
- public:
-  NbgLayoutInfer(
-      const std::shared_ptr<vx::Operation> op,
-      std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
-      : OpLayoutInfer(op, context) {}
-  // reverse any applied permute on it's input tensor
-  void OnInputs(
-      std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
-    ReverseInputsPermuteVector();
-
-    auto url = op_->impl()->node()->nn_param.nbg.url;
-    uint32_t input_count = op_->impl()->input_cnt_;
-    uint32_t output_count = op_->impl()->output_cnt_;
-    auto nbg = context_->infer_graph_->CreateOperation<vx::ops::NBG>(
-        url, input_count, output_count);
-
-    for (auto i_src : op_->impl()->InputsTensor()) {
-      (*nbg).BindInput(context_->GetMapedTensor(i_src));
-      auto input_pv = MakeShared(i_src->GetShape().size());
-      context_->SetPermuteVector(i_src, input_pv);
-    }
-    auto infer_out = CreateOutputsTensor(MakeShared(1));
-    (*nbg).BindOutputs(infer_out);
-    for (const auto& out : op_->impl()->OutputsTensor()) {
-      context_->SetPermuteVector(out, MakeShared(out->GetShape().size()));
-      next_tensors.push_back(out);
-    }
-  }
-};
-
-}  // namespace transform
-}  // namespace tim
-
-#endif
@@ -31,7 +31,6 @@

 namespace tim {
 namespace transform {
-
 class Pool2dLayoutInfer : public OpLayoutInfer {
  public:
   Pool2dLayoutInfer(

@@ -32,7 +32,6 @@

 namespace tim {
 namespace transform {
-
 class SoftmaxLayoutInfer : public OpLayoutInfer {
  public:
   SoftmaxLayoutInfer(

@@ -31,9 +31,9 @@
 #include "src/tim/vx/operation_private.h"
 namespace tim {
 namespace transform {
-class SpaceToBatchLayoutInfer : public OpLayoutInfer {
+class Space2BatchLayoutInfer : public OpLayoutInfer {
  public:
-  SpaceToBatchLayoutInfer(
+  Space2BatchLayoutInfer(
       const std::shared_ptr<vx::Operation> op,
       std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
       : OpLayoutInfer(op, context) {}

@@ -32,7 +32,6 @@

 namespace tim {
 namespace transform {
-
 class StackLayoutInfer : public OpLayoutInfer {
  public:
   StackLayoutInfer(
@@ -30,8 +30,12 @@ namespace tim {
 namespace vx {
 namespace ops {

-#define DEFINE_NO_PARAMETER_ACTIVATION(NAME, VSI_OP_CODE) \
-  NAME::NAME(Graph* graph) : Operation(graph, VSI_OP_CODE) {}
+#define DEFINE_NO_PARAMETER_ACTIVATION(NAME, VSI_OP_CODE)                \
+  NAME::NAME(Graph* graph) : Operation(graph, VSI_OP_CODE) {}            \
+  std::shared_ptr<Operation> NAME::Clone(std::shared_ptr<Graph>& graph)  \
+      const {                                                            \
+    return graph->CreateOperation<NAME>();                               \
+  }

 DEFINE_NO_PARAMETER_ACTIVATION(Relu, VSI_NN_OP_RELU)
 DEFINE_NO_PARAMETER_ACTIVATION(Relu1, VSI_NN_OP_RELU1)
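Hand-expanding the updated macro for a single op makes the generated code concrete. For DEFINE_NO_PARAMETER_ACTIVATION(Relu, VSI_NN_OP_RELU) the preprocessor emits, modulo whitespace:

Relu::Relu(Graph* graph) : Operation(graph, VSI_NN_OP_RELU) {}
std::shared_ptr<Operation> Relu::Clone(std::shared_ptr<Graph>& graph) const {
  return graph->CreateOperation<Relu>();
}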
@@ -50,27 +54,49 @@ HardSwish::HardSwish(Graph* graph) : Operation(graph, VSI_NN_OP_SWISH) {
   this->impl()->node()->nn_param.swish.beta = 1.0f;
 }

+std::shared_ptr<Operation> HardSwish::Clone(
+    std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<HardSwish>();
+}
+
 Prelu::Prelu(Graph* graph, int axis)
     : Operation(graph, VSI_NN_OP_PRELU), axis_(axis) {
   this->impl()->node()->nn_param.prelu.axis = axis_;
 }

+std::shared_ptr<Operation> Prelu::Clone(std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<Prelu>(this->axis_);
+}
+
 Tanh::Tanh(Graph* graph) : Operation(graph, VSI_NN_OP_TANH) {
   this->impl()->node()->nn_param.tanh.scale_a = 1.0;
   this->impl()->node()->nn_param.tanh.scale_b = 1.0;
 }

+std::shared_ptr<Operation> Tanh::Clone(std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<Tanh>();
+}
+
 LeakyRelu::LeakyRelu(Graph* graph, float alpha)
     : Operation(graph, VSI_NN_OP_LEAKY_RELU), alpha_(alpha) {
   this->impl()->node()->nn_param.activation.leaky_ratio = alpha_;
 }

+std::shared_ptr<Operation> LeakyRelu::Clone(
+    std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<LeakyRelu>(this->alpha_);
+}
+
 Linear::Linear(Graph* graph, float a, float b)
     : Operation(graph, VSI_NN_OP_LINEAR), a_(a), b_(b) {
   this->impl()->node()->nn_param.linear.a = a_;
   this->impl()->node()->nn_param.linear.b = b_;
 }

+std::shared_ptr<Operation> Linear::Clone(std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<Linear>(this->a_, this->b_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim
@@ -33,6 +33,10 @@ namespace ops {
 AddN::AddN(Graph* graph, uint32_t num_inputs)
     : Operation(graph, VSI_NN_OP_ADDN, num_inputs, 1) {}

+std::shared_ptr<Operation> AddN::Clone(std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<AddN>(this->impl_->input_cnt_);
+};
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -31,10 +31,14 @@ namespace tim {
 namespace vx {
 namespace ops {

-#define DEFINE_ARG_OP(NAME, VSI_OP_TYPE, OP_PARAM)                    \
-  Arg##NAME::Arg##NAME(Graph* graph, int32_t axis)                    \
-      : Operation(graph, VSI_NN_OP_ARG##VSI_OP_TYPE), axis_(axis) {   \
-    this->impl()->node()->nn_param.arg##OP_PARAM.axis = axis_;        \
-  }
+#define DEFINE_ARG_OP(NAME, VSI_OP_TYPE, OP_PARAM)                           \
+  Arg##NAME::Arg##NAME(Graph* graph, int32_t axis)                           \
+      : Operation(graph, VSI_NN_OP_ARG##VSI_OP_TYPE), axis_(axis) {          \
+    this->impl()->node()->nn_param.arg##OP_PARAM.axis = axis_;               \
+  }                                                                          \
+  std::shared_ptr<Operation> Arg##NAME::Clone(std::shared_ptr<Graph>& graph) \
+      const {                                                                \
+    return graph->CreateOperation<Arg##NAME>(this->axis_);                   \
+  }

 DEFINE_ARG_OP(Max, MAX, max);

@@ -42,6 +42,13 @@ Batch2Space::Batch2Space(Graph* graph, const std::vector<int>& block_size,
     this->impl()->node()->nn_param.batch2space.crop[i] = crop_[i];
   }
 }

+std::shared_ptr<Operation> Batch2Space::Clone(
+    std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<Batch2Space>(this->block_size_, this->crop_,
+                                             this->impl_->layout_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim
@@ -23,21 +23,24 @@
 *****************************************************************************/
 #include "tim/vx/ops/batchnorm.h"

-#include "vsi_nn_pub.h"
-
 #include "operation_private.h"
+#include "vsi_nn_pub.h"

 namespace tim {
 namespace vx {
 namespace ops {

 BatchNorm::BatchNorm(Graph* graph, float eps)
-    : Operation(graph, VSI_NN_OP_BATCH_NORM),
-      eps_(eps) {
+    : Operation(graph, VSI_NN_OP_BATCH_NORM), eps_(eps) {
   this->impl()->node()->nn_param.batch_norm.eps = eps_;
 }

+std::shared_ptr<Operation> BatchNorm::Clone(
+    std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<BatchNorm>(
+      this->impl_->node_->nn_param.batch_norm.eps);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -40,6 +40,11 @@ Clip::Clip(Graph* graph, float min, float max)
   this->impl()->node()->nn_param.clip.max = max_;
 }

+std::shared_ptr<Operation> Clip::Clone(std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<Clip>(this->impl_->node()->nn_param.clip.min,
+                                      this->impl_->node_->nn_param.clip.max);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -35,6 +35,10 @@ Concat::Concat(Graph* graph, uint32_t axis, int input_cnt)
   this->impl()->node()->nn_param.concat.axis = axis_;
 }

+std::shared_ptr<Operation> Concat::Clone(std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<Concat>(this->axis_, this->impl_->input_cnt_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -72,6 +72,13 @@ Conv1d::Conv1d(Graph* graph, int32_t weights, PadType padding,
   this->impl()->node()->nn_param.conv1d.multiplier = multiplier_;
 }

+std::shared_ptr<Operation> Conv1d::Clone(std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<Conv1d>(
+      this->weights_, this->padding_, this->ksize_, this->stride_,
+      this->dilation_, this->pad_, this->multiplier_, this->impl_->layout_,
+      this->kernel_layout_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim
@@ -81,6 +81,13 @@ Conv2d::Conv2d(Graph* graph, int32_t weights, PadType padding,
   this->impl()->node()->nn_param.conv2d.multiplier = multiplier_;
 }

+std::shared_ptr<Operation> Conv2d::Clone(std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<Conv2d>(
+      this->weights_, this->padding_, this->ksize_, this->stride_,
+      this->dilation_, this->pad_, this->multiplier_, this->impl_->layout_,
+      this->kernel_layout_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -77,6 +77,14 @@ DeConv2d::DeConv2d(Graph* graph, int32_t oc_count, PadType pad_type,
   this->impl()->node()->nn_param.deconv.pad[3] = pad_[3];
 }

+std::shared_ptr<Operation> DeConv2d::Clone(
+    std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<DeConv2d>(
+      this->oc_count_, this->pad_type_, this->ksize_, this->stride_,
+      this->output_padding_, this->pad_, this->group_, this->impl_->layout_,
+      this->kernel_layout_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -85,6 +85,12 @@ DeConv1d::DeConv1d(Graph* graph, PadType pad_type,
   this->impl()->node()->nn_param.deconvolution1d.pad[1] = pad_[1];
 }

+std::shared_ptr<Operation> DeConv1d::Clone(
+    std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<DeConv1d>(
+      this->pad_type_, this->stride_, this->output_padding_, this->pad_,
+      this->group_, this->impl_->layout_, this->kernel_layout_);
+}
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -35,6 +35,13 @@ DepthToSpace::DepthToSpace(Graph* graph, int block_size, DataLayout layout)
       block_size_(block_size) {
   this->impl()->node()->nn_param.depth2space.block_size = block_size_;
 }

+std::shared_ptr<Operation> DepthToSpace::Clone(
+    std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<DepthToSpace>(this->block_size_,
+                                              this->impl_->layout_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -38,6 +38,10 @@ Dropout::Dropout(Graph* graph, float ratio)
   this->impl()->node()->nn_param.dropout.ratio = ratio_;
 }

+std::shared_ptr<Operation> Dropout::Clone(std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<Dropout>(this->ratio_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim
@@ -30,8 +30,12 @@ namespace tim {
 namespace vx {
 namespace ops {

-#define DEFINE_ELEMENTWISE_OP(NAME, VSI_OP_CODE) \
-  NAME::NAME(Graph* graph) : Operation(graph, VSI_OP_CODE, 2, 1) {}
+#define DEFINE_ELEMENTWISE_OP(NAME, VSI_OP_CODE)                         \
+  NAME::NAME(Graph* graph) : Operation(graph, VSI_OP_CODE, 2, 1) {}      \
+  std::shared_ptr<Operation> NAME::Clone(std::shared_ptr<Graph>& graph)  \
+      const {                                                            \
+    return graph->CreateOperation<NAME>();                               \
+  }

 DEFINE_ELEMENTWISE_OP(Minimum, VSI_NN_OP_MINIMUM)
 DEFINE_ELEMENTWISE_OP(Maximum, VSI_NN_OP_MAXIMUM)

@@ -48,6 +52,12 @@ Multiply::Multiply(Graph* graph, float scale)
   this->impl()->node()->nn_param.multiply.scale = scale;
 }

+std::shared_ptr<Operation> Multiply::Clone(
+    std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<Multiply>(
+      this->impl_->node_->nn_param.multiply.scale);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -35,11 +35,16 @@ FullyConnected::FullyConnected(Graph* graph, uint32_t axis)
 }

 FullyConnected::FullyConnected(Graph* graph, uint32_t axis, uint32_t weights)
-    : Operation(graph, VSI_NN_OP_FCL2) {
+    : Operation(graph, VSI_NN_OP_FCL2), axis_(axis), weights_(weights) {
   this->impl()->node()->nn_param.fcl.axis = axis;
   this->impl()->node()->nn_param.fcl.weights = weights;
 }

+std::shared_ptr<Operation> FullyConnected::Clone(
+    std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<FullyConnected>(this->axis_, this->weights_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim
@@ -34,6 +34,11 @@ Gather::Gather(Graph* graph, int axis)
     : Operation(graph, VSI_NN_OP_GATHER), axis_(axis) {
   this->impl()->node()->nn_param.gather.axis = axis_;
 }

+std::shared_ptr<Operation> Gather::Clone(std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<Gather>(this->axis_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -31,6 +31,11 @@ namespace vx {
 namespace ops {

 GatherNd::GatherNd(Graph* graph) : Operation(graph, VSI_NN_OP_GATHER_ND) {}

+std::shared_ptr<Operation> GatherNd::Clone(std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<GatherNd>();
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -65,6 +65,13 @@ GroupedConv2d::GroupedConv2d(Graph* graph,
   this->impl()->node()->nn_param.conv2d.dilation[1] = dilation_[1];
 }

+std::shared_ptr<Operation> GroupedConv2d::Clone(
+    std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<GroupedConv2d>(
+      this->pad_, this->strides_, this->dilation_, this->group_number_,
+      this->impl_->layout_, this->kernel_layout_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -34,6 +34,11 @@ InstanceNormalization::InstanceNormalization(Graph* graph, float eps)
   this->impl()->node()->nn_param.instancenorm.eps = eps_;
 }

+std::shared_ptr<Operation> InstanceNormalization::Clone(
+    std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<InstanceNormalization>(this->eps_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim
@@ -34,6 +34,11 @@ L2Normalization::L2Normalization(Graph* graph, int32_t axis)
   this->impl()->node()->nn_param.l2_normalize.axis = axis_;
 }

+std::shared_ptr<Operation> L2Normalization::Clone(
+    std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<L2Normalization>(this->axis_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -40,6 +40,11 @@ LayerNormalization::LayerNormalization(Graph* graph, int32_t axis, float eps)
   this->impl()->node()->nn_param.instancenorm.eps = eps_;
 }

+std::shared_ptr<Operation> LayerNormalization::Clone(
+    std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<LayerNormalization>(this->axis_, this->eps_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -47,6 +47,13 @@ LocalResponseNormalization::LocalResponseNormalization(Graph* graph,
   this->impl()->node()->nn_param.lrn.type =
       VX_CONVOLUTIONAL_NETWORK_NORM_ACROSS_MAPS;
 }

+std::shared_ptr<Operation> LocalResponseNormalization::Clone(
+    std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<LocalResponseNormalization>(
+      this->size_, this->alpha_, this->beta_, this->bias_, this->axis_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -35,6 +35,10 @@ namespace ops {
       : Operation(graph, VSI_NN_OP_LOGICAL_OPS) {            \
     this->impl()->node()->nn_param.relational_ops.op =       \
         VSI_NN_LOGICAL_##VSI_OP_CODE;                        \
   }                                                          \
+  std::shared_ptr<Operation> Logical##NAME::Clone(           \
+      std::shared_ptr<Graph>& graph) const {                 \
+    return graph->CreateOperation<Logical##NAME>();          \
+  }

 DEFINE_LOGICAL_OP(And, AND);
@@ -36,6 +36,11 @@ LogSoftmax::LogSoftmax(Graph* graph, int32_t axis, float beta)
   this->impl()->node()->nn_param.log_softmax.axis = axis_;
 }

+std::shared_ptr<Operation> LogSoftmax::Clone(
+    std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<LogSoftmax>(this->axis_, this->beta_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -41,6 +41,11 @@ Matmul::Matmul(Graph* graph, bool transpose_a, bool transpose_b,
   this->impl()->node()->nn_param.matrixmul.adjoint[1] = ToVxBool(adjoint_b_);
 }

+std::shared_ptr<Operation> Matmul::Clone(std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<Matmul>(this->transpose_a_, this->transpose_b_,
+                                        this->adjoint_a_, this->adjoint_b_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -52,6 +52,13 @@ MaxpoolWithArgmax::MaxpoolWithArgmax(Graph* graph, PadType padding,
   this->SetRoundingPolicy(OverflowPolicy::SATURATE, RoundingPolicy::RTNE, round_type_);
 }

+std::shared_ptr<Operation> MaxpoolWithArgmax::Clone(
+    std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<MaxpoolWithArgmax>(
+      this->padding_, this->ksize_, this->stride_, this->round_type_,
+      this->impl_->layout_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -41,6 +41,12 @@ MaxUnpool2d::MaxUnpool2d(Graph* graph, const std::array<uint32_t, 2>& ksize,
   this->impl()->node()->nn_param.upsample.size[1] = ksize_[1];
 }

+std::shared_ptr<Operation> MaxUnpool2d::Clone(
+    std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<MaxUnpool2d>(this->ksize_, this->stride_,
+                                             this->impl_->layout_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim
@@ -30,12 +30,17 @@
 namespace tim {
 namespace vx {
 namespace ops {
 Moments::Moments(Graph* graph, const std::vector<int32_t>& axes, bool keep_dims)
     : Operation(graph, VSI_NN_OP_MOMENTS), axes_(axes), keep_dims_(keep_dims) {
   this->impl()->node()->nn_param.moments.axis = axes_.data();
   this->impl()->node()->nn_param.moments.axis_num = axes_.size();
   this->impl()->node()->nn_param.moments.keep_dim = ToVxBool(keep_dims_);
 }

+std::shared_ptr<Operation> Moments::Clone(std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<Moments>(this->axes_, this->keep_dims_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -29,10 +29,19 @@
 namespace tim {
 namespace vx {
 namespace ops {
-NBG::NBG(Graph* graph, const char* binary, size_t input_count, size_t output_count) : Operation(graph, VSI_NN_OP_NBG, input_count, output_count) {
-  this->impl()->node()->nn_param.nbg.url = binary;
-  this->impl()->node()->nn_param.nbg.type = VSI_NN_NBG_POINTER;
-}
+NBG::NBG(Graph* graph, const char* binary, size_t input_count,
+         size_t output_count)
+    : Operation(graph, VSI_NN_OP_NBG, input_count, output_count) {
+  this->impl()->node()->nn_param.nbg.url = binary;
+  this->impl()->node()->nn_param.nbg.type = VSI_NN_NBG_POINTER;
+}
+
+std::shared_ptr<Operation> NBG::Clone(std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<NBG>(this->impl_->node_->nn_param.nbg.url,
+                                     this->impl_->input_cnt_,
+                                     this->impl_->output_cnt_);
+}

 }  // namespace ops
 }  // namespace vx
 }  // namespace tim
@@ -41,6 +41,11 @@ Pad::Pad(Graph* graph, const std::vector<uint32_t>& front_size,
   this->impl()->node()->nn_param.pad.const_val = const_val_;
   this->impl()->node()->nn_param.pad.mode = VSI_NN_PAD_MODE_CONSTANT;
 }

+std::shared_ptr<Operation> Pad::Clone(std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<Pad>(this->front_size_, this->back_size_, this->const_val_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -75,6 +75,12 @@ Pool2d::Pool2d(Graph* graph, PoolType type,
   this->SetRoundingPolicy(OverflowPolicy::SATURATE, RoundingPolicy::RTNE, round_type_);
 }

+std::shared_ptr<Operation> Pool2d::Clone(std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<Pool2d>(this->type_, this->pad_, this->ksize_,
+                                        this->stride_, this->round_type_,
+                                        this->impl_->layout_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -40,6 +40,11 @@ namespace ops {
     this->impl()->node()->nn_param.reduce.axis = axis_.data();      \
     this->impl()->node()->nn_param.reduce.axis_num = axis_.size();  \
     this->impl()->node()->nn_param.reduce.keep_dim = keep_dims_;    \
   }                                                                 \
+  std::shared_ptr<Operation> Reduce##NAME::Clone(                   \
+      std::shared_ptr<Graph>& graph) const {                        \
+    return graph->CreateOperation<Reduce##NAME>(this->axis_,        \
+                                                this->keep_dims_);  \
+  }

 DEFINE_REDUCE_OP(Min, VSI_NN_REDUCE_MIN);
@@ -30,9 +30,14 @@ namespace tim {
 namespace vx {
 namespace ops {

-#define DEFINE_RELATIONAL_OP(NAME, VSI_OP_CODE)                                  \
-  NAME::NAME(Graph* graph) : Operation(graph, VSI_NN_OP_RELATIONAL_OPS, 2, 1) {  \
-    this->impl()->node()->nn_param.relational_ops.op = VSI_OP_CODE;              \
+#define DEFINE_RELATIONAL_OP(NAME, VSI_OP_CODE)                          \
+  NAME::NAME(Graph* graph)                                               \
+      : Operation(graph, VSI_NN_OP_RELATIONAL_OPS, 2, 1) {               \
+    this->impl()->node()->nn_param.relational_ops.op = VSI_OP_CODE;      \
+  }                                                                      \
+  std::shared_ptr<Operation> NAME::Clone(std::shared_ptr<Graph>& graph)  \
+      const {                                                            \
+    return graph->CreateOperation<NAME>();                               \
+  }

 DEFINE_RELATIONAL_OP(Greater, VSI_NN_RELATIONAL_OPS_GREAT)

@@ -35,6 +35,10 @@ Reorg::Reorg(Graph* graph, const uint32_t stride)
   this->impl()->node()->nn_param.reorg.stride = stride_;
 }

+std::shared_ptr<Operation> Reorg::Clone(std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<Reorg>(this->stride_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim

@@ -36,6 +36,11 @@ Reshape::Reshape(Graph* graph, const std::vector<uint32_t>& size)
   this->impl()->node()->nn_param.reshape.dim_num = size_.size();
 }

+std::shared_ptr<Operation> Reshape::Clone(
+    std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<Reshape>(this->size_);
+}
+
 }  // namespace ops
 }  // namespace vx
 }  // namespace tim
Some files were not shown because too many files have changed in this diff.