Support layout inference for Operations

Signed-off-by: yuenan.li <yuenan.li@verisilicon.com>
This commit is contained in:
yuenan.li 2021-05-31 10:00:38 +08:00 committed by Kainan Cha
parent ebad62ab02
commit 1f08618403
17 changed files with 669 additions and 30 deletions

View File

@ -40,13 +40,17 @@ namespace ops {
* This operation is sometimes called "deconvolution" after Deconvolutional Networks, * This operation is sometimes called "deconvolution" after Deconvolutional Networks,
* but is actually the transpose (gradient) of Conv2D rather than an actual deconvolution. * but is actually the transpose (gradient) of Conv2D rather than an actual deconvolution.
* *
* - weights : the channel number for weight tensor. * - oc_count_ : the out channel count for weight tensor.
* - pad_type : SAME, VALID or AUTO.
* - ksize : the height and width for weight tensor. * - ksize : the height and width for weight tensor.
* - padding : AUTO, VALID or SAME. * - padding : AUTO, VALID or SAME.
* - pad : pad value for each spatial axis. * - pad : pad value for each spatial axis.
* - stride : stride along each spatial axis. * - stride : stride along each spatial axis.
* - output_padding : specifying the amount of padding along the height and width of * - output_padding : specifying the amount of padding along the height and width of
* the output tensor. * the output tensor.
* - group : the feature count of each group.
* - input_layout : Layout for input, WHCN by default.
* - kernel_layout: Layout for kernel, WHIO by default.
*/ */
class DeConv2d : public Operation { class DeConv2d : public Operation {
@ -54,22 +58,28 @@ class DeConv2d : public Operation {
DeConv2d(Graph* graph, int32_t oc_count_, PadType pad_type, DeConv2d(Graph* graph, int32_t oc_count_, PadType pad_type,
const std::array<uint32_t, 2>& ksize, const std::array<uint32_t, 2>& ksize,
const std::array<uint32_t, 2>& stride, const std::array<uint32_t, 2>& stride,
const std::array<uint32_t, 2>& output_padding); const std::array<uint32_t, 2>& output_padding,
DataLayout input_layout = DataLayout::WHCN,
DataLayout kernel_layout = DataLayout::WHIcOc);
DeConv2d(Graph* graph, int32_t oc_count_, PadType pad_type, DeConv2d(Graph* graph, int32_t oc_count_, PadType pad_type,
const std::array<uint32_t, 2>& ksize, const std::array<uint32_t, 2>& ksize,
const std::array<uint32_t, 2>& stride, const std::array<uint32_t, 2>& stride,
const std::array<uint32_t, 2>& output_padding, const std::array<uint32_t, 2>& output_padding,
const std::array<uint32_t, 4>& pad, const std::array<uint32_t, 4>& pad,
const uint32_t group = 1); const uint32_t group = 1,
DataLayout input_layout = DataLayout::WHCN,
DataLayout kernel_layout = DataLayout::WHIcOc);
DataLayout KernelDataLayout() { return kernel_layout_; }
protected: protected:
const uint32_t oc_count_; // output channel count const uint32_t oc_count_;
const PadType pad_type_; const PadType pad_type_;
const std::array<uint32_t, 2> ksize_; const std::array<uint32_t, 2> ksize_;
const std::array<uint32_t, 2> stride_; const std::array<uint32_t, 2> stride_;
const std::array<uint32_t, 2> output_padding_; const std::array<uint32_t, 2> output_padding_;
const std::array<uint32_t, 4> pad_; const std::array<uint32_t, 4> pad_;
const uint32_t group_; const uint32_t group_;
const DataLayout kernel_layout_;
}; };
} // namespace ops } // namespace ops

View File

@ -39,11 +39,10 @@ namespace ops {
class Reverse : public Operation { class Reverse : public Operation {
public: public:
Reverse(Graph* graph, int32_t* axis, uint32_t axis_num); Reverse(Graph* graph, const std::vector<int32_t>& axis);
protected: protected:
int32_t* axis_; const std::vector<int32_t> axis_;
uint32_t axis_num_;
}; };
} // namespace ops } // namespace ops

View File

@ -37,7 +37,7 @@ namespace ops {
* Splits a tensor along a given axis into num_splits subtensors. * Splits a tensor along a given axis into num_splits subtensors.
* *
* - axis : the axis along which to split. * - axis : the axis along which to split.
* - slices : ndicating the number of splits along given axis. * - slices : indicating the number of splits along given axis.
*/ */
class Split : public Operation { class Split : public Operation {

View File

@ -50,6 +50,14 @@
#include "ops/lrn_layout_inference.h" #include "ops/lrn_layout_inference.h"
#include "ops/l2normalization_layout_inference.h" #include "ops/l2normalization_layout_inference.h"
#include "ops/addn_layout_inference.h" #include "ops/addn_layout_inference.h"
#include "ops/gather_layout_inference.h"
#include "ops/gather_nd_layout_inference.h"
#include "ops/reverse_layout_inference.h"
#include "ops/slice_layout_inference.h"
#include "ops/select_layout_inference.h"
#include "ops/logical_layout_inference.h"
#include "ops/arg_layout_inference.h"
#include "ops/deconv2d_layout_inference.h"
#include <algorithm> #include <algorithm>
#include <deque> #include <deque>
@ -149,7 +157,7 @@ void LayoutInferContext::UpdateGraphInputMap(const std::shared_ptr<vx::Tensor>&
break; \ break; \
} \ } \
#define REGIST_REDUCE_LAYOUT_INFERENCE(op_idx) \ #define REGIST_REDUCE_LAYOUT_INFERENCE(op_idx) \
case op_idx: { \ case op_idx: { \
auto reduce_type = op->impl()->node()->nn_param.reduce.type; \ auto reduce_type = op->impl()->node()->nn_param.reduce.type; \
switch (reduce_type) { \ switch (reduce_type) { \
@ -166,6 +174,20 @@ void LayoutInferContext::UpdateGraphInputMap(const std::shared_ptr<vx::Tensor>&
break; \ break; \
} \ } \
#define REGIST_LOGICAL_LAYOUT_INFERENCE(op_idx) \
case op_idx: { \
auto logical_type = op->impl()->node()->nn_param.relational_ops.op; \
switch (logical_type) \
{ \
REGIST_LAYOUT_INFERENCE(VSI_NN_LOGICAL_AND, LogicalAnd); \
REGIST_LAYOUT_INFERENCE(VSI_NN_LOGICAL_OR, LogicalOr); \
default: \
VSILOGW("Op %d: Default layout inference pass for logical.", logical_type);\
assert(false); \
} \
break; \
} \
std::vector<std::shared_ptr<vx::Tensor>> HandleLayoutInfer( std::vector<std::shared_ptr<vx::Tensor>> HandleLayoutInfer(
std::shared_ptr<layout_inference_impl::LayoutInferContext>& ctx, std::shared_ptr<layout_inference_impl::LayoutInferContext>& ctx,
const std::shared_ptr<vx::Operation>& op) { const std::shared_ptr<vx::Operation>& op) {
@ -213,7 +235,6 @@ std::vector<std::shared_ptr<vx::Tensor>> HandleLayoutInfer(
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_SPACE2BATCH, SpaceToBatch); REGIST_LAYOUT_INFERENCE(VSI_NN_OP_SPACE2BATCH, SpaceToBatch);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_BATCH2SPACE, BatchToSpace); REGIST_LAYOUT_INFERENCE(VSI_NN_OP_BATCH2SPACE, BatchToSpace);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_PAD, Pad); REGIST_LAYOUT_INFERENCE(VSI_NN_OP_PAD, Pad);
REGIST_REDUCE_LAYOUT_INFERENCE(VSI_NN_OP_REDUCE);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_FCL2, FullyConnected); REGIST_LAYOUT_INFERENCE(VSI_NN_OP_FCL2, FullyConnected);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_RESIZE, Resize); REGIST_LAYOUT_INFERENCE(VSI_NN_OP_RESIZE, Resize);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_SPLIT, Split); REGIST_LAYOUT_INFERENCE(VSI_NN_OP_SPLIT, Split);
@ -222,6 +243,16 @@ std::vector<std::shared_ptr<vx::Tensor>> HandleLayoutInfer(
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_L2_NORMALIZE, L2Normalization); REGIST_LAYOUT_INFERENCE(VSI_NN_OP_L2_NORMALIZE, L2Normalization);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_ADDN, AddN); REGIST_LAYOUT_INFERENCE(VSI_NN_OP_ADDN, AddN);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_PRELU, PRelu); REGIST_LAYOUT_INFERENCE(VSI_NN_OP_PRELU, PRelu);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_GATHER, Gather);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_GATHER_ND, GatherNd);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_REVERSE, Reverse);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_SLICE, Slice);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_SELECT, Select);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_ARGMAX, ArgMax);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_ARGMIN, ArgMin);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_DECONVOLUTION, DeConv2d);
REGIST_LOGICAL_LAYOUT_INFERENCE(VSI_NN_OP_LOGICAL_OPS);
REGIST_REDUCE_LAYOUT_INFERENCE(VSI_NN_OP_REDUCE);
default: default:
VSILOGW("Op %d: Default layout inference pass.", op_id); VSILOGW("Op %d: Default layout inference pass.", op_id);
assert(false); assert(false);

View File

@ -0,0 +1,88 @@
/****************************************************************************
*
* Copyright (c) 2020 Vivante Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************/
#ifndef TIM_LAYOUT_INFER_ARG_OPS_LAYOUT_INFERENCE_H_
#define TIM_LAYOUT_INFER_ARG_OPS_LAYOUT_INFERENCE_H_
#include "src/tim/transform/ops/op_layout_inference.h"
#include "src/tim/vx/operation_private.h"
#include "tim/vx/ops/arg.h"
namespace tim {
namespace transform {
class ArgMaxLayoutInfer : public OpLayoutInfer {
public:
ArgMaxLayoutInfer(
const std::shared_ptr<vx::Operation> op,
std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
: OpLayoutInfer(op, context) {}
void OnInputs(
std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
assert(1 == op_->impl()->InputsTensor().size());
auto src_input = op_->impl()->InputsTensor()[0];
auto input_pv = context_->GetPermuteVector(src_input);
uint32_t axis = op_->impl()->node()->nn_param.argmax.axis;
axis = MapAxis(input_pv->AsStdVec(), axis);
auto argmax =
context_->infer_graph_->CreateOperation<vx::ops::ArgMax>(axis);
auto infer_out = CreateOutputsTensor(input_pv);
(*argmax).BindInput(context_->GetMapedTensor(src_input));
(*argmax).BindOutput(infer_out[0]);
context_->SetPermuteVector(op_->impl()->OutputsTensor()[0], input_pv);
next_tensors.push_back(op_->impl()->OutputsTensor()[0]);
}
};
class ArgMinLayoutInfer : public OpLayoutInfer {
public:
ArgMinLayoutInfer(
const std::shared_ptr<vx::Operation> op,
std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
: OpLayoutInfer(op, context) {}
void OnInputs(
std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
assert(1 == op_->impl()->InputsTensor().size());
auto src_input = op_->impl()->InputsTensor()[0];
auto input_pv = context_->GetPermuteVector(src_input);
uint32_t axis = op_->impl()->node()->nn_param.argmin.axis;
axis = MapAxis(input_pv->AsStdVec(), axis);
auto argmin =
context_->infer_graph_->CreateOperation<vx::ops::ArgMin>(axis);
auto infer_out = CreateOutputsTensor(input_pv);
(*argmin).BindInput(context_->GetMapedTensor(src_input));
(*argmin).BindOutput(infer_out[0]);
context_->SetPermuteVector(op_->impl()->OutputsTensor()[0], input_pv);
next_tensors.push_back(op_->impl()->OutputsTensor()[0]);
}
};
} // namespace transform
} // namespace tim
#endif

View File

@ -78,15 +78,22 @@ class Conv2dLayoutInfer : public OpLayoutInfer {
} }
} }
} else { } else {
// For input/weight // For bias
auto pv = context_->GetPermuteVector(in); if (in->GetShape().size() == 1) {
auto final_pv = pv->Reverse()->Add(required_pv);
if (!final_pv->IsAligned()) {
infer_tensor = InsertPermute(context_->GetMapedTensor(in), final_pv);
trans_pv = required_pv;
} else {
infer_tensor = context_->GetMapedTensor(in); infer_tensor = context_->GetMapedTensor(in);
trans_pv = pv; trans_pv = MakeShared(1);
} else {
// For input/weight
auto pv = context_->GetPermuteVector(in);
auto final_pv = pv->Reverse()->Add(required_pv);
if (!final_pv->IsAligned()) {
infer_tensor =
InsertPermute(context_->GetMapedTensor(in), final_pv);
trans_pv = required_pv;
} else {
infer_tensor = context_->GetMapedTensor(in);
trans_pv = pv;
}
} }
} }
context_->UpdateTensorMap(in, infer_tensor); context_->UpdateTensorMap(in, infer_tensor);

View File

@ -0,0 +1,136 @@
/****************************************************************************
*
* Copyright (c) 2020 Vivante Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************/
#ifndef TIM_LAYOUT_INFER_DECONV2D_LAYOUT_INFERENCE_H_
#define TIM_LAYOUT_INFER_DECONV2D_LAYOUT_INFERENCE_H_
#include "src/tim/transform/ops/op_layout_inference.h"
#include "src/tim/transform/permute_vector.h"
#include "src/tim/vx/operation_private.h"
#include "tim/vx/ops/deconv.h"
namespace tim {
namespace transform {
class DeConv2dLayoutInfer : public OpLayoutInfer {
 public:
  DeConv2dLayoutInfer(
      const std::shared_ptr<vx::Operation>& op,
      std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
      : OpLayoutInfer(op, context) {}

  // Layout inference for DeConv2d (transposed convolution).
  // - Activations declared CWHN are permuted to WHCN.
  // - Constant weights are permuted to WHIcOc, with a dedicated path for the
  //   TVM OcIcWH kernel layout; rank-1 constant bias is passed through as-is.
  // - The recreated op is rebuilt from nn_param.deconv on the inferred graph.
  void OnInputs(
      std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
    vx::DataLayout layout = op_->impl()->layout_;
    auto required_pv = MakeShared(4);
    if (layout == vx::DataLayout::CWHN) {
      required_pv = std::make_shared<PermuteVector<4>>(kCWHN2WHCN);
    }
    auto src_inputs = op_->impl()->InputsTensor();
    for (const auto& in : src_inputs) {
      std::shared_ptr<vx::Tensor> infer_tensor;
      std::shared_ptr<IPermuteVector> trans_pv;
      if (in->IsConstTensor() &&
          !(in->GetSpec().attr_ & vx::TensorAttribute::INPUT)) {
        // Constant bias: rank-1, never permuted.
        if (in->GetShape().size() == 1) {
          infer_tensor = context_->infer_graph_->CreateTensor(in->GetSpec(),
                                                              in->GetDataRef());
          trans_pv = MakeShared(1);
        } else {
          // Constant input/weight tensor.
          if (!required_pv->IsAligned()) {
            auto src_deconv2d =
                std::static_pointer_cast<vx::ops::DeConv2d>(op_);
            // Support TVM kernel layout: OcIcWH weights need their own
            // permutation to reach WHIcOc.
            if (src_deconv2d->KernelDataLayout() == vx::DataLayout::OcIcWH) {
              trans_pv = std::make_shared<PermuteVector<4>>(kOcIcWH2WHIcOc);
              infer_tensor = PermuteConstTensor(in, trans_pv);
            } else {
              infer_tensor = PermuteConstTensor(in, required_pv);
              trans_pv = required_pv;
            }
          } else {
            infer_tensor = context_->infer_graph_->CreateTensor(
                in->GetSpec(), in->GetDataRef());
            trans_pv = MakeShared(required_pv->Rank());
          }
        }
      } else {
        // Non-constant (graph) tensors.
        if (in->GetShape().size() == 1) {
          // Bias produced by the graph: keep the mapped tensor unchanged.
          infer_tensor = context_->GetMapedTensor(in);
          trans_pv = MakeShared(1);
        } else {
          // Input/weight: compose the tensor's current permutation with the
          // required one; insert a Permute only when they differ.
          auto pv = context_->GetPermuteVector(in);
          auto final_pv = pv->Reverse()->Add(required_pv);
          if (!final_pv->IsAligned()) {
            infer_tensor =
                InsertPermute(context_->GetMapedTensor(in), final_pv);
            trans_pv = required_pv;
          } else {
            infer_tensor = context_->GetMapedTensor(in);
            trans_pv = pv;
          }
        }
      }
      context_->UpdateTensorMap(in, infer_tensor);
      context_->SetPermuteVector(in, trans_pv);
    }
    const auto& p = op_->impl()->node()->nn_param.deconv;
    auto pad_type = TranslatePadType(p.pad_type);
    std::array<uint32_t, 2> ksize = {p.ksize[0], p.ksize[1]};
    std::array<uint32_t, 2> stride = {p.stride[0], p.stride[1]};
    // Bug fix: the second element previously duplicated output_padding[0],
    // dropping the padding along the second spatial axis.
    std::array<uint32_t, 2> output_padding = {p.output_padding[0],
                                              p.output_padding[1]};
    std::array<uint32_t, 4> pad = {p.pad[0], p.pad[1], p.pad[2], p.pad[3]};
    int32_t oc_count = p.weights;
    const uint32_t group = p.group;
    auto deconv = context_->infer_graph_->CreateOperation<vx::ops::DeConv2d>(
        oc_count, pad_type, ksize, stride, output_padding, pad, group);
    auto infer_out = CreateOutputsTensor(required_pv);
    for (const auto& i_src : src_inputs) {
      (*deconv).BindInput(context_->GetMapedTensor(i_src));
    }
    (*deconv).BindOutput(infer_out[0]);
    context_->SetPermuteVector(op_->impl()->OutputsTensor()[0], required_pv);
    next_tensors.push_back(op_->impl()->OutputsTensor()[0]);
  }
};
} // namespace transform
} // namespace tim
#endif

View File

@ -0,0 +1,60 @@
/****************************************************************************
*
* Copyright (c) 2020 Vivante Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************/
#ifndef TIM_LAYOUT_INFER_GATHER_LAYOUT_INFERENCE_H_
#define TIM_LAYOUT_INFER_GATHER_LAYOUT_INFERENCE_H_
#include "src/tim/transform/ops/op_layout_inference.h"
#include "src/tim/vx/operation_private.h"
#include "tim/vx/ops/gather.h"
namespace tim {
namespace transform {
class GatherLayoutInfer : public OpLayoutInfer {
 public:
  GatherLayoutInfer(
      const std::shared_ptr<vx::Operation>& op,
      std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
      : OpLayoutInfer(op, context) {}

  // Gather runs in the original layout: all input permute vectors are
  // reversed first, and the output receives an identity permute vector of
  // rank input_rank + indices_rank - 1.
  void OnInputs(
      std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
    ReverseInputsPermuteVector();
    const auto inputs = op_->impl()->InputsTensor();
    auto gather = context_->infer_graph_->CreateOperation<vx::ops::Gather>(
        op_->impl()->node()->nn_param.gather.axis);
    int32_t out_rank = -1;
    for (const auto& src : inputs) {
      gather->BindInput(context_->GetMapedTensor(src));
      out_rank += src->GetShape().size();
    }
    auto infer_out =
        CreateOutputsTensor(context_->GetPermuteVector(inputs[0]));
    gather->BindOutput(infer_out[0]);
    auto src_out = op_->impl()->OutputsTensor()[0];
    context_->SetPermuteVector(src_out, MakeShared(out_rank));
    next_tensors.push_back(src_out);
  }
};
} // namespace transform
} // namespace tim
#endif

View File

@ -0,0 +1,61 @@
/****************************************************************************
*
* Copyright (c) 2020 Vivante Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************/
#ifndef TIM_LAYOUT_INFER_GATHER_ND_LAYOUT_INFERENCE_H_
#define TIM_LAYOUT_INFER_GATHER_ND_LAYOUT_INFERENCE_H_
#include "src/tim/transform/ops/op_layout_inference.h"
#include "src/tim/vx/operation_private.h"
#include "tim/vx/ops/gathernd.h"
namespace tim {
namespace transform {
class GatherNdLayoutInfer : public OpLayoutInfer {
 public:
  GatherNdLayoutInfer(
      const std::shared_ptr<vx::Operation>& op,
      std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
      : OpLayoutInfer(op, context) {}

  // GatherNd runs in the original layout: input permute vectors are reversed
  // first and the output gets an identity permute vector.
  void OnInputs(
      std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
    ReverseInputsPermuteVector();
    const auto inputs = op_->impl()->InputsTensor();
    // Output rank = input_rank + position_rank - indices_rank - 1.
    uint32_t in_rank = inputs[0]->GetShape().size();
    uint32_t pos_rank = inputs[1]->GetShape().size();
    uint32_t idx_rank = inputs[1]->GetShape()[0];
    int32_t out_rank = in_rank + pos_rank - idx_rank - 1;
    auto gather_nd =
        context_->infer_graph_->CreateOperation<vx::ops::GatherNd>();
    for (const auto& src : inputs) {
      gather_nd->BindInput(context_->GetMapedTensor(src));
    }
    auto infer_out =
        CreateOutputsTensor(context_->GetPermuteVector(inputs[0]));
    gather_nd->BindOutput(infer_out[0]);
    auto src_out = op_->impl()->OutputsTensor()[0];
    context_->SetPermuteVector(src_out, MakeShared(out_rank));
    next_tensors.push_back(src_out);
  }
};
} // namespace transform
} // namespace tim
#endif

View File

@ -0,0 +1,61 @@
/****************************************************************************
*
* Copyright (c) 2020 Vivante Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************/
#ifndef TIM_LAYOUT_INFER_LOGICAL_OPS_LAYOUT_INFERENCE_H_
#define TIM_LAYOUT_INFER_LOGICAL_OPS_LAYOUT_INFERENCE_H_
#include "src/tim/transform/ops/op_layout_inference.h"
#include "src/tim/vx/operation_private.h"
#include "tim/vx/ops/logical.h"
namespace tim {
namespace transform {
template <typename OpTpye>
class LogicalOpsLayoutInfer : public OpLayoutInfer {
public:
LogicalOpsLayoutInfer(
const std::shared_ptr<vx::Operation> op,
std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
: OpLayoutInfer(op, context) {}
void OnInputs(
std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
auto required_pv = AlignPermuteVectorForMutilInputs();
auto infer_out = CreateOutputsTensor(required_pv);
auto logical_op = context_->infer_graph_->CreateOperation<OpTpye>();
for (const auto& i_src : op_->impl()->InputsTensor()) {
(*logical_op).BindInput(context_->GetMapedTensor(i_src));
}
(*logical_op).BindOutput(infer_out[0]);
context_->SetPermuteVector(op_->impl()->OutputsTensor()[0], required_pv);
next_tensors.push_back(op_->impl()->OutputsTensor()[0]);
}
};
using LogicalAndLayoutInfer = LogicalOpsLayoutInfer<vx::ops::LogicalAnd>;
using LogicalOrLayoutInfer = LogicalOpsLayoutInfer<vx::ops::LogicalOr>;
} // namespace transform
} // namespace tim
#endif

View File

@ -0,0 +1,61 @@
/****************************************************************************
*
* Copyright (c) 2020 Vivante Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************/
#ifndef TIM_LAYOUT_INFER_REVERSE_LAYOUT_INFERENCE_H_
#define TIM_LAYOUT_INFER_REVERSE_LAYOUT_INFERENCE_H_
#include "src/tim/transform/ops/op_layout_inference.h"
#include "src/tim/vx/operation_private.h"
#include "tim/vx/ops/reverse.h"
namespace tim {
namespace transform {
class ReverseLayoutInfer : public OpLayoutInfer {
 public:
  ReverseLayoutInfer(
      const std::shared_ptr<vx::Operation>& op,
      std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
      : OpLayoutInfer(op, context) {}

  // Layout inference for Reverse: the op itself is layout-agnostic, but its
  // axis list must be remapped through the input tensor's permute vector.
  void OnInputs(
      std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
    auto src_input = op_->impl()->InputsTensor()[0];
    auto input_pv = context_->GetPermuteVector(src_input);
    const auto& param = op_->impl()->node()->nn_param.reverse;
    // Build the axis vector with the iterator-range constructor instead of
    // the old pre-size-then-memcpy pattern.
    std::vector<int32_t> axis(param.axis, param.axis + param.axis_num);
    // Translate every axis into the inferred (possibly permuted) layout.
    for (auto& a : axis) {
      a = MapAxis(input_pv->AsStdVec(), a);
    }
    auto reverse =
        context_->infer_graph_->CreateOperation<vx::ops::Reverse>(axis);
    (*reverse).BindInput(context_->GetMapedTensor(src_input));
    auto infer_out = CreateOutputsTensor(input_pv);
    (*reverse).BindOutput(infer_out[0]);
    context_->SetPermuteVector(op_->impl()->OutputsTensor()[0], input_pv);
    next_tensors.push_back(op_->impl()->OutputsTensor()[0]);
  }
};
} // namespace transform
} // namespace tim
#endif

View File

@ -0,0 +1,55 @@
/****************************************************************************
*
* Copyright (c) 2020 Vivante Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************/
#ifndef TIM_LAYOUT_INFER_SELECT_LAYOUT_INFERENCE_H_
#define TIM_LAYOUT_INFER_SELECT_LAYOUT_INFERENCE_H_
#include "src/tim/transform/ops/op_layout_inference.h"
#include "src/tim/vx/operation_private.h"
#include "tim/vx/ops/select.h"
namespace tim {
namespace transform {
class SelectLayoutInfer : public OpLayoutInfer {
 public:
  SelectLayoutInfer(
      const std::shared_ptr<vx::Operation>& op,
      std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
      : OpLayoutInfer(op, context) {}

  // Select is element-wise over (condition, x, y): align every input to a
  // common permute vector and propagate it to the output.
  void OnInputs(
      std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
    auto aligned_pv = AlignPermuteVectorForMutilInputs();
    auto select_op =
        context_->infer_graph_->CreateOperation<vx::ops::Select>();
    auto infer_out = CreateOutputsTensor(aligned_pv);
    for (const auto& src : op_->impl()->InputsTensor()) {
      select_op->BindInput(context_->GetMapedTensor(src));
    }
    select_op->BindOutput(infer_out[0]);
    auto src_out = op_->impl()->OutputsTensor()[0];
    context_->SetPermuteVector(src_out, aligned_pv);
    next_tensors.push_back(src_out);
  }
};
} // namespace transform
} // namespace tim
#endif

View File

@ -69,7 +69,6 @@ using RsqrtLayoutInfer = SimpleOpsLayoutInfer<vx::ops::Rsqrt>;
using SquareLayoutInfer = SimpleOpsLayoutInfer<vx::ops::Square>; using SquareLayoutInfer = SimpleOpsLayoutInfer<vx::ops::Square>;
using LogicalNotLayoutInfer = SimpleOpsLayoutInfer<vx::ops::LogicalNot>; using LogicalNotLayoutInfer = SimpleOpsLayoutInfer<vx::ops::LogicalNot>;
} // namespace transform } // namespace transform
} // namespace tim } // namespace tim

View File

@ -0,0 +1,65 @@
/****************************************************************************
*
* Copyright (c) 2020 Vivante Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************/
#ifndef TIM_LAYOUT_INFER_SLICE_LAYOUT_INFERENCE_H_
#define TIM_LAYOUT_INFER_SLICE_LAYOUT_INFERENCE_H_
#include "src/tim/transform/ops/op_layout_inference.h"
#include "src/tim/vx/operation_private.h"
#include "tim/vx/ops/slice.h"
namespace tim {
namespace transform {
// Layout-inference pass for vx::ops::Slice.
//
// Slice carries per-axis `start`/`length` parameters, so when the input
// tensor has been permuted by an earlier pass those parameters must be
// remapped through the input's permute vector before the op is re-created
// on the inference graph. The output inherits the input's permute vector
// unchanged (Slice does not reorder axes).
class SliceLayoutInfer : public OpLayoutInfer {
 public:
  SliceLayoutInfer(
      const std::shared_ptr<vx::Operation>& op,
      std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
      : OpLayoutInfer(op, context) {}

  // Rebuilds the Slice op on the inference graph with axis-remapped
  // start/length, binds the already-mapped input tensor, records the
  // output's permute vector, and queues the original output for the
  // next round of inference.
  void OnInputs(
      std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
    auto src_input = op_->impl()->InputsTensor()[0];
    auto input_pv = context_->GetPermuteVector(src_input);
    uint32_t dims = op_->impl()->node()->nn_param.slice.dims;
    const uint32_t* start_ptr = op_->impl()->node()->nn_param.slice.start;
    const uint32_t* length_ptr = op_->impl()->node()->nn_param.slice.length;
    // Iterator-range construction converts each element uint32_t -> int32_t
    // directly; the previous memcpy relied on <cstring> (never included here)
    // and on the two types sharing an object representation.
    std::vector<int32_t> start(start_ptr, start_ptr + dims);
    std::vector<int32_t> length(length_ptr, length_ptr + dims);
    // Remap per-axis parameters into the permuted axis order.
    start = MapMultipleAxis(input_pv->AsStdVec(), start);
    length = MapMultipleAxis(input_pv->AsStdVec(), length);
    auto slice = context_->infer_graph_->CreateOperation<vx::ops::Slice>(
        dims, start, length);
    auto infer_out = CreateOutputsTensor(input_pv);
    (*slice).BindInput(context_->GetMapedTensor(src_input));
    (*slice).BindOutput(infer_out[0]);
    // Slice preserves axis order, so the output keeps the input's
    // permute vector.
    context_->SetPermuteVector(op_->impl()->OutputsTensor()[0], input_pv);
    next_tensors.push_back(op_->impl()->OutputsTensor()[0]);
  }
};
} // namespace transform
} // namespace tim
#endif

View File

@ -45,7 +45,9 @@ class StackLayoutInfer : public OpLayoutInfer {
int32_t axis = op_->impl()->node()->nn_param.stack.axis; int32_t axis = op_->impl()->node()->nn_param.stack.axis;
auto stack = context_->infer_graph_->CreateOperation<vx::ops::Stack>( auto stack = context_->infer_graph_->CreateOperation<vx::ops::Stack>(
axis, op_->impl()->input_cnt_); axis, op_->impl()->input_cnt_);
(*stack).BindInput(context_->GetMapedTensor(op_->impl()->InputsTensor()[0])); for (const auto& i_src : op_->impl()->InputsTensor()) {
(*stack).BindInput(context_->GetMapedTensor(i_src));
}
auto required_pv = MakeShared(op_->impl()->OutputsTensor()[0]->GetShape().size()); auto required_pv = MakeShared(op_->impl()->OutputsTensor()[0]->GetShape().size());
auto out_infer = CreateOutputsTensor(required_pv); auto out_infer = CreateOutputsTensor(required_pv);
(*stack).BindOutput(out_infer[0]); (*stack).BindOutput(out_infer[0]);

View File

@ -36,25 +36,29 @@ namespace ops {
DeConv2d::DeConv2d(Graph* graph, int32_t oc_count, PadType pad_type, DeConv2d::DeConv2d(Graph* graph, int32_t oc_count, PadType pad_type,
const std::array<uint32_t, 2>& ksize, const std::array<uint32_t, 2>& ksize,
const std::array<uint32_t, 2>& stride, const std::array<uint32_t, 2>& stride,
const std::array<uint32_t, 2>& output_padding) const std::array<uint32_t, 2>& output_padding,
DataLayout input_layout,
DataLayout kernel_layout)
: DeConv2d(graph, oc_count, pad_type, ksize, stride, output_padding, : DeConv2d(graph, oc_count, pad_type, ksize, stride, output_padding,
{0, 0, 0, 0}) { {0, 0, 0, 0}, 1, input_layout, kernel_layout) {}
}
DeConv2d::DeConv2d(Graph* graph, int32_t oc_count, PadType pad_type, DeConv2d::DeConv2d(Graph* graph, int32_t oc_count, PadType pad_type,
const std::array<uint32_t, 2>& ksize, const std::array<uint32_t, 2>& ksize,
const std::array<uint32_t, 2>& stride, const std::array<uint32_t, 2>& stride,
const std::array<uint32_t, 2>& output_padding, const std::array<uint32_t, 2>& output_padding,
const std::array<uint32_t, 4>& pad, const std::array<uint32_t, 4>& pad,
const uint32_t group) const uint32_t group,
: Operation(graph, VSI_NN_OP_DECONVOLUTION), DataLayout input_layout,
DataLayout kernel_layout)
: Operation(graph, VSI_NN_OP_DECONVOLUTION, 0, 0, input_layout),
oc_count_(oc_count), oc_count_(oc_count),
pad_type_(pad_type), pad_type_(pad_type),
ksize_(ksize), ksize_(ksize),
stride_(stride), stride_(stride),
output_padding_(output_padding), output_padding_(output_padding),
pad_(pad), pad_(pad),
group_(group) { group_(group),
kernel_layout_(kernel_layout) {
// TODO(Sven): only support depthwise usage // TODO(Sven): only support depthwise usage
assert((group == 1U) || group == static_cast<uint32_t>(oc_count)); assert((group == 1U) || group == static_cast<uint32_t>(oc_count));

View File

@ -30,10 +30,10 @@ namespace tim {
namespace vx { namespace vx {
namespace ops { namespace ops {
Reverse::Reverse(Graph* graph, int32_t* axis, uint32_t axis_num) Reverse::Reverse(Graph* graph, const std::vector<int32_t>& axis)
: Operation(graph, VSI_NN_OP_REVERSE), axis_(axis), axis_num_(axis_num) { : Operation(graph, VSI_NN_OP_REVERSE), axis_(axis) {
this->impl()->node()->nn_param.reverse.axis = axis_; this->impl()->node()->nn_param.reverse.axis = axis_.data();
this->impl()->node()->nn_param.reverse.axis_num = axis_num_; this->impl()->node()->nn_param.reverse.axis_num = axis_.size();
} }
} // namespace ops } // namespace ops
} // namespace vx } // namespace vx