Added pad_v2 & pad_v2 layout infer

Also added a rank-4 test case

Type: Added new op
Signed-off-by: Chen Xin <jack.chen@verisilicon.com>
Chen Xin 2022-12-12 18:55:04 +08:00 committed by Sven
parent 8d8f4b6e68
commit 7582b57edc
7 changed files with 296 additions and 1 deletion

View File

@@ -62,6 +62,7 @@
#include "tim/vx/ops/nbg.h"
#include "tim/vx/ops/onehot.h"
#include "tim/vx/ops/pad.h"
#include "tim/vx/ops/pad_v2.h"
#include "tim/vx/ops/pool2d.h"
#include "tim/vx/ops/reduce.h"
#include "tim/vx/ops/relational_operations.h"

View File

@@ -34,7 +34,7 @@ namespace ops {
*
* Pads a tensor.
*
* - const_val : the value to pad.
* - const_val : the int32 value to pad.
* - pad_mode : the mode of pad.
* - front_size : Add pad values to the left and top.
* - back_size : Add pad values to the right and bottom.

View File

@@ -0,0 +1,71 @@
/****************************************************************************
*
* Copyright (c) 2022 Vivante Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************/
#ifndef TIM_VX_OPERATION_PADV2_H_
#define TIM_VX_OPERATION_PADV2_H_
#include "tim/vx/builtin_op.h"
namespace tim {
namespace vx {
namespace ops {
/**
* ## PadV2
*
* Pads a tensor.
*
* - const_val : the float value to pad.
* - pad_mode : the mode of pad.
* - front_size : Add pad values to the left and top.
* - back_size : Add pad values to the right and bottom.
*/
class PadV2 : public BuiltinOp {
public:
typedef enum {
// signature
PAD_MODE_CONSTANT,
PAD_MODE_EDGE,
PAD_MODE_SYMMETRIC,
PAD_MODE_REFLECT,
} pad_mode_type;
PadV2(Graph* graph, const std::vector<uint32_t>& front_size,
const std::vector<uint32_t>& back_size, float const_val);
PadV2(Graph* graph, const std::vector<uint32_t>& front_size,
const std::vector<uint32_t>& back_size, float const_val,
pad_mode_type pad_mode);
std::shared_ptr<Operation> Clone(
std::shared_ptr<Graph>& graph) const override;
protected:
std::vector<uint32_t> front_size_;
std::vector<uint32_t> back_size_;
float const_val_;
pad_mode_type pad_mode_;
};
} // namespace ops
} // namespace vx
} // namespace tim
#endif
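For reference, a minimal usage sketch of the PadV2 constructor declared above (not part of the diff; the shapes and values mirror the float test added later in this commit):

auto ctx = tim::vx::Context::Create();
auto graph = ctx->CreateGraph();
// input_tensor: {1, 3, 2, 1} FLOAT32, output_tensor: {1, 7, 4, 1} FLOAT32
std::vector<uint32_t> front = {0, 1, 0, 0};  // padding added before each axis
std::vector<uint32_t> back = {0, 3, 2, 0};   // padding added after each axis
auto pad_v2 = graph->CreateOperation<tim::vx::ops::PadV2>(
    front, back, 9.3f, tim::vx::ops::PadV2::PAD_MODE_CONSTANT);
(*pad_v2).BindInput(input_tensor).BindOutput(output_tensor);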

View File

@@ -42,6 +42,7 @@
#include "ops/space2batch_layout_inference.h"
#include "ops/batch2space_layout_inference.h"
#include "ops/pad_layout_inference.h"
#include "ops/pad_v2_layout_inference.h"
#include "ops/reduce_layout_inference.h"
#include "ops/fullyconnected_layout_inference.h"
#include "ops/resize_layout_inference.h"
@@ -248,6 +249,7 @@ std::vector<std::shared_ptr<vx::Tensor>> HandleLayoutInfer(
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_SPACE2BATCH, Space2Batch);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_BATCH2SPACE, Batch2Space);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_PAD, Pad);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_PAD2, PadV2);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_FCL2, FullyConnected);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_RESIZE, Resize);
REGIST_LAYOUT_INFERENCE(VSI_NN_OP_SPLIT, Split);

View File

@@ -0,0 +1,74 @@
/****************************************************************************
*
* Copyright (c) 2020 Vivante Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************/
#ifndef TIM_LAYOUT_INFER_PADV2_LAYOUT_INFERENCE_H_
#define TIM_LAYOUT_INFER_PADV2_LAYOUT_INFERENCE_H_
#include "tim/vx/ops/pad_v2.h"
#include "ops/op_layout_inference.h"
#include "permute_vector.h"
#include "builtin_op_impl.h"
namespace tim {
namespace transform {
class PadV2LayoutInfer : public OpLayoutInfer {
public:
PadV2LayoutInfer(
const std::shared_ptr<vx::Operation> op,
std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
: OpLayoutInfer(op, context) {}
void OnInputs(
std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
assert(op_->impl()->InputsTensor().size() == 1);
auto i_src = op_->impl()->InputsTensor()[0];
auto input_pv = context_->GetPermuteVector(i_src);
// PadV2 is lowered to VSI_NN_OP_PAD2, so read the pad2 union member
// (nn_param.pad.const_val is an int32, while pad2.const_val holds the float set by PadV2).
uint32_t dim_num = op_->impl()->node()->nn_param.pad2.dim_num;
std::vector<uint32_t> front_size(dim_num);
std::vector<uint32_t> back_size(dim_num);
memcpy(front_size.data(), op_->impl()->node()->nn_param.pad2.front_size,
sizeof(uint32_t) * dim_num);
memcpy(back_size.data(), op_->impl()->node()->nn_param.pad2.back_size,
sizeof(uint32_t) * dim_num);
float pad_value = op_->impl()->node()->nn_param.pad2.const_val;
if (!input_pv->IsAligned()) {
front_size = MapMultipleAxis(input_pv->AsStdVec(), front_size);
back_size = MapMultipleAxis(input_pv->AsStdVec(), back_size);
}
auto pad_v2 = context_->infer_graph_->CreateOperation<vx::ops::PadV2>(
front_size, back_size, pad_value);
auto out_infer = CreateOutputsTensor(input_pv);
(*pad_v2).BindInput(context_->GetMapedTensor(i_src));
(*pad_v2).BindOutput(out_infer[0]);
context_->SetPermuteVector(op_->impl()->OutputsTensor()[0], input_pv);
next_tensors.push_back(op_->impl()->OutputsTensor()[0]);
}
};
} // namespace transform
} // namespace tim
#endif
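The only layout-specific work in OnInputs above is reindexing the per-axis pad amounts when the input tensor has been permuted. A hypothetical illustration of that remapping (the real helper is MapMultipleAxis; its exact convention may differ):

// Illustrative only: reorder per-axis values by a permute vector.
std::vector<uint32_t> RemapByPerm(const std::vector<uint32_t>& perm,
                                  const std::vector<uint32_t>& vals) {
  std::vector<uint32_t> out(vals.size());
  for (size_t i = 0; i < perm.size(); ++i) out[i] = vals[perm[i]];
  return out;
}
// e.g. perm = {1, 2, 0, 3} turns front = {0, 1, 0, 0} into {1, 0, 0, 0}:
// the pad declared on axis 1 of the source layout must be applied on
// axis 0 of the permuted tensor.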

View File

@@ -24,6 +24,7 @@
#include "tim/vx/context.h"
#include "tim/vx/graph.h"
#include "tim/vx/ops/pad.h"
#include "tim/vx/ops/pad_v2.h"
#include "tim/vx/types.h"
#include "test_utils.h"
@@ -67,6 +68,91 @@ TEST(Pad, constant) {
EXPECT_EQ(golden, output);
}
TEST(Pad, float_1_3_2_1) {
auto ctx = tim::vx::Context::Create();
auto graph = ctx->CreateGraph();
tim::vx::ShapeType input_shape({1, 3, 2, 1});
tim::vx::ShapeType output_shape({1, 7, 4, 1});
tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
tim::vx::TensorAttribute::INPUT);
tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
tim::vx::TensorAttribute::OUTPUT);
auto input_tensor = graph->CreateTensor(input_spec);
auto output_tensor = graph->CreateTensor(output_spec);
std::vector<float> input_data = {
1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f
};
std::vector<float> golden = {
9.3f, 1.0f, 2.0f, 3.0f, 9.3f, 9.3f, 9.3f, 9.3f, 4.0f, 5.0f, 6.0f, 9.3f, 9.3f, 9.3f,
9.3f, 9.3f, 9.3f, 9.3f, 9.3f, 9.3f, 9.3f, 9.3f, 9.3f, 9.3f, 9.3f, 9.3f, 9.3f, 9.3f
};
EXPECT_TRUE(
input_tensor->CopyDataToTensor(input_data.data(), input_data.size() * sizeof(float)));
std::vector<uint32_t> front = {0, 1, 0, 0};
std::vector<uint32_t> back = {0, 3, 2, 0};
auto op = graph->CreateOperation<tim::vx::ops::PadV2>(
front, back, 9.3f, tim::vx::ops::PadV2::PAD_MODE_CONSTANT);
(*op).BindInput(input_tensor).BindOutput(output_tensor);
EXPECT_TRUE(graph->Compile());
EXPECT_TRUE(graph->Run());
std::vector<float> output(golden.size());
EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
EXPECT_TRUE(ArraysMatch(golden, output, 1e-5f));
}
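The output shape in the test above follows from out_dim = in_dim + front + back per axis: {1+0+0, 3+1+3, 2+0+2, 1+0+0} = {1, 7, 4, 1}, i.e. 28 elements, which matches the size of the golden vector.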
TEST(Pad, int8_1_3_2_1) {
auto ctx = tim::vx::Context::Create();
auto graph = ctx->CreateGraph();
tim::vx::ShapeType input_shape({1, 3, 2, 1});
tim::vx::ShapeType output_shape({1, 7, 4, 1});
float scales = 2.3f;
int zero_point = -124;
tim::vx::Quantization quant_input(tim::vx::QuantType::ASYMMETRIC,
scales, zero_point);
tim::vx::Quantization quant_output(tim::vx::QuantType::ASYMMETRIC,
scales, zero_point);
tim::vx::TensorSpec input_spec(tim::vx::DataType::INT8, input_shape,
tim::vx::TensorAttribute::INPUT, quant_input);
tim::vx::TensorSpec output_spec(tim::vx::DataType::INT8, output_shape,
tim::vx::TensorAttribute::OUTPUT, quant_output);
auto input_tensor = graph->CreateTensor(input_spec);
auto output_tensor = graph->CreateTensor(output_spec);
std::vector<int8_t> input_data = {
-127, -126, -125, -124, -123, -122
};
std::vector<int8_t> golden = {
-120, -127, -126, -125, -120, -120, -120, -120, -124, -123, -122, -120, -120, -120,
-120, -120, -120, -120, -120, -120, -120, -120, -120, -120, -120, -120, -120, -120,
};
EXPECT_TRUE(
input_tensor->CopyDataToTensor(input_data.data(), input_data.size() * sizeof(int8_t)));
std::vector<uint32_t> front = {0, 1, 0, 0};
std::vector<uint32_t> back = {0, 3, 2, 0};
auto op = graph->CreateOperation<tim::vx::ops::Pad>(
front, back, 9, tim::vx::ops::Pad::PAD_MODE_CONSTANT);
(*op).BindInput(input_tensor).BindOutput(output_tensor);
EXPECT_TRUE(graph->Compile());
EXPECT_TRUE(graph->Run());
std::vector<int8_t> output(golden.size());
EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
EXPECT_EQ(golden, output);
}
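In the int8 test the pad constant 9 shows up in the golden output as -120 because of the asymmetric quantization (scale 2.3, zero point -124): round(9 / 2.3) + (-124) = 4 - 124 = -120. The six input values pass through unchanged.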
TEST(Pad, reflect) {
auto ctx = tim::vx::Context::Create();
auto graph = ctx->CreateGraph();

src/tim/vx/ops/pad_v2.cc Normal file
View File

@@ -0,0 +1,61 @@
/****************************************************************************
*
* Copyright (c) 2022 Vivante Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************/
#include "tim/vx/ops/pad_v2.h"
#include "builtin_op_impl.h"
#include "vsi_nn_pub.h"
namespace tim {
namespace vx {
namespace ops {
PadV2::PadV2(Graph* graph, const std::vector<uint32_t>& front_size,
const std::vector<uint32_t>& back_size, float const_val)
: PadV2(graph, front_size, back_size, const_val, PAD_MODE_CONSTANT) {}
PadV2::PadV2(Graph* graph, const std::vector<uint32_t>& front_size,
const std::vector<uint32_t>& back_size, float const_val,
pad_mode_type pad_mode)
: BuiltinOp(graph, VSI_NN_OP_PAD2),
front_size_(front_size),
back_size_(back_size),
const_val_(const_val),
pad_mode_(pad_mode) {
this->impl()->node()->nn_param.pad2.front_size = front_size_.data();
this->impl()->node()->nn_param.pad2.back_size = back_size_.data();
this->impl()->node()->nn_param.pad2.dim_num = front_size_.size();
if (pad_mode_ == PAD_MODE_CONSTANT) {
this->impl()->node()->nn_param.pad2.const_val = const_val_;
}
this->impl()->node()->nn_param.pad2.mode = (vsi_nn_pad_mode_e)pad_mode_;
}
std::shared_ptr<Operation> PadV2::Clone(std::shared_ptr<Graph>& graph) const {
return graph->CreateOperation<PadV2>(this->front_size_, this->back_size_,
this->const_val_, this->pad_mode_);
}
} // namespace ops
} // namespace vx
} // namespace tim
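Note that nn_param.pad2.front_size and back_size end up holding raw pointers into the front_size_ / back_size_ members, which is presumably why the pad vectors are copied into the op object rather than referenced from the caller. A minimal sketch of Clone in use (the graph names are illustrative, not part of this commit):

auto src_graph = ctx->CreateGraph();
auto dst_graph = ctx->CreateGraph();
auto op = src_graph->CreateOperation<tim::vx::ops::PadV2>(front, back, 9.3f);
// Re-creates the op in dst_graph with the same pads, const_val and pad_mode.
auto copy = op->Clone(dst_graph);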