From 7582b57edc8849dde47fd0de5d8b1e0cef74097c Mon Sep 17 00:00:00 2001
From: Chen Xin
Date: Mon, 12 Dec 2022 18:55:04 +0800
Subject: [PATCH] Added pad_v2 & pad_v2 layout inference

Also added a rank-4 pad test case.

Type: Added new op
Signed-off-by: Chen Xin
---
 include/tim/vx/ops.h                          |  1 +
 include/tim/vx/ops/pad.h                      |  2 +-
 include/tim/vx/ops/pad_v2.h                   | 71 +++++++++++++++
 src/tim/transform/layout_inference.cc         |  2 +
 .../transform/ops/pad_v2_layout_inference.h   | 74 ++++++++++++++++
 src/tim/vx/ops/pad_test.cc                    | 86 +++++++++++++++++++
 src/tim/vx/ops/pad_v2.cc                      | 61 +++++++++++++
 7 files changed, 296 insertions(+), 1 deletion(-)
 create mode 100644 include/tim/vx/ops/pad_v2.h
 create mode 100644 src/tim/transform/ops/pad_v2_layout_inference.h
 create mode 100644 src/tim/vx/ops/pad_v2.cc

diff --git a/include/tim/vx/ops.h b/include/tim/vx/ops.h
index 52d57a2..cf82e8e 100644
--- a/include/tim/vx/ops.h
+++ b/include/tim/vx/ops.h
@@ -62,6 +62,7 @@
 #include "tim/vx/ops/nbg.h"
 #include "tim/vx/ops/onehot.h"
 #include "tim/vx/ops/pad.h"
+#include "tim/vx/ops/pad_v2.h"
 #include "tim/vx/ops/pool2d.h"
 #include "tim/vx/ops/reduce.h"
 #include "tim/vx/ops/relational_operations.h"
diff --git a/include/tim/vx/ops/pad.h b/include/tim/vx/ops/pad.h
index faafe38..fd7ddc2 100644
--- a/include/tim/vx/ops/pad.h
+++ b/include/tim/vx/ops/pad.h
@@ -34,7 +34,7 @@ namespace ops {
  *
  * Pads a tensor.
  *
- * - const_val : the value to pad.
+ * - const_val : the int32 value to pad.
  * - pad_mode : the mode of pad.
  * - front_size : Add pad values to the left and top.
  * - back_size : Add pad values to the right and bottom.
diff --git a/include/tim/vx/ops/pad_v2.h b/include/tim/vx/ops/pad_v2.h
new file mode 100644
index 0000000..73640b0
--- /dev/null
+++ b/include/tim/vx/ops/pad_v2.h
@@ -0,0 +1,71 @@
+/****************************************************************************
+*
+* Copyright (c) 2022 Vivante Corporation
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+* DEALINGS IN THE SOFTWARE.
+*
+*****************************************************************************/
+#ifndef TIM_VX_OPERATION_PADV2_H_
+#define TIM_VX_OPERATION_PADV2_H_
+#include "tim/vx/builtin_op.h"
+
+namespace tim {
+namespace vx {
+namespace ops {
+
+/**
+ * ## PadV2
+ *
+ * Pads a tensor.
+ *
+ * - const_val : the float value to pad.
+ * - pad_mode : the mode of pad.
+ * - front_size : Add pad values to the left and top.
+ * - back_size : Add pad values to the right and bottom.
+ */
+
+class PadV2 : public BuiltinOp {
+ public:
+  typedef enum {
+    // signature
+    PAD_MODE_CONSTANT,
+    PAD_MODE_EDGE,
+    PAD_MODE_SYMMETRIC,
+    PAD_MODE_REFLECT,
+  } pad_mode_type;
+
+  PadV2(Graph* graph, const std::vector<uint32_t>& front_size,
+        const std::vector<uint32_t>& back_size, float const_val);
+  PadV2(Graph* graph, const std::vector<uint32_t>& front_size,
+        const std::vector<uint32_t>& back_size, float const_val,
+        pad_mode_type pad_mode);
+
+  std::shared_ptr<Operation> Clone(
+      std::shared_ptr<Graph>& graph) const override;
+
+ protected:
+  std::vector<uint32_t> front_size_;
+  std::vector<uint32_t> back_size_;
+  float const_val_;
+  pad_mode_type pad_mode_;
+};
+}  // namespace ops
+}  // namespace vx
+}  // namespace tim
+#endif
\ No newline at end of file
diff --git a/src/tim/transform/layout_inference.cc b/src/tim/transform/layout_inference.cc
index 6e25694..c9de876 100644
--- a/src/tim/transform/layout_inference.cc
+++ b/src/tim/transform/layout_inference.cc
@@ -42,6 +42,7 @@
 #include "ops/space2batch_layout_inference.h"
 #include "ops/batch2space_layout_inference.h"
 #include "ops/pad_layout_inference.h"
+#include "ops/pad_v2_layout_inference.h"
 #include "ops/reduce_layout_inference.h"
 #include "ops/fullyconnected_layout_inference.h"
 #include "ops/resize_layout_inference.h"
@@ -248,6 +249,7 @@ std::vector<std::shared_ptr<vx::Tensor>> HandleLayoutInfer(
   REGIST_LAYOUT_INFERENCE(VSI_NN_OP_SPACE2BATCH, Space2Batch);
   REGIST_LAYOUT_INFERENCE(VSI_NN_OP_BATCH2SPACE, Batch2Space);
   REGIST_LAYOUT_INFERENCE(VSI_NN_OP_PAD, Pad);
+  REGIST_LAYOUT_INFERENCE(VSI_NN_OP_PAD2, PadV2);
   REGIST_LAYOUT_INFERENCE(VSI_NN_OP_FCL2, FullyConnected);
   REGIST_LAYOUT_INFERENCE(VSI_NN_OP_RESIZE, Resize);
   REGIST_LAYOUT_INFERENCE(VSI_NN_OP_SPLIT, Split);
diff --git a/src/tim/transform/ops/pad_v2_layout_inference.h b/src/tim/transform/ops/pad_v2_layout_inference.h
new file mode 100644
index 0000000..595b44e
--- /dev/null
+++ b/src/tim/transform/ops/pad_v2_layout_inference.h
@@ -0,0 +1,74 @@
+/****************************************************************************
+ *
+ * Copyright (c) 2020 Vivante Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *****************************************************************************/
+#ifndef TIM_LAYOUT_INFER_PADV2_LAYOUT_INFERENCE_H_
+#define TIM_LAYOUT_INFER_PADV2_LAYOUT_INFERENCE_H_
+
+#include "tim/vx/ops/pad_v2.h"
+
+#include "ops/op_layout_inference.h"
+#include "permute_vector.h"
+#include "builtin_op_impl.h"
+namespace tim {
+namespace transform {
+class PadV2LayoutInfer : public OpLayoutInfer {
+ public:
+  PadV2LayoutInfer(
+      const std::shared_ptr<vx::Operation> op,
+      std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
+      : OpLayoutInfer(op, context) {}
+
+  void OnInputs(
+      std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
+    assert(op_->impl()->InputsTensor().size() == 1);
+    auto i_src = op_->impl()->InputsTensor()[0];
+    auto input_pv = context_->GetPermuteVector(i_src);
+
+    uint32_t dim_num = op_->impl()->node()->nn_param.pad2.dim_num;
+    std::vector<uint32_t> front_size(dim_num);
+    std::vector<uint32_t> back_size(dim_num);
+    memcpy(front_size.data(), op_->impl()->node()->nn_param.pad2.front_size,
+           sizeof(uint32_t) * dim_num);
+    memcpy(back_size.data(), op_->impl()->node()->nn_param.pad2.back_size,
+           sizeof(uint32_t) * dim_num);
+    float pad_value = op_->impl()->node()->nn_param.pad2.const_val;
+
+    if (!input_pv->IsAligned()) {
+      front_size = MapMultipleAxis(input_pv->AsStdVec(), front_size);
+      back_size = MapMultipleAxis(input_pv->AsStdVec(), back_size);
+    }
+
+    auto pad_v2 = context_->infer_graph_->CreateOperation<vx::ops::PadV2>(
+        front_size, back_size, pad_value);
+    auto out_infer = CreateOutputsTensor(input_pv);
+    (*pad_v2).BindInput(context_->GetMapedTensor(i_src));
+    (*pad_v2).BindOutput(out_infer[0]);
+    context_->SetPermuteVector(op_->impl()->OutputsTensor()[0], input_pv);
+    next_tensors.push_back(op_->impl()->OutputsTensor()[0]);
+  }
+};
+
+}  // namespace transform
+}  // namespace tim
+
+#endif
\ No newline at end of file
diff --git a/src/tim/vx/ops/pad_test.cc b/src/tim/vx/ops/pad_test.cc
index b0a67ca..23e799a 100644
--- a/src/tim/vx/ops/pad_test.cc
+++ b/src/tim/vx/ops/pad_test.cc
@@ -24,6 +24,7 @@
 #include "tim/vx/context.h"
 #include "tim/vx/graph.h"
 #include "tim/vx/ops/pad.h"
+#include "tim/vx/ops/pad_v2.h"
 #include "tim/vx/types.h"
 #include "test_utils.h"
@@ -67,6 +68,91 @@ TEST(Pad, constant) {
   EXPECT_EQ(golden, output);
 }
 
+TEST(Pad, float_1_3_2_1) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType input_shape({1, 3, 2, 1});
+  tim::vx::ShapeType output_shape({1, 7, 4, 1});
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
+                                 tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
+                                  tim::vx::TensorAttribute::OUTPUT);
+
+  auto input_tensor = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+
+  std::vector<float> input_data = {
+      1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f
+  };
+
+  std::vector<float> golden = {
+      9.3f, 1.0f, 2.0f, 3.0f, 9.3f, 9.3f, 9.3f, 9.3f, 4.0f, 5.0f, 6.0f, 9.3f, 9.3f, 9.3f,
+      9.3f, 9.3f, 9.3f, 9.3f, 9.3f, 9.3f, 9.3f, 9.3f, 9.3f, 9.3f, 9.3f, 9.3f, 9.3f, 9.3f
+  };
+
+  EXPECT_TRUE(
+      input_tensor->CopyDataToTensor(input_data.data(), input_data.size() * 4));
+  std::vector<uint32_t> front = {0, 1, 0, 0};
+  std::vector<uint32_t> back = {0, 3, 2, 0};
+  auto op = graph->CreateOperation<tim::vx::ops::PadV2>(
+      front, back, 9.3f, tim::vx::ops::PadV2::PAD_MODE_CONSTANT);
+  (*op).BindInput(input_tensor).BindOutput(output_tensor);
+
+  EXPECT_TRUE(graph->Compile());
+  EXPECT_TRUE(graph->Run());
+  std::vector<float> output(golden.size());
+
+  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
+  EXPECT_TRUE(ArraysMatch(golden, output, 1e-5f));
+}
+
+TEST(Pad, int8_1_3_2_1) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType input_shape({1, 3, 2, 1});
+  tim::vx::ShapeType output_shape({1, 7, 4, 1});
+  float scales = 2.3f;
+  int zero_point = -124;
+
+  tim::vx::Quantization quant_input(tim::vx::QuantType::ASYMMETRIC,
+                                    scales, zero_point);
+  tim::vx::Quantization quant_output(tim::vx::QuantType::ASYMMETRIC,
+                                     scales, zero_point);
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::INT8, input_shape,
+                                 tim::vx::TensorAttribute::INPUT, quant_input);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::INT8, output_shape,
+                                  tim::vx::TensorAttribute::OUTPUT, quant_output);
+
+  auto input_tensor = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+
+  std::vector<int8_t> input_data = {
+      -127, -126, -125, -124, -123, -122
+  };
+
+  std::vector<int8_t> golden = {
+      -120, -127, -126, -125, -120, -120, -120, -120, -124, -123, -122, -120, -120, -120,
+      -120, -120, -120, -120, -120, -120, -120, -120, -120, -120, -120, -120, -120, -120,
+  };
+
+  EXPECT_TRUE(input_tensor->CopyDataToTensor(input_data.data(),
+                                             input_data.size() * sizeof(int8_t)));
+  std::vector<uint32_t> front = {0, 1, 0, 0};
+  std::vector<uint32_t> back = {0, 3, 2, 0};
+  auto op = graph->CreateOperation<tim::vx::ops::Pad>(
+      front, back, 9, tim::vx::ops::Pad::PAD_MODE_CONSTANT);
+  (*op).BindInput(input_tensor).BindOutput(output_tensor);
+
+  EXPECT_TRUE(graph->Compile());
+  EXPECT_TRUE(graph->Run());
+  std::vector<int8_t> output(golden.size());
+
+  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
+  EXPECT_EQ(golden, output);
+}
+
 TEST(Pad, reflect) {
   auto ctx = tim::vx::Context::Create();
   auto graph = ctx->CreateGraph();
diff --git a/src/tim/vx/ops/pad_v2.cc b/src/tim/vx/ops/pad_v2.cc
new file mode 100644
index 0000000..d844e96
--- /dev/null
+++ b/src/tim/vx/ops/pad_v2.cc
@@ -0,0 +1,61 @@
+/****************************************************************************
+*
+* Copyright (c) 2022 Vivante Corporation
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+* DEALINGS IN THE SOFTWARE.
+*
+*****************************************************************************/
+#include "tim/vx/ops/pad_v2.h"
+
+#include "builtin_op_impl.h"
+#include "vsi_nn_pub.h"
+
+namespace tim {
+namespace vx {
+namespace ops {
+
+PadV2::PadV2(Graph* graph, const std::vector<uint32_t>& front_size,
+             const std::vector<uint32_t>& back_size, float const_val)
+    : PadV2(graph, front_size, back_size, const_val, PAD_MODE_CONSTANT) {}
+
+PadV2::PadV2(Graph* graph, const std::vector<uint32_t>& front_size,
+             const std::vector<uint32_t>& back_size, float const_val,
+             pad_mode_type pad_mode)
+    : BuiltinOp(graph, VSI_NN_OP_PAD2),
+      front_size_(front_size),
+      back_size_(back_size),
+      const_val_(const_val),
+      pad_mode_(pad_mode) {
+  this->impl()->node()->nn_param.pad2.front_size = front_size_.data();
+  this->impl()->node()->nn_param.pad2.back_size = back_size_.data();
+  this->impl()->node()->nn_param.pad2.dim_num = front_size_.size();
+  if (pad_mode_ == PAD_MODE_CONSTANT) {
+    this->impl()->node()->nn_param.pad2.const_val = const_val_;
+  }
+  this->impl()->node()->nn_param.pad2.mode = (vsi_nn_pad_mode_e)pad_mode_;
+}
+
+std::shared_ptr<Operation> PadV2::Clone(std::shared_ptr<Graph>& graph) const {
+  return graph->CreateOperation<PadV2>(this->front_size_, this->back_size_,
+                                       this->const_val_, this->pad_mode_);
+}
+
+}  // namespace ops
+}  // namespace vx
+}  // namespace tim
\ No newline at end of file
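
Usage sketch: a minimal way to drive the new PadV2 op through the public Context/Graph API
exercised in pad_test.cc above; the shapes, pad sizes, and the 9.3f constant simply mirror the
float_1_3_2_1 test and are illustrative only.

    // Pad a {1, 3, 2, 1} float tensor to {1, 7, 4, 1}, filling new elements with 9.3f.
    auto ctx = tim::vx::Context::Create();
    auto graph = ctx->CreateGraph();

    tim::vx::TensorSpec in_spec(tim::vx::DataType::FLOAT32, {1, 3, 2, 1},
                                tim::vx::TensorAttribute::INPUT);
    tim::vx::TensorSpec out_spec(tim::vx::DataType::FLOAT32, {1, 7, 4, 1},
                                 tim::vx::TensorAttribute::OUTPUT);
    auto input = graph->CreateTensor(in_spec);
    auto output = graph->CreateTensor(out_spec);

    std::vector<uint32_t> front = {0, 1, 0, 0};  // padding added before each dimension
    std::vector<uint32_t> back = {0, 3, 2, 0};   // padding added after each dimension
    auto pad = graph->CreateOperation<tim::vx::ops::PadV2>(
        front, back, 9.3f, tim::vx::ops::PadV2::PAD_MODE_CONSTANT);
    (*pad).BindInput(input).BindOutput(output);

    std::vector<float> in_data = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
    input->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float));
    graph->Compile();
    graph->Run();
    std::vector<float> out_data(7 * 4);
    output->CopyDataFromTensor(out_data.data());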