diff --git a/include/tim/vx/ops/pad.h b/include/tim/vx/ops/pad.h
index 4a214fa..6ac1d9b 100644
--- a/include/tim/vx/ops/pad.h
+++ b/include/tim/vx/ops/pad.h
@@ -35,19 +35,35 @@ namespace ops {
  * Pads a tensor.
  *
  * - const_val : the value to pad.
+ * - pad_mode : the padding mode: CONSTANT, EDGE, SYMMETRIC or REFLECT.
+ * - front_size : number of values padded before the data in each dimension (left/top).
+ * - back_size : number of values padded after the data in each dimension (right/bottom).
  */
 
 class Pad : public DirectMapOp {
  public:
-  Pad(Graph* graph, const std::vector<uint32_t>& front_size,
-      const std::vector<uint32_t>& back_size, int32_t const_val);
+  typedef enum {
+    // Values are cast directly to vsi_nn_pad_mode_e in pad.cc; keep them in sync.
+    PAD_MODE_CONSTANT,
+    PAD_MODE_EDGE,
+    PAD_MODE_SYMMETRIC,
+    PAD_MODE_REFLECT,
+  } pad_mode_type;
 
-  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;
+  Pad(Graph* graph, const std::vector<uint32_t>& front_size,
+      const std::vector<uint32_t>& back_size, int32_t const_val);
+  Pad(Graph* graph, const std::vector<uint32_t>& front_size,
+      const std::vector<uint32_t>& back_size, int32_t const_val,
+      pad_mode_type pad_mode);
+
+  std::shared_ptr<Operation> Clone(
+      std::shared_ptr<Graph>& graph) const override;
 
  protected:
   std::vector<uint32_t> front_size_;
   std::vector<uint32_t> back_size_;
   int32_t const_val_;
+  pad_mode_type pad_mode_;
 };
 }  // namespace ops
 }  // namespace vx
diff --git a/src/tim/transform/ops/pad_layout_inference.h b/src/tim/transform/ops/pad_layout_inference.h
index cb4fe5a..374ae38 100644
--- a/src/tim/transform/ops/pad_layout_inference.h
+++ b/src/tim/transform/ops/pad_layout_inference.h
@@ -51,15 +51,13 @@ class PadLayoutInfer : public OpLayoutInfer {
            sizeof(uint32_t) * dim_num);
     memcpy(back_size.data(), op_->impl()->node()->nn_param.pad.back_size,
            sizeof(uint32_t) * dim_num);
-    int32_t pad_value = op_->impl()->node()->nn_param.pad.const_val;
 
     if (!input_pv->IsAligned()) {
       front_size = MapMultipleAxis(input_pv->AsStdVec(), front_size);
       back_size = MapMultipleAxis(input_pv->AsStdVec(), back_size);
     }
 
-    auto pad = context_->infer_graph_->CreateOperation<vx::ops::Pad>(
-        front_size, back_size, pad_value);
+    auto pad = op_->Clone(context_->infer_graph_);
     auto out_infer = CreateOutputsTensor(input_pv);
     (*pad).BindInput(context_->GetMapedTensor(i_src));
     (*pad).BindOutput(out_infer[0]);
diff --git a/src/tim/vx/ops/pad.cc b/src/tim/vx/ops/pad.cc
index e0f12db..ca818bc 100644
--- a/src/tim/vx/ops/pad.cc
+++ b/src/tim/vx/ops/pad.cc
@@ -29,21 +29,31 @@
 namespace tim {
 namespace vx {
 namespace ops {
+
 Pad::Pad(Graph* graph, const std::vector<uint32_t>& front_size,
          const std::vector<uint32_t>& back_size, int32_t const_val)
+    : Pad(graph, front_size, back_size, const_val, PAD_MODE_CONSTANT) {}
+
+Pad::Pad(Graph* graph, const std::vector<uint32_t>& front_size,
+         const std::vector<uint32_t>& back_size, int32_t const_val,
+         pad_mode_type pad_mode)
     : DirectMapOp(graph, VSI_NN_OP_PAD),
       front_size_(front_size),
       back_size_(back_size),
-      const_val_(const_val) {
+      const_val_(const_val),
+      pad_mode_(pad_mode) {
   this->impl()->node()->nn_param.pad.front_size = front_size_.data();
   this->impl()->node()->nn_param.pad.back_size = back_size_.data();
   this->impl()->node()->nn_param.pad.dim_num = front_size_.size();
-  this->impl()->node()->nn_param.pad.const_val = const_val_;
-  this->impl()->node()->nn_param.pad.mode = VSI_NN_PAD_MODE_CONSTANT;
+  if (pad_mode_ == PAD_MODE_CONSTANT) {
+    this->impl()->node()->nn_param.pad.const_val = const_val_;
+  }
+  this->impl()->node()->nn_param.pad.mode = (vsi_nn_pad_mode_e)pad_mode_;
 }
 
 std::shared_ptr<Operation> Pad::Clone(std::shared_ptr<Graph>& graph) const {
-  return graph->CreateOperation<Pad>(this->front_size_, this->back_size_, this->const_val_);
+  return graph->CreateOperation<Pad>(this->front_size_, this->back_size_,
+                                     this->const_val_, this->pad_mode_);
 }
 
 }  // namespace ops
diff --git a/src/tim/vx/ops/pad_test.cc b/src/tim/vx/ops/pad_test.cc
new file mode 100644
index 0000000..b0a67ca
--- /dev/null
+++ b/src/tim/vx/ops/pad_test.cc
@@ -0,0 +1,143 @@
+/****************************************************************************
+*
+* Copyright (c) 2021 Vivante Corporation
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+* DEALINGS IN THE SOFTWARE.
+*
+*****************************************************************************/
+#include "tim/vx/context.h"
+#include "tim/vx/graph.h"
+#include "tim/vx/ops/pad.h"
+#include "tim/vx/types.h"
+#include "test_utils.h"
+
+#include "gtest/gtest.h"
+
+TEST(Pad, constant) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType input_shape({3, 2});
+  tim::vx::ShapeType output_shape({5, 4});
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
+                                 tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
+                                  tim::vx::TensorAttribute::OUTPUT);
+
+  auto input_tensor = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+
+  std::vector<float> input_data = {
+      0, 1, 2, 3, 4, 5,
+  };
+
+  std::vector<float> golden = {
+      1, 1, 1, 1, 1, 1, 0, 1, 2, 1, 1, 3, 4, 5, 1, 1, 1, 1, 1, 1,
+  };
+
+  EXPECT_TRUE(
+      input_tensor->CopyDataToTensor(input_data.data(), input_data.size() * 4));
+  std::vector<uint32_t> front = {1, 1};
+  std::vector<uint32_t> back = {1, 1};
+  auto op = graph->CreateOperation<tim::vx::ops::Pad>(
+      front, back, 1, tim::vx::ops::Pad::PAD_MODE_CONSTANT);
+  (*op).BindInput(input_tensor).BindOutput(output_tensor);
+
+  EXPECT_TRUE(graph->Compile());
+  EXPECT_TRUE(graph->Run());
+  std::vector<float> output(golden.size());
+
+  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
+  EXPECT_EQ(golden, output);
+}
+
+TEST(Pad, reflect) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType input_shape({3, 2});
+  tim::vx::ShapeType output_shape({5, 4});
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
+                                 tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
+                                  tim::vx::TensorAttribute::OUTPUT);
+
+  auto input_tensor = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+
+  std::vector<float> input_data = {
+      0, 1, 2, 3, 4, 5,
+  };
+
+  std::vector<float> golden = {
+      0, 0, 1, 2, 2, 0, 0, 1, 2, 2, 3, 3, 4, 5, 5, 3, 3, 4, 5, 5,
+  };
+
+  EXPECT_TRUE(
+      input_tensor->CopyDataToTensor(input_data.data(), input_data.size() * 4));
+  std::vector<uint32_t> front = {1, 1};
+  std::vector<uint32_t> back = {1, 1};
+  auto op = graph->CreateOperation<tim::vx::ops::Pad>(
+      front, back, 0, tim::vx::ops::Pad::PAD_MODE_EDGE);
+  (*op).BindInput(input_tensor).BindOutput(output_tensor);
+
+  EXPECT_TRUE(graph->Compile());
+  EXPECT_TRUE(graph->Run());
+  std::vector<float> output(golden.size());
+
+  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
+  EXPECT_EQ(golden, output);
+}
+
+TEST(Pad, edge) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType input_shape({3, 2});
+  tim::vx::ShapeType output_shape({5, 4});
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
+                                 tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
+                                  tim::vx::TensorAttribute::OUTPUT);
+
+  auto input_tensor = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+
+  std::vector<float> input_data = {
+      0, 1, 2, 3, 4, 5,
+  };
+
+  std::vector<float> golden = {0, 0, 1, 2, 2, 0, 0, 1, 2, 2,
+                               3, 3, 4, 5, 5, 3, 3, 4, 5, 5};
+
+  EXPECT_TRUE(
+      input_tensor->CopyDataToTensor(input_data.data(), input_data.size() * 4));
+  std::vector<uint32_t> front = {1, 1};
+  std::vector<uint32_t> back = {1, 1};
+  auto op = graph->CreateOperation<tim::vx::ops::Pad>(
+      front, back, 0, tim::vx::ops::Pad::PAD_MODE_EDGE);
+  (*op).BindInput(input_tensor).BindOutput(output_tensor);
+
+  EXPECT_TRUE(graph->Compile());
+  EXPECT_TRUE(graph->Run());
+  std::vector<float> output(golden.size());
+
+  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
+  EXPECT_EQ(golden, output);
+}
\ No newline at end of file
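
For context, the following is a minimal usage sketch of the new pad_mode constructor outside the gtest harness. It is not part of the change: the shapes, data, and the choice of PAD_MODE_SYMMETRIC are illustrative assumptions, and it only uses APIs that already appear in the diff above.

// Hypothetical standalone sketch: pad a 3x2 FLOAT32 tensor by one value on each
// side of both dimensions using the new pad_mode argument added in this change.
#include <cstdint>
#include <vector>

#include "tim/vx/context.h"
#include "tim/vx/graph.h"
#include "tim/vx/ops/pad.h"
#include "tim/vx/types.h"

int main() {
  auto ctx = tim::vx::Context::Create();
  auto graph = ctx->CreateGraph();

  tim::vx::ShapeType input_shape({3, 2});   // W = 3, H = 2
  tim::vx::ShapeType output_shape({5, 4});  // padded by 1 on every side
  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
                                 tim::vx::TensorAttribute::INPUT);
  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
                                  tim::vx::TensorAttribute::OUTPUT);
  auto input_tensor = graph->CreateTensor(input_spec);
  auto output_tensor = graph->CreateTensor(output_spec);

  std::vector<float> input_data = {0, 1, 2, 3, 4, 5};
  input_tensor->CopyDataToTensor(input_data.data(),
                                 input_data.size() * sizeof(float));

  // const_val (0 here) is only consulted for PAD_MODE_CONSTANT; the other
  // modes take their padding values from the input data.
  std::vector<uint32_t> front = {1, 1};
  std::vector<uint32_t> back = {1, 1};
  auto pad = graph->CreateOperation<tim::vx::ops::Pad>(
      front, back, 0, tim::vx::ops::Pad::PAD_MODE_SYMMETRIC);
  (*pad).BindInput(input_tensor).BindOutput(output_tensor);

  if (!graph->Compile() || !graph->Run()) return 1;

  std::vector<float> output(5 * 4);
  output_tensor->CopyDataFromTensor(output.data());
  return 0;
}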