From 5c4800ab33dbfdc4109948350158953c9cb065cf Mon Sep 17 00:00:00 2001 From: chxin66 <57057788+chxin66@users.noreply.github.com> Date: Wed, 20 Apr 2022 21:44:43 +0800 Subject: [PATCH] Fixed pad layout inference bug & added one stridedslice case (#370) Signed-off-by: Chen Xin Co-authored-by: Chen Xin --- src/tim/transform/ops/pad_layout_inference.h | 4 +- .../transform/pad_layout_inference_test.cc | 69 ++++++++++++++++++ .../stridedslice_layout_inference_test.cc | 70 ++++++++++++++++++- 3 files changed, 139 insertions(+), 4 deletions(-) create mode 100644 src/tim/transform/pad_layout_inference_test.cc diff --git a/src/tim/transform/ops/pad_layout_inference.h b/src/tim/transform/ops/pad_layout_inference.h index 374ae38..cb4fe5a 100644 --- a/src/tim/transform/ops/pad_layout_inference.h +++ b/src/tim/transform/ops/pad_layout_inference.h @@ -51,13 +51,15 @@ class PadLayoutInfer : public OpLayoutInfer { sizeof(uint32_t) * dim_num); memcpy(back_size.data(), op_->impl()->node()->nn_param.pad.back_size, sizeof(uint32_t) * dim_num); + int32_t pad_value = op_->impl()->node()->nn_param.pad.const_val; if (!input_pv->IsAligned()) { front_size = MapMultipleAxis(input_pv->AsStdVec(), front_size); back_size = MapMultipleAxis(input_pv->AsStdVec(), back_size); } - auto pad = op_->Clone(context_->infer_graph_); + auto pad = context_->infer_graph_->CreateOperation<vx::ops::Pad>( front_size, back_size, pad_value); auto out_infer = CreateOutputsTensor(input_pv); (*pad).BindInput(context_->GetMapedTensor(i_src)); (*pad).BindOutput(out_infer[0]); diff --git a/src/tim/transform/pad_layout_inference_test.cc b/src/tim/transform/pad_layout_inference_test.cc new file mode 100644 index 0000000..c354eb7 --- /dev/null +++ b/src/tim/transform/pad_layout_inference_test.cc @@ -0,0 +1,69 @@ +#include "tim/vx/context.h" +#include "tim/vx/graph.h" +#include "tim/vx/ops.h" +#include "tim/transform/layout_inference.h" + +#include "gtest/gtest.h" + +TEST(Pad, layout_inference) { + auto ctx = 
tim::vx::Context::Create(); + auto graph = ctx->CreateGraph(); + + tim::vx::ShapeType input_shape({32, 112, 112, 1}); //cwhn + tim::vx::ShapeType kernel_shape({32, 2, 2, 32}); //iwho + // tim::vx::ShapeType conv2dout_shape({32, 111, 111, 1}); //iwho + tim::vx::ShapeType output_shape({32, 112, 112, 1}); + + tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape, + tim::vx::TensorAttribute::INPUT); + tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape, + tim::vx::TensorAttribute::CONSTANT); + tim::vx::TensorSpec conv2dout_spec(tim::vx::DataType::FLOAT32, {0, 0, 0, 0}, + tim::vx::TensorAttribute::TRANSIENT); + tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape, + tim::vx::TensorAttribute::OUTPUT); + + auto input_tensor = graph->CreateTensor(input_spec); + auto conv2dout_tensor = graph->CreateTensor(conv2dout_spec); + auto output_tensor = graph->CreateTensor(output_spec); + + std::vector<float> in_data; + for (uint32_t i = 0; i < 32 * 112 * 112; ++i) { + in_data.push_back(0.5); + }; + std::vector<float> kernel_data; + for (uint32_t i = 0; i < 4 * 32 * 32; ++i) { + kernel_data.push_back(0.5); + }; + + auto kernel_tensor = graph->CreateTensor(kernel_spec, kernel_data.data()); + + std::array<uint32_t, 2> stride({1, 1}); + std::array<uint32_t, 2> dilation({1, 1}); + + auto op1 = graph->CreateOperation<tim::vx::ops::Conv2d>( + tim::vx::PadType::VALID, stride, dilation, 0, tim::vx::DataLayout::CWHN); + (*op1) + .BindInputs({input_tensor, kernel_tensor}) + .BindOutputs({conv2dout_tensor}); + + std::vector<uint32_t> front_size = {0, 0, 0, 0}; + std::vector<uint32_t> back_size = {0, 1, 1, 0}; + + auto op2 = + graph->CreateOperation<tim::vx::ops::Pad>(front_size, back_size, 0); + (*op2).BindInputs({conv2dout_tensor}).BindOutputs({output_tensor}); + + auto transform = tim::transform::LayoutInference(graph, ctx); + auto infer_graph = transform.first; + auto graph_io_map = transform.second; + auto infer_input = graph_io_map[graph->InputsTensor()[0]]; + auto infer_output = graph_io_map[graph->OutputsTensor()[0]]; + 
infer_input->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float)); + + EXPECT_TRUE(infer_graph->Compile()); + EXPECT_TRUE(infer_graph->Run()); + + std::vector<float> output(32*112*112); + EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data())); +} diff --git a/src/tim/transform/stridedslice_layout_inference_test.cc b/src/tim/transform/stridedslice_layout_inference_test.cc index 0f678a4..52afd26 100644 --- a/src/tim/transform/stridedslice_layout_inference_test.cc +++ b/src/tim/transform/stridedslice_layout_inference_test.cc @@ -105,9 +105,7 @@ TEST(StridedSlice, endmask_6_shrinkmask_5) { std::vector<float> kernel_data = { 1, 0, 3, 4, 4, 2, 1, 2, 3, 1, 3, 1, 1, 3, 1, 0, 2, 0, 3, 1, 4, 0, 0, 2, }; - std::vector<float> golden = { - 55, 30, 55, 30, 55, 30, 55, 30, 55, 30 - }; + std::vector<float> golden = {55, 30, 55, 30, 55, 30, 55, 30, 55, 30}; auto kernel_tensor = graph->CreateTensor(kernel_spec, kernel_data.data()); // The following parameters have been reverse @@ -211,3 +209,69 @@ TEST(StridedSlice, endmask_1_shrinkmask_1) { EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data())); EXPECT_EQ(golden, output); } + +TEST(StridedSlice, beginmask_9_endmask_15) { + auto ctx = tim::vx::Context::Create(); + auto graph = ctx->CreateGraph(); + + tim::vx::ShapeType input_shape({44, 58, 58, 1}); //tflite layout, cwhn + tim::vx::ShapeType kernel_shape({44,2,2,44}); //tflite layout, iwho +// tim::vx::ShapeType conv2dout_shape({44, 57, 57, 1}); //cwhn + tim::vx::ShapeType output_shape({44, 56, 56, 1}); + + tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape, + tim::vx::TensorAttribute::INPUT); + tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape, + tim::vx::TensorAttribute::CONSTANT); + tim::vx::TensorSpec conv2dout_spec(tim::vx::DataType::FLOAT32, + {0,0,0,0}, + tim::vx::TensorAttribute::TRANSIENT); + tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape, + tim::vx::TensorAttribute::OUTPUT); + + auto input_tensor = 
graph->CreateTensor(input_spec); + auto conv2dout_tensor = graph->CreateTensor(conv2dout_spec); + auto output_tensor = graph->CreateTensor(output_spec); + + std::vector<float> in_data; + for (uint32_t i = 0; i < 44*58*58; ++i) { + in_data.push_back(0.5); + }; + std::vector<float> kernel_data; + for (uint32_t i = 0; i < 44*4*44; ++i) { + kernel_data.push_back(0.5); + }; + + auto kernel_tensor = graph->CreateTensor(kernel_spec, kernel_data.data()); + + // The following parameters have been reverse + std::vector<int32_t> begin = {0, 1, 1, 0}; + std::vector<int32_t> end = {0, 0, 0, 0}; + std::vector<int32_t> strides = {1, 1, 1, 1}; + uint32_t MASK_BEGIN = 0b1001, MASK_END = 0b1111, MASK_SHRINK = 0b0000; + + std::array<uint32_t, 2> stride({1, 1}); + std::array<uint32_t, 2> dilation({1, 1}); + + auto op1 = graph->CreateOperation<tim::vx::ops::Conv2d>( + tim::vx::PadType::VALID, stride, dilation, 0, tim::vx::DataLayout::CWHN); + (*op1) + .BindInputs({input_tensor, kernel_tensor}) + .BindOutputs({conv2dout_tensor}); + auto op2 = graph->CreateOperation<tim::vx::ops::StridedSlice>( + begin, end, strides, MASK_BEGIN, MASK_END, MASK_SHRINK); + (*op2).BindInputs({conv2dout_tensor}).BindOutputs({output_tensor}); + + auto transform = tim::transform::LayoutInference(graph, ctx); + auto infer_graph = transform.first; + auto graph_io_map = transform.second; + auto infer_input = graph_io_map[graph->InputsTensor()[0]]; + auto infer_output = graph_io_map[graph->OutputsTensor()[0]]; + infer_input->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float)); + + EXPECT_TRUE(infer_graph->Compile()); + EXPECT_TRUE(infer_graph->Run()); + + std::vector<float> output(44*56*56); + EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data())); +}