From 9fe7b955e514886508a711755c5672b526fcced2 Mon Sep 17 00:00:00 2001
From: Chen Xin
Date: Tue, 8 Nov 2022 10:41:10 +0800
Subject: [PATCH] Fixed average pool layout infer

Signed-off-by: Chen Xin
---
 .../average_pool_layout_infer_test.cc         | 59 +++++++++++++++++++
 .../transform/ops/pool2d_layout_inference.h   | 16 ++++-
 src/tim/vx/ops/avg_pool_test.cc               | 49 +++++++++++++++
 3 files changed, 122 insertions(+), 2 deletions(-)
 create mode 100644 src/tim/transform/average_pool_layout_infer_test.cc

diff --git a/src/tim/transform/average_pool_layout_infer_test.cc b/src/tim/transform/average_pool_layout_infer_test.cc
new file mode 100644
index 0000000..0ff8115
--- /dev/null
+++ b/src/tim/transform/average_pool_layout_infer_test.cc
@@ -0,0 +1,59 @@
+#include "tim/vx/context.h"
+#include "tim/vx/graph.h"
+#include "tim/vx/ops.h"
+#include "tim/transform/layout_inference.h"
+
+#include "gtest/gtest.h"
+TEST(AVG_ANDROID, layout_infer_) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType in_shape({3, 60, 52, 5});  //CWHN
+  tim::vx::ShapeType out_shape({3, 13, 11, 5});
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32,
+                                 in_shape, tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32,
+                                  out_shape, tim::vx::TensorAttribute::OUTPUT);
+  auto input_tensor = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+  std::vector<float> in_data;
+  for (int i = 0; i < 5; i++) {
+    for (int j = 0; j < 3; j++) {
+      for (int k = 0; k < 52; k++) {
+        for (int m = 0; m < 60; m++) {
+          in_data.push_back(1);
+        }
+      }
+    }
+  }
+  std::vector<float> golden;
+  for (int i = 0; i < 5; i++) {
+    for (int j = 0; j < 3; j++) {
+      for (int k = 0; k < 11; k++) {
+        for (int m = 0; m < 13; m++) {
+          golden.push_back(1);
+        }
+      }
+    }
+  }
+  std::array<uint32_t, 4> pad = {50, 50, 50, 50};
+  std::array<uint32_t, 2> ksize = {100, 100};
+  std::array<uint32_t, 2> stride = {5, 5};
+  auto op = graph->CreateOperation<tim::vx::ops::Pool2d>(tim::vx::PoolType::AVG_ANDROID,
+      pad, ksize, stride, tim::vx::RoundType::FLOOR, tim::vx::DataLayout::CWHN);
+  (*op).BindInputs({input_tensor}).BindOutputs({output_tensor});
+  std::vector<float> output(golden.size());
+
+  auto transform = tim::transform::LayoutInference(graph, ctx);
+  auto infer_graph = transform.first;
+  auto graph_io_map = transform.second;
+  auto infer_input = graph_io_map[graph->InputsTensor()[0]];
+  auto infer_output = graph_io_map[graph->OutputsTensor()[0]];
+
+  EXPECT_TRUE(infer_graph->Compile());
+  EXPECT_TRUE(infer_input->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float)));
+  EXPECT_TRUE(infer_graph->Run());
+  EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data()));
+
+  EXPECT_EQ(golden, output);
+}
\ No newline at end of file
diff --git a/src/tim/transform/ops/pool2d_layout_inference.h b/src/tim/transform/ops/pool2d_layout_inference.h
index 810f944..1d9581b 100644
--- a/src/tim/transform/ops/pool2d_layout_inference.h
+++ b/src/tim/transform/ops/pool2d_layout_inference.h
@@ -55,11 +55,14 @@ class Pool2dLayoutInfer : public OpLayoutInfer {
       context_->SetPermuteVector(input_tensors[0], required_pv);
     }
+    std::shared_ptr<vx::Operation> pool2d;
+    std::array<uint32_t, 4> pad;
     auto pool_type =
         TranslatePoolType(op_->impl()->node()->nn_param.pool.type);
     auto round_type =
         TranslateRoundType(op_->impl()->node()->nn_param.pool.round_type);
     auto pad_type =
         TranslatePadType(op_->impl()->node()->nn_param.pool.pad_type);
+
     std::array<uint32_t, 2> ksize = {
         op_->impl()->node()->nn_param.pool.ksize[0],
         op_->impl()->node()->nn_param.pool.ksize[1]};
@@ -67,8 +70,17 @@ class Pool2dLayoutInfer : public OpLayoutInfer {
         op_->impl()->node()->nn_param.pool.stride[0],
         op_->impl()->node()->nn_param.pool.stride[1]};
-    auto pool2d = context_->infer_graph_->CreateOperation<vx::ops::Pool2d>(
-        pool_type, pad_type, ksize, stride, round_type, vx::DataLayout::WHCN);
+    if (pad_type == tim::vx::PadType::AUTO) {
+      pad = {op_->impl()->node()->nn_param.pool.pad[0],
+             op_->impl()->node()->nn_param.pool.pad[1],
+             op_->impl()->node()->nn_param.pool.pad[2],
+             op_->impl()->node()->nn_param.pool.pad[3]};
+      pool2d = context_->infer_graph_->CreateOperation<vx::ops::Pool2d>(
+          pool_type, pad, ksize, stride, round_type, vx::DataLayout::WHCN);
+    } else {
+      pool2d = context_->infer_graph_->CreateOperation<vx::ops::Pool2d>(
+          pool_type, pad_type, ksize, stride, round_type, vx::DataLayout::WHCN);
+    }
     auto otensor_infer = CreateOutputsTensor(required_pv);
     (*pool2d).BindInput(context_->GetMapedTensor(input_tensors[0]));
     (*pool2d).BindOutput(otensor_infer[0]);
diff --git a/src/tim/vx/ops/avg_pool_test.cc b/src/tim/vx/ops/avg_pool_test.cc
index 807975f..81756bc 100644
--- a/src/tim/vx/ops/avg_pool_test.cc
+++ b/src/tim/vx/ops/avg_pool_test.cc
@@ -301,6 +301,55 @@ TEST(AVG_ANDROID, shape_60_52_3_5_fp32_kernel_35_stride_5) {
   ArraysMatch(golden, output, 1e-5f);
 }
 
+TEST(AVG_ANDROID, shape_60_52_3_5_fp32_kernel_50_stride_5) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType in_shape({60, 52, 3, 5});  //WHCN
+  tim::vx::ShapeType out_shape({13, 11, 3, 5});
+
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32,
+                                 in_shape, tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32,
+                                  out_shape, tim::vx::TensorAttribute::OUTPUT);
+  auto input_tensor = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+  std::vector<float> in_data;
+  for (int i = 0; i < 5; i++) {
+    for (int j = 0; j < 3; j++) {
+      for (int k = 0; k < 52; k++) {
+        for (int m = 0; m < 60; m++) {
+          in_data.push_back(1);
+        }
+      }
+    }
+  }
+  std::vector<float> golden;
+  for (int i = 0; i < 5; i++) {
+    for (int j = 0; j < 3; j++) {
+      for (int k = 0; k < 11; k++) {
+        for (int m = 0; m < 13; m++) {
+          golden.push_back(1);
+        }
+      }
+    }
+  }
+  std::array<uint32_t, 4> pad = {50, 50, 50, 50};
+  std::array<uint32_t, 2> ksize = {100, 100};
+  std::array<uint32_t, 2> stride = {5, 5};
+  auto op = graph->CreateOperation<tim::vx::ops::Pool2d>(tim::vx::PoolType::AVG_ANDROID,
+      pad, ksize, stride, tim::vx::RoundType::FLOOR, tim::vx::DataLayout::WHCN);
+  (*op).BindInputs({input_tensor}).BindOutputs({output_tensor});
+  std::vector<float> output(golden.size());
+
+  EXPECT_TRUE(graph->Compile());
+  EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float)));
+  EXPECT_TRUE(graph->Run());
+  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
+
+  ArraysMatch(golden, output, 1e-5f);
+}
+
 TEST(AVG_ANDROID, shape_60_52_3_5_uint8_kernel_35_stride_5) {
   auto ctx = tim::vx::Context::Create();
   auto graph = ctx->CreateGraph();