Fixed average pool layout infer

Signed-off-by: Chen Xin <jack.chen@verisilicon.com>
This commit is contained in:
Chen Xin 2022-11-08 10:41:10 +08:00 committed by Sven
parent 883334e1bb
commit 9fe7b955e5
3 changed files with 122 additions and 2 deletions

View File

@ -0,0 +1,59 @@
#include "tim/vx/context.h"
#include "tim/vx/graph.h"
#include "tim/vx/ops.h"
#include "tim/transform/layout_inference.h"
#include "gtest/gtest.h"
// Verifies that layout inference correctly handles an AVG_ANDROID Pool2d
// whose graph is authored in CWHN layout: the inferred graph must insert
// the permutes needed to run on the WHCN-native backend and still produce
// the expected result.
TEST(AVG_ANDROID, layout_infer_) {
  auto ctx = tim::vx::Context::Create();
  auto graph = ctx->CreateGraph();
  tim::vx::ShapeType in_shape({3, 60, 52, 5});   // CWHN
  tim::vx::ShapeType out_shape({3, 13, 11, 5});  // CWHN
  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, in_shape,
                                 tim::vx::TensorAttribute::INPUT);
  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, out_shape,
                                  tim::vx::TensorAttribute::OUTPUT);
  auto input_tensor = graph->CreateTensor(input_spec);
  auto output_tensor = graph->CreateTensor(output_spec);

  // Averaging a constant all-ones field yields the same constant, so the
  // golden output is all ones too. Build both buffers directly at the tensor
  // element counts (3*60*52*5 in, 3*13*11*5 out) instead of the original
  // quadruple loops, whose inner index shadowed the outer `k`.
  std::vector<float> in_data(3u * 60u * 52u * 5u, 1.0f);
  std::vector<float> golden(3u * 13u * 11u * 5u, 1.0f);

  std::array<uint32_t, 4> pad = {50, 50, 50, 50};
  std::array<uint32_t, 2> ksize = {100, 100};
  std::array<uint32_t, 2> stride = {5, 5};
  auto op = graph->CreateOperation<tim::vx::ops::Pool2d>(
      tim::vx::PoolType::AVG_ANDROID, pad, ksize, stride,
      tim::vx::RoundType::FLOOR, tim::vx::DataLayout::CWHN);
  (*op).BindInputs({input_tensor}).BindOutputs({output_tensor});

  std::vector<float> output(golden.size());
  // LayoutInference returns {inferred graph, mapping from source-graph
  // tensors to inferred-graph tensors}; run I/O through the mapped tensors.
  auto transform = tim::transform::LayoutInference(graph, ctx);
  auto infer_graph = transform.first;
  auto graph_io_map = transform.second;
  auto infer_input = graph_io_map[graph->InputsTensor()[0]];
  auto infer_output = graph_io_map[graph->OutputsTensor()[0]];
  EXPECT_TRUE(infer_graph->Compile());
  EXPECT_TRUE(infer_input->CopyDataToTensor(in_data.data(),
                                            in_data.size() * sizeof(float)));
  EXPECT_TRUE(infer_graph->Run());
  EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data()));
  EXPECT_EQ(golden, output);
}

View File

@ -55,11 +55,14 @@ class Pool2dLayoutInfer : public OpLayoutInfer {
context_->SetPermuteVector(input_tensors[0], required_pv);
}
std::shared_ptr<tim::vx::Operation> pool2d;
std::array<uint32_t, 4> pad;
auto pool_type = TranslatePoolType(op_->impl()->node()->nn_param.pool.type);
auto round_type =
TranslateRoundType(op_->impl()->node()->nn_param.pool.round_type);
auto pad_type =
TranslatePadType(op_->impl()->node()->nn_param.pool.pad_type);
std::array<uint32_t, 2> ksize = {
op_->impl()->node()->nn_param.pool.ksize[0],
op_->impl()->node()->nn_param.pool.ksize[1]};
@ -67,8 +70,17 @@ class Pool2dLayoutInfer : public OpLayoutInfer {
op_->impl()->node()->nn_param.pool.stride[0],
op_->impl()->node()->nn_param.pool.stride[1]};
auto pool2d = context_->infer_graph_->CreateOperation<vx::ops::Pool2d>(
pool_type, pad_type, ksize, stride, round_type, vx::DataLayout::WHCN);
if (pad_type == tim::vx::PadType::AUTO) {
pad = {op_->impl()->node()->nn_param.pool.pad[0],
op_->impl()->node()->nn_param.pool.pad[1],
op_->impl()->node()->nn_param.pool.pad[2],
op_->impl()->node()->nn_param.pool.pad[3]};
pool2d = context_->infer_graph_->CreateOperation<vx::ops::Pool2d>(
pool_type, pad, ksize, stride, round_type, vx::DataLayout::WHCN);
} else {
pool2d = context_->infer_graph_->CreateOperation<vx::ops::Pool2d>(
pool_type, pad_type, ksize, stride, round_type, vx::DataLayout::WHCN);
}
auto otensor_infer = CreateOutputsTensor(required_pv);
(*pool2d).BindInput(context_->GetMapedTensor(input_tensors[0]));
(*pool2d).BindOutput(otensor_infer[0]);

View File

@ -301,6 +301,55 @@ TEST(AVG_ANDROID, shape_60_52_3_5_fp32_kernel_35_stride_5) {
ArraysMatch(golden, output, 1e-5f);
}
// AVG_ANDROID Pool2d on a WHCN fp32 tensor with explicit symmetric padding.
// NOTE(review): the test name says kernel_50 but ksize is {100, 100} —
// confirm which was intended; behavior is kept as-is here.
TEST(AVG_ANDROID, shape_60_52_3_5_fp32_kernel_50_stride_5) {
  auto ctx = tim::vx::Context::Create();
  auto graph = ctx->CreateGraph();
  tim::vx::ShapeType in_shape({60, 52, 3, 5});   // WHCN
  tim::vx::ShapeType out_shape({13, 11, 3, 5});  // WHCN
  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, in_shape,
                                 tim::vx::TensorAttribute::INPUT);
  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, out_shape,
                                  tim::vx::TensorAttribute::OUTPUT);
  auto input_tensor = graph->CreateTensor(input_spec);
  auto output_tensor = graph->CreateTensor(output_spec);

  // Averaging a constant all-ones field yields the same constant, so the
  // golden output is all ones too. Build both buffers directly at the tensor
  // element counts (60*52*3*5 in, 13*11*3*5 out) instead of the original
  // quadruple loops, whose inner index shadowed the outer `k`.
  std::vector<float> in_data(60u * 52u * 3u * 5u, 1.0f);
  std::vector<float> golden(13u * 11u * 3u * 5u, 1.0f);

  std::array<uint32_t, 4> pad = {50, 50, 50, 50};
  std::array<uint32_t, 2> ksize = {100, 100};
  std::array<uint32_t, 2> stride = {5, 5};
  auto op = graph->CreateOperation<tim::vx::ops::Pool2d>(
      tim::vx::PoolType::AVG_ANDROID, pad, ksize, stride,
      tim::vx::RoundType::FLOOR, tim::vx::DataLayout::WHCN);
  (*op).BindInputs({input_tensor}).BindOutputs({output_tensor});

  std::vector<float> output(golden.size());
  EXPECT_TRUE(graph->Compile());
  EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(),
                                             in_data.size() * sizeof(float)));
  EXPECT_TRUE(graph->Run());
  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
  ArraysMatch(golden, output, 1e-5f);
}
TEST(AVG_ANDROID, shape_60_52_3_5_uint8_kernel_35_stride_5) {
auto ctx = tim::vx::Context::Create();
auto graph = ctx->CreateGraph();