Fixed pad layout inference bug & added one stridedslice case (#370)

Signed-off-by: Chen Xin <jack.chen@verisilicon.com>

Co-authored-by: Chen Xin <jack.chen@verisilicon.com>
This commit is contained in:
chxin66 2022-04-20 21:44:43 +08:00 committed by GitHub
parent b5c4514b94
commit 5c4800ab33
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 139 additions and 4 deletions

View File

@ -51,13 +51,15 @@ class PadLayoutInfer : public OpLayoutInfer {
sizeof(uint32_t) * dim_num);
memcpy(back_size.data(), op_->impl()->node()->nn_param.pad.back_size,
sizeof(uint32_t) * dim_num);
int32_t pad_value = op_->impl()->node()->nn_param.pad.const_val;
if (!input_pv->IsAligned()) {
front_size = MapMultipleAxis(input_pv->AsStdVec(), front_size);
back_size = MapMultipleAxis(input_pv->AsStdVec(), back_size);
}
auto pad = op_->Clone(context_->infer_graph_);
auto pad = context_->infer_graph_->CreateOperation<vx::ops::Pad>(
front_size, back_size, pad_value);
auto out_infer = CreateOutputsTensor(input_pv);
(*pad).BindInput(context_->GetMapedTensor(i_src));
(*pad).BindOutput(out_infer[0]);

View File

@ -0,0 +1,69 @@
#include "tim/vx/context.h"
#include "tim/vx/graph.h"
#include "tim/vx/ops.h"
#include "tim/transform/layout_inference.h"
#include "gtest/gtest.h"
TEST(Pad, layout_inference) {
  auto ctx = tim::vx::Context::Create();
  auto graph = ctx->CreateGraph();

  // Shapes use the TFLite conventions noted in the original: CWHN / IWHO.
  tim::vx::ShapeType input_shape({32, 112, 112, 1});  //cwhn
  tim::vx::ShapeType kernel_shape({32, 2, 2, 32});    //iwho
  // Conv2d(VALID, 2x2) produces {32, 111, 111, 1}; Pad restores 112x112.
  tim::vx::ShapeType output_shape({32, 112, 112, 1});

  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
                                 tim::vx::TensorAttribute::INPUT);
  tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape,
                                  tim::vx::TensorAttribute::CONSTANT);
  tim::vx::TensorSpec conv2dout_spec(tim::vx::DataType::FLOAT32, {0, 0, 0, 0},
                                     tim::vx::TensorAttribute::TRANSIENT);
  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
                                  tim::vx::TensorAttribute::OUTPUT);

  auto input_tensor = graph->CreateTensor(input_spec);
  auto conv2dout_tensor = graph->CreateTensor(conv2dout_spec);
  auto output_tensor = graph->CreateTensor(output_spec);

  // Constant-fill test data; sizes match the input and kernel shapes above.
  std::vector<float> input_values(32 * 112 * 112, 0.5f);
  std::vector<float> kernel_values(4 * 32 * 32, 0.5f);
  auto kernel_tensor = graph->CreateTensor(kernel_spec, kernel_values.data());

  std::array<uint32_t, 2> conv_stride({1, 1});
  std::array<uint32_t, 2> conv_dilation({1, 1});
  auto conv_op = graph->CreateOperation<tim::vx::ops::Conv2d>(
      tim::vx::PadType::VALID, conv_stride, conv_dilation, 0,
      tim::vx::DataLayout::CWHN);
  (*conv_op)
      .BindInputs({input_tensor, kernel_tensor})
      .BindOutputs({conv2dout_tensor});

  // Pad one element at the back of the W and H axes (cwhn order).
  std::vector<uint32_t> front_size = {0, 0, 0, 0};
  std::vector<uint32_t> back_size = {0, 1, 1, 0};
  auto pad_op =
      graph->CreateOperation<tim::vx::ops::Pad>(front_size, back_size, 0);
  (*pad_op).BindInputs({conv2dout_tensor}).BindOutputs({output_tensor});

  // Run layout inference and map the original graph I/O to the inferred graph.
  auto transform = tim::transform::LayoutInference(graph, ctx);
  auto infer_graph = transform.first;
  auto graph_io_map = transform.second;
  auto infer_input = graph_io_map[graph->InputsTensor()[0]];
  auto infer_output = graph_io_map[graph->OutputsTensor()[0]];

  infer_input->CopyDataToTensor(input_values.data(),
                                input_values.size() * sizeof(float));
  EXPECT_TRUE(infer_graph->Compile());
  EXPECT_TRUE(infer_graph->Run());

  std::vector<float> output(32 * 112 * 112);
  EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data()));
}

View File

@ -105,9 +105,7 @@ TEST(StridedSlice, endmask_6_shrinkmask_5) {
std::vector<float> kernel_data = {
1, 0, 3, 4, 4, 2, 1, 2, 3, 1, 3, 1, 1, 3, 1, 0, 2, 0, 3, 1, 4, 0, 0, 2,
};
std::vector<float> golden = {
55, 30, 55, 30, 55, 30, 55, 30, 55, 30
};
std::vector<float> golden = {55, 30, 55, 30, 55, 30, 55, 30, 55, 30};
auto kernel_tensor = graph->CreateTensor(kernel_spec, kernel_data.data());
// The following parameters have been reversed
@ -211,3 +209,69 @@ TEST(StridedSlice, endmask_1_shrinkmask_1) {
EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data()));
EXPECT_EQ(golden, output);
}
TEST(StridedSlice, beginmask_9_endmask_15) {
  auto ctx = tim::vx::Context::Create();
  auto graph = ctx->CreateGraph();

  // Shapes use the TFLite conventions noted in the original: CWHN / IWHO.
  tim::vx::ShapeType input_shape({44, 58, 58, 1});  //tflite layout, cwhn
  tim::vx::ShapeType kernel_shape({44, 2, 2, 44});  //tflite layout, iwho
  // Conv2d(VALID, 2x2) produces {44, 57, 57, 1}; the slice trims W/H to 56.
  tim::vx::ShapeType output_shape({44, 56, 56, 1});

  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
                                 tim::vx::TensorAttribute::INPUT);
  tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape,
                                  tim::vx::TensorAttribute::CONSTANT);
  tim::vx::TensorSpec conv2dout_spec(tim::vx::DataType::FLOAT32, {0, 0, 0, 0},
                                     tim::vx::TensorAttribute::TRANSIENT);
  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
                                  tim::vx::TensorAttribute::OUTPUT);

  auto input_tensor = graph->CreateTensor(input_spec);
  auto conv2dout_tensor = graph->CreateTensor(conv2dout_spec);
  auto output_tensor = graph->CreateTensor(output_spec);

  // Constant-fill test data; sizes match the input and kernel shapes above.
  std::vector<float> input_values(44 * 58 * 58, 0.5f);
  std::vector<float> kernel_values(44 * 4 * 44, 0.5f);
  auto kernel_tensor = graph->CreateTensor(kernel_spec, kernel_values.data());

  // The following parameters have been reversed relative to TFLite order.
  std::vector<int> begin = {0, 1, 1, 0};
  std::vector<int> end = {0, 0, 0, 0};
  std::vector<int> strides = {1, 1, 1, 1};
  uint32_t MASK_BEGIN = 0b1001, MASK_END = 0b1111, MASK_SHRINK = 0b0000;

  std::array<uint32_t, 2> conv_stride({1, 1});
  std::array<uint32_t, 2> conv_dilation({1, 1});
  auto conv_op = graph->CreateOperation<tim::vx::ops::Conv2d>(
      tim::vx::PadType::VALID, conv_stride, conv_dilation, 0,
      tim::vx::DataLayout::CWHN);
  (*conv_op)
      .BindInputs({input_tensor, kernel_tensor})
      .BindOutputs({conv2dout_tensor});

  auto slice_op = graph->CreateOperation<tim::vx::ops::StridedSlice>(
      begin, end, strides, MASK_BEGIN, MASK_END, MASK_SHRINK);
  (*slice_op).BindInputs({conv2dout_tensor}).BindOutputs({output_tensor});

  // Run layout inference and map the original graph I/O to the inferred graph.
  auto transform = tim::transform::LayoutInference(graph, ctx);
  auto infer_graph = transform.first;
  auto graph_io_map = transform.second;
  auto infer_input = graph_io_map[graph->InputsTensor()[0]];
  auto infer_output = graph_io_map[graph->OutputsTensor()[0]];

  infer_input->CopyDataToTensor(input_values.data(),
                                input_values.size() * sizeof(float));
  EXPECT_TRUE(infer_graph->Compile());
  EXPECT_TRUE(infer_graph->Run());

  std::vector<float> output(44 * 56 * 56);
  EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data()));
}