Fixed grouped_conv2d layout inference & added test cases

Added test cases for conv2d/grouped_conv2d.

Type: Bug fix

Signed-off-by: Chen Xin <jack.chen@verisilicon.com>
Chen Xin 2023-01-11 11:41:37 +08:00 committed by Sven
parent d778dfb82d
commit 5e7f5cecea
5 changed files with 117 additions and 6 deletions
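
The root cause: the layout-inference pass downcast its operation handle with std::static_pointer_cast<vx::ops::Conv2d> even though the object is a GroupedConv2d, so the later KernelDataLayout() call read through the wrong type (undefined behavior; static_pointer_cast performs no runtime check). A defensive sketch of the same lookup, illustrative only and not part of this commit, would use dynamic_pointer_cast:

#include <cassert>
#include <memory>
// Hypothetical checked variant of the fixed line; the commit itself simply
// static-casts to the correct type, vx::ops::GroupedConv2d.
auto grouped = std::dynamic_pointer_cast<vx::ops::GroupedConv2d>(op_);
assert(grouped && "op_ does not hold a GroupedConv2d");
auto kernel_layout = grouped->KernelDataLayout();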

View File

@@ -1,6 +1,6 @@
#include "tim/vx/context.h"
#include "tim/vx/graph.h"
#include "tim/vx/ops/conv2d.h"
#include "tim/vx/ops.h"
#include "tim/transform/layout_inference.h"
#include "gtest/gtest.h"
@@ -117,3 +117,57 @@ TEST(LayoutInference, weight_as_input_conv2d) {
tim::vx::ShapeType expect_shape({2, 2, 1, 1});
EXPECT_EQ(infer_out_shape, expect_shape);
}
TEST(GroupedConv2d, kernel_bigger_than_input_SAME) {
auto ctx = tim::vx::Context::Create();
auto src_graph = ctx->CreateGraph();
tim::vx::ShapeType input_shape({2, 3, 2, 1}); // WHCN
tim::vx::ShapeType kernel_shape({1, 3, 2, 2}); // IcWHOc, Ic * groups = C
tim::vx::ShapeType bias_shape({2});
tim::vx::ShapeType output_shape({2, 3, 2, 1});
tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
tim::vx::TensorAttribute::INPUT);
tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape,
tim::vx::TensorAttribute::CONSTANT);
tim::vx::TensorSpec bias_spec(tim::vx::DataType::FLOAT32, bias_shape,
tim::vx::TensorAttribute::CONSTANT);
tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
tim::vx::TensorAttribute::OUTPUT);
std::vector<float> in_data = {1.0f, 3.0f, 4.0f, 2.0f, 2.0f, 3.0f,
2.0f, 4.0f, 3.0f, 1.0f, 3.0f, 3.0f};
std::vector<float> weight = {100.0f, 20.0f, 1.0f, 200.0f, 10.0f, 2.0f,
200.0f, 30.0f, 1.0f, 100.0f, 20.0f, 3.0f};
std::vector<float> bias = {500.0f, -1000.0f};
std::vector<float> golden = {567.0f, 1480.0f, 608.0f, 1370.0f,
543.0f, 760.0f, -873.0f, -160.0f,
-840.0f, -10.0f, -907.0f, -310.0f};
auto input_tensor = src_graph->CreateTensor(input_spec);
auto weight_tensor = src_graph->CreateTensor(kernel_spec, weight.data());
auto bias_tensor = src_graph->CreateTensor(bias_spec, bias.data());
auto output_tensor = src_graph->CreateTensor(output_spec);
std::array<uint32_t, 2> dilations = {0, 0};
std::array<uint32_t, 2> strides = {1, 1};
auto op = src_graph->CreateOperation<tim::vx::ops::GroupedConv2d>(
tim::vx::PadType::SAME, strides, dilations, 2, tim::vx::DataLayout::WHCN,
tim::vx::DataLayout::IcWHOc);
(*op).BindInputs({input_tensor, weight_tensor, bias_tensor}).BindOutputs({output_tensor});
// Do layout inference
auto transform = tim::transform::LayoutInference(src_graph, ctx);
auto infer_graph = transform.first;
auto graph_io_map = transform.second;
infer_graph->Compile();
auto infer_input = graph_io_map[src_graph->InputsTensor()[0]];
auto infer_output = graph_io_map[src_graph->OutputsTensor()[0]];
infer_input->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float));
infer_graph->Run();
std::vector<float> output(golden.size());
EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data()));
EXPECT_EQ(golden, output);
}
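
As a sanity check on the layout conventions above, the golden values can be reproduced by hand. Assuming SAME padding places one zero column on each side (kernel W = 3) and one zero row at the bottom (kernel H = 2), which is what the golden data implies, the first output of group 0 is 1*20 + 4*10 + 3*1 + 2*2 + 500 = 567 = golden[0], and the first output of group 1 is 2*30 + 3*20 + 4*1 + 1*3 - 1000 = -873 = golden[6].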

View File

@@ -40,7 +40,7 @@ class GroupedConv2dLayoutInfer : public OpLayoutInfer {
: OpLayoutInfer(op, context) {}
void OnInputs(
std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
auto src_grouped_conv2d = std::static_pointer_cast<vx::ops::Conv2d>(op_);
auto src_grouped_conv2d = std::static_pointer_cast<vx::ops::GroupedConv2d>(op_);
vx::DataLayout layout = op_->impl()->layout_;
auto kernel_layout = src_grouped_conv2d->KernelDataLayout();
std::shared_ptr<IPermuteVector> required_pv, weight_required_pv;
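
With the corrected cast, KernelDataLayout() reliably reports kernel layouts such as WHIcOc or IcWHOc, and the pass can build the matching weight permute vector. A minimal sketch of that dispatch, with illustrative permutation tables (the real ones live in the layout-inference sources):

#include <cstdint>
#include <vector>
#include "tim/vx/types.h"
// Illustrative only: permutation that rearranges a kernel into WHIcOc order.
std::vector<uint32_t> KernelPermutation(tim::vx::DataLayout layout) {
  switch (layout) {
    case tim::vx::DataLayout::WHIcOc: return {0, 1, 2, 3};  // already canonical
    case tim::vx::DataLayout::IcWHOc: return {1, 2, 0, 3};  // Ic,W,H,Oc -> W,H,Ic,Oc
    default:                          return {0, 1, 2, 3};
  }
}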

View File

@@ -1846,3 +1846,54 @@ TEST(Conv2d, shape_4_2_1_1_int16_DFPQuantizedTest) {
}
EXPECT_EQ(golden, f);
}
TEST(Conv2d, kernel_bigger_than_input_SAME) {
auto ctx = tim::vx::Context::Create();
auto graph = ctx->CreateGraph();
tim::vx::ShapeType input_shape({2, 3, 1, 1}); // WHCN
tim::vx::ShapeType kernel_shape({3, 2, 1, 1}); // WHIcOc
tim::vx::ShapeType bias_shape({1});
tim::vx::ShapeType output_shape({2, 3, 1, 1});
tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
tim::vx::TensorAttribute::INPUT);
tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape,
tim::vx::TensorAttribute::CONSTANT);
tim::vx::TensorSpec bias_spec(tim::vx::DataType::FLOAT32, bias_shape,
tim::vx::TensorAttribute::CONSTANT);
tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
tim::vx::TensorAttribute::OUTPUT);
std::vector<float> input_data = {1.0f, 3.0f, 4.0f, 2.0f, 2.0f, 3.0f};
std::vector<float> weight = {100.0f, 20.0f, 1.0f, 200.0f, 10.0f, 2.0f};
std::vector<float> bias = {500.0f};
std::vector<float> golden = {567.0f, 1480.0f, 608.0f, 1370.0f,
543.0f, 760.0f};
auto input_tensor = graph->CreateTensor(input_spec);
auto weight_tensor = graph->CreateTensor(kernel_spec, weight.data());
auto bias_tensor = graph->CreateTensor(bias_spec, bias.data());
auto output_tensor = graph->CreateTensor(output_spec);
std::array<uint32_t, 2> dilations = {0, 0};
std::array<uint32_t, 2> strides = {1, 1};
auto op = graph->CreateOperation<tim::vx::ops::Conv2d>(
tim::vx::PadType::SAME, strides, dilations, 0, tim::vx::DataLayout::WHCN,
tim::vx::DataLayout::IcWHOc);
(*op).BindInputs({input_tensor, weight_tensor, bias_tensor}).BindOutputs({output_tensor});
EXPECT_TRUE(graph->Compile());
input_tensor->CopyDataToTensor(input_data.data());
EXPECT_TRUE(graph->Run());
uint32_t output_size = 1;
for (auto i : output_tensor->GetShape()) {
output_size *= i;
}
std::vector<float> output(output_size);
EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
EXPECT_EQ(golden, output);
}
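
Both new tests lean on the fact that SAME padding fixes the output extent at ceil(input / stride) regardless of kernel size; with stride 1 the 2x3 output matches the 2x3 input even though the kernel is wider than the input, which is exactly the corner case being covered. A one-line helper expressing that rule (illustrative, not TIM-VX API):

#include <cstdint>
// SAME padding: the output extent depends only on input size and stride.
uint32_t SameOutputDim(uint32_t in, uint32_t stride) {
  return (in + stride - 1) / stride;  // ceil(in / stride)
}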

View File

@@ -71,9 +71,15 @@ GroupedConv2d::GroupedConv2d(Graph* graph,
std::shared_ptr<Operation> GroupedConv2d::Clone(
std::shared_ptr<Graph>& graph) const {
if (this->padding_ == PadType::AUTO) {
return graph->CreateOperation<GroupedConv2d>(
this->pad_, this->strides_, this->dilation_, this->group_number_,
this->impl_->layout_, this->kernel_layout_);
} else {
return graph->CreateOperation<GroupedConv2d>(
this->padding_, this->strides_, this->dilation_, this->group_number_,
this->impl_->layout_, this->kernel_layout_);
}
}
} // namespace ops
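
The branch exists because GroupedConv2d can be constructed either from a symbolic PadType or from explicit pad values: when padding_ is PadType::AUTO the real information lives in pad_, so Clone must forward pad_ to reproduce the op faithfully. An illustrative round trip (dst_graph is hypothetical):

// Clone the op into a second graph; explicit pads survive the clone when
// the PadType is AUTO, and the symbolic PadType is forwarded otherwise.
auto dst_graph = ctx->CreateGraph();
auto cloned = op->Clone(dst_graph);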

View File

@@ -77,8 +77,8 @@ vsi_nn_pad_e TranslatePadType(PadType pad) {
return VSI_NN_PAD_VALID;
case PadType::SAME:
return VSI_NN_PAD_SAME;
default:
VSILOGE("PadType not support.");
break;
}
return VSI_NN_PAD_AUTO;
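
Any unmatched PadType logs an error and falls through to the function's final return, VSI_NN_PAD_AUTO. A quick sanity check of the mapping (illustrative):

#include <cassert>
assert(TranslatePadType(PadType::SAME) == VSI_NN_PAD_SAME);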