diff --git a/src/tim/transform/layout_inference_test.cc b/src/tim/transform/layout_inference_test.cc
index 5500ba4..ebd659c 100644
--- a/src/tim/transform/layout_inference_test.cc
+++ b/src/tim/transform/layout_inference_test.cc
@@ -1,6 +1,6 @@
 #include "tim/vx/context.h"
 #include "tim/vx/graph.h"
-#include "tim/vx/ops/conv2d.h"
+#include "tim/vx/ops.h"
 #include "tim/transform/layout_inference.h"
 #include "gtest/gtest.h"
 
@@ -116,4 +116,58 @@ TEST(LayoutInference, weight_as_input_conv2d) {
                                              sizeof(float) * out_data.size()));
   tim::vx::ShapeType expect_shape({2, 2, 1, 1});
   EXPECT_EQ(infer_out_shape, expect_shape);
-}
\ No newline at end of file
+}
+
+TEST(GroupedConv2d, kernel_bigger_than_input_SAME) {
+  auto ctx = tim::vx::Context::Create();
+  auto src_graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType input_shape({2, 3, 2, 1});   //whcn
+  tim::vx::ShapeType kernel_shape({1, 3, 2, 2});  //iwho, i*groups=c
+  tim::vx::ShapeType bias_shape({2});
+  tim::vx::ShapeType output_shape({2, 3, 2, 1});
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
+                                 tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape,
+                                  tim::vx::TensorAttribute::CONSTANT);
+  tim::vx::TensorSpec bias_spec(tim::vx::DataType::FLOAT32, bias_shape,
+                                tim::vx::TensorAttribute::CONSTANT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
+                                  tim::vx::TensorAttribute::OUTPUT);
+
+  std::vector<float> in_data = {1.0f, 3.0f, 4.0f, 2.0f, 2.0f, 3.0f,
+                                2.0f, 4.0f, 3.0f, 1.0f, 3.0f, 3.0f};
+  std::vector<float> weight = {100.0f, 20.0f, 1.0f, 200.0f, 10.0f, 2.0f,
+                               200.0f, 30.0f, 1.0f, 100.0f, 20.0f, 3.0f};
+  std::vector<float> bias = {500.0f, -1000.0f};
+  std::vector<float> golden = {567.0f,  1480.0f, 608.0f,  1370.0f,
+                               543.0f,  760.0f,  -873.0f, -160.0f,
+                               -840.0f, -10.0f,  -907.0f, -310.0f};
+  auto input_tensor = src_graph->CreateTensor(input_spec);
+  auto weight_tensor = src_graph->CreateTensor(kernel_spec, weight.data());
+  auto bias_tensor = src_graph->CreateTensor(bias_spec,
+                                             bias.data());
+  auto output_tensor = src_graph->CreateTensor(output_spec);
+
+  std::array<uint32_t, 2> dilations = {0, 0};
+  std::array<uint32_t, 2> strides = {1, 1};
+  auto op = src_graph->CreateOperation<tim::vx::ops::GroupedConv2d>(
+      tim::vx::PadType::SAME, strides, dilations, 2, tim::vx::DataLayout::WHCN,
+      tim::vx::DataLayout::IcWHOc);
+  (*op).BindInputs({input_tensor, weight_tensor, bias_tensor}).BindOutputs({output_tensor});
+
+  // Do layout inference
+  auto transform = tim::transform::LayoutInference(src_graph, ctx);
+  auto infer_graph = transform.first;
+  auto graph_io_map = transform.second;
+  infer_graph->Compile();
+
+  auto infer_input = graph_io_map[src_graph->InputsTensor()[0]];
+  auto infer_output = graph_io_map[src_graph->OutputsTensor()[0]];
+
+  infer_input->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float));
+  infer_graph->Run();
+
+  std::vector<float> output(golden.size());
+  EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data()));
+  EXPECT_EQ(golden, output);
+}
\ No newline at end of file
diff --git a/src/tim/transform/ops/grouped_conv2d_layout_inference.h b/src/tim/transform/ops/grouped_conv2d_layout_inference.h
index 9b9955c..79d5a52 100644
--- a/src/tim/transform/ops/grouped_conv2d_layout_inference.h
+++ b/src/tim/transform/ops/grouped_conv2d_layout_inference.h
@@ -40,7 +40,7 @@ class GroupedConv2dLayoutInfer : public OpLayoutInfer {
       : OpLayoutInfer(op, context) {}
   void OnInputs(
       std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
-    auto src_grouped_conv2d = std::static_pointer_cast<vx::ops::Conv2d>(op_);
+    auto src_grouped_conv2d = std::static_pointer_cast<vx::ops::GroupedConv2d>(op_);
     vx::DataLayout layout = op_->impl()->layout_;
     auto kernel_layout = src_grouped_conv2d->KernelDataLayout();
     std::shared_ptr<IPermuteVector> required_pv, weight_required_pv;
diff --git a/src/tim/vx/ops/conv2d_test.cc b/src/tim/vx/ops/conv2d_test.cc
index 58efb58..ca00178 100644
--- a/src/tim/vx/ops/conv2d_test.cc
+++ b/src/tim/vx/ops/conv2d_test.cc
@@ -1846,3 +1846,54 @@ TEST(Conv2d, shape_4_2_1_1_int16_DFPQuantizedTest) {
   }
   EXPECT_EQ(golden, f);
 }
+
+TEST(Conv2d, kernel_bigger_than_input_SAME) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType input_shape({2, 3, 1, 1});   //whcn
+  tim::vx::ShapeType kernel_shape({3, 2, 1, 1});  //whio
+  tim::vx::ShapeType bias_shape({1});
+  tim::vx::ShapeType output_shape({2, 3, 1, 1});
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
+                                 tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape,
+                                  tim::vx::TensorAttribute::CONSTANT);
+  tim::vx::TensorSpec bias_spec(tim::vx::DataType::FLOAT32, bias_shape,
+                                tim::vx::TensorAttribute::CONSTANT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
+                                  tim::vx::TensorAttribute::OUTPUT);
+
+  std::vector<float> input_data = {1.0f, 3.0f, 4.0f, 2.0f, 2.0f, 3.0f,
+  };
+  std::vector<float> weight = {100.0f, 20.0f, 1.0f, 200.0f, 10.0f, 2.0f,
+  };
+  std::vector<float> bias = {500.0f};
+  std::vector<float> golden = {567.0f, 1480.0f, 608.0f, 1370.0f,
+                               543.0f, 760.0f, };
+  auto input_tensor = graph->CreateTensor(input_spec);
+  auto weight_tensor = graph->CreateTensor(kernel_spec, weight.data());
+  auto bias_tensor = graph->CreateTensor(bias_spec, bias.data());
+  auto output_tensor = graph->CreateTensor(output_spec);
+
+  std::array<uint32_t, 2> dilations = {0, 0};
+  std::array<uint32_t, 2> strides = {1, 1};
+  auto op = graph->CreateOperation<tim::vx::ops::Conv2d>(
+      tim::vx::PadType::SAME, strides, dilations, 0, tim::vx::DataLayout::WHCN,
+      tim::vx::DataLayout::IcWHOc);
+  (*op).BindInputs({input_tensor, weight_tensor, bias_tensor}).BindOutputs({output_tensor});
+
+  EXPECT_TRUE(graph->Compile());
+
+  input_tensor->CopyDataToTensor(input_data.data());
+
+  EXPECT_TRUE(graph->Run());
+
+  uint32_t output_size = 1;
+  for (auto i : output_tensor->GetShape()) {
+    output_size *= i;
+  }
+  std::vector<float> output(output_size);
+  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
+  EXPECT_EQ(golden, output);
+}
\ No newline at end of file
diff --git a/src/tim/vx/ops/groupedconv2d.cc b/src/tim/vx/ops/groupedconv2d.cc
index e6b7cc3..a437877 100644
--- a/src/tim/vx/ops/groupedconv2d.cc
+++ b/src/tim/vx/ops/groupedconv2d.cc
@@ -71,9 +71,15 @@ GroupedConv2d::GroupedConv2d(Graph* graph,
 
 std::shared_ptr<Operation> GroupedConv2d::Clone(
     std::shared_ptr<Graph>& graph) const {
-  return graph->CreateOperation<GroupedConv2d>(
-      this->pad_, this->strides_, this->dilation_, this->group_number_,
-      this->impl_->layout_, this->kernel_layout_);
+  if(this->padding_ == PadType::AUTO){
+    return graph->CreateOperation<GroupedConv2d>(
+        this->pad_, this->strides_, this->dilation_, this->group_number_,
+        this->impl_->layout_, this->kernel_layout_);
+  } else {
+    return graph->CreateOperation<GroupedConv2d>(
+        this->padding_, this->strides_, this->dilation_, this->group_number_,
+        this->impl_->layout_, this->kernel_layout_);
+  }
 }
 
 }  // namespace ops
diff --git a/src/tim/vx/type_utils.cc b/src/tim/vx/type_utils.cc
index ecc9f91..383bfcb 100644
--- a/src/tim/vx/type_utils.cc
+++ b/src/tim/vx/type_utils.cc
@@ -77,8 +77,8 @@ vsi_nn_pad_e TranslatePadType(PadType pad) {
     case PadType::VALID:
       return VSI_NN_PAD_VALID;
     case PadType::SAME:
      return VSI_NN_PAD_SAME;
-    default:
+      VSILOGE("PadType not support.");
      break;
  }
  return VSI_NN_PAD_AUTO;