From 9bb3e7c68b2155979c1b8ed7b3d276b4f3c6a370 Mon Sep 17 00:00:00 2001
From: Chen Feiyue <69809761+chenfeiyue-cfy@users.noreply.github.com>
Date: Thu, 17 Aug 2023 21:26:54 +0800
Subject: [PATCH] Fixed misleading test case bug in deconv1d (#633)

Corrected errors in the deconv1d unit tests
Added a hint in the header indicating that padtype is not supported yet
Added 2 test cases for deconv1d

Type: Code Improvement
Issue: github issue #585

Signed-off-by: Feiyue Chen
---
 include/tim/vx/ops/deconv1d.h   |  74 ++++-----
 src/tim/vx/ops/deconv1d_test.cc | 282 +++++++++++++++++++++++---------
 2 files changed, 240 insertions(+), 116 deletions(-)

diff --git a/include/tim/vx/ops/deconv1d.h b/include/tim/vx/ops/deconv1d.h
index 4697569..e8ddf6e 100644
--- a/include/tim/vx/ops/deconv1d.h
+++ b/include/tim/vx/ops/deconv1d.h
@@ -42,53 +42,49 @@ namespace ops {
  *
  * - weights : the channel number for weight tensor.
  * - ksize : the length for weight tensor.
- * - padding : AUTO, VALID or SAME.
+ * - padtype : AUTO, VALID or SAME.**
  * - pad : pad value for each spatial axis.
  * - stride : stride along each spatial axis.
- * - output_padding : specifying the amount of padding along the height and width of
- *   the output tensor.
+ * - output_padding : additional amount of padding appended to the output tensor; defaults to zero.
+ *
+ * Caution**: PadType is not actually supported yet; it will be supported in a future release.
  */
 class DeConv1d : public BuiltinOp {
- public:
-  DeConv1d(Graph* graph, PadType pad_type,
-           uint32_t stride, uint32_t output_padding, uint32_t group = 1,
-           DataLayout input_layout = DataLayout::WHCN,
-           DataLayout kernel_layout = DataLayout::WHIcOc);
-  DeConv1d(Graph* graph, const std::array<uint32_t, 2>& pad,
-           uint32_t stride, uint32_t output_padding, uint32_t group = 1,
-           DataLayout input_layout = DataLayout::WHCN,
-           DataLayout kernel_layout = DataLayout::WHIcOc);
-  DeConv1d(Graph* graph, int32_t oc_count_, PadType pad_type,
-           uint32_t ksize,
-           uint32_t stride,
-           uint32_t output_padding);
-  DeConv1d(Graph* graph, int32_t oc_count_, PadType pad_type,
-           uint32_t ksize,
-           uint32_t stride,
-           uint32_t output_padding,
-           const std::array<uint32_t, 2>& pad,
-           uint32_t group = 1);
-  DeConv1d(Graph* graph, PadType pad_type,
-           uint32_t stride, uint32_t output_padding,
-           const std::array<uint32_t, 2>& pad, uint32_t group,
-           DataLayout input_layout, DataLayout kernel_layout);
+ public:
+  DeConv1d(Graph* graph, PadType pad_type, uint32_t stride,
+           uint32_t output_padding, uint32_t group = 1,
+           DataLayout input_layout = DataLayout::WHCN,
+           DataLayout kernel_layout = DataLayout::WHIcOc);
+  DeConv1d(Graph* graph, const std::array<uint32_t, 2>& pad, uint32_t stride,
+           uint32_t output_padding, uint32_t group = 1,
+           DataLayout input_layout = DataLayout::WHCN,
+           DataLayout kernel_layout = DataLayout::WHIcOc);
+  DeConv1d(Graph* graph, int32_t oc_count_, PadType pad_type, uint32_t ksize,
+           uint32_t stride, uint32_t output_padding);
+  DeConv1d(Graph* graph, int32_t oc_count_, PadType pad_type, uint32_t ksize,
+           uint32_t stride, uint32_t output_padding,
+           const std::array<uint32_t, 2>& pad, uint32_t group = 1);
+  DeConv1d(Graph* graph, PadType pad_type, uint32_t stride,
+           uint32_t output_padding, const std::array<uint32_t, 2>& pad,
+           uint32_t group, DataLayout input_layout, DataLayout kernel_layout);
 
-  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;
+  std::shared_ptr<Operation> Clone(
+      std::shared_ptr<Graph>& graph) const override;
 
- protected:
-  const uint32_t oc_count_; // output channel count
-  const PadType pad_type_;
-  const uint32_t ksize_;
-  const uint32_t stride_;
-  const uint32_t output_padding_;
-  const std::array<uint32_t, 2> pad_;
-  const uint32_t group_;
-  const DataLayout kernel_layout_;
+ protected:
+  const uint32_t oc_count_;  // output channel count
+  const PadType pad_type_;
+  const uint32_t ksize_;
+  const uint32_t stride_;
+  const uint32_t output_padding_;
+  const std::array<uint32_t, 2> pad_;
+  const uint32_t group_;
+  const DataLayout kernel_layout_;
 };
 
-} // namespace ops
-} // namespace vx
-} // namespace tim
+}  // namespace ops
+}  // namespace vx
+}  // namespace tim
 
 #endif /* TIM_VX_OPS_DECONV1D_H_ */
diff --git a/src/tim/vx/ops/deconv1d_test.cc b/src/tim/vx/ops/deconv1d_test.cc
index d15f973..8eb9f61 100644
--- a/src/tim/vx/ops/deconv1d_test.cc
+++ b/src/tim/vx/ops/deconv1d_test.cc
@@ -24,110 +24,238 @@
 #include "tim/vx/context.h"
 #include "tim/vx/graph.h"
 #include "tim/vx/ops/deconv1d.h"
+#include "tim/vx/ops/activations.h"
 #include "gtest/gtest.h"
 
-TEST(DeConv1d, no_bias_layout_whcn_depthwise_shape_3_2_1) {
-  auto ctx = tim::vx::Context::Create();
-  auto graph = ctx->CreateGraph();
+TEST(DeConv1d, no_bias_no_outputpadding_shape_3_2_1) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
 
-  tim::vx::ShapeType input_shape ({3, 2, 1});   //whcn
-  tim::vx::ShapeType kernel_shape({3, 2, 1});   //whc1 same as depthwise convolution
-  tim::vx::ShapeType output_shape({5, 2, 1});   //whcn
+  tim::vx::ShapeType input_shape({3, 2, 1});
+  tim::vx::ShapeType kernel_shape({3, 2, 1});
+  tim::vx::ShapeType output_shape({5, 1, 1});
 
-  tim::vx::TensorSpec input_spec (tim::vx::DataType::FLOAT32, input_shape, tim::vx::TensorAttribute::INPUT);
-  tim::vx::TensorSpec kernel_spec (tim::vx::DataType::FLOAT32, kernel_shape, tim::vx::TensorAttribute::CONSTANT);
-  tim::vx::TensorSpec output_spec (tim::vx::DataType::FLOAT32, output_shape, tim::vx::TensorAttribute::OUTPUT);
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
+                                 tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape,
+                                  tim::vx::TensorAttribute::CONSTANT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
+                                  tim::vx::TensorAttribute::OUTPUT);
 
-  auto input_tensor = graph->CreateTensor(input_spec);
-  auto output_tensor = graph->CreateTensor(output_spec);
-  auto kernel_tensor = graph->CreateTensor(kernel_spec);
+  auto input_tensor = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+  auto kernel_tensor = graph->CreateTensor(kernel_spec);
 
-  std::vector<float> input_data = { 3.0f, 9.0f, 3.0f,
-                                    7.0f, 5.0f, 9.0f, };
-  std::vector<float> kernel_data = { 9.0f, 0.0f, 1.0f,
-                                     3.0f, 0.0f, 0.0f, };
+  std::vector<float> input_data = {
+      3.0f, 9.0f, 3.0f, 7.0f, 5.0f, 9.0f,
+  };
+  std::vector<float> kernel_data = {
+      9.0f, 0.0f, 1.0f, 3.0f, 0.0f, 0.0f,
+  };
 
-  std::vector<float> golden = {
-      27.0f, 81.0f, 30.0f, 9.0f, 3.0f,
-      21.0f, 15.0f, 27.0f, 0.0f, 0.0f, };
+  std::vector<float> golden = {48, 96, 57, 9, 3};
 
-  std::vector<float> output_data(golden.size());
+  std::vector<float> output_data(golden.size());
 
-  EXPECT_TRUE(input_tensor->CopyDataToTensor(input_data.data(), input_data.size()*4));
-  EXPECT_TRUE(kernel_tensor->CopyDataToTensor(kernel_data.data(), kernel_data.size()*4));
+  EXPECT_TRUE(
+      input_tensor->CopyDataToTensor(input_data.data(), input_data.size() * 4));
+  EXPECT_TRUE(kernel_tensor->CopyDataToTensor(kernel_data.data(),
+                                              kernel_data.size() * 4));
 
-  auto op = graph->CreateOperation<tim::vx::ops::DeConv1d>(
-      2, tim::vx::PadType::SAME, 3, 1, 1, std::array<uint32_t, 2>({0, 0}), 2);
-  (*op).BindInputs({input_tensor, kernel_tensor}).BindOutputs({output_tensor});
+  auto op = graph->CreateOperation<tim::vx::ops::DeConv1d>(
+      2, tim::vx::PadType::VALID, /*ksize=*/3, /*stride=*/1,
+      /*output_padding=*/0, std::array<uint32_t, 2>({0, 0}), 1);
+  (*op).BindInputs({input_tensor, kernel_tensor}).BindOutputs({output_tensor});
 
-  EXPECT_TRUE(graph->Compile());
-  EXPECT_TRUE(graph->Run());
+  EXPECT_TRUE(graph->Compile());
+  EXPECT_TRUE(graph->Run());
 
-  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output_data.data()));
+  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output_data.data()));
 
-  EXPECT_EQ(golden, output_data) << "Result mismatch";
+  EXPECT_EQ(golden, output_data) << "Result mismatch";
 }
 
-TEST(DeConv1d, layout_whcn_shape_3_1_1) {
-  auto ctx = tim::vx::Context::Create();
-  auto graph = ctx->CreateGraph();
-
-  tim::vx::ShapeType input_shape ({3, 1, 1});
-  tim::vx::ShapeType kernel_shape({3, 1, 1});
-  tim::vx::ShapeType output_shape({5, 1, 1});
-  tim::vx::ShapeType bias_shape({1});
-
-  tim::vx::Quantization input_quant(tim::vx::QuantType::ASYMMETRIC, 1.0f, 0);
-  tim::vx::Quantization output_quant(tim::vx::QuantType::ASYMMETRIC, 1.0f, 2);
-  tim::vx::Quantization weight_quant(tim::vx::QuantType::ASYMMETRIC, 1.0f, 0);
-  tim::vx::Quantization bias_quant(tim::vx::QuantType::ASYMMETRIC, 1.0f, 0);
-
-  tim::vx::TensorSpec input_spec (
-      tim::vx::DataType::UINT8, input_shape, tim::vx::TensorAttribute::INPUT, input_quant);
-  tim::vx::TensorSpec kernel_spec (
-      tim::vx::DataType::UINT8, kernel_shape, tim::vx::TensorAttribute::CONSTANT, weight_quant);
-  tim::vx::TensorSpec bias_spec (
-      tim::vx::DataType::INT32, bias_shape, tim::vx::TensorAttribute::CONSTANT, bias_quant);
-  tim::vx::TensorSpec output_spec (
-      tim::vx::DataType::UINT8, output_shape, tim::vx::TensorAttribute::OUTPUT, output_quant);
-
-  auto input_tensor = graph->CreateTensor(input_spec);
-  auto output_tensor = graph->CreateTensor(output_spec);
-  auto kernel_tensor = graph->CreateTensor(kernel_spec);
-  auto bias_tensor = graph->CreateTensor(bias_spec);
-
-  std::vector<uint8_t> input_data = {
-      3, 9, 3,
-  };
-
-  std::vector<uint8_t> kernel_data = {
-      9, 0, 1,
-  };
-
-  std::vector<int32_t> bias_data = {
-      -5,
-  };
-
-  std::vector<uint8_t> golden = {
-      24, 78, 27, 6, 0,
-  };
-
-  std::vector<uint8_t> output_data(golden.size());
-
-  EXPECT_TRUE(input_tensor->CopyDataToTensor(input_data.data(), input_data.size()));
-  EXPECT_TRUE(kernel_tensor->CopyDataToTensor(kernel_data.data(), kernel_data.size()));
-  EXPECT_TRUE(bias_tensor->CopyDataToTensor(bias_data.data(), bias_data.size() * sizeof(int32_t)));
-
-  auto op = graph->CreateOperation<tim::vx::ops::DeConv1d>(
-      1, tim::vx::PadType::SAME, 3, 1, 1,
-      std::array<uint32_t, 2>({0, 0,}),
-      1);
-  (*op).BindInputs({input_tensor, kernel_tensor, bias_tensor}).BindOutputs({output_tensor});
-
-  EXPECT_TRUE(graph->Compile());
-  EXPECT_TRUE(graph->Run());
-
-  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output_data.data()));
-  EXPECT_EQ(golden, output_data) << "Result mismatch";
-}
+TEST(DeConv1d, no_bias_has_outputpadding_shape_3_2_1) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType input_shape({3, 2, 1});
+  tim::vx::ShapeType kernel_shape({3, 2, 1});
+  tim::vx::ShapeType output_shape({6, 1, 1});
+
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
+                                 tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape,
+                                  tim::vx::TensorAttribute::CONSTANT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
+                                  tim::vx::TensorAttribute::OUTPUT);
+
+  auto input_tensor = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+  auto kernel_tensor = graph->CreateTensor(kernel_spec);
+
+  std::vector<float> input_data = {
+      3.0f, 9.0f, 3.0f, 7.0f, 5.0f, 9.0f,
+  };
+  std::vector<float> kernel_data = {
+      9.0f, 0.0f, 1.0f, 3.0f, 0.0f, 0.0f,
+  };
+
+  std::vector<float> golden = {48, 96, 57, 9, 3, 0};
+
+  std::vector<float> output_data(golden.size());
+
+  EXPECT_TRUE(
+      input_tensor->CopyDataToTensor(input_data.data(), input_data.size() * 4));
+  EXPECT_TRUE(kernel_tensor->CopyDataToTensor(kernel_data.data(),
+                                              kernel_data.size() * 4));
+
+  auto op = graph->CreateOperation<tim::vx::ops::DeConv1d>(
+      2, tim::vx::PadType::VALID, /*ksize=*/3, /*stride=*/1,
+      /*output_padding=*/1, std::array<uint32_t, 2>({0, 0}), 1);
+  (*op).BindInputs({input_tensor, kernel_tensor}).BindOutputs({output_tensor});
+
+  EXPECT_TRUE(graph->Compile());
+  EXPECT_TRUE(graph->Run());
+
+  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output_data.data()));
+
+  EXPECT_EQ(golden, output_data) << "Result mismatch";
+}
+
+TEST(DeConv1d, layout_wcn_shape_3_1_1) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType input_shape({3, 1, 1});
+  tim::vx::ShapeType kernel_shape({3, 1, 1});
+  tim::vx::ShapeType output_shape({5, 1, 1});
+  tim::vx::ShapeType bias_shape({1});
+
+  tim::vx::Quantization input_quant(tim::vx::QuantType::ASYMMETRIC, 1.0f, 0);
+  tim::vx::Quantization output_quant(tim::vx::QuantType::ASYMMETRIC, 1.0f, 2);
+  tim::vx::Quantization weight_quant(tim::vx::QuantType::ASYMMETRIC, 1.0f, 0);
+  tim::vx::Quantization bias_quant(tim::vx::QuantType::ASYMMETRIC, 1.0f, 0);
+
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::UINT8, input_shape,
+                                 tim::vx::TensorAttribute::INPUT, input_quant);
+  tim::vx::TensorSpec kernel_spec(tim::vx::DataType::UINT8, kernel_shape,
+                                  tim::vx::TensorAttribute::CONSTANT,
+                                  weight_quant);
+  tim::vx::TensorSpec bias_spec(tim::vx::DataType::INT32, bias_shape,
+                                tim::vx::TensorAttribute::CONSTANT, bias_quant);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::UINT8, output_shape,
+                                  tim::vx::TensorAttribute::OUTPUT,
+                                  output_quant);
+
+  auto input_tensor = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+  auto kernel_tensor = graph->CreateTensor(kernel_spec);
+  auto bias_tensor = graph->CreateTensor(bias_spec);
+
+  std::vector<uint8_t> input_data = {
+      3,
+      9,
+      3,
+  };
+
+  std::vector<uint8_t> kernel_data = {
+      9,
+      0,
+      1,
+  };
+
+  std::vector<int32_t> bias_data = {
+      -5,
+  };
+
+  std::vector<uint8_t> golden = {
+      24, 78, 27, 6, 0,
+  };
+
+  std::vector<uint8_t> output_data(golden.size());
+
+  EXPECT_TRUE(
+      input_tensor->CopyDataToTensor(input_data.data(), input_data.size()));
+  EXPECT_TRUE(
+      kernel_tensor->CopyDataToTensor(kernel_data.data(), kernel_data.size()));
+  EXPECT_TRUE(bias_tensor->CopyDataToTensor(
+      bias_data.data(), bias_data.size() * sizeof(int32_t)));
+
+  auto op = graph->CreateOperation<tim::vx::ops::DeConv1d>(
+      1, tim::vx::PadType::VALID, /*ksize=*/3, /*stride=*/1,
+      /*output_padding=*/0,
+      std::array<uint32_t, 2>({
+          0,
+          0,
+      }),
+      /*group=*/1);
+  (*op)
+      .BindInputs({input_tensor, kernel_tensor, bias_tensor})
+      .BindOutputs({output_tensor});
+
+  EXPECT_TRUE(graph->Compile());
+  EXPECT_TRUE(graph->Run());
+
+  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output_data.data()));
+  EXPECT_EQ(golden, output_data) << "Result mismatch";
+}
+
+TEST(DeConv1dLeakyRelu, shape_5_5_1) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType input_shape({5, 5, 1});  //wcn
+  tim::vx::ShapeType kernel_shape({3, 5, 2});
+  tim::vx::ShapeType output_shape({7, 2, 1});  //wcn
+
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
+                                 tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape,
+                                  tim::vx::TensorAttribute::CONSTANT);
+  tim::vx::TensorSpec transient_spec(tim::vx::DataType::FLOAT32, {},
+                                     tim::vx::TensorAttribute::TRANSIENT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
+                                  tim::vx::TensorAttribute::OUTPUT);
+
+  auto input_tensor = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+  auto transient_tensor = graph->CreateTensor(transient_spec);
+  auto kernel_tensor = graph->CreateTensor(kernel_spec);
+
+  std::vector<float> input_data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,
+                                   8.0,  9.0,  10.0, 11.0, 12.0, 13.0, 14.0,
+                                   15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0,
+                                   22.0, 23.0, 24.0, 25.0};
+  std::vector<float> kernel_data = {1,  2,  3,  7,  8,  9,  13, 14, 15, 19,
+                                    20, 21, 25, 26, 27, 4,  5,  6,  10, 11,
+                                    12, 16, 17, 18, 22, 23, 24, 28, 29, 30};
+  std::vector<float> golden = {
+      1015.0, 2150.0, 3410.0, 3620.0, 3830.0, 2700.0, 1425.0,
+      1180.0, 2495.0, 3950.0, 4205.0, 4460.0, 3135.0, 1650.0,
+  };
+
+  EXPECT_TRUE(
+      input_tensor->CopyDataToTensor(input_data.data(), input_data.size() * 4));
+  EXPECT_TRUE(kernel_tensor->CopyDataToTensor(kernel_data.data(),
+                                              kernel_data.size() * 4));
+
+  auto op = graph->CreateOperation<tim::vx::ops::DeConv1d>(
+      1, tim::vx::PadType::VALID, /*ksize=*/3, /*stride=*/1,
+      /*output_padding=*/0, std::array<uint32_t, 2>({0, 0}), /*group=*/1);
+  (*op)
+      .BindInputs({input_tensor, kernel_tensor})
+      .BindOutputs({transient_tensor});
+
+  auto leakyrelu = graph->CreateOperation<tim::vx::ops::LeakyRelu>(0.01f);
+  (*leakyrelu).BindInputs({transient_tensor}).BindOutputs({output_tensor});
+
+  EXPECT_TRUE(graph->Compile());
+  EXPECT_TRUE(graph->Run());
+  std::vector<float> output_data(golden.size());
+
+  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output_data.data()));
+
+  EXPECT_EQ(golden, output_data) << "Result mismatch";
+}
\ No newline at end of file
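Reviewer note (not part of the patch): the header comment documents the DeConv1d parameters, and the new tests all drive the (oc_count, pad_type, ksize, stride, output_padding, pad, group) constructor. Below is a minimal, self-contained sketch of how that constructor is used outside of gtest. Shapes, data, and the expected output are taken from the no_bias_no_outputpadding_shape_3_2_1 test above; the main() harness and error handling are illustrative only, not an established TIM-VX sample.

#include <array>
#include <cstdint>
#include <iostream>
#include <vector>

#include "tim/vx/context.h"
#include "tim/vx/graph.h"
#include "tim/vx/ops/deconv1d.h"

int main() {
  auto ctx = tim::vx::Context::Create();
  auto graph = ctx->CreateGraph();

  // Shapes from the test: {3, 2, 1} input, {3, 2, 1} kernel, {5, 1, 1} output
  // (VALID padding, ksize = 3, stride = 1 -> out width = 3 + 3 - 1 = 5).
  tim::vx::ShapeType input_shape({3, 2, 1});
  tim::vx::ShapeType kernel_shape({3, 2, 1});
  tim::vx::ShapeType output_shape({5, 1, 1});

  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
                                 tim::vx::TensorAttribute::INPUT);
  tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape,
                                  tim::vx::TensorAttribute::CONSTANT);
  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
                                  tim::vx::TensorAttribute::OUTPUT);

  auto input = graph->CreateTensor(input_spec);
  auto kernel = graph->CreateTensor(kernel_spec);
  auto output = graph->CreateTensor(output_spec);

  std::vector<float> input_data = {3, 9, 3, 7, 5, 9};
  std::vector<float> kernel_data = {9, 0, 1, 3, 0, 0};
  input->CopyDataToTensor(input_data.data(),
                          input_data.size() * sizeof(float));
  kernel->CopyDataToTensor(kernel_data.data(),
                           kernel_data.size() * sizeof(float));

  // Constructor form exercised by the new tests:
  // (oc_count, pad_type, ksize, stride, output_padding, pad, group).
  // Per the header's caution, PadType is not actually honored yet.
  auto deconv = graph->CreateOperation<tim::vx::ops::DeConv1d>(
      2, tim::vx::PadType::VALID, /*ksize=*/3, /*stride=*/1,
      /*output_padding=*/0, std::array<uint32_t, 2>({0, 0}), /*group=*/1);
  (*deconv).BindInputs({input, kernel}).BindOutputs({output});

  if (!graph->Compile() || !graph->Run()) return 1;

  std::vector<float> result(5);
  output->CopyDataFromTensor(result.data());
  for (float v : result) std::cout << v << " ";  // test golden: 48 96 57 9 3
  std::cout << std::endl;
  return 0;
}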