From dc31091db5bd4eaa31fa0586678e150efa966d15 Mon Sep 17 00:00:00 2001 From: chxin66 <57057788+chxin66@users.noreply.github.com> Date: Mon, 6 Dec 2021 19:20:13 +0800 Subject: [PATCH] mapped groupedconv1d & unit test (#233) Signed-off-by: Chen Xin Co-authored-by: Chen Xin --- include/tim/vx/ops/conv1d.h | 16 +++--- include/tim/vx/ops/groupedconv1d.h | 82 ++++++++++++++++++++++++++++ include/tim/vx/types.h | 4 +- src/tim/vx/ops/README.md | 1 + src/tim/vx/ops/conv1d_test.cc | 15 ++--- src/tim/vx/ops/groupedconv1d.cc | 59 ++++++++++++++++++++ src/tim/vx/ops/groupedconv1d_test.cc | 72 ++++++++++++++++++++++++ src/tim/vx/ops/groupedconv2d.cc | 18 +++--- 8 files changed, 242 insertions(+), 25 deletions(-) create mode 100644 include/tim/vx/ops/groupedconv1d.h create mode 100644 src/tim/vx/ops/groupedconv1d.cc create mode 100644 src/tim/vx/ops/groupedconv1d_test.cc diff --git a/include/tim/vx/ops/conv1d.h b/include/tim/vx/ops/conv1d.h index 60383c3..957d437 100644 --- a/include/tim/vx/ops/conv1d.h +++ b/include/tim/vx/ops/conv1d.h @@ -36,22 +36,22 @@ class Conv1d : public Operation { public: Conv1d(Graph* graph, PadType padding, uint32_t stride, uint32_t dilation, int32_t multiplier = 0, - DataLayout input_layout = DataLayout::WHCN, - DataLayout kernel_layout = DataLayout::WHIcOc); + DataLayout input_layout = DataLayout::WCN, + DataLayout kernel_layout = DataLayout::WIcOc); Conv1d(Graph* graph, const std::array& pad, uint32_t stride, uint32_t dilation, int32_t multiplier = 0, - DataLayout input_layout = DataLayout::WHCN, - DataLayout kernel_layout = DataLayout::WHIcOc); + DataLayout input_layout = DataLayout::WCN, + DataLayout kernel_layout = DataLayout::WIcOc); Conv1d(Graph* graph, int32_t weights, PadType padding, uint32_t ksize, uint32_t stride, uint32_t dilation, int32_t multiplier = 0, - DataLayout input_layout = DataLayout::WHCN, - DataLayout kernel_layout = DataLayout::WHIcOc); + DataLayout input_layout = DataLayout::WCN, + DataLayout kernel_layout = 
DataLayout::WIcOc); Conv1d(Graph* graph, int32_t weights, PadType padding, uint32_t ksize, uint32_t stride, uint32_t dilation, const std::array& pad, int32_t multiplier = 0, - DataLayout input_layout = DataLayout::WHCN, - DataLayout kernel_layout = DataLayout::WHIcOc); + DataLayout input_layout = DataLayout::WCN, + DataLayout kernel_layout = DataLayout::WIcOc); DataLayout KernelDataLayout() { return kernel_layout_; } diff --git a/include/tim/vx/ops/groupedconv1d.h b/include/tim/vx/ops/groupedconv1d.h new file mode 100644 index 0000000..c00054e --- /dev/null +++ b/include/tim/vx/ops/groupedconv1d.h @@ -0,0 +1,82 @@ +/**************************************************************************** +* +* Copyright (c) 2021 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +*****************************************************************************/ +#ifndef TIM_VX_OPS_GROUPEDCONV1D_H_ +#define TIM_VX_OPS_GROUPEDCONV1D_H_ + +#include <array> + +#include "tim/vx/operation.h" + +namespace tim { +namespace vx { +namespace ops { + +/** + * ## GroupedConv1d + * + * Performs a grouped 1-D convolution operation. + * + * Input: + * - input [WCN]. + * - kernel [ WIcOc ] (Ic: Input Channels. Oc: Output Channels). Ic * group = C. + * - bias [ O ]. Optional. + * + * Attribute: + * - weights : the output channel number for weight tensor. + * - ksize : the length of the weight tensor along the spatial (W) axis. + * - padding : AUTO, VALID or SAME. + * - pad : pad value for each spatial axis. + * - stride : stride along each spatial axis. + * - dilation : dilation value along each spatial axis of the filter. + * - group : split the convolution into n groups along the channel axis. + * - layout : WCN or CWN. + */ + +class GroupedConv1d : public Operation { + public: + GroupedConv1d(Graph* graph, PadType padding, + uint32_t stride, + uint32_t dilation, + uint32_t group, + DataLayout input_layout = DataLayout::WCN, + DataLayout kernel_layout = DataLayout::WIcOc); + + DataLayout KernelDataLayout() { return kernel_layout_; } + + std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override; + + protected: + const PadType padding_; + const uint32_t stride_; + const uint32_t dilation_; + const std::array<uint32_t, 2> pad_; + const uint32_t group_; + const DataLayout kernel_layout_; +}; + +} // namespace ops +} // namespace vx +} // namespace tim + +#endif /* TIM_VX_OPS_GROUPEDCONV1D_H_ */ \ No newline at end of file diff --git a/include/tim/vx/types.h b/include/tim/vx/types.h index d8acebf..150744b 100644 --- a/include/tim/vx/types.h +++ b/include/tim/vx/types.h @@ -69,7 +69,9 @@ enum class DataLayout { IcWHOc, /*TF*/ OcIcWH, /*TVM for classic conv2d in tflite model*/ IcOcWH, /*TVM for depthwise conv2d in tflite model*/ - WHIcOc /*TIM-VX default*/ + WHIcOc, /*TIM-VX default*/ + WCN, /*for conv1d*/ + WIcOc, /*for conv1d*/ }; } // 
namespace vx diff --git a/src/tim/vx/ops/README.md b/src/tim/vx/ops/README.md index 8746aa5..a301e56 100644 --- a/src/tim/vx/ops/README.md +++ b/src/tim/vx/ops/README.md @@ -101,6 +101,7 @@ shuffle_channel|SHUFFLECHANNEL|Mapped|[ANEURALNETWORKS_CHANNEL_SHUFFLE](https:// Gelu|GELU|Mapped|[tf.nn.gelu](https://tensorflow.google.cn/api_docs/python/tf/nn/gelu) Svdf|SVDF|Mapped|[ANEURALNETWORKS_SVDF](https://developer.android.com/ndk/reference/group/neural-networks#group___neural_networks_1ggaabbe492c60331b13038e39d4207940e0a7096de21038c1ce49d354a00cba7b552) Erf|ERF|Mapped|[tf.math.erf](https://tensorflow.google.cn/api_docs/python/tf/math/erf) +GroupedConv1d|GROUPED_CONV1D|Mapped|[tf.keras.layers.Conv1D](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv1D) ||PROPOSAL| TBD |[Faster-RCNN Proposal Layer](https://github.com/intel/caffe/blob/master/examples/faster-rcnn/lib/rpn/proposal_layer.py) ||ROI_POOL|Planned 22Q1 |[ANEURALNETWORKS_ROI_POOLING](https://developer.android.com/ndk/reference/group/neural-networks#group___neural_networks_1ggaabbe492c60331b13038e39d4207940e0a6736198af337b2efbdb0b6b64dee7fe4) ||ROI_ALIGN| TBD |[ANEURALNETWORKS_ROI_ALIGN](https://developer.android.com/ndk/reference/group/neural-networks#group___neural_networks_1ggaabbe492c60331b13038e39d4207940e0a2848b39dd4bfba78f2438fda0d9397a4) diff --git a/src/tim/vx/ops/conv1d_test.cc b/src/tim/vx/ops/conv1d_test.cc index 1bf1ae7..79b92c4 100644 --- a/src/tim/vx/ops/conv1d_test.cc +++ b/src/tim/vx/ops/conv1d_test.cc @@ -27,18 +27,19 @@ #include "test_utils.h" #include "gtest/gtest.h" -TEST(Conv1d, shape_3_6_1_float_ksize_1_stride_1_weights_3_no_bias_whcn) { +TEST(Conv1d, shape_3_6_1_float_ksize_1_stride_1_weights_3_no_bias_wcn) { auto ctx = tim::vx::Context::Create(); auto graph = ctx->CreateGraph(); - tim::vx::ShapeType io_shape({3, 6, 1}); + tim::vx::ShapeType in_shape({3, 6, 1}); tim::vx::ShapeType param_shape({1,6,3}); + tim::vx::ShapeType out_shape({3, 3, 1}); tim::vx::TensorSpec 
input_spec(tim::vx::DataType::FLOAT32, - io_shape, tim::vx::TensorAttribute::INPUT); + in_shape, tim::vx::TensorAttribute::INPUT); tim::vx::TensorSpec param_spec(tim::vx::DataType::FLOAT32, param_shape, tim::vx::TensorAttribute::INPUT); tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, - io_shape, tim::vx::TensorAttribute::OUTPUT); + out_shape, tim::vx::TensorAttribute::OUTPUT); auto input_tensor = graph->CreateTensor(input_spec); auto weight_tensor = graph->CreateTensor(param_spec); @@ -78,7 +79,7 @@ TEST(Conv1d, shape_3_6_1_float_ksize_1_stride_1_weights_3_no_bias_whcn) { EXPECT_TRUE(ArraysMatch(golden, output, 1e-5f)); } -TEST(Conv1d, shape_6_2_1_uint8_ksize_6_stride_1_weights_2_whcn) { +TEST(Conv1d, shape_6_2_1_uint8_ksize_6_stride_1_weights_2_wcn) { auto ctx = tim::vx::Context::Create(); auto graph = ctx->CreateGraph(); @@ -144,7 +145,7 @@ TEST(Conv1d, shape_6_2_1_uint8_ksize_6_stride_1_weights_2_whcn) { EXPECT_TRUE(ArraysMatch(golden, output, static_cast(0))); } -TEST(Conv1d, shape_6_2_1_uint8_ksize_3_stride_1_pad_1_weights_2_no_bias_whcn) { +TEST(Conv1d, shape_6_2_1_uint8_ksize_3_stride_1_pad_1_weights_2_no_bias_wcn) { auto ctx = tim::vx::Context::Create(); auto graph = ctx->CreateGraph(); @@ -199,7 +200,7 @@ TEST(Conv1d, shape_6_2_1_uint8_ksize_3_stride_1_pad_1_weights_2_no_bias_whcn) { #if 0 // Fail case // Internal impl conv1d don't support multiplier, need wait for the fix. 
-TEST(Conv1d, shape_7_2_1_uint8_ksize_3_stride_2_multiplier_1_whcn) { +TEST(Conv1d, shape_7_2_1_uint8_ksize_3_stride_2_multiplier_1_wcn) { auto ctx = tim::vx::Context::Create(); auto graph = ctx->CreateGraph(); diff --git a/src/tim/vx/ops/groupedconv1d.cc b/src/tim/vx/ops/groupedconv1d.cc new file mode 100644 index 0000000..273b936 --- /dev/null +++ b/src/tim/vx/ops/groupedconv1d.cc @@ -0,0 +1,59 @@ +/**************************************************************************** +* +* Copyright (c) 2021 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. 
+* +*****************************************************************************/ +#include "tim/vx/ops/groupedconv1d.h" + +#include "operation_private.h" +#include "type_utils.h" +#include "vsi_nn_pub.h" + +namespace tim { +namespace vx { +namespace ops { + +GroupedConv1d::GroupedConv1d(Graph* graph, + PadType padding, + const uint32_t stride, + const uint32_t dilation, + uint32_t group, + DataLayout input_layout, DataLayout kernel_layout) + : Operation(graph, VSI_NN_OP_GROUPED_CONV1D, 3, 1, input_layout), + padding_(padding), stride_(stride), dilation_(dilation), + pad_({0,0}), group_(group), + kernel_layout_(kernel_layout) { + this->impl()->node()->nn_param.grouped_conv1d.pad_type = TranslatePadType(padding_); + this->impl()->node()->nn_param.grouped_conv1d.stride = stride_; + this->impl()->node()->nn_param.grouped_conv1d.group = group_; + this->impl()->node()->nn_param.grouped_conv1d.dilation = dilation_; + } + +std::shared_ptr<Operation> GroupedConv1d::Clone( + std::shared_ptr<Graph>& graph) const { + return graph->CreateOperation<GroupedConv1d>( + this->padding_, this->stride_, this->dilation_, this->group_, this->impl_->layout_, + this->kernel_layout_); +} + +} // namespace ops +} // namespace vx +} // namespace tim \ No newline at end of file diff --git a/src/tim/vx/ops/groupedconv1d_test.cc b/src/tim/vx/ops/groupedconv1d_test.cc new file mode 100644 index 0000000..ce96400 --- /dev/null +++ b/src/tim/vx/ops/groupedconv1d_test.cc @@ -0,0 +1,72 @@ +/**************************************************************************** +* +* Copyright (c) 2021 Vivante Corporation +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to 
the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +* DEALINGS IN THE SOFTWARE. +* +*****************************************************************************/ +#include "tim/vx/context.h" +#include "tim/vx/graph.h" +#include "tim/vx/ops/groupedconv1d.h" +#include "test_utils.h" +#include "gtest/gtest.h" + +TEST(GroupedConv1d, shape_6_2_1_float_ksize_6_stride_1_group_2_no_bias_wcn) { + auto ctx = tim::vx::Context::Create(); + auto graph = ctx->CreateGraph(); + + tim::vx::ShapeType in_shape({6, 2, 1}); + tim::vx::ShapeType param_shape({6, 1, 2}); + tim::vx::ShapeType out_shape({1, 2, 1}); + tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, + in_shape, tim::vx::TensorAttribute::INPUT); + tim::vx::TensorSpec param_spec(tim::vx::DataType::FLOAT32, + param_shape, tim::vx::TensorAttribute::INPUT); + tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, + out_shape, tim::vx::TensorAttribute::OUTPUT); + + auto input_tensor = graph->CreateTensor(input_spec); + auto weight_tensor = graph->CreateTensor(param_spec); + auto output_tensor = graph->CreateTensor(output_spec); + + std::vector<float> in_data = { + -1, 0, 1, -1.5, 0.5, 1.5, + -2, -0.5, 2, -2.5, 0, 2.5, + }; + std::vector<float> weight = { + -3, -2, -1.5, 1.5, 2, 3, + -2.5, -2, -1.5, 1.5, 2, 2.5, + }; + std::vector<float> golden = { + 4.75, 5.5, + }; + + EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float))); + 
EXPECT_TRUE(weight_tensor->CopyDataToTensor(weight.data(), weight.size() * sizeof(float))); + + auto op = graph->CreateOperation<tim::vx::ops::GroupedConv1d>(tim::vx::PadType::VALID, 1, 1, 2); + (*op).BindInputs({input_tensor, weight_tensor}).BindOutputs({output_tensor}); + + EXPECT_TRUE(graph->Compile()); + EXPECT_TRUE(graph->Run()); + + std::vector<float> output(golden.size()); + EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data())); + EXPECT_TRUE(ArraysMatch(golden, output, 1e-5f)); +} diff --git a/src/tim/vx/ops/groupedconv2d.cc b/src/tim/vx/ops/groupedconv2d.cc index 040eaf6..a6de274 100644 --- a/src/tim/vx/ops/groupedconv2d.cc +++ b/src/tim/vx/ops/groupedconv2d.cc @@ -58,15 +58,15 @@ GroupedConv2d::GroupedConv2d(Graph* graph, : Operation(graph, VSI_NN_OP_GROUPED_CONV2D, 3, 1, input_layout), padding_(PadType::AUTO), strides_(strides), dilation_(dilation), pad_(pad), group_number_(group_number), kernel_layout_(kernel_layout) { - this->impl()->node()->nn_param.conv2d.stride[0] = strides_[0]; - this->impl()->node()->nn_param.conv2d.stride[1] = strides_[1]; - this->impl()->node()->nn_param.conv2d.group = group_number_; - this->impl()->node()->nn_param.conv2d.dilation[0] = dilation_[0]; - this->impl()->node()->nn_param.conv2d.dilation[1] = dilation_[1]; - this->impl()->node()->nn_param.conv2d.pad[0] = pad_[0]; - this->impl()->node()->nn_param.conv2d.pad[1] = pad_[1]; - this->impl()->node()->nn_param.conv2d.pad[2] = pad_[2]; - this->impl()->node()->nn_param.conv2d.pad[3] = pad_[3]; + this->impl()->node()->nn_param.grouped_conv2d.stride[0] = strides_[0]; + this->impl()->node()->nn_param.grouped_conv2d.stride[1] = strides_[1]; + this->impl()->node()->nn_param.grouped_conv2d.group = group_number_; + this->impl()->node()->nn_param.grouped_conv2d.dilation[0] = dilation_[0]; + this->impl()->node()->nn_param.grouped_conv2d.dilation[1] = dilation_[1]; + this->impl()->node()->nn_param.grouped_conv2d.pad[0] = pad_[0]; + this->impl()->node()->nn_param.grouped_conv2d.pad[1] = pad_[1]; + 
this->impl()->node()->nn_param.grouped_conv2d.pad[2] = pad_[2]; + this->impl()->node()->nn_param.grouped_conv2d.pad[3] = pad_[3]; } std::shared_ptr<Operation> GroupedConv2d::Clone(