diff --git a/include/tim/vx/ops/deconv1d.h b/include/tim/vx/ops/deconv1d.h
new file mode 100644
index 0000000..13d2ccb
--- /dev/null
+++ b/include/tim/vx/ops/deconv1d.h
@@ -0,0 +1,79 @@
+/****************************************************************************
+*
+* Copyright (c) 2021 Vivante Corporation
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+* DEALINGS IN THE SOFTWARE.
+*
+*****************************************************************************/
+#ifndef TIM_VX_OPS_DECONV1D_H_
+#define TIM_VX_OPS_DECONV1D_H_
+
+#include <array>
+
+#include "tim/vx/operation.h"
+
+namespace tim {
+namespace vx {
+namespace ops {
+
+/**
+ * ## DeConv1d
+ *
+ * Performs the transpose of the 1-D convolution operation.
+ *
+ * This operation is sometimes called "deconvolution1d" after Deconvolutional Networks,
+ * but it is actually the transpose (gradient) of Conv1D rather than an actual deconvolution.
+ *
+ * - weights : the output channel count of the weight tensor.
+ * - ksize : the kernel length of the weight tensor.
+ * - padding : AUTO, VALID or SAME.
+ * - pad : pad values applied to the front and back of the spatial axis.
+ * - stride : stride along the spatial axis.
+ * - output_padding : the amount of extra padding added along the spatial axis of
+ *                    the output tensor.
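+ *
+ * A minimal usage sketch (illustrative only; `graph`, `input`, `kernel`, and
+ * `output` are placeholder handles assumed to have been created as in the unit
+ * tests added by this patch):
+ *
+ *     auto deconv = graph->CreateOperation<tim::vx::ops::DeConv1d>(
+ *         1, tim::vx::PadType::SAME, 3, 1, 1);  // weights, padding, ksize, stride, output_padding
+ *     (*deconv).BindInputs({input, kernel}).BindOutputs({output});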
+ */
+
+class DeConv1d : public Operation {
+ public:
+  DeConv1d(Graph* graph, int32_t oc_count, PadType pad_type,
+           uint32_t ksize,
+           uint32_t stride,
+           uint32_t output_padding);
+  DeConv1d(Graph* graph, int32_t oc_count, PadType pad_type,
+           uint32_t ksize,
+           uint32_t stride,
+           uint32_t output_padding,
+           const std::array<uint32_t, 2>& pad,
+           uint32_t group = 1);
+
+ protected:
+  const uint32_t oc_count_;  // output channel count
+  const PadType pad_type_;
+  const uint32_t ksize_;
+  const uint32_t stride_;
+  const uint32_t output_padding_;
+  const std::array<uint32_t, 2> pad_;
+  const uint32_t group_;
+};
+
+}  // namespace ops
+}  // namespace vx
+}  // namespace tim
+
+#endif /* TIM_VX_OPS_DECONV1D_H_ */
diff --git a/src/tim/vx/ops/README.md b/src/tim/vx/ops/README.md
index 123f537..cb38a30 100644
--- a/src/tim/vx/ops/README.md
+++ b/src/tim/vx/ops/README.md
@@ -148,7 +148,7 @@ Mish|MISH|Mapped|[tfa.activations.mish](https://tensorflow.google.cn/addons/api_
 ||PRE_PROCESS_YUV444|Unmapped
 ||PRE_PROCESS_NV12|Unmapped
 ||SCATTER_ND|Unmapped|[tf.scatter_nd](https://tensorflow.google.cn/api_docs/python/tf/scatter_nd)
-||DECONVOLUTION1D|Unmapped|[tf.nn.conv1d_transpose](https://tensorflow.google.cn/api_docs/python/tf/nn/conv1d_transpose)
+|DeConv1d|DECONVOLUTION1D|Mapped|[tf.nn.conv1d_transpose](https://tensorflow.google.cn/api_docs/python/tf/nn/conv1d_transpose)
 ||INTERP|Unmapped
 ||RESIZE_1D|Unmapped
 ||CONV_RELU|Deprecated
diff --git a/src/tim/vx/ops/deconv1d.cc b/src/tim/vx/ops/deconv1d.cc
new file mode 100644
index 0000000..1de74ea
--- /dev/null
+++ b/src/tim/vx/ops/deconv1d.cc
@@ -0,0 +1,66 @@
+/****************************************************************************
+*
+* Copyright (c) 2021 Vivante Corporation
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+* DEALINGS IN THE SOFTWARE.
+*
+*****************************************************************************/
+#include "tim/vx/ops/deconv1d.h"
+
+#include <array>
+
+#include "operation_private.h"
+#include "type_utils.h"
+#include "vsi_nn_pub.h"
+
+namespace tim {
+namespace vx {
+namespace ops {
+
+DeConv1d::DeConv1d(Graph* graph, int32_t oc_count, PadType pad_type,
+                   uint32_t ksize, uint32_t stride, uint32_t output_padding)
+    : DeConv1d(graph, oc_count, pad_type, ksize, stride, output_padding,
+               {0, 0}) {
+}
+
+DeConv1d::DeConv1d(Graph* graph, int32_t oc_count, PadType pad_type,
+                   uint32_t ksize, uint32_t stride, uint32_t output_padding,
+                   const std::array<uint32_t, 2>& pad, uint32_t group)
+    : Operation(graph, VSI_NN_OP_DECONVOLUTION1D),
+      oc_count_(oc_count),
+      pad_type_(pad_type),
+      ksize_(ksize),
+      stride_(stride),
+      output_padding_(output_padding),
+      pad_(pad),
+      group_(group) {
+  this->impl()->node()->nn_param.deconvolution1d.ksize = ksize_;
+  this->impl()->node()->nn_param.deconvolution1d.stride = stride_;
+  this->impl()->node()->nn_param.deconvolution1d.pad_type = TranslatePadType(pad_type_);
+  this->impl()->node()->nn_param.deconvolution1d.weights = oc_count_;
+  this->impl()->node()->nn_param.deconvolution1d.group = group_;
+  this->impl()->node()->nn_param.deconvolution1d.output_padding = output_padding_;
+  this->impl()->node()->nn_param.deconvolution1d.pad[0] = pad_[0];
+  this->impl()->node()->nn_param.deconvolution1d.pad[1] = pad_[1];
+}
+
+}  // namespace ops
+}  // namespace vx
+}  // namespace tim
diff --git a/src/tim/vx/ops/deconv1d_test.cc b/src/tim/vx/ops/deconv1d_test.cc
new file mode 100644
index 0000000..0c710fe
--- /dev/null
+++ b/src/tim/vx/ops/deconv1d_test.cc
@@ -0,0 +1,133 @@
+/****************************************************************************
+*
+* Copyright (c) 2021 Vivante Corporation
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+* DEALINGS IN THE SOFTWARE.
+*
+*****************************************************************************/
+#include "tim/vx/context.h"
+#include "tim/vx/graph.h"
+#include "tim/vx/ops/deconv1d.h"
+
+#include "gtest/gtest.h"
+
+TEST(DeConv1d, no_bias_layout_whcn_depthwise_shape_3_2_1) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType input_shape ({3, 2, 1});  // whcn
+  tim::vx::ShapeType kernel_shape({3, 2, 1});  // whc1, same layout as a depthwise convolution kernel
+  tim::vx::ShapeType output_shape({5, 2, 1});  // whcn
+
+  tim::vx::TensorSpec input_spec (tim::vx::DataType::FLOAT32, input_shape, tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape, tim::vx::TensorAttribute::CONSTANT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape, tim::vx::TensorAttribute::OUTPUT);
+
+  auto input_tensor  = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+  auto kernel_tensor = graph->CreateTensor(kernel_spec);
+
+  std::vector<float> input_data  = { 3.0f, 9.0f, 3.0f,
+                                     7.0f, 5.0f, 9.0f, };
+  std::vector<float> kernel_data = { 9.0f, 0.0f, 1.0f,
+                                     3.0f, 0.0f, 0.0f, };
+
+  // Each channel is transposed-convolved with its own kernel (group == channel count).
+  std::vector<float> golden = {
+      27.0f, 81.0f, 30.0f, 9.0f, 3.0f,
+      21.0f, 15.0f, 27.0f, 0.0f, 0.0f, };
+
+  std::vector<float> output_data(golden.size());
+
+  EXPECT_TRUE(input_tensor->CopyDataToTensor(input_data.data(), input_data.size() * sizeof(float)));
+  EXPECT_TRUE(kernel_tensor->CopyDataToTensor(kernel_data.data(), kernel_data.size() * sizeof(float)));
+
+  auto op = graph->CreateOperation<tim::vx::ops::DeConv1d>(
+      2, tim::vx::PadType::SAME, 3, 1, 1, std::array<uint32_t, 2>({0, 0}), 2);
+  (*op).BindInputs({input_tensor, kernel_tensor}).BindOutputs({output_tensor});
+
+  EXPECT_TRUE(graph->Compile());
+  EXPECT_TRUE(graph->Run());
+
+  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output_data.data()));
+
+  EXPECT_EQ(golden, output_data) << "Result mismatch";
+}
+
+TEST(DeConv1d, layout_whcn_shape_3_1_1) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType input_shape ({3, 1, 1});
+  tim::vx::ShapeType kernel_shape({3, 1, 1});
+  tim::vx::ShapeType output_shape({5, 1, 1});
+  tim::vx::ShapeType bias_shape({1});
+
+  tim::vx::Quantization input_quant(tim::vx::QuantType::ASYMMETRIC, 1.0f, 0);
+  tim::vx::Quantization output_quant(tim::vx::QuantType::ASYMMETRIC, 1.0f, 2);
+  tim::vx::Quantization weight_quant(tim::vx::QuantType::ASYMMETRIC, 1.0f, 0);
+  tim::vx::Quantization bias_quant(tim::vx::QuantType::ASYMMETRIC, 1.0f, 0);
+
+  tim::vx::TensorSpec input_spec (
+      tim::vx::DataType::UINT8, input_shape, tim::vx::TensorAttribute::INPUT, input_quant);
+  tim::vx::TensorSpec kernel_spec(
+      tim::vx::DataType::UINT8, kernel_shape, tim::vx::TensorAttribute::CONSTANT, weight_quant);
+  tim::vx::TensorSpec bias_spec  (
+      tim::vx::DataType::INT32, bias_shape, tim::vx::TensorAttribute::CONSTANT, bias_quant);
+  tim::vx::TensorSpec output_spec(
+      tim::vx::DataType::UINT8, output_shape, tim::vx::TensorAttribute::OUTPUT, output_quant);
+
+  auto input_tensor  = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+  auto kernel_tensor = graph->CreateTensor(kernel_spec);
+  auto bias_tensor   = graph->CreateTensor(bias_spec);
+
+  std::vector<uint8_t> input_data = {
+      3, 9, 3,
+  };
+
+  std::vector<uint8_t> kernel_data = {
+      9, 0, 1,
+  };
+
+  std::vector<int32_t> bias_data = {
+      -5,
+  };
+
+  // Transposed convolution plus bias, requantized with output zero point 2.
+  std::vector<uint8_t> golden = {
+      24, 78, 27, 6, 0,
+  };
+
+  std::vector<uint8_t> output_data(golden.size());
+
+  EXPECT_TRUE(input_tensor->CopyDataToTensor(input_data.data(), input_data.size()));
+  EXPECT_TRUE(kernel_tensor->CopyDataToTensor(kernel_data.data(), kernel_data.size()));
+  EXPECT_TRUE(bias_tensor->CopyDataToTensor(bias_data.data(), bias_data.size() * sizeof(int32_t)));
+
+  auto op = graph->CreateOperation<tim::vx::ops::DeConv1d>(
+      1, tim::vx::PadType::SAME, 3, 1, 1,
+      std::array<uint32_t, 2>({0, 0}),
+      1);
+  (*op).BindInputs({input_tensor, kernel_tensor, bias_tensor}).BindOutputs({output_tensor});
+
+  EXPECT_TRUE(graph->Compile());
+  EXPECT_TRUE(graph->Run());
+
+  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output_data.data()));
+  EXPECT_EQ(golden, output_data) << "Result mismatch";
+}
\ No newline at end of file