Support specifying pad_mode in pad (#355)

https://github.com/VeriSilicon/TIM-VX/issues/307

Signed-off-by: Chen Xin <jack.chen@verisilicon.com>

Co-authored-by: Chen Xin <jack.chen@verisilicon.com>
chxin66 2022-04-14 19:55:47 +08:00 committed by GitHub
parent 479fc576ae
commit eb21143987
4 changed files with 177 additions and 10 deletions

include/tim/vx/ops/pad.h

@@ -35,19 +35,35 @@ namespace ops {
  * Pads a tensor.
  *
  * - const_val : the value to pad.
+ * - pad_mode : the padding mode.
  * - front_size : Add pad values to the left and top.
  * - back_size : Add pad values to the right and bottom.
  */
 class Pad : public DirectMapOp {
  public:
-  Pad(Graph* graph, const std::vector<uint32_t>& front_size,
-      const std::vector<uint32_t>& back_size, int32_t const_val);
+  typedef enum {
+    // Supported padding modes; const_val is only used with PAD_MODE_CONSTANT.
+    PAD_MODE_CONSTANT,
+    PAD_MODE_EDGE,
+    PAD_MODE_SYMMETRIC,
+    PAD_MODE_REFLECT,
+  } pad_mode_type;
-  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;
+  Pad(Graph* graph, const std::vector<uint32_t>& front_size,
+      const std::vector<uint32_t>& back_size, int32_t const_val);
+  Pad(Graph* graph, const std::vector<uint32_t>& front_size,
+      const std::vector<uint32_t>& back_size, int32_t const_val,
+      pad_mode_type pad_mode);
+  std::shared_ptr<Operation> Clone(
+      std::shared_ptr<Graph>& graph) const override;
  protected:
   std::vector<uint32_t> front_size_;
   std::vector<uint32_t> back_size_;
   int32_t const_val_;
+  pad_mode_type pad_mode_;
 };
 }  // namespace ops
 }  // namespace vx
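
With the new overload in place, callers can choose a padding mode when building a graph. Below is a minimal usage sketch, assuming a graph and input/output tensors created as in the tests added by this commit; the helper function and variable names are illustrative and not part of the change.

#include <cstdint>
#include <memory>
#include <vector>

#include "tim/vx/graph.h"
#include "tim/vx/ops/pad.h"
#include "tim/vx/tensor.h"

// Illustrative helper: attach a Pad op with an explicit padding mode.
void AttachPad(std::shared_ptr<tim::vx::Graph>& graph,
               const std::shared_ptr<tim::vx::Tensor>& input,
               const std::shared_ptr<tim::vx::Tensor>& output) {
  std::vector<uint32_t> front = {1, 1};  // pad added before each dimension
  std::vector<uint32_t> back = {1, 1};   // pad added after each dimension

  // Existing constructor (unchanged behavior): constant padding with const_val.
  // auto pad = graph->CreateOperation<tim::vx::ops::Pad>(front, back, 0);

  // New overload: pick any pad_mode_type; const_val is only consulted when
  // the mode is PAD_MODE_CONSTANT.
  auto pad = graph->CreateOperation<tim::vx::ops::Pad>(
      front, back, 0, tim::vx::ops::Pad::PAD_MODE_EDGE);
  (*pad).BindInput(input).BindOutput(output);
}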

src/tim/transform/ops/pad_layout_inference.h

@@ -51,15 +51,13 @@ class PadLayoutInfer : public OpLayoutInfer {
            sizeof(uint32_t) * dim_num);
     memcpy(back_size.data(), op_->impl()->node()->nn_param.pad.back_size,
            sizeof(uint32_t) * dim_num);
-    int32_t pad_value = op_->impl()->node()->nn_param.pad.const_val;
     if (!input_pv->IsAligned()) {
       front_size = MapMultipleAxis(input_pv->AsStdVec(), front_size);
       back_size = MapMultipleAxis(input_pv->AsStdVec(), back_size);
     }
-    auto pad = context_->infer_graph_->CreateOperation<vx::ops::Pad>(
-        front_size, back_size, pad_value);
+    auto pad = op_->Clone(context_->infer_graph_);
     auto out_infer = CreateOutputsTensor(input_pv);
     (*pad).BindInput(context_->GetMapedTensor(i_src));
     (*pad).BindOutput(out_infer[0]);

src/tim/vx/ops/pad.cc

@@ -29,21 +29,31 @@
 namespace tim {
 namespace vx {
 namespace ops {
 Pad::Pad(Graph* graph, const std::vector<uint32_t>& front_size,
          const std::vector<uint32_t>& back_size, int32_t const_val)
+    : Pad(graph, front_size, back_size, const_val, PAD_MODE_CONSTANT) {}
+
+Pad::Pad(Graph* graph, const std::vector<uint32_t>& front_size,
+         const std::vector<uint32_t>& back_size, int32_t const_val,
+         pad_mode_type pad_mode)
     : DirectMapOp(graph, VSI_NN_OP_PAD),
       front_size_(front_size),
       back_size_(back_size),
-      const_val_(const_val) {
+      const_val_(const_val),
+      pad_mode_(pad_mode) {
   this->impl()->node()->nn_param.pad.front_size = front_size_.data();
   this->impl()->node()->nn_param.pad.back_size = back_size_.data();
   this->impl()->node()->nn_param.pad.dim_num = front_size_.size();
-  this->impl()->node()->nn_param.pad.const_val = const_val_;
-  this->impl()->node()->nn_param.pad.mode = VSI_NN_PAD_MODE_CONSTANT;
+  if (pad_mode_ == PAD_MODE_CONSTANT) {
+    this->impl()->node()->nn_param.pad.const_val = const_val_;
+  }
+  this->impl()->node()->nn_param.pad.mode = (vsi_nn_pad_mode_e)pad_mode_;
 }

 std::shared_ptr<Operation> Pad::Clone(std::shared_ptr<Graph>& graph) const {
-  return graph->CreateOperation<Pad>(this->front_size_, this->back_size_, this->const_val_);
+  return graph->CreateOperation<Pad>(this->front_size_, this->back_size_,
+                                     this->const_val_, this->pad_mode_);
 }

 }  // namespace ops
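
One thing worth noting in the implementation: the assignment to nn_param.pad.mode is a raw cast from pad_mode_type to vsi_nn_pad_mode_e, so it only works if the two enums declare their values in the same order. A sketch of a compile-time guard is shown below for the one backend enumerator visible in this diff; where such a check would live, and the names of the remaining vsi_nn enumerators, are assumptions, so only PAD_MODE_CONSTANT is asserted.

// Hypothetical guard, e.g. in pad.cc where both enums are visible.
static_assert(static_cast<int>(tim::vx::ops::Pad::PAD_MODE_CONSTANT) ==
                  static_cast<int>(VSI_NN_PAD_MODE_CONSTANT),
              "Pad::pad_mode_type must stay in sync with vsi_nn_pad_mode_e");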

src/tim/vx/ops/pad_test.cc (new file, 143 lines added)

@@ -0,0 +1,143 @@
/****************************************************************************
*
* Copyright (c) 2021 Vivante Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************/
#include "tim/vx/context.h"
#include "tim/vx/graph.h"
#include "tim/vx/ops/pad.h"
#include "tim/vx/types.h"
#include "test_utils.h"
#include "gtest/gtest.h"
TEST(Pad, constant) {
auto ctx = tim::vx::Context::Create();
auto graph = ctx->CreateGraph();
tim::vx::ShapeType input_shape({3, 2});
tim::vx::ShapeType output_shape({5, 4});
tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
tim::vx::TensorAttribute::INPUT);
tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
tim::vx::TensorAttribute::OUTPUT);
auto input_tensor = graph->CreateTensor(input_spec);
auto output_tensor = graph->CreateTensor(output_spec);
std::vector<float> input_data = {
0, 1, 2, 3, 4, 5,
};
std::vector<float> golden = {
1, 1, 1, 1, 1, 1, 0, 1, 2, 1, 1, 3, 4, 5, 1, 1, 1, 1, 1, 1,
};
EXPECT_TRUE(
input_tensor->CopyDataToTensor(input_data.data(), input_data.size() * 4));
std::vector<uint32_t> front = {1, 1};
std::vector<uint32_t> back = {1, 1};
auto op = graph->CreateOperation<tim::vx::ops::Pad>(
front, back, 1, tim::vx::ops::Pad::PAD_MODE_CONSTANT);
(*op).BindInput(input_tensor).BindOutput(output_tensor);
EXPECT_TRUE(graph->Compile());
EXPECT_TRUE(graph->Run());
std::vector<float> output(golden.size());
EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
EXPECT_EQ(golden, output);
}
TEST(Pad, reflect) {
auto ctx = tim::vx::Context::Create();
auto graph = ctx->CreateGraph();
tim::vx::ShapeType input_shape({3, 2});
tim::vx::ShapeType output_shape({5, 4});
tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
tim::vx::TensorAttribute::INPUT);
tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
tim::vx::TensorAttribute::OUTPUT);
auto input_tensor = graph->CreateTensor(input_spec);
auto output_tensor = graph->CreateTensor(output_spec);
std::vector<float> input_data = {
0, 1, 2, 3, 4, 5,
};
// Reflect padding mirrors about the border without repeating it, so the
// padded rows are {4,3,4,5,4}, {1,0,1,2,1}, {4,3,4,5,4}, {1,0,1,2,1}.
std::vector<float> golden = {
4, 3, 4, 5, 4, 1, 0, 1, 2, 1, 4, 3, 4, 5, 4, 1, 0, 1, 2, 1,
};
EXPECT_TRUE(
input_tensor->CopyDataToTensor(input_data.data(), input_data.size() * 4));
std::vector<uint32_t> front = {1, 1};
std::vector<uint32_t> back = {1, 1};
auto op = graph->CreateOperation<tim::vx::ops::Pad>(
front, back, 0, tim::vx::ops::Pad::PAD_MODE_REFLECT);
(*op).BindInput(input_tensor).BindOutput(output_tensor);
EXPECT_TRUE(graph->Compile());
EXPECT_TRUE(graph->Run());
std::vector<float> output(golden.size());
EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
EXPECT_EQ(golden, output);
}
TEST(Pad, edge) {
auto ctx = tim::vx::Context::Create();
auto graph = ctx->CreateGraph();
tim::vx::ShapeType input_shape({3, 2});
tim::vx::ShapeType output_shape({5, 4});
tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
tim::vx::TensorAttribute::INPUT);
tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
tim::vx::TensorAttribute::OUTPUT);
auto input_tensor = graph->CreateTensor(input_spec);
auto output_tensor = graph->CreateTensor(output_spec);
std::vector<float> input_data = {
0, 1, 2, 3, 4, 5,
};
std::vector<float> golden = {0, 0, 1, 2, 2, 0, 0, 1, 2, 2,
3, 3, 4, 5, 5, 3, 3, 4, 5, 5};
EXPECT_TRUE(
input_tensor->CopyDataToTensor(input_data.data(), input_data.size() * 4));
std::vector<uint32_t> front = {1, 1};
std::vector<uint32_t> back = {1, 1};
auto op = graph->CreateOperation<tim::vx::ops::Pad>(
front, back, 0, tim::vx::ops::Pad::PAD_MODE_EDGE);
(*op).BindInput(input_tensor).BindOutput(output_tensor);
EXPECT_TRUE(graph->Compile());
EXPECT_TRUE(graph->Run());
std::vector<float> output(golden.size());
EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
EXPECT_EQ(golden, output);
}
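
PAD_MODE_SYMMETRIC is declared in the header but not covered by the tests above. A possible test is sketched below, under the assumption that the backend follows the usual symmetric-padding definition (the border element itself is mirrored, so with a padding width of 1 the expected output coincides with the edge-mode golden); the golden values are derived from that definition rather than taken from a driver run.

TEST(Pad, symmetric) {
  auto ctx = tim::vx::Context::Create();
  auto graph = ctx->CreateGraph();

  tim::vx::ShapeType input_shape({3, 2});
  tim::vx::ShapeType output_shape({5, 4});
  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
                                 tim::vx::TensorAttribute::INPUT);
  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
                                  tim::vx::TensorAttribute::OUTPUT);
  auto input_tensor = graph->CreateTensor(input_spec);
  auto output_tensor = graph->CreateTensor(output_spec);

  std::vector<float> input_data = {0, 1, 2, 3, 4, 5};
  // With a pad of 1 on every side, symmetric padding repeats the border
  // element, giving the same values as edge padding for this input.
  std::vector<float> golden = {0, 0, 1, 2, 2, 0, 0, 1, 2, 2,
                               3, 3, 4, 5, 5, 3, 3, 4, 5, 5};

  EXPECT_TRUE(
      input_tensor->CopyDataToTensor(input_data.data(), input_data.size() * 4));
  std::vector<uint32_t> front = {1, 1};
  std::vector<uint32_t> back = {1, 1};
  auto op = graph->CreateOperation<tim::vx::ops::Pad>(
      front, back, 0, tim::vx::ops::Pad::PAD_MODE_SYMMETRIC);
  (*op).BindInput(input_tensor).BindOutput(output_tensor);

  EXPECT_TRUE(graph->Compile());
  EXPECT_TRUE(graph->Run());

  std::vector<float> output(golden.size());
  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
  EXPECT_EQ(golden, output);
}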