add alpha & beta parameters for HardSigmoid (#265)

Author: Antkillerfarm 2022-01-13 14:17:19 +08:00, committed by GitHub
parent fe91e7f13d
commit 36e6afa567
4 changed files with 74 additions and 28 deletions
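
HardSigmoid was previously registered as a parameterless activation; this change threads the alpha and beta coefficients from the public C++ API down into the underlying VSI_NN_OP_HARD_SIGMOID node. Assuming the usual ONNX-style definition HardSigmoid(x) = max(0, min(1, alpha * x + beta)), which the quantized test data below is consistent with, callers can now choose the slope and offset explicitly. A minimal usage sketch, with graph and tensor setup omitted and tensor names illustrative:

auto hard_sigmoid =
    graph->CreateOperation<tim::vx::ops::HardSigmoid>(0.2f, 0.5f);  // alpha, beta
(*hard_sigmoid).BindInput(input_tensor).BindOutput(output_tensor);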

@@ -85,7 +85,6 @@ DECLARE_NO_PARAMETER_ACTIVATION(Sigmoid)
DECLARE_NO_PARAMETER_ACTIVATION(Swish)
DECLARE_NO_PARAMETER_ACTIVATION(HardSwish)
DECLARE_NO_PARAMETER_ACTIVATION(Mish)
DECLARE_NO_PARAMETER_ACTIVATION(HardSigmoid)
DECLARE_NO_PARAMETER_ACTIVATION(SoftRelu)
#undef DEFINE_NO_PARAMETER_ACTIVATION
@@ -100,6 +99,17 @@ class Prelu : public DirectMapOp {
int axis_;
};
class HardSigmoid : public DirectMapOp {
 public:
  HardSigmoid(Graph* graph, float alpha, float beta);
  std::shared_ptr<Operation> Clone(
      std::shared_ptr<Graph>& graph) const override;
 protected:
  float alpha_;
  float beta_;
};
class LeakyRelu : public DirectMapOp {
public:
LeakyRelu(Graph* graph, float alpha);
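
DECLARE_NO_PARAMETER_ACTIVATION only covers ops whose constructor takes nothing beyond the graph, so once HardSigmoid carries alpha and beta it has to leave the macro list and get a hand-written declaration, mirroring Prelu and LeakyRelu around it. For orientation, a rough sketch of what the macro presumably expands to for a parameterless op such as Sigmoid (the exact macro body is an assumption; only its shape is implied by the explicit classes in this header):

class Sigmoid : public DirectMapOp {
 public:
  Sigmoid(Graph* graph);
  std::shared_ptr<Operation> Clone(
      std::shared_ptr<Graph>& graph) const override;
};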

@@ -46,7 +46,7 @@ class ActivationLayoutInfer : public OpLayoutInfer {
assert(op_->impl()->InputsTensor().size() == 1);
auto i_src = op_->impl()->InputsTensor()[0];
auto input_pv = context_->GetPermuteVector(i_src);
auto activation = context_->infer_graph_->CreateOperation<OpType>();
auto activation = op_->Clone(context_->infer_graph_);
auto out_infer = CreateOutputsTensor(input_pv);
(*activation)
.BindInput(context_->GetMapedTensor(i_src))
@@ -56,30 +56,6 @@ class ActivationLayoutInfer : public OpLayoutInfer {
}
};
class LeakyReluLayoutInfer : public OpLayoutInfer {
public:
LeakyReluLayoutInfer(
const std::shared_ptr<vx::Operation> op,
std::shared_ptr<layout_inference_impl::LayoutInferContext>& context)
: OpLayoutInfer(op, context) {}
void OnInputs(
std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
assert(op_->impl()->InputsTensor().size() == 1);
auto i_src = op_->impl()->InputsTensor()[0];
auto input_pv = context_->GetPermuteVector(i_src);
auto leaky_relu =
context_->infer_graph_->CreateOperation<vx::ops::LeakyRelu>(
op_->impl()->node()->nn_param.activation.leaky_ratio);
auto out_infer = CreateOutputsTensor(input_pv);
(*leaky_relu)
.BindInput(context_->GetMapedTensor(i_src))
.BindOutput(out_infer[0]);
context_->SetPermuteVector(op_->impl()->OutputsTensor()[0], input_pv);
next_tensors.push_back(op_->impl()->OutputsTensor()[0]);
}
};
class PReluLayoutInfer : public OpLayoutInfer {
public:
PReluLayoutInfer(
@@ -107,6 +83,7 @@ class PReluLayoutInfer : public OpLayoutInfer {
using ReluLayoutInfer = ActivationLayoutInfer<vx::ops::Relu>;
using Relu1LayoutInfer = ActivationLayoutInfer<vx::ops::Relu1>;
using Relu6LayoutInfer = ActivationLayoutInfer<vx::ops::Relu6>;
using LeakyReluLayoutInfer = ActivationLayoutInfer<vx::ops::LeakyRelu>;
using EluLayoutInfer = ActivationLayoutInfer<vx::ops::Elu>;
using SigmoidLayoutInfer = ActivationLayoutInfer<vx::ops::Sigmoid>;
using MishLayoutInfer = ActivationLayoutInfer<vx::ops::Mish>;
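
The substantive change in the layout-inference pass: the generic ActivationLayoutInfer used to recreate the op with CreateOperation<OpType>(), which only works for parameterless activations and silently drops fields such as leaky_ratio or the new alpha/beta. Cloning the original op instead copies it together with its parameters into the inferred graph, so the hand-rolled LeakyReluLayoutInfer becomes redundant and LeakyRelu joins the plain ActivationLayoutInfer aliases. A self-contained illustration of the clone ("virtual constructor") idiom this relies on, using placeholder types rather than the real TIM-VX classes:

#include <memory>

struct Op {
  virtual ~Op() = default;
  virtual std::unique_ptr<Op> Clone() const = 0;  // each subclass knows how to copy itself
};

struct HardSigmoidOp : Op {
  float alpha, beta;
  HardSigmoidOp(float a, float b) : alpha(a), beta(b) {}
  std::unique_ptr<Op> Clone() const override {
    return std::make_unique<HardSigmoidOp>(alpha, beta);  // parameters survive the copy
  }
};

// Generic code can duplicate any activation without knowing its parameter list:
std::unique_ptr<Op> Duplicate(const Op& op) { return op.Clone(); }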

@@ -43,7 +43,6 @@ DEFINE_NO_PARAMETER_ACTIVATION(Relu6, VSI_NN_OP_RELU6)
DEFINE_NO_PARAMETER_ACTIVATION(Elu, VSI_NN_OP_ELU)
DEFINE_NO_PARAMETER_ACTIVATION(Sigmoid, VSI_NN_OP_SIGMOID)
DEFINE_NO_PARAMETER_ACTIVATION(Mish, VSI_NN_OP_MISH)
DEFINE_NO_PARAMETER_ACTIVATION(HardSigmoid, VSI_NN_OP_HARD_SIGMOID)
DEFINE_NO_PARAMETER_ACTIVATION(SoftRelu, VSI_NN_OP_SOFTRELU)
@@ -78,6 +77,16 @@ std::shared_ptr<Operation> Prelu::Clone(std::shared_ptr<Graph>& graph) const {
return graph->CreateOperation<Prelu>(this->axis_);
}
HardSigmoid::HardSigmoid(Graph* graph, float alpha, float beta)
    : DirectMapOp(graph, VSI_NN_OP_HARD_SIGMOID), alpha_(alpha), beta_(beta) {
  this->impl()->node()->nn_param.hard_sigmoid.alpha = alpha_;
  this->impl()->node()->nn_param.hard_sigmoid.beta = beta_;
}
std::shared_ptr<Operation> HardSigmoid::Clone(std::shared_ptr<Graph>& graph) const {
  return graph->CreateOperation<HardSigmoid>(this->alpha_, this->beta_);
}
Tanh::Tanh(Graph* graph) : DirectMapOp(graph, VSI_NN_OP_TANH) {
this->impl()->node()->nn_param.tanh.scale_a = 1.0;
this->impl()->node()->nn_param.tanh.scale_b = 1.0;
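
The constructor follows the usual DirectMapOp pattern: store the parameters, forward them into the corresponding nn_param fields of the underlying node, and have Clone() rebuild the op from the stored values. For comparison, the existing LeakyRelu presumably does the same with its single coefficient; this is a hypothetical sketch (the leaky_ratio field name is taken from the removed layout-inference code above, the rest is assumed):

LeakyRelu::LeakyRelu(Graph* graph, float alpha)
    : DirectMapOp(graph, VSI_NN_OP_LEAKY_RELU), alpha_(alpha) {
  this->impl()->node()->nn_param.activation.leaky_ratio = alpha_;  // mirrors hard_sigmoid.alpha/beta above
}
std::shared_ptr<Operation> LeakyRelu::Clone(std::shared_ptr<Graph>& graph) const {
  return graph->CreateOperation<LeakyRelu>(this->alpha_);
}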

@@ -177,4 +177,54 @@ TEST(Gelu, shape_5_1_uint8_Quantized) {
EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
EXPECT_TRUE(ArraysMatch(golden, output, (uint8_t)1));
}
}
TEST(HardSigmoid, shape_5_1_uint8_Quantized) {
auto ctx = tim::vx::Context::Create();
auto graph = ctx->CreateGraph();
tim::vx::ShapeType in_shape({20, 1});
tim::vx::ShapeType out_shape({20, 1});
std::vector<float> scalesInput = {0.00228914}; //scale
std::vector<int32_t> zeroPointsInput = {128}; //zero point
std::vector<float> scalesOutput = {0.005};
std::vector<int32_t> zeroPointsOutput = {128};
tim::vx::Quantization quantInput(tim::vx::QuantType::ASYMMETRIC, 1,
scalesInput, zeroPointsInput);
tim::vx::Quantization quantOutput(tim::vx::QuantType::ASYMMETRIC, 1,
scalesOutput, zeroPointsOutput);
tim::vx::TensorSpec input_spec(tim::vx::DataType::UINT8, in_shape,
tim::vx::TensorAttribute::INPUT, quantInput);
tim::vx::TensorSpec output_spec(tim::vx::DataType::UINT8, out_shape,
tim::vx::TensorAttribute::OUTPUT,
quantOutput);
auto input_tensor = graph->CreateTensor(input_spec);
auto output_tensor = graph->CreateTensor(output_spec);
std::vector<uint8_t> in_data = {65, 255, 140, 92, 142,
122, 117, 167, 132, 117,
44, 99, 109, 96, 216,
222, 135, 126, 113, 100};
std::vector<uint8_t> golden_data = {222, 240, 229, 225, 229,
227, 227, 232, 228, 227,
220, 225, 226, 225, 236,
237, 229, 228, 227, 225};
EXPECT_TRUE(
input_tensor->CopyDataToTensor(in_data.data(), in_data.size()));
auto op = graph->CreateOperation<tim::vx::ops::HardSigmoid>(0.2, 0.5);
(*op).BindInput(input_tensor).BindOutput(output_tensor);
EXPECT_TRUE(graph->Compile());
EXPECT_TRUE(graph->Run());
std::vector<uint8_t> output(golden_data.size());
EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
EXPECT_TRUE(ArraysMatch(golden_data, output, (uint8_t)1));
}
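
As a sanity check on the golden values, a small standalone program that reproduces the first expected output from the first input, assuming HardSigmoid(x) = max(0, min(1, alpha * x + beta)) and the asymmetric quantization q = round(x / scale) + zero_point implied by the scales and zero points above:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  const float in_scale = 0.00228914f;  const int in_zp = 128;
  const float out_scale = 0.005f;      const int out_zp = 128;
  const float alpha = 0.2f, beta = 0.5f;
  const uint8_t q_in = 65;                                        // in_data[0]
  float x = (static_cast<int>(q_in) - in_zp) * in_scale;          // dequantize: ~ -0.1442
  float y = std::min(1.0f, std::max(0.0f, alpha * x + beta));     // hard sigmoid: ~ 0.4712
  long q_out = std::lround(y / out_scale) + out_zp;               // requantize: ~ 222
  std::printf("%ld\n", q_out);                                    // prints 222 == golden_data[0]
  return 0;
}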