Fixed layernorm and logsoftmax ut error (#702)

Type: Bug Fix

Signed-off-by: Feiyue Chen <Feiyue.Chen@verisilicon.com>
Chen Feiyue 2024-07-29 10:44:28 +08:00 committed by GitHub
parent 81b6c07c5d
commit fcdf223d06
2 changed files with 9 additions and 11 deletions

View File

@@ -33,7 +33,7 @@ TEST(LayerNorm, axis_0_shape_3_6_1_float) {
   float tolerance = ctx->hasSP() ? 0.01 : 1e-5f;
   tim::vx::ShapeType io_shape({3, 6, 1});
-  tim::vx::ShapeType param_shape({6});
+  tim::vx::ShapeType param_shape({3});
   tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32,
                                  io_shape, tim::vx::TensorAttribute::INPUT);
   tim::vx::TensorSpec param_spec(tim::vx::DataType::FLOAT32,
@@ -54,11 +54,9 @@ TEST(LayerNorm, axis_0_shape_3_6_1_float) {
       -6, 0, 6,
       -7, 0, 7 };
   std::vector<float> gamma = {
-      1.0f, 1.0f, 1.0f,
       1.0f, 1.0f, 1.0f
   };
   std::vector<float> beta = {
-      .0f, .0f, .0f,
       .0f, .0f, .0f
   };
   std::vector<float> golden = {
@@ -91,7 +89,7 @@ TEST(LayerNorm, axis_0_shape_2_3_6_1_float) {
   float tolerance = ctx->hasSP() ? 0.01 : 1e-5f;
   tim::vx::ShapeType io_shape({2, 3, 6, 1});
-  tim::vx::ShapeType param_shape({6});
+  tim::vx::ShapeType param_shape({2});
   tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32,
                                  io_shape, tim::vx::TensorAttribute::INPUT);
   tim::vx::TensorSpec param_spec(tim::vx::DataType::FLOAT32,
@@ -113,12 +111,10 @@ TEST(LayerNorm, axis_0_shape_2_3_6_1_float) {
       -7, 7, -7, 7, -7, 7
   };
   std::vector<float> gamma = {
-      1.0f, 1.0f, 1.0f,
-      1.0f, 1.0f, 1.0f
+      1.0f, 1.0f
   };
   std::vector<float> beta = {
-      .0f, .0f, .0f,
-      .0f, .0f, .0f
+      .0f, .0f
   };
   std::vector<float> golden = {
       -1.f, 1.f, -1.f, 1.f, -1.f, 1.f,
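
Note on the LayerNorm change (editor's sketch, not part of the commit): the fixed shapes imply that gamma/beta carry one value per element of the normalized dimension, so for axis 0 the parameter shape must be { io_shape[0] }, i.e. {3} for io_shape {3, 6, 1} and {2} for io_shape {2, 3, 6, 1}; the old {6} shapes and six-element gamma/beta vectors did not match. A minimal standalone C++ sketch of that relationship (LayerNormParamShape is a hypothetical helper, not a TIM-VX API):

    // Hypothetical helper, not part of TIM-VX: the parameter shape that
    // gamma/beta must follow for LayerNorm along `axis`.
    #include <cassert>
    #include <cstdint>
    #include <vector>

    std::vector<uint32_t> LayerNormParamShape(const std::vector<uint32_t>& io_shape,
                                              uint32_t axis) {
      // One scale (gamma) and one offset (beta) value per element of the
      // normalized dimension.
      return { io_shape[axis] };
    }

    int main() {
      // Matches the shapes used after this fix.
      assert((LayerNormParamShape({3, 6, 1}, 0) == std::vector<uint32_t>{3}));
      assert((LayerNormParamShape({2, 3, 6, 1}, 0) == std::vector<uint32_t>{2}));
      return 0;
    }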

View File

@@ -30,6 +30,7 @@
 TEST(LogSoftmax, shape_6_1_float_axis_0) {
   auto ctx = tim::vx::Context::Create();
   auto graph = ctx->CreateGraph();
+  float tolerance = ctx->hasSP() ? 0.01 : 1e-5f;
   tim::vx::ShapeType io_shape({6, 1});
   tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32,
@@ -57,12 +58,13 @@ TEST(LogSoftmax, shape_6_1_float_axis_0) {
   std::vector<float> output(golden.size() * sizeof(float));
   EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
-  EXPECT_TRUE(ArraysMatch(golden, output, 1e-5f));
+  EXPECT_TRUE(ArraysMatch(golden, output, tolerance));
 }
 TEST(LogSoftmax, shape_3_6_1_float_axis_1) {
   auto ctx = tim::vx::Context::Create();
   auto graph = ctx->CreateGraph();
+  float tolerance = ctx->hasSP() ? 0.01 : 1e-5f;
   tim::vx::ShapeType io_shape({3, 6, 1});
   tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32,
@@ -100,7 +102,7 @@ TEST(LogSoftmax, shape_3_6_1_float_axis_1) {
   std::vector<float> output(golden.size() * sizeof(float));
   EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
-  EXPECT_TRUE(ArraysMatch(golden, output, 1e-5f));
+  EXPECT_TRUE(ArraysMatch(golden, output, tolerance));
 }
 TEST(LogSoftmax, shape_3_6_1_uint8_axis_1) {
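
Note on the LogSoftmax change (editor's sketch, not part of the commit): both float tests now pick the comparison tolerance the way the LayerNorm tests already do, relaxing it to 0.01 when ctx->hasSP() returns true instead of always requiring 1e-5f. If the pattern keeps spreading it could be factored into a small helper; everything below except Context::Create(), hasSP(), and ArraysMatch (which appear in the diff) is an assumption, including the helper name and the include path:

    // Hypothetical helper, not part of this commit: share the SP-aware
    // tolerance selection instead of repeating it in every test body.
    #include <memory>
    #include "tim/vx/context.h"  // assumed header location for tim::vx::Context

    inline float TestTolerance(const std::shared_ptr<tim::vx::Context>& ctx) {
      // Looser element-wise tolerance when hasSP() is true, using the same
      // values the tests write inline.
      return ctx->hasSP() ? 0.01f : 1e-5f;
    }

    // Possible usage inside a test body:
    //   auto ctx = tim::vx::Context::Create();
    //   float tolerance = TestTolerance(ctx);
    //   ...
    //   EXPECT_TRUE(ArraysMatch(golden, output, tolerance));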