From a64a0f7379790b67f99ff1d974f0a4752351e3f4 Mon Sep 17 00:00:00 2001
From: chxin66 <57057788+chxin66@users.noreply.github.com>
Date: Fri, 2 Jun 2023 07:52:06 +0800
Subject: [PATCH] Added a case for resize_bilinear layoutinfer (#595)

Signed-off-by: Chen
Co-authored-by: Chen
---
 src/tim/transform/layout_inference_test.cc | 80 ++++++++++++++++++++++
 1 file changed, 80 insertions(+)

diff --git a/src/tim/transform/layout_inference_test.cc b/src/tim/transform/layout_inference_test.cc
index 52d2477..941b976 100644
--- a/src/tim/transform/layout_inference_test.cc
+++ b/src/tim/transform/layout_inference_test.cc
@@ -271,4 +271,84 @@ TEST(InstanceNorm, nhwc) {
   std::vector<float> output(golden.size());
   EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data()));
   EXPECT_TRUE(ArraysMatch(golden, output, 1e-5f));
+}
+
+// Resize (bilinear, scale factor given) in CWHN layout must survive layout
+// inference: the inferred NHWC graph has to produce the same golden values.
+TEST(Resize, bilinear_factor) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType input_shape({1, 2, 2, 1});   // CWHN
+  tim::vx::ShapeType output_shape({1, 3, 3, 1});  // CWHN
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32,
+                                 input_shape, tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32,
+                                  output_shape, tim::vx::TensorAttribute::OUTPUT);
+
+  auto input_tensor = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+
+  std::vector<float> in_data = {1.0f, 1.0f, 2.0f, 2.0f};
+  std::vector<float> golden = {1.0f, 1.0f, 1.0f, 1.6666666269302368f, 1.6666666269302368f,
+                               1.6666666269302368f, 2.0f, 2.0f, 2.0f};
+
+  EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float)));
+
+  // factor = 1.8f (as float), output size fields left 0 so the factor is used.
+  auto op = graph->CreateOperation<tim::vx::ops::Resize>(tim::vx::ResizeType::BILINEAR,
+      1.7999999523162842f, false, false, 0, 0, tim::vx::DataLayout::CWHN);
+  (*op).BindInputs({input_tensor}).BindOutputs({output_tensor});
+
+  auto transform = tim::transform::LayoutInference(graph, ctx);
+  auto infer_graph = transform.first;
+  auto graph_io_map = transform.second;
+  infer_graph->Compile();
+
+  auto infer_input = graph_io_map[graph->InputsTensor()[0]];
+  auto infer_output = graph_io_map[graph->OutputsTensor()[0]];
+
+  infer_input->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float));
+  infer_graph->Run();
+
+  std::vector<float> output(golden.size());
+  EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data()));
+  EXPECT_TRUE(ArraysMatch(golden, output, 1e-5f));
+}
+
+// Same resize, but driven by an explicit 3x3 output size (factor = 0).
+TEST(Resize, bilinear_outputsize) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType input_shape({1, 2, 2, 1});   // CWHN
+  tim::vx::ShapeType output_shape({1, 3, 3, 1});  // CWHN
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32,
+                                 input_shape, tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32,
+                                  output_shape, tim::vx::TensorAttribute::OUTPUT);
+
+  auto input_tensor = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+
+  std::vector<float> in_data = {1.0f, 1.0f, 2.0f, 2.0f};
+  std::vector<float> golden = {1.0f, 1.0f, 1.0f, 1.6666666269302368f, 1.6666666269302368f,
+                               1.6666666269302368f, 2.0f, 2.0f, 2.0f};
+
+  EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float)));
+
+  auto op = graph->CreateOperation<tim::vx::ops::Resize>(tim::vx::ResizeType::BILINEAR,
+      0, false, false, 3, 3, tim::vx::DataLayout::CWHN);
+  (*op).BindInputs({input_tensor}).BindOutputs({output_tensor});
+
+  auto transform = tim::transform::LayoutInference(graph, ctx);
+  auto infer_graph = transform.first;
+  auto graph_io_map = transform.second;
+  infer_graph->Compile();
+
+  auto infer_input = graph_io_map[graph->InputsTensor()[0]];
+  auto infer_output = graph_io_map[graph->OutputsTensor()[0]];
+
+  infer_input->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float));
+  infer_graph->Run();
+
+  std::vector<float> output(golden.size());
+  EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data()));
+  EXPECT_TRUE(ArraysMatch(golden, output, 1e-5f));
 }
\ No newline at end of file