From 1ca89d2ffa113560495a7a0241482263c082f19e Mon Sep 17 00:00:00 2001
From: chxin66 <57057788+chxin66@users.noreply.github.com>
Date: Wed, 6 Apr 2022 13:01:41 +0800
Subject: [PATCH] Add layout inference & layout test for stack (#337)

* Added layout inference & layout test for stack

Signed-off-by: Chen Xin
---
 include/tim/vx/ops/stack.h                    |   3 +-
 .../transform/ops/stack_layout_inference.h    |  34 ++-
 .../transform/stack_layout_inference_test.cc  | 226 ++++++++++++++++
 src/tim/vx/ops/stack_test.cc                  | 241 +++++++++---------
 4 files changed, 373 insertions(+), 131 deletions(-)
 create mode 100644 src/tim/transform/stack_layout_inference_test.cc

diff --git a/include/tim/vx/ops/stack.h b/include/tim/vx/ops/stack.h
index 8f1fde6..793f0fa 100644
--- a/include/tim/vx/ops/stack.h
+++ b/include/tim/vx/ops/stack.h
@@ -33,7 +33,8 @@ namespace ops {
  * ## Stack
  *
  * Packs the list of tensors in inputs into a tensor with rank one higher than
- * each tensor in values, by packing them along the **axis** dimension.
+ * each tensor in values, by packing them along the **axis** dimension.
+ * Dimensions below the dimension specified by **axis** are packed together with the corresponding dimensions of the other inputs.
  */

 class Stack : public DirectMapOp {
diff --git a/src/tim/transform/ops/stack_layout_inference.h b/src/tim/transform/ops/stack_layout_inference.h
index 87492d1..6a5338c 100644
--- a/src/tim/transform/ops/stack_layout_inference.h
+++ b/src/tim/transform/ops/stack_layout_inference.h
@@ -25,6 +25,7 @@
 #define TIM_LAYOUT_INFER_STACK_LAYOUT_INFERENCE_H_

 #include "tim/vx/ops/stack.h"
+#include "tim/vx/ops/transpose.h"

 #include "direct_map_op_impl.h"
 #include "permute_vector.h"
@@ -40,17 +41,42 @@ class StackLayoutInfer : public OpLayoutInfer {
       : OpLayoutInfer(op, context) {}
   void OnInputs(
       std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
-    ReverseInputsPermuteVector();
+    auto src_input = op_->impl()->InputsTensor()[0];
+    auto input_pv = context_->GetPermuteVector(src_input);
+
     int32_t axis = op_->impl()->node()->nn_param.stack.axis;
     auto stack = context_->infer_graph_->CreateOperation<vx::ops::Stack>(
         axis, op_->impl()->input_cnt_);
+    auto aligninput_pv = AlignPermuteVectorForMutilInputs();
+
     for (const auto& i_src : op_->impl()->InputsTensor()) {
       (*stack).BindInput(context_->GetMapedTensor(i_src));
     }
-    auto required_pv = MakeShared(op_->impl()->OutputsTensor()[0]->GetShape().size());
-    auto out_infer = CreateOutputsTensor(required_pv);
+
+    std::vector<uint32_t> v;
+    uint32_t dim_num = src_input->GetShape().size();
+    if (axis < 0) {
+      axis += dim_num;
+    }
+    for (uint32_t i = 0; i < src_input->GetShape().size(); ++i) {
+      if (input_pv->At(i) > (uint32_t)axis) {
+        v.push_back(input_pv->At(i) + 1);
+      } else if (input_pv->At(i) == (uint32_t)axis) {
+        v.push_back(input_pv->At(i));
+        v.push_back(input_pv->At(i) + 1);
+      } else {
+        v.push_back(input_pv->At(i));
+      }
+    }
+    auto out_pv =
+        MakeShared(op_->impl()->OutputsTensor()[0]->GetShape().size());
+    for (uint32_t i = 0; i < out_pv->Rank(); ++i) {
+      out_pv->At(i) = v[i];
+    }
+
+    auto out_infer = CreateOutputsTensor(out_pv);
     (*stack).BindOutput(out_infer[0]);
-    context_->SetPermuteVector(op_->impl()->OutputsTensor()[0], required_pv);
+    context_->SetPermuteVector(op_->impl()->OutputsTensor()[0], out_pv);
     // Add out tensor of src_graph into next_tensor
     next_tensors.push_back(op_->impl()->OutputsTensor()[0]);
   }
diff --git a/src/tim/transform/stack_layout_inference_test.cc b/src/tim/transform/stack_layout_inference_test.cc
new file mode 100644
index 0000000..b045246
--- /dev/null
+++ b/src/tim/transform/stack_layout_inference_test.cc
@@ -0,0 +1,226 @@
+#include "tim/vx/context.h"
+#include "tim/vx/graph.h"
+#include "tim/vx/ops.h"
+#include "tim/transform/layout_inference.h"
+
+#include "gtest/gtest.h"
+
+TEST(Stack, LayoutInferenceTest_1) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType input_shape({2, 3, 4, 1});   //cwhn
+  tim::vx::ShapeType kernel_shape({2, 3, 3, 3});  //iwho
+  // tim::vx::ShapeType conv2dout_shape({3, 1, 2, 1});  //cwhn
+  tim::vx::ShapeType output_shape({2, 3, 1, 2, 1});
+
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
+                                 tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape,
+                                  tim::vx::TensorAttribute::CONSTANT);
+  tim::vx::TensorSpec conv2dout_spec(tim::vx::DataType::FLOAT32,
+                                     {0, 0, 0, 0},
+                                     tim::vx::TensorAttribute::TRANSIENT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
+                                  tim::vx::TensorAttribute::OUTPUT);
+
+  auto input_tensor = graph->CreateTensor(input_spec);
+  auto conv2dout_tensor = graph->CreateTensor(conv2dout_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+
+  std::vector<float> in_data = {
+      1, 1, 1, 1, 2, 0, 5, 3, 6, 3, 1, 1,
+      1, 4, 2, 5, 7, 6, 3, 1, 1, 0, 2, 5,
+  };
+  std::vector<float> kernel_data = {
+      1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1, 1, 2, 1, 1, 1,
+      0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1,
+      1, 1, 1, 1, 2, 1, 1, 5, 3, 1, 2, 3, 1, 1, 2, 1, 1, 1,
+  };
+  std::vector<float> golden = {
+      64, 77, 49, 44, 81, 97, 64, 77, 49, 44, 81, 97
+  };
+  auto kernel_tensor = graph->CreateTensor(kernel_spec, kernel_data.data());
+
+  std::array<uint32_t, 2> stride({1, 1});
+  std::array<uint32_t, 2> dilation({1, 1});
+
+  auto op1 = graph->CreateOperation<tim::vx::ops::Conv2d>(
+      tim::vx::PadType::VALID, stride, dilation, 0, tim::vx::DataLayout::CWHN);
+  (*op1)
+      .BindInputs({input_tensor, kernel_tensor})
+      .BindOutputs({conv2dout_tensor});
+
+  auto op2 = graph->CreateOperation<tim::vx::ops::Stack>(0, 2);
+  (*op2).BindInputs({conv2dout_tensor, conv2dout_tensor}).BindOutputs({output_tensor});
+
+  auto transform = tim::transform::LayoutInference(graph, ctx);
+  auto infer_graph = transform.first;
+  auto graph_io_map = transform.second;
+  auto infer_input = graph_io_map[graph->InputsTensor()[0]];
+  auto infer_output = graph_io_map[graph->OutputsTensor()[0]];
+  infer_input->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float));
+
+  EXPECT_TRUE(infer_graph->Compile());
+  EXPECT_TRUE(infer_graph->Run());
+
+  std::vector<float> output(golden.size());
+  EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data()));
+  EXPECT_EQ(golden, output);
+}
+
+TEST(Stack, LayoutInferenceTest_2) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType input_shape({2, 3, 4, 1});   //cwhn
+  tim::vx::ShapeType kernel_shape({2, 2, 3, 3});  //iwho
+  tim::vx::ShapeType conv2dout_shape({3, 2, 2, 1});  //cwhn
+  // tim::vx::ShapeType output_shape({2, 1, 2, 1});
+  tim::vx::ShapeType output_shape({2, 3, 2});
+
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
+                                 tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape,
+                                  tim::vx::TensorAttribute::CONSTANT);
+  tim::vx::TensorSpec conv2dout_spec(tim::vx::DataType::FLOAT32,
+                                     conv2dout_shape,
+                                     tim::vx::TensorAttribute::OUTPUT);
+  tim::vx::TensorSpec reduceout_spec(tim::vx::DataType::FLOAT32,
+                                     {0, 0, 0},
+                                     tim::vx::TensorAttribute::TRANSIENT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
+                                  tim::vx::TensorAttribute::OUTPUT);
+
+  auto input_tensor = graph->CreateTensor(input_spec);
+  auto conv2dout_tensor = graph->CreateTensor(conv2dout_spec);
+  auto reduceout_tensor = graph->CreateTensor(reduceout_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+
+  std::vector<float> in_data = {
+      1, 1, 1, 1, 2, 0, 5, 3, 6, 3, 1, 1,
+      1, 4, 2, 5, 7, 6, 3, 1, 1, 0, 2, 5,
+  };
+  std::vector<float> kernel_data = {
+      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+      1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+  };
+  std::vector<float> golden = {
+      33, 33, 37, 35, 35, 43, 34, 34, 58, 39, 39, 43,
+  };
+  auto kernel_tensor = graph->CreateTensor(kernel_spec, kernel_data.data());
+
+  std::array<uint32_t, 2> stride({1, 1});
+  std::array<uint32_t, 2> dilation({1, 1});
+
+  auto op1 = graph->CreateOperation<tim::vx::ops::Conv2d>(
+      tim::vx::PadType::VALID, stride, dilation, 0, tim::vx::DataLayout::CWHN);
+  (*op1)
+      .BindInputs({input_tensor, kernel_tensor})
+      .BindOutputs({conv2dout_tensor});
+  std::vector<int32_t> axis = {2, 3};
+  auto op2 = graph->CreateOperation<tim::vx::ops::ReduceMax>(axis, false);
+  (*op2).BindInputs({conv2dout_tensor}).BindOutputs({reduceout_tensor});
+  auto op3 = graph->CreateOperation<tim::vx::ops::Stack>(0, 2);
+  (*op3).BindInputs({reduceout_tensor, reduceout_tensor}).BindOutputs({output_tensor});
+
+  auto transform = tim::transform::LayoutInference(graph, ctx);
+  auto infer_graph = transform.first;
+  auto graph_io_map = transform.second;
+  auto infer_input = graph_io_map[graph->InputsTensor()[0]];
+  auto infer_output = graph_io_map[graph->OutputsTensor()[0]];
+  infer_input->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float));
+
+  EXPECT_TRUE(infer_graph->Compile());
+  EXPECT_TRUE(infer_graph->Run());
+
+  std::vector<float> output(golden.size());
+  EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data()));
+  EXPECT_EQ(golden, output);
+}
+
+TEST(Stack, LayoutInferenceTest_3) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType input_shape({2, 3, 4, 1});   //cwhn
+  tim::vx::ShapeType kernel_shape({2, 2, 3, 3});  //iwho
+  tim::vx::ShapeType conv2dout_shape({3, 2, 2, 1});  //cwhn
+  tim::vx::ShapeType output_shape({2, 3, 2, 1});
+
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
+                                 tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape,
+                                  tim::vx::TensorAttribute::CONSTANT);
+  tim::vx::TensorSpec conv2dout_spec(tim::vx::DataType::FLOAT32,
+                                     {0, 0, 0, 0},
+                                     tim::vx::TensorAttribute::TRANSIENT);
+  tim::vx::TensorSpec reduceout_spec(tim::vx::DataType::FLOAT32,
+                                     {0, 0, 0},
+                                     tim::vx::TensorAttribute::TRANSIENT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
+                                  tim::vx::TensorAttribute::OUTPUT);
+
+  auto input_tensor = graph->CreateTensor(input_spec);
+  auto input2_tensor = graph->CreateTensor(input_spec);
+  auto conv2dout_tensor = graph->CreateTensor(conv2dout_spec);
+  auto conv2dout2_tensor = graph->CreateTensor(conv2dout_spec);
+  auto reduceout_tensor = graph->CreateTensor(reduceout_spec);
+  auto reduceout2_tensor = graph->CreateTensor(reduceout_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+
+  std::vector<float> in_data = {
+      1, 1, 1, 1, 2, 0, 5, 3, 6, 3, 1, 1,
+      1, 4, 2, 5, 7, 6, 3, 1, 1, 0, 2, 5,
+  };
+  std::vector<float> kernel_data = {
+      1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1, 1, 2, 1, 1, 1,
+      0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1,
+      1, 1, 1, 1, 2, 1, 1, 5, 3, 1, 2, 3, 1, 1, 2, 1, 1, 1,
+  };
+  std::vector<float> golden = {
+      55, 39, 21, 28, 37, 41, 49, 55, 28, 24, 40, 41
+  };
+  auto kernel_tensor = graph->CreateTensor(kernel_spec, kernel_data.data());
+  auto kernel2_tensor = graph->CreateTensor(kernel_spec, kernel_data.data());
+
+  std::array<uint32_t, 2> stride({1, 1});
+  std::array<uint32_t, 2> dilation({1, 1});
+  auto op1 = graph->CreateOperation<tim::vx::ops::Conv2d>(
+      tim::vx::PadType::VALID, stride, dilation, 0, tim::vx::DataLayout::CWHN);
+  (*op1)
+      .BindInputs({input_tensor, kernel_tensor})
+      .BindOutputs({conv2dout_tensor});
+  auto op11 = graph->CreateOperation<tim::vx::ops::Conv2d>(
+      tim::vx::PadType::VALID, stride, dilation, 0, tim::vx::DataLayout::CWHN);
+  (*op11)
+      .BindInputs({input2_tensor, kernel2_tensor})
+      .BindOutputs({conv2dout2_tensor});
+
+  std::vector<int32_t> axis = {1};
+  auto op2 = graph->CreateOperation<tim::vx::ops::ReduceMax>(axis, false);
+  (*op2).BindInputs({conv2dout_tensor}).BindOutputs({reduceout_tensor});
+  axis = {2};
+  auto op22 = graph->CreateOperation<tim::vx::ops::ReduceMax>(axis, false);
+  (*op22).BindInputs({conv2dout2_tensor}).BindOutputs({reduceout2_tensor});
+
+  auto op3 = graph->CreateOperation<tim::vx::ops::Stack>(0, 2);
+  (*op3).BindInputs({reduceout_tensor, reduceout2_tensor}).BindOutputs({output_tensor});
+
+  auto transform = tim::transform::LayoutInference(graph, ctx);
+  auto infer_graph = transform.first;
+  auto graph_io_map = transform.second;
+  auto infer_input = graph_io_map[graph->InputsTensor()[0]];
+  auto infer_input2 = graph_io_map[graph->InputsTensor()[1]];
+  auto infer_output = graph_io_map[graph->OutputsTensor()[0]];
+  infer_input->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float));
+  infer_input2->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float));
+
+  EXPECT_TRUE(infer_graph->Compile());
+  EXPECT_TRUE(infer_graph->Run());
+
+  std::vector<float> output(golden.size());
+  EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data()));
+  EXPECT_EQ(golden, output);
+}
diff --git a/src/tim/vx/ops/stack_test.cc b/src/tim/vx/ops/stack_test.cc
index a17575b..ae6079b 100644
--- a/src/tim/vx/ops/stack_test.cc
+++ b/src/tim/vx/ops/stack_test.cc
@@ -27,155 +27,144 @@
 #include "gtest/gtest.h"

-TEST(Stack, shape_2_3_axis_2) {
-  auto ctx = tim::vx::Context::Create();
-  auto graph = ctx->CreateGraph();
+TEST(Stack, shape_3_4_axis_2) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();

-  tim::vx::ShapeType input_shape({2,3});
-  tim::vx::ShapeType output_shape({2,3,2});
-  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32,
-                                 input_shape, tim::vx::TensorAttribute::INPUT);
-  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32,
-                                  output_shape, tim::vx::TensorAttribute::OUTPUT);
+  tim::vx::ShapeType input_shape({4, 3});
+  tim::vx::ShapeType output_shape({4, 3, 2});
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
+                                 tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
+                                  tim::vx::TensorAttribute::OUTPUT);

-  auto input_tensor1 = graph->CreateTensor(input_spec);
-  auto input_tensor2 = graph->CreateTensor(input_spec);
-  auto output_tensor = graph->CreateTensor(output_spec);
+  auto input_tensor1 = graph->CreateTensor(input_spec);
+  auto input_tensor2 = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);

-  std::vector<float> in_data1 = {
-      1,4,
-      2,5,
-      3,6
-  };
-  std::vector<float> in_data2 = {
-      1,4,
-      2,5,
-      3,6
-  };
-  std::vector<float> golden = {
-      1,4,
-      2,5,
-      3,6,
+  std::vector<float> in_data = {
+      2, 1, 0, 1,
+      2, 4, 4, 4,
+      3, 2, 1, 4,
+  };
+  std::vector<float> golden = {
+      2, 1, 0, 1,
+      2, 4, 4, 4,
+      3, 2, 1, 4,
+      2, 1, 0, 1,
+      2, 4, 4, 4,
+      3, 2, 1, 4,
+  };

-      1,4,
-      2,5,
-      3,6
-  };
+  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data.data(),
+                                              in_data.size() * sizeof(float)));
+  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data.data(),
+                                              in_data.size() * sizeof(float)));
+  auto op = graph->CreateOperation<tim::vx::ops::Stack>(2, 2);
+  (*op).BindInputs({input_tensor1, input_tensor2}).BindOutputs({output_tensor});

-  EXPECT_TRUE(input_tensor1->CopyDataToTensor(
-      in_data1.data(), in_data1.size() * sizeof(float)));
-  EXPECT_TRUE(input_tensor2->CopyDataToTensor(
-      in_data2.data(), in_data2.size() * sizeof(float)));
-  auto op = graph->CreateOperation<tim::vx::ops::Stack>(2, 2);
-  (*op).BindInputs({input_tensor1,input_tensor2}).BindOutputs(
-      {output_tensor});
+  EXPECT_TRUE(graph->Compile());
+  EXPECT_TRUE(graph->Run());

-  EXPECT_TRUE(graph->Compile());
-  EXPECT_TRUE(graph->Run());
-
-  std::vector<float> output(golden.size());
-  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
-  EXPECT_EQ(golden, output);
+  std::vector<float> output(golden.size());
+  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
+  EXPECT_EQ(golden, output);
 }

-TEST(Stack, shape_2_3_axis_1) {
-  auto ctx = tim::vx::Context::Create();
-  auto graph = ctx->CreateGraph();
+TEST(Stack, shape_3_4_axis_1) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();

-  tim::vx::ShapeType input_shape({2,3});
-  tim::vx::ShapeType output_shape({2,3,2});
-  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32,
-                                 input_shape, tim::vx::TensorAttribute::INPUT);
-  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32,
-                                  output_shape, tim::vx::TensorAttribute::OUTPUT);
+  tim::vx::ShapeType input_shape({4, 3});
+  tim::vx::ShapeType output_shape({4, 2, 3});
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
+                                 tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
+                                  tim::vx::TensorAttribute::OUTPUT);

-  auto input_tensor1 = graph->CreateTensor(input_spec);
-  auto input_tensor2 = graph->CreateTensor(input_spec);
-  auto output_tensor = graph->CreateTensor(output_spec);
+  auto input_tensor1 = graph->CreateTensor(input_spec);
+  auto input_tensor2 = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);

-  std::vector<float> in_data1 = {
-      1,4,
-      2,5,
-      3,6
-  };
-  std::vector<float> in_data2 = {
-      1,4,
-      2,5,
-      3,6
-  };
-  std::vector<float> golden = {
-      1,4,
-      1,4,
-      2,5,
+  std::vector<float> in_data = {
+      2, 1, 0, 1,
+      2, 4, 4, 4,
+      3, 2, 1, 4,
+  };
+  std::vector<float> golden = {
+      2, 1, 0, 1,
+      2, 1, 0, 1,

-      2,5,
-      3,6,
-      3,6,
-  };
+      2, 4, 4, 4,
+      2, 4, 4, 4,

-  EXPECT_TRUE(input_tensor1->CopyDataToTensor(
-      in_data1.data(), in_data1.size() * sizeof(float)));
-  EXPECT_TRUE(input_tensor2->CopyDataToTensor(
-      in_data2.data(), in_data2.size() * sizeof(float)));
-  auto op = graph->CreateOperation<tim::vx::ops::Stack>(1, 2);
-  (*op).BindInputs({input_tensor1,input_tensor2}).BindOutputs(
-      {output_tensor});
+      3, 2, 1, 4,
+      3, 2, 1, 4,
+  };

-  EXPECT_TRUE(graph->Compile());
-  EXPECT_TRUE(graph->Run());
+  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data.data(),
+                                              in_data.size() * sizeof(float)));
+  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data.data(),
+                                              in_data.size() * sizeof(float)));
+  auto op = graph->CreateOperation<tim::vx::ops::Stack>(1, 2);
+  (*op).BindInputs({input_tensor1, input_tensor2}).BindOutputs({output_tensor});

-  std::vector<float> output(golden.size());
-  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
-  EXPECT_EQ(golden, output);
+  EXPECT_TRUE(graph->Compile());
+  EXPECT_TRUE(graph->Run());
+
+  std::vector<float> output(golden.size());
+  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
+  EXPECT_EQ(golden, output);
 }

-TEST(Stack, shape_2_3_axis_0) {
-  auto ctx = tim::vx::Context::Create();
-  auto graph = ctx->CreateGraph();
+TEST(Stack, shape_3_4_axis_0) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();

-  tim::vx::ShapeType input_shape({2,3});
-  tim::vx::ShapeType output_shape({2,3,2});
-  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32,
-                                 input_shape, tim::vx::TensorAttribute::INPUT);
-  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32,
-                                  output_shape, tim::vx::TensorAttribute::OUTPUT);
+  tim::vx::ShapeType input_shape({4, 3});
+  tim::vx::ShapeType output_shape({2, 4, 3});
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
+                                 tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
+                                  tim::vx::TensorAttribute::OUTPUT);

-  auto input_tensor1 = graph->CreateTensor(input_spec);
-  auto input_tensor2 = graph->CreateTensor(input_spec);
-  auto output_tensor = graph->CreateTensor(output_spec);
+  auto input_tensor1 = graph->CreateTensor(input_spec);
+  auto input_tensor2 = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);

-  std::vector<float> in_data1 = {
-      1,4,
-      2,5,
-      3,6
-  };
-  std::vector<float> in_data2 = {
-      1,4,
-      2,5,
-      3,6
-  };
-  std::vector<float> golden = {
+  std::vector<float> in_data = {
+      2, 1, 0, 1,
+      2, 4, 4, 4,
+      3, 2, 1, 4,
+  };
+  std::vector<float> golden = {
+      2, 2,
+      1, 1,
+      0, 0,
+      1, 1,
+
+      2, 2,
+      4, 4,
+      4, 4,
+      4, 4,
+
+      3, 3,
+      2, 2,
       1, 1,
       4, 4,
-      2, 2,
+  };

-      5, 5,
-      3, 3,
-      6, 6
-  };
+  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data.data(),
+                                              in_data.size() * sizeof(float)));
+  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data.data(),
+                                              in_data.size() * sizeof(float)));
+  auto op = graph->CreateOperation<tim::vx::ops::Stack>(0, 2);
+  (*op).BindInputs({input_tensor1, input_tensor2}).BindOutputs({output_tensor});

-  EXPECT_TRUE(input_tensor1->CopyDataToTensor(
-      in_data1.data(), in_data1.size() * sizeof(float)));
-  EXPECT_TRUE(input_tensor2->CopyDataToTensor(
-      in_data2.data(), in_data2.size() * sizeof(float)));
-  auto op = graph->CreateOperation<tim::vx::ops::Stack>(0, 2);
-  (*op).BindInputs({input_tensor1,input_tensor2}).BindOutputs(
-      {output_tensor});
+  EXPECT_TRUE(graph->Compile());
+  EXPECT_TRUE(graph->Run());

-  EXPECT_TRUE(graph->Compile());
-  EXPECT_TRUE(graph->Run());
-
-  std::vector<float> output(golden.size());
-  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
-  EXPECT_EQ(golden, output);
+  std::vector<float> output(golden.size());
+  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
+  EXPECT_EQ(golden, output);
 }
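
Note on the permute-vector handling added in stack_layout_inference.h: the loop builds the output permute vector by inserting the new stack dimension right after the entry that matches the (normalized) stack axis and shifting every larger entry up by one. The standalone sketch below reproduces just that mapping so it can be checked in isolation with a made-up permute vector; it is a minimal illustration only, the helper name InsertStackAxis is not part of the TIM-VX API, and a plain std::vector<uint32_t> stands in for tim::transform::IPermuteVector.

#include <cassert>
#include <cstdint>
#include <vector>

// Given the permute vector of one stack input and the non-negative stack
// axis, build the permute vector of the stacked output: entries greater than
// `axis` shift up by one to make room for the new dimension, and the entry
// equal to `axis` keeps its value and is immediately followed by the new
// dimension at `axis` + 1.
std::vector<uint32_t> InsertStackAxis(const std::vector<uint32_t>& input_pv,
                                      uint32_t axis) {
  std::vector<uint32_t> out_pv;
  for (uint32_t p : input_pv) {
    if (p > axis) {
      out_pv.push_back(p + 1);
    } else if (p == axis) {
      out_pv.push_back(p);      // the original axis keeps its position
      out_pv.push_back(p + 1);  // the new stack dimension follows it
    } else {
      out_pv.push_back(p);
    }
  }
  return out_pv;
}

int main() {
  // Illustrative values: a rank-4 input whose permute vector is {1, 2, 0, 3},
  // stacked along axis 0 of the source graph, yields the rank-5 permute
  // vector {2, 3, 0, 1, 4} under the rule above.
  std::vector<uint32_t> input_pv = {1, 2, 0, 3};
  std::vector<uint32_t> out_pv = InsertStackAxis(input_pv, /*axis=*/0);
  assert((out_pv == std::vector<uint32_t>{2, 3, 0, 1, 4}));
  return 0;
}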