Add layout inference & layout test for stack (#337)

* Added layout inference & layout test for stack

Signed-off-by: Chen Xin <jack.chen@verisilicon.com>
chxin66 authored on 2022-04-06 13:01:41 +08:00, committed by GitHub
parent 8462f16dc0
commit 1ca89d2ffa
4 changed files with 373 additions and 131 deletions


@@ -33,7 +33,8 @@ namespace ops {
 * ## Stack
 *
 * Packs the list of input tensors into a tensor with rank one higher than
 * each input tensor, by packing them along the **axis** dimension.
 * Dimensions below the dimension specified by **axis** will be packed together
 * with the other inputs.
 */
class Stack : public DirectMapOp {
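
As a quick orientation for the semantics described above, here is a minimal usage sketch, assuming the same public API exercised by the tests in this commit; shapes, data, and the helper name are illustrative and not part of the change:

// Illustrative sketch only: stack two {4, 3} tensors along axis 2 into a
// {4, 3, 2} output, following the API calls used by the tests below.
#include "tim/vx/context.h"
#include "tim/vx/graph.h"
#include "tim/vx/ops.h"

void StackSketch() {
  auto ctx = tim::vx::Context::Create();
  auto graph = ctx->CreateGraph();
  tim::vx::ShapeType in_shape({4, 3});
  tim::vx::ShapeType out_shape({4, 3, 2});
  tim::vx::TensorSpec in_spec(tim::vx::DataType::FLOAT32, in_shape,
                              tim::vx::TensorAttribute::INPUT);
  tim::vx::TensorSpec out_spec(tim::vx::DataType::FLOAT32, out_shape,
                               tim::vx::TensorAttribute::OUTPUT);
  auto a = graph->CreateTensor(in_spec);
  auto b = graph->CreateTensor(in_spec);
  auto out = graph->CreateTensor(out_spec);
  // Stack(axis, input_cnt): pack input_cnt equally shaped tensors along axis.
  auto stack = graph->CreateOperation<tim::vx::ops::Stack>(2, 2);
  (*stack).BindInputs({a, b}).BindOutputs({out});
  graph->Compile();
  graph->Run();
}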


@@ -25,6 +25,7 @@
#define TIM_LAYOUT_INFER_STACK_LAYOUT_INFERENCE_H_
#include "tim/vx/ops/stack.h"
#include "tim/vx/ops/transpose.h"
#include "direct_map_op_impl.h"
#include "permute_vector.h"
@@ -40,17 +41,42 @@ class StackLayoutInfer : public OpLayoutInfer {
      : OpLayoutInfer(op, context) {}

  void OnInputs(
      std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
    auto src_input = op_->impl()->InputsTensor()[0];
    auto input_pv = context_->GetPermuteVector(src_input);
    int32_t axis = op_->impl()->node()->nn_param.stack.axis;
    auto stack = context_->infer_graph_->CreateOperation<vx::ops::Stack>(
        axis, op_->impl()->input_cnt_);
    // Align the permute vectors of all inputs before binding them.
    auto aligninput_pv = AlignPermuteVectorForMutilInputs();
    for (const auto& i_src : op_->impl()->InputsTensor()) {
      (*stack).BindInput(context_->GetMapedTensor(i_src));
    }
    // Normalize a negative axis to its positive equivalent.
    std::vector<uint32_t> v;
    uint32_t dim_num = src_input->GetShape().size();
    if (axis < 0) {
      axis += dim_num;
    }
    // Stacking inserts one new dimension at `axis`: entries of the input
    // permute vector at or above `axis` shift up by one, the rest are kept.
    for (uint32_t i = 0; i < src_input->GetShape().size(); ++i) {
      if (input_pv->At(i) > (uint32_t)axis) {
        v.push_back(input_pv->At(i) + 1);
      } else if (input_pv->At(i) == (uint32_t)axis) {
        v.push_back(input_pv->At(i));
        v.push_back(input_pv->At(i) + 1);
      } else {
        v.push_back(input_pv->At(i));
      }
    }
    auto out_pv =
        MakeShared(op_->impl()->OutputsTensor()[0]->GetShape().size());
    for (uint32_t i = 0; i < out_pv->Rank(); ++i) {
      out_pv->At(i) = v[i];
    }
    auto out_infer = CreateOutputsTensor(out_pv);
    (*stack).BindOutput(out_infer[0]);
    context_->SetPermuteVector(op_->impl()->OutputsTensor()[0], out_pv);
    // Add the output tensor of the source graph into next_tensors.
    next_tensors.push_back(op_->impl()->OutputsTensor()[0]);
  }
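
The axis handling above can be read on its own: stacking inserts one new dimension at axis, so permute-vector entries that refer to dimensions at or above axis shift up by one while the rest stay put. A self-contained sketch of that mapping, using a plain std::vector and a hypothetical helper name rather than the TIM-VX permute-vector types:

// Illustrative helper (not part of TIM-VX): given the permute vector of one
// stack input and the already non-negative stack axis, build the output
// permute vector of the rank+1 stacked tensor, mirroring the loop above.
#include <cstdint>
#include <vector>

std::vector<uint32_t> StackOutputPermute(const std::vector<uint32_t>& input_pv,
                                         uint32_t axis) {
  std::vector<uint32_t> v;
  for (uint32_t p : input_pv) {
    if (p > axis) {
      v.push_back(p + 1);  // dims above the inserted axis shift up by one
    } else if (p == axis) {
      v.push_back(p);      // the dim at `axis` keeps its slot ...
      v.push_back(p + 1);  // ... and the newly inserted dim follows it
    } else {
      v.push_back(p);      // dims below `axis` are unchanged
    }
  }
  return v;                // rank is now input rank + 1
}

For an identity permute vector {0, 1, 2} and axis 1 this yields {0, 1, 2, 3}; a permuted input such as {1, 2, 0} yields {1, 2, 3, 0}, which is what gets recorded for the stacked output via SetPermuteVector.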


@@ -0,0 +1,226 @@
#include "tim/vx/context.h"
#include "tim/vx/graph.h"
#include "tim/vx/ops.h"
#include "tim/transform/layout_inference.h"
#include "gtest/gtest.h"
TEST(Stack, LayoutinferernceTest_1) {
auto ctx = tim::vx::Context::Create();
auto graph = ctx->CreateGraph();
tim::vx::ShapeType input_shape({2, 3, 4, 1}); //cwhn
tim::vx::ShapeType kernel_shape({2, 3, 3, 3}); //iwho
// tim::vx::ShapeType conv2dout_shape({3, 1, 2, 1}); //cwhn
tim::vx::ShapeType output_shape({2, 3, 1, 2, 1});
tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
tim::vx::TensorAttribute::INPUT);
tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape,
tim::vx::TensorAttribute::CONSTANT);
tim::vx::TensorSpec conv2dout_spec(tim::vx::DataType::FLOAT32,
{0,0,0,0},
tim::vx::TensorAttribute::TRANSIENT);
tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
tim::vx::TensorAttribute::OUTPUT);
auto input_tensor = graph->CreateTensor(input_spec);
auto conv2dout_tensor = graph->CreateTensor(conv2dout_spec);
auto output_tensor = graph->CreateTensor(output_spec);
std::vector<float> in_data = {
1, 1, 1, 1, 2, 0, 5, 3, 6, 3, 1, 1,
1, 4, 2, 5, 7, 6, 3, 1, 1, 0, 2, 5,
};
std::vector<float> kernel_data = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1, 1, 2, 1, 1, 1,
0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1,
1, 1, 1, 1, 2, 1, 1, 5, 3, 1, 2, 3, 1, 1, 2, 1, 1, 1,
};
std::vector<float> golden = {
64, 77, 49, 44, 81, 97, 64, 77, 49, 44, 81, 97
};
auto kernel_tensor = graph->CreateTensor(kernel_spec, kernel_data.data());
std::array<uint32_t, 2> stride({1, 1});
std::array<uint32_t, 2> dilation({1, 1});
auto op1 = graph->CreateOperation<tim::vx::ops::Conv2d>(
tim::vx::PadType::VALID, stride, dilation, 0, tim::vx::DataLayout::CWHN);
(*op1)
.BindInputs({input_tensor, kernel_tensor})
.BindOutputs({conv2dout_tensor});
auto op2 = graph->CreateOperation<tim::vx::ops::Stack>(0, 2);
(*op2).BindInputs({conv2dout_tensor, conv2dout_tensor}).BindOutputs({output_tensor});
auto transform = tim::transform::LayoutInference(graph, ctx);
auto infer_graph = transform.first;
auto graph_io_map = transform.second;
auto infer_input = graph_io_map[graph->InputsTensor()[0]];
auto infer_output = graph_io_map[graph->OutputsTensor()[0]];
infer_input->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float));
EXPECT_TRUE(infer_graph->Compile());
EXPECT_TRUE(infer_graph->Run());
std::vector<float> output(golden.size());
EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data()));
EXPECT_EQ(golden, output);
}
TEST(Stack, LayoutinferernceTest_2) {
auto ctx = tim::vx::Context::Create();
auto graph = ctx->CreateGraph();
tim::vx::ShapeType input_shape({2, 3, 4, 1}); //cwhn
tim::vx::ShapeType kernel_shape({2, 2, 3, 3}); //iwho
tim::vx::ShapeType conv2dout_shape({3, 2, 2, 1}); //cwhn
// tim::vx::ShapeType output_shape({2, 1, 2, 1});
tim::vx::ShapeType output_shape({2, 3, 2});
tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
tim::vx::TensorAttribute::INPUT);
tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape,
tim::vx::TensorAttribute::CONSTANT);
tim::vx::TensorSpec conv2dout_spec(tim::vx::DataType::FLOAT32,
conv2dout_shape,
tim::vx::TensorAttribute::OUTPUT);
tim::vx::TensorSpec reduceout_spec(tim::vx::DataType::FLOAT32,
{0,0,0},
tim::vx::TensorAttribute::TRANSIENT);
tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
tim::vx::TensorAttribute::OUTPUT);
auto input_tensor = graph->CreateTensor(input_spec);
auto conv2dout_tensor = graph->CreateTensor(conv2dout_spec);
auto reduceout_tensor = graph->CreateTensor(reduceout_spec);
auto output_tensor = graph->CreateTensor(output_spec);
std::vector<float> in_data = {
1, 1, 1, 1, 2, 0, 5, 3, 6, 3, 1, 1,
1, 4, 2, 5, 7, 6, 3, 1, 1, 0, 2, 5,
};
std::vector<float> kernel_data = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
std::vector<float> golden = {
33, 33, 37, 35, 35, 43, 34, 34, 58, 39, 39, 43,
};
auto kernel_tensor = graph->CreateTensor(kernel_spec, kernel_data.data());
std::array<uint32_t, 2> stride({1, 1});
std::array<uint32_t, 2> dilation({1, 1});
auto op1 = graph->CreateOperation<tim::vx::ops::Conv2d>(
tim::vx::PadType::VALID, stride, dilation, 0, tim::vx::DataLayout::CWHN);
(*op1)
.BindInputs({input_tensor, kernel_tensor})
.BindOutputs({conv2dout_tensor});
std::vector<int32_t> axis = {2,3};
auto op2 = graph->CreateOperation<tim::vx::ops::ReduceMax>(axis, false);
(*op2).BindInputs({conv2dout_tensor}).BindOutputs({reduceout_tensor});
auto op3 = graph->CreateOperation<tim::vx::ops::Stack>(0, 2);
(*op3).BindInputs({reduceout_tensor, reduceout_tensor}).BindOutputs({output_tensor});
auto transform = tim::transform::LayoutInference(graph, ctx);
auto infer_graph = transform.first;
auto graph_io_map = transform.second;
auto infer_input = graph_io_map[graph->InputsTensor()[0]];
auto infer_output = graph_io_map[graph->OutputsTensor()[0]];
infer_input->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float));
EXPECT_TRUE(infer_graph->Compile());
EXPECT_TRUE(infer_graph->Run());
std::vector<float> output(golden.size());
EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data()));
EXPECT_EQ(golden, output);
}
TEST(Stack, LayoutinferernceTest_3) {
auto ctx = tim::vx::Context::Create();
auto graph = ctx->CreateGraph();
tim::vx::ShapeType input_shape({2, 3, 4, 1}); //cwhn
tim::vx::ShapeType kernel_shape({2, 2, 3, 3}); //iwho
tim::vx::ShapeType conv2dout_shape({3, 2, 2, 1}); //cwhn
tim::vx::ShapeType output_shape({2, 3, 2, 1});
tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
tim::vx::TensorAttribute::INPUT);
tim::vx::TensorSpec kernel_spec(tim::vx::DataType::FLOAT32, kernel_shape,
tim::vx::TensorAttribute::CONSTANT);
tim::vx::TensorSpec conv2dout_spec(tim::vx::DataType::FLOAT32,
{0,0,0,0},
tim::vx::TensorAttribute::TRANSIENT);
tim::vx::TensorSpec reduceout_spec(tim::vx::DataType::FLOAT32,
{0,0,0},
tim::vx::TensorAttribute::TRANSIENT);
tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
tim::vx::TensorAttribute::OUTPUT);
auto input_tensor = graph->CreateTensor(input_spec);
auto input2_tensor = graph->CreateTensor(input_spec);
auto conv2dout_tensor = graph->CreateTensor(conv2dout_spec);
auto conv2dout2_tensor = graph->CreateTensor(conv2dout_spec);
auto reduceout_tensor = graph->CreateTensor(reduceout_spec);
auto reduceout2_tensor = graph->CreateTensor(reduceout_spec);
auto output_tensor = graph->CreateTensor(output_spec);
std::vector<float> in_data = {
1, 1, 1, 1, 2, 0, 5, 3, 6, 3, 1, 1,
1, 4, 2, 5, 7, 6, 3, 1, 1, 0, 2, 5,
};
std::vector<float> kernel_data = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1, 1, 2, 1, 1, 1,
0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1,
1, 1, 1, 1, 2, 1, 1, 5, 3, 1, 2, 3, 1, 1, 2, 1, 1, 1,
};
std::vector<float> golden = {
55, 39, 21, 28, 37, 41, 49, 55, 28, 24, 40, 41
};
auto kernel_tensor = graph->CreateTensor(kernel_spec, kernel_data.data());
auto kernel2_tensor = graph->CreateTensor(kernel_spec, kernel_data.data());
std::array<uint32_t, 2> stride({1, 1});
std::array<uint32_t, 2> dilation({1, 1});
auto op1 = graph->CreateOperation<tim::vx::ops::Conv2d>(
tim::vx::PadType::VALID, stride, dilation, 0, tim::vx::DataLayout::CWHN);
(*op1)
.BindInputs({input_tensor, kernel_tensor})
.BindOutputs({conv2dout_tensor});
auto op11 = graph->CreateOperation<tim::vx::ops::Conv2d>(
tim::vx::PadType::VALID, stride, dilation, 0, tim::vx::DataLayout::CWHN);
(*op11)
.BindInputs({input2_tensor, kernel2_tensor})
.BindOutputs({conv2dout2_tensor});
std::vector<int32_t> axis = {1};
auto op2 = graph->CreateOperation<tim::vx::ops::ReduceMax>(axis, false);
(*op2).BindInputs({conv2dout_tensor}).BindOutputs({reduceout_tensor});
axis = {2};
auto op22 = graph->CreateOperation<tim::vx::ops::ReduceMax>(axis, false);
(*op22).BindInputs({conv2dout2_tensor}).BindOutputs({reduceout2_tensor});
auto op3 = graph->CreateOperation<tim::vx::ops::Stack>(0, 2);
(*op3).BindInputs({reduceout_tensor, reduceout2_tensor}).BindOutputs({output_tensor});
auto transform = tim::transform::LayoutInference(graph, ctx);
auto infer_graph = transform.first;
auto graph_io_map = transform.second;
auto infer_input = graph_io_map[graph->InputsTensor()[0]];
auto infer_input2 = graph_io_map[graph->InputsTensor()[1]];
auto infer_output = graph_io_map[graph->OutputsTensor()[0]];
infer_input->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float));
infer_input2->CopyDataToTensor(in_data.data(), in_data.size() * sizeof(float));
EXPECT_TRUE(infer_graph->Compile());
EXPECT_TRUE(infer_graph->Run());
std::vector<float> output(golden.size());
EXPECT_TRUE(infer_output->CopyDataFromTensor(output.data()));
EXPECT_EQ(golden, output);
}


@@ -27,155 +27,144 @@
#include "gtest/gtest.h"
TEST(Stack, shape_3_4_axis_2) {
  auto ctx = tim::vx::Context::Create();
  auto graph = ctx->CreateGraph();

  tim::vx::ShapeType input_shape({4, 3});
  tim::vx::ShapeType output_shape({4, 3, 2});
  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
                                 tim::vx::TensorAttribute::INPUT);
  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
                                  tim::vx::TensorAttribute::OUTPUT);

  auto input_tensor1 = graph->CreateTensor(input_spec);
  auto input_tensor2 = graph->CreateTensor(input_spec);
  auto output_tensor = graph->CreateTensor(output_spec);

  std::vector<float> in_data = {
      2, 1, 0, 1,
      2, 4, 4, 4,
      3, 2, 1, 4,
  };
  std::vector<float> golden = {
      2, 1, 0, 1,
      2, 4, 4, 4,
      3, 2, 1, 4,
      2, 1, 0, 1,
      2, 4, 4, 4,
      3, 2, 1, 4,
  };

  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data.data(),
                                              in_data.size() * sizeof(float)));
  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data.data(),
                                              in_data.size() * sizeof(float)));
  auto op = graph->CreateOperation<tim::vx::ops::Stack>(2, 2);
  (*op).BindInputs({input_tensor1, input_tensor2}).BindOutputs({output_tensor});

  EXPECT_TRUE(graph->Compile());
  EXPECT_TRUE(graph->Run());

  std::vector<float> output(golden.size());
  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
  EXPECT_EQ(golden, output);
}

TEST(Stack, shape_3_4_axis_1) {
  auto ctx = tim::vx::Context::Create();
  auto graph = ctx->CreateGraph();

  tim::vx::ShapeType input_shape({4, 3});
  tim::vx::ShapeType output_shape({4, 2, 3});
  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
                                 tim::vx::TensorAttribute::INPUT);
  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
                                  tim::vx::TensorAttribute::OUTPUT);

  auto input_tensor1 = graph->CreateTensor(input_spec);
  auto input_tensor2 = graph->CreateTensor(input_spec);
  auto output_tensor = graph->CreateTensor(output_spec);

  std::vector<float> in_data = {
      2, 1, 0, 1,
      2, 4, 4, 4,
      3, 2, 1, 4,
  };
  std::vector<float> golden = {
      2, 1, 0, 1,
      2, 1, 0, 1,
      2, 4, 4, 4,
      2, 4, 4, 4,
      3, 2, 1, 4,
      3, 2, 1, 4,
  };

  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data.data(),
                                              in_data.size() * sizeof(float)));
  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data.data(),
                                              in_data.size() * sizeof(float)));
  auto op = graph->CreateOperation<tim::vx::ops::Stack>(1, 2);
  (*op).BindInputs({input_tensor1, input_tensor2}).BindOutputs({output_tensor});

  EXPECT_TRUE(graph->Compile());
  EXPECT_TRUE(graph->Run());

  std::vector<float> output(golden.size());
  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
  EXPECT_EQ(golden, output);
}

TEST(Stack, shape_3_4_axis_0) {
  auto ctx = tim::vx::Context::Create();
  auto graph = ctx->CreateGraph();

  tim::vx::ShapeType input_shape({4, 3});
  tim::vx::ShapeType output_shape({2, 4, 3});
  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
                                 tim::vx::TensorAttribute::INPUT);
  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
                                  tim::vx::TensorAttribute::OUTPUT);

  auto input_tensor1 = graph->CreateTensor(input_spec);
  auto input_tensor2 = graph->CreateTensor(input_spec);
  auto output_tensor = graph->CreateTensor(output_spec);

  std::vector<float> in_data = {
      2, 1, 0, 1,
      2, 4, 4, 4,
      3, 2, 1, 4,
  };
  std::vector<float> golden = {
      2, 2,
      1, 1,
      0, 0,
      1, 1,
      2, 2,
      4, 4,
      4, 4,
      4, 4,
      3, 3,
      2, 2,
      1, 1,
      4, 4,
  };

  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data.data(),
                                              in_data.size() * sizeof(float)));
  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data.data(),
                                              in_data.size() * sizeof(float)));
  auto op = graph->CreateOperation<tim::vx::ops::Stack>(0, 2);
  (*op).BindInputs({input_tensor1, input_tensor2}).BindOutputs({output_tensor});

  EXPECT_TRUE(graph->Compile());
  EXPECT_TRUE(graph->Run());

  std::vector<float> output(golden.size());
  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
  EXPECT_EQ(golden, output);
}