Refine support for data copy operation

* Properly support tensor handle for both input and output
* Fix UT to use size_in_bytes instead of size in elements

Signed-off-by: Kainan Cha <kainan.zha@verisilicon.com>
commit d645494dcc
parent ef69e466c7
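For orientation before the diff, here is a minimal caller-side sketch of the contract this change settles on, following the unit-test pattern further down. Tensor names, shapes, and the elided graph setup are illustrative only and the sketch has not been run against a device; the point is that CopyDataToTensor now takes the payload size in bytes (the literal "* 4" in the updated tests is the same thing for float32 data), while CopyDataFromTensor still takes no size and fills the caller's buffer with the tensor's full contents.

    // Sketch only: assumes the public TIM-VX headers used by the tests below.
    #include <vector>

    #include "tim/vx/context.h"
    #include "tim/vx/graph.h"
    #include "tim/vx/tensor.h"
    #include "tim/vx/types.h"

    void copy_contract_sketch() {
      auto ctx = tim::vx::Context::Create();
      auto graph = ctx->CreateGraph();

      tim::vx::ShapeType shape({5, 1});
      tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, shape,
                                     tim::vx::TensorAttribute::INPUT);
      auto input_tensor = graph->CreateTensor(input_spec);

      std::vector<float> in_data = {-2.5f, -0.1f, 0.0f, 0.55f, 1.0f};

      // Second argument is a byte count, not an element count.
      input_tensor->CopyDataToTensor(in_data.data(),
                                     in_data.size() * sizeof(float));

      // ... bind operations, graph->Compile(), graph->Run() as in the tests ...

      // CopyDataFromTensor has no size argument: the destination buffer must be
      // large enough for the whole tensor. (A real pipeline would read the
      // graph's output tensor after Run(); reading the input tensor back here
      // only round-trips the data written above.)
      std::vector<float> out_data(in_data.size());
      bool ok = input_tensor->CopyDataFromTensor(out_data.data());
      (void)ok;
    }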
@@ -144,7 +144,7 @@ class Tensor {
   virtual const Quantization& GetQuantization() = 0;
   virtual const TensorSpec& GetSpec() = 0;
   virtual uint32_t GetId() = 0;
-  virtual bool CopyDataToTensor(const void* data, uint32_t size = 0) = 0;
+  virtual bool CopyDataToTensor(const void* data, uint32_t size_in_bytes = 0) = 0;
   virtual bool CopyDataFromTensor(void* data) = 0;
   virtual bool IsPlaceHolder() = 0;
   virtual bool IsConstTensor() = 0;
@@ -87,7 +87,7 @@ TensorImpl::TensorImpl(Graph* graph, const TensorSpec& spec, const void* data)
 
 TensorImpl::~TensorImpl() {}
 
-bool TensorImpl::CopyDataToTensor(const void* data, uint32_t size) {
+bool TensorImpl::CopyDataToTensor(const void* data, uint32_t size_in_bytes) {
   if (!IsWriteable()) {
     return false;
   }
@@ -96,13 +96,25 @@ bool TensorImpl::CopyDataToTensor(const void* data, uint32_t size) {
   if (data && VSI_NN_TENSOR_ID_NA != id_) {
     retn = false;
     vsi_nn_tensor_t* tensor = vsi_nn_GetTensor(graph_->graph(), id_);
+    uint32_t tensor_bytes = vsi_nn_GetTensorSize(
+        tensor->attr.size, tensor->attr.dim_num, tensor->attr.dtype.vx_type);
     if (tensor) {
+      if (tensor->attr.is_created_from_handle) {
+        void *ptr = NULL;
+        vsi_nn_GetTensorHandle(tensor, &ptr);
+        if (ptr) {
+          memcpy(ptr, data, tensor_bytes);
+          vsi_nn_FlushHandle(tensor);
+          retn = true;
+        } else {
+          VSILOGE("GetTensorHandle fail");
+        }
+      }
+      else {
       /*
       argument `data` of vsi_nn_CopyDataToTensor is non-const
       convert it from const data to non-const, will be fixed in ovxlib
       */
-      uint32_t tensor_bytes = vsi_nn_GetTensorSize(
-          tensor->attr.size, tensor->attr.dim_num, tensor->attr.dtype.vx_type);
       const uint8_t* end = static_cast<const uint8_t*>(data) + tensor_bytes;
       std::vector<uint8_t> data_copy(static_cast<const uint8_t*>(data), end);
 
@@ -110,6 +122,7 @@ bool TensorImpl::CopyDataToTensor(const void* data, uint32_t size) {
       vsi_nn_CopyDataToTensor(graph_->graph(), tensor, data_copy.data());
     }
   }
+  }
   return retn;
 }
 
@@ -122,19 +135,17 @@ bool TensorImpl::CopyDataFromTensor(void* data) {
   if (data && VSI_NN_TENSOR_ID_NA != id_) {
     retn = false;
     vsi_nn_tensor_t* tensor = vsi_nn_GetTensor(graph_->graph(), id_);
+    uint32_t tensor_bytes = vsi_nn_GetTensorSize(
+        tensor->attr.size, tensor->attr.dim_num, tensor->attr.dtype.vx_type);
     if (tensor) {
       if (tensor->attr.is_created_from_handle) {
-        void* old_ptr = NULL;
-        // TODO(jiangbo): current ovxlib didn't wrap this API
-        // use driver API directly
-        vxSwapTensorHandle(tensor->t, NULL, &old_ptr);
-        if (old_ptr) {
-          uint32_t tensor_bytes =
-              vsi_nn_GetTensorSize(tensor->attr.size, tensor->attr.dim_num,
-                                   tensor->attr.dtype.vx_type);
-
-          memcpy(data, old_ptr, tensor_bytes);
+        void* ptr = NULL;
+        vsi_nn_GetTensorHandle(tensor, &ptr);
+        if (ptr) {
+          memcpy(data, ptr, tensor_bytes);
           retn = true;
+        } else {
+          VSILOGE("GetTensorHandle fail");
         }
       } else {
         vsi_nn_CopyTensorToBuffer(graph_->graph(), tensor,
@@ -82,8 +82,8 @@ TEST(OP, notequal_shape_5_fp32) {
 
   std::vector<uint8_t> golden = {1, 1, 1, 0, 0};
 
-  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()));
-  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()));
+  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()*4));
+  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()*4));
 
   auto add = graph->CreateOperation<tim::vx::ops::NotEqual>();
   (*add).BindInputs({input_tensor1, input_tensor2}).BindOutputs({output_tensor});
@@ -117,8 +117,8 @@ TEST(OP, less_shape_5_1_fp32) {
 
   std::vector<uint8_t> golden = {0, 0, 1, 0, 0};
 
-  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()));
-  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()));
+  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()*4));
+  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()*4));
 
   auto add = graph->CreateOperation<tim::vx::ops::Less>();
   (*add).BindInputs({input_tensor1, input_tensor2}).BindOutputs({output_tensor});
@@ -156,8 +156,8 @@ TEST(OP, greaterorequal_shape_5_2_1_fp32) {
 
   std::vector<uint8_t> golden = {0, 1, 0, 1, 1, 0, 1, 0, 1, 1};
 
-  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()));
-  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()));
+  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()*4));
+  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()*4));
 
   auto add = graph->CreateOperation<tim::vx::ops::GreaterOrEqual>();
   (*add).BindInputs({input_tensor1, input_tensor2}).BindOutputs({output_tensor});
@@ -195,8 +195,8 @@ TEST(OP, greater_shape_5_2_1_1_fp32) {
 
   std::vector<uint8_t> golden = {0, 1, 0, 0, 0, 0, 1, 0, 0, 0};
 
-  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()));
-  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()));
+  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()*4));
+  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()*4));
 
   auto add = graph->CreateOperation<tim::vx::ops::Greater>();
   (*add).BindInputs({input_tensor1, input_tensor2}).BindOutputs({output_tensor});
@@ -234,8 +234,8 @@ TEST(OP, lessorequal_shape_1_5_2_1_1_fp32) {
 
   std::vector<uint8_t> golden = {1, 0, 1, 1, 1, 1, 0, 1, 1, 1};
 
-  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()));
-  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()));
+  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()*4));
+  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()*4));
 
   auto add = graph->CreateOperation<tim::vx::ops::LessOrEqual>();
   (*add).BindInputs({input_tensor1, input_tensor2}).BindOutputs({output_tensor});
@@ -96,7 +96,7 @@ TEST(OP, reorg_shape_4_4_4_1_fp32) {
       9, 11, 13, 15, 25, 27, 29, 31, 9, 11, 13, 15, 25, 27, 29, 31
   };
 
-  EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(), in_data.size()));
+  EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(), in_data.size()*4));
 
   auto add = graph->CreateOperation<tim::vx::ops::Reorg>(2);
   (*add).BindInputs({input_tensor}).BindOutputs({output_tensor});
@@ -43,7 +43,7 @@ TEST(OP, floor_shape_5_1_fp32) {
   std::vector<float> in_data = { -2.5, -0.1, 0, 0.55, std::numeric_limits<float>::infinity() };
   std::vector<float> golden = {-3, -1, 0, 0, std::numeric_limits<float>::infinity() };
 
-  EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(), in_data.size()));
+  EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(), in_data.size()*4));
 
   auto add = graph->CreateOperation<tim::vx::ops::Floor>();
   (*add).BindInputs({input_tensor}).BindOutputs({output_tensor});
@@ -71,7 +71,7 @@ TEST(OP, cast_shape_5_1_fp32_to_int32) {
   std::vector<float> in_data = { -2.5, -0.1, 0, 0.55, std::numeric_limits<float>::infinity() };
   std::vector<int> golden = {-2, 0, 0, 0, std::numeric_limits<int>::max()};
 
-  EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(), in_data.size()));
+  EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(), in_data.size()*4));
 
   auto add = graph->CreateOperation<tim::vx::ops::Cast>();
   (*add).BindInputs({input_tensor}).BindOutputs({output_tensor});