Refine support for data copy operation
* Properly support tensor handle for both input and output
* Fix UT to use size_in_bytes instead of size in elements

Signed-off-by: Kainan Cha <kainan.zha@verisilicon.com>
parent ef69e466c7
commit d645494dcc
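With this change the second argument of CopyDataToTensor is a byte count, not an element count. A minimal caller-side sketch of the intended usage (the tensor and data names are illustrative, not part of this commit):

    std::vector<float> in_data = {1.0f, 2.0f, 3.0f, 4.0f};
    // Pass the buffer size in bytes; sizeof(float) makes the unit explicit
    // (the updated unit tests below express the same thing as size()*4).
    bool ok = input_tensor->CopyDataToTensor(in_data.data(),
                                             in_data.size() * sizeof(float));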
@@ -144,7 +144,7 @@ class Tensor {
   virtual const Quantization& GetQuantization() = 0;
   virtual const TensorSpec& GetSpec() = 0;
   virtual uint32_t GetId() = 0;
-  virtual bool CopyDataToTensor(const void* data, uint32_t size = 0) = 0;
+  virtual bool CopyDataToTensor(const void* data, uint32_t size_in_bytes = 0) = 0;
   virtual bool CopyDataFromTensor(void* data) = 0;
   virtual bool IsPlaceHolder() = 0;
   virtual bool IsConstTensor() = 0;
@@ -87,7 +87,7 @@ TensorImpl::TensorImpl(Graph* graph, const TensorSpec& spec, const void* data)
 
 TensorImpl::~TensorImpl() {}
 
-bool TensorImpl::CopyDataToTensor(const void* data, uint32_t size) {
+bool TensorImpl::CopyDataToTensor(const void* data, uint32_t size_in_bytes) {
   if (!IsWriteable()) {
     return false;
   }
@@ -96,18 +96,31 @@ bool TensorImpl::CopyDataToTensor(const void* data, uint32_t size) {
   if (data && VSI_NN_TENSOR_ID_NA != id_) {
     retn = false;
     vsi_nn_tensor_t* tensor = vsi_nn_GetTensor(graph_->graph(), id_);
+    uint32_t tensor_bytes = vsi_nn_GetTensorSize(
+        tensor->attr.size, tensor->attr.dim_num, tensor->attr.dtype.vx_type);
     if (tensor) {
-      /*
-      argument `data` of vsi_nn_CopyDataToTensor is non-const
-      convert it from const data to non-const, will be fixed in ovxlib
-      */
-      uint32_t tensor_bytes = vsi_nn_GetTensorSize(
-          tensor->attr.size, tensor->attr.dim_num, tensor->attr.dtype.vx_type);
-      const uint8_t* end = static_cast<const uint8_t*>(data) + tensor_bytes;
-      std::vector<uint8_t> data_copy(static_cast<const uint8_t*>(data), end);
+      if (tensor->attr.is_created_from_handle) {
+        void *ptr = NULL;
+        vsi_nn_GetTensorHandle(tensor, &ptr);
+        if (ptr) {
+          memcpy(ptr, data, tensor_bytes);
+          vsi_nn_FlushHandle(tensor);
+          retn = true;
+        } else {
+          VSILOGE("GetTensorHandle fail");
+        }
+      }
+      else {
+        /*
+        argument `data` of vsi_nn_CopyDataToTensor is non-const
+        convert it from const data to non-const, will be fixed in ovxlib
+        */
+        const uint8_t* end = static_cast<const uint8_t*>(data) + tensor_bytes;
+        std::vector<uint8_t> data_copy(static_cast<const uint8_t*>(data), end);
 
-      retn = VSI_SUCCESS ==
+        retn = VSI_SUCCESS ==
           vsi_nn_CopyDataToTensor(graph_->graph(), tensor, data_copy.data());
+      }
     }
   }
   return retn;
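For tensors created from a user handle, the new branch above writes through the mapped host pointer instead of going through vsi_nn_CopyDataToTensor; the flush is what makes the CPU-side write visible to the device. A distilled sketch of that branch, using the same ovxlib calls as the diff (the helper name is hypothetical, error handling trimmed, and it assumes the ovxlib headers already included by the surrounding implementation file):

    // Hypothetical helper mirroring the handle branch of CopyDataToTensor.
    static bool WriteThroughHandle(vsi_nn_tensor_t* tensor, const void* data,
                                   uint32_t size_in_bytes) {
      void* ptr = NULL;
      vsi_nn_GetTensorHandle(tensor, &ptr);  // fetch the mapped host pointer
      if (!ptr) {
        return false;
      }
      memcpy(ptr, data, size_in_bytes);      // plain host-side copy
      vsi_nn_FlushHandle(tensor);            // publish the write to the device
      return true;
    }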
@@ -122,19 +135,17 @@ bool TensorImpl::CopyDataFromTensor(void* data) {
   if (data && VSI_NN_TENSOR_ID_NA != id_) {
     retn = false;
     vsi_nn_tensor_t* tensor = vsi_nn_GetTensor(graph_->graph(), id_);
+    uint32_t tensor_bytes = vsi_nn_GetTensorSize(
+        tensor->attr.size, tensor->attr.dim_num, tensor->attr.dtype.vx_type);
     if (tensor) {
       if (tensor->attr.is_created_from_handle) {
-        void* old_ptr = NULL;
-        // TODO(jiangbo): current ovxlib didn't wrap this API
-        // use driver API directly
-        vxSwapTensorHandle(tensor->t, NULL, &old_ptr);
-        if (old_ptr) {
-          uint32_t tensor_bytes =
-              vsi_nn_GetTensorSize(tensor->attr.size, tensor->attr.dim_num,
-                                   tensor->attr.dtype.vx_type);
-
-          memcpy(data, old_ptr, tensor_bytes);
+        void* ptr = NULL;
+        vsi_nn_GetTensorHandle(tensor, &ptr);
+        if (ptr) {
+          memcpy(data, ptr, tensor_bytes);
           retn = true;
         } else {
           VSILOGE("GetTensorHandle fail");
         }
       } else {
         vsi_nn_CopyTensorToBuffer(graph_->graph(), tensor,
@@ -154,7 +165,7 @@ bool TensorImpl::Init() {
   attr.is_const = static_cast<bool>(spec_.attr_ & TensorAttribute::CONSTANT);
   attr.vtl = static_cast<bool>(spec_.attr_ & TensorAttribute::TRANSIENT);
 
-  // Use auto shape for virtual tensors so that tim-vx can perform it's own
+  // Use auto shape for virtual tensors so that tim-vx can perform it's own
   // shape inference
   if (attr.vtl) {
     attr.dim_num = VSI_NN_DIM_AUTO;
@@ -82,8 +82,8 @@ TEST(OP, notequal_shape_5_fp32) {
 
   std::vector<uint8_t> golden = {1, 1, 1, 0, 0};
 
-  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()));
-  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()));
+  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()*4));
+  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()*4));
 
   auto add = graph->CreateOperation<tim::vx::ops::NotEqual>();
   (*add).BindInputs({input_tensor1, input_tensor2}).BindOutputs({output_tensor});
@@ -117,8 +117,8 @@ TEST(OP, less_shape_5_1_fp32) {
 
   std::vector<uint8_t> golden = {0, 0, 1, 0, 0};
 
-  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()));
-  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()));
+  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()*4));
+  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()*4));
 
   auto add = graph->CreateOperation<tim::vx::ops::Less>();
   (*add).BindInputs({input_tensor1, input_tensor2}).BindOutputs({output_tensor});
@@ -156,8 +156,8 @@ TEST(OP, greaterorequal_shape_5_2_1_fp32) {
 
   std::vector<uint8_t> golden = {0, 1, 0, 1, 1, 0, 1, 0, 1, 1};
 
-  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()));
-  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()));
+  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()*4));
+  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()*4));
 
   auto add = graph->CreateOperation<tim::vx::ops::GreaterOrEqual>();
   (*add).BindInputs({input_tensor1, input_tensor2}).BindOutputs({output_tensor});
@@ -195,8 +195,8 @@ TEST(OP, greater_shape_5_2_1_1_fp32) {
 
   std::vector<uint8_t> golden = {0, 1, 0, 0, 0, 0, 1, 0, 0, 0};
 
-  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()));
-  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()));
+  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()*4));
+  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()*4));
 
   auto add = graph->CreateOperation<tim::vx::ops::Greater>();
   (*add).BindInputs({input_tensor1, input_tensor2}).BindOutputs({output_tensor});
@@ -234,8 +234,8 @@ TEST(OP, lessorequal_shape_1_5_2_1_1_fp32) {
 
   std::vector<uint8_t> golden = {1, 0, 1, 1, 1, 1, 0, 1, 1, 1};
 
-  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()));
-  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()));
+  EXPECT_TRUE(input_tensor1->CopyDataToTensor(in_data1.data(), in_data1.size()*4));
+  EXPECT_TRUE(input_tensor2->CopyDataToTensor(in_data2.data(), in_data2.size()*4));
 
   auto add = graph->CreateOperation<tim::vx::ops::LessOrEqual>();
   (*add).BindInputs({input_tensor1, input_tensor2}).BindOutputs({output_tensor});
@@ -96,7 +96,7 @@ TEST(OP, reorg_shape_4_4_4_1_fp32) {
       9, 11, 13, 15, 25, 27, 29, 31, 9, 11, 13, 15, 25, 27, 29, 31
   };
 
-  EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(), in_data.size()));
+  EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(), in_data.size()*4));
 
   auto add = graph->CreateOperation<tim::vx::ops::Reorg>(2);
   (*add).BindInputs({input_tensor}).BindOutputs({output_tensor});
@@ -43,7 +43,7 @@ TEST(OP, floor_shape_5_1_fp32) {
   std::vector<float> in_data = { -2.5, -0.1, 0, 0.55, std::numeric_limits<float>::infinity() };
   std::vector<float> golden = {-3, -1, 0, 0, std::numeric_limits<float>::infinity() };
 
-  EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(), in_data.size()));
+  EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(), in_data.size()*4));
 
   auto add = graph->CreateOperation<tim::vx::ops::Floor>();
   (*add).BindInputs({input_tensor}).BindOutputs({output_tensor});
@@ -71,7 +71,7 @@ TEST(OP, cast_shape_5_1_fp32_to_int32) {
   std::vector<float> in_data = { -2.5, -0.1, 0, 0.55, std::numeric_limits<float>::infinity() };
   std::vector<int> golden = {-2, 0, 0, 0, std::numeric_limits<int>::max()};
 
-  EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(), in_data.size()));
+  EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(), in_data.size()*4));
 
   auto add = graph->CreateOperation<tim::vx::ops::Cast>();
   (*add).BindInputs({input_tensor}).BindOutputs({output_tensor});
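The test hunks above pass in_data.size()*4 because every affected buffer holds 4-byte elements (fp32 or int32). An equivalent but self-documenting form, offered only as a suggestion and not part of this commit, would spell out the element size:

    EXPECT_TRUE(input_tensor->CopyDataToTensor(
        in_data.data(), in_data.size() * sizeof(in_data[0])));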