Remove tensor GetDataRef API (#569)

Co-authored-by: zhouheng.zheng <zhouheng.zheng@ouotlook.com>
Zhouheng Zheng 2023-03-23 21:35:30 +08:00 committed by GitHub
parent 6424ef104e
commit e49f67b840
13 changed files with 58 additions and 25 deletions
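Every call site in the hunks below applies the same replacement: instead of borrowing the constant tensor's internal pointer through GetDataRef(), the caller copies the data into a local buffer sized from the tensor spec and passes that buffer to CreateTensor(). A minimal, self-contained sketch of the before/after pattern; TensorSpec, Tensor, and Graph here are hypothetical stubs standing in for the real tim::vx classes, not the library itself:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Hypothetical, minimal stand-ins: only the members used by the call-site
// pattern shown in this commit are modelled.
struct TensorSpec {
  size_t byte_size = 16;
  size_t GetByteSize() const { return byte_size; }
};

struct Tensor {
  TensorSpec spec;
  std::vector<uint8_t> storage = std::vector<uint8_t>(16, 0x5A);
  const TensorSpec& GetSpec() const { return spec; }
  // Copies the tensor's data into a caller-provided buffer, mirroring the
  // CopyDataFromTensor call used throughout the diff.
  bool CopyDataFromTensor(void* dst) const {
    std::memcpy(dst, storage.data(), storage.size());
    return true;
  }
};

struct Graph {
  // Stand-in for infer_graph_->CreateTensor(spec, data); the real call makes
  // its own copy of the constant data it is given.
  void CreateTensor(const TensorSpec&, const void* data) {
    std::printf("first byte: 0x%02X\n",
                static_cast<unsigned>(static_cast<const uint8_t*>(data)[0]));
  }
};

int main() {
  Tensor src;
  Graph infer_graph;
  // Old call site (removed in this commit):
  //   infer_graph.CreateTensor(src.GetSpec(), src.GetDataRef());
  // New call site: copy into a caller-owned buffer sized from the spec.
  std::vector<uint8_t> dataRef(src.GetSpec().GetByteSize());
  src.CopyDataFromTensor(dataRef.data());
  infer_graph.CreateTensor(src.GetSpec(),
                           static_cast<const void*>(dataRef.data()));
  return 0;
}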


@@ -145,7 +145,6 @@ class Tensor {
   virtual void unmap() = 0;
   virtual bool IsPlaceHolder() = 0;
   virtual bool IsConstTensor() = 0;
-  virtual const void* GetDataRef() const = 0;
 };
 }  // namespace vx


@@ -317,8 +317,10 @@ LayoutInference(
   auto const_inputs = src_graph->GetConstantInputs();
   for (auto const_in : const_inputs) {
+    std::vector<uint8_t> dataRef(const_in->GetSpec().GetByteSize());
+    const_in->CopyDataFromTensor(dataRef.data());
     auto input =
-        infer_graph->CreateTensor(const_in->GetSpec(), const_in->GetDataRef());
+        infer_graph->CreateTensor(const_in->GetSpec(), (const void*)dataRef.data());
     layout_infer_ctx->UpdateTensorMap(const_in, input);
     tensor_queue.push_back(const_in);
     layout_infer_ctx->SetPermuteVector(


@@ -52,7 +52,9 @@ class BatchNormLayoutInfer : public OpLayoutInfer {
       std::shared_ptr<IPermuteVector> input_pv;
       auto src_in = input_tensors[idx];
       if (src_in->IsConstTensor()) {
-        perm_out = context_->infer_graph_->CreateTensor(src_in->GetSpec(), src_in->GetDataRef());
+        std::vector<uint8_t> dataRef(src_in->GetSpec().GetByteSize());
+        src_in->CopyDataFromTensor(dataRef.data());
+        perm_out = context_->infer_graph_->CreateTensor(src_in->GetSpec(), (const void*)dataRef.data());
         input_pv = MakeShared(src_in->GetShape().size());
       } else {
         perm_out = context_->GetMapedTensor(src_in);


@@ -56,8 +56,10 @@ class BidirectionalRnnLayoutInfer : public OpLayoutInfer {
       std::shared_ptr<IPermuteVector> required_pv;
       if ((i_src->IsConstTensor() &&
            !(i_src->GetSpec().attr_ & vx::TensorAttribute::INPUT))) {
+        std::vector<uint8_t> dataRef(i_src->GetSpec().GetByteSize());
+        i_src->CopyDataFromTensor(dataRef.data());
         infer_tensor = context_->infer_graph_->CreateTensor(
-            i_src->GetSpec(), i_src->GetDataRef());
+            i_src->GetSpec(), (const void*)dataRef.data());
         context_->UpdateTensorMap(i_src, infer_tensor);
       }
       if (i_src->GetId() == (uint32_t)-1) {


@@ -92,8 +92,10 @@ class Conv2dLayoutInfer : public OpLayoutInfer {
       if (!weight_required_pv->IsAligned()) {
         infer_weight = PermuteConstTensor(input_tensors[1], weight_required_pv);
       } else {
+        std::vector<uint8_t> dataRef(input_tensors[1]->GetSpec().GetByteSize());
+        input_tensors[1]->CopyDataFromTensor(dataRef.data());
         infer_weight = context_->infer_graph_->CreateTensor(
-            input_tensors[1]->GetSpec(), input_tensors[1]->GetDataRef());
+            input_tensors[1]->GetSpec(), (const void*)dataRef.data());
       }
       context_->SetPermuteVector(input_tensors[1], weight_required_pv);
       context_->UpdateTensorMap(input_tensors[1], infer_weight);
@@ -114,8 +116,10 @@ class Conv2dLayoutInfer : public OpLayoutInfer {
       // For bias
       if (input_tensors.size() == 3) {
         if (input_tensors[2]->IsConstTensor()) {
+          std::vector<uint8_t> dataRef(input_tensors[2]->GetSpec().GetByteSize());
+          input_tensors[2]->CopyDataFromTensor(dataRef.data());
           infer_bias = context_->infer_graph_->CreateTensor(
-              input_tensors[2]->GetSpec(), input_tensors[2]->GetDataRef());
+              input_tensors[2]->GetSpec(), (const void*)dataRef.data());
         } else {
           infer_bias = context_->GetMapedTensor(input_tensors[2]);
         }


@@ -53,8 +53,10 @@ class Conv3dLayoutInfer : public OpLayoutInfer {
           !(in->GetSpec().attr_ & vx::TensorAttribute::INPUT)) {
         // For bias
         if (in->GetShape().size() == 1) {
+          std::vector<uint8_t> dataRef(in->GetSpec().GetByteSize());
+          in->CopyDataFromTensor(dataRef.data());
           infer_tensor = context_->infer_graph_->CreateTensor(
-              in->GetSpec(), in->GetDataRef());
+              in->GetSpec(), (const void*)dataRef.data());
           trans_pv = MakeShared(1);
         } else {
           // For input/weight
@@ -69,8 +71,10 @@ class Conv3dLayoutInfer : public OpLayoutInfer {
           trans_pv = required_pv;
         }
       } else {
+        std::vector<uint8_t> dataRef(in->GetSpec().GetByteSize());
+        in->CopyDataFromTensor(dataRef.data());
         infer_tensor = context_->infer_graph_->CreateTensor(
-            in->GetSpec(), in->GetDataRef());
+            in->GetSpec(), (const void*)dataRef.data());
         trans_pv = MakeShared(required_pv->Rank());
       }
     }


@@ -92,8 +92,10 @@ class DeConv2dLayoutInfer : public OpLayoutInfer {
       if (!weight_required_pv->IsAligned()) {
         infer_weight = PermuteConstTensor(input_tensors[1], weight_required_pv);
       } else {
+        std::vector<uint8_t> dataRef(input_tensors[1]->GetSpec().GetByteSize());
+        input_tensors[1]->CopyDataFromTensor(dataRef.data());
         infer_weight = context_->infer_graph_->CreateTensor(
-            input_tensors[1]->GetSpec(), input_tensors[1]->GetDataRef());
+            input_tensors[1]->GetSpec(), (const void*)dataRef.data());
       }
       context_->SetPermuteVector(input_tensors[1], weight_required_pv);
       context_->UpdateTensorMap(input_tensors[1], infer_weight);
@@ -114,8 +116,10 @@ class DeConv2dLayoutInfer : public OpLayoutInfer {
       // For bias
       if (input_tensors.size() == 3) {
         if (input_tensors[2]->IsConstTensor()) {
+          std::vector<uint8_t> dataRef(input_tensors[2]->GetSpec().GetByteSize());
+          input_tensors[2]->CopyDataFromTensor(dataRef.data());
           infer_bias = context_->infer_graph_->CreateTensor(
-              input_tensors[2]->GetSpec(), input_tensors[2]->GetDataRef());
+              input_tensors[2]->GetSpec(), (const void*)dataRef.data());
         } else {
           infer_bias = context_->GetMapedTensor(input_tensors[2]);
         }


@@ -49,8 +49,10 @@ class FullyConnectedLayoutInfer : public OpLayoutInfer {
     }
     for (const auto& in : input_tensors) {
       if (in->IsConstTensor()) {
+        std::vector<uint8_t> dataRef(in->GetSpec().GetByteSize());
+        in->CopyDataFromTensor(dataRef.data());
         auto infer_tensor = context_->infer_graph_->CreateTensor(in->GetSpec(),
-                                                                 in->GetDataRef());
+                                                                 (const void*)dataRef.data());
         auto trans_pv = MakeShared(in->GetShape().size());
         context_->UpdateTensorMap(in, infer_tensor);


@@ -92,8 +92,10 @@ class GroupedConv2dLayoutInfer : public OpLayoutInfer {
       if (!weight_required_pv->IsAligned()) {
         infer_weight = PermuteConstTensor(input_tensors[1], weight_required_pv);
       } else {
+        std::vector<uint8_t> dataRef(input_tensors[1]->GetSpec().GetByteSize());
+        input_tensors[1]->CopyDataFromTensor(dataRef.data());
         infer_weight = context_->infer_graph_->CreateTensor(
-            input_tensors[1]->GetSpec(), input_tensors[1]->GetDataRef());
+            input_tensors[1]->GetSpec(), (const void*)dataRef.data());
       }
       context_->SetPermuteVector(input_tensors[1], weight_required_pv);
       context_->UpdateTensorMap(input_tensors[1], infer_weight);
@@ -114,8 +116,10 @@ class GroupedConv2dLayoutInfer : public OpLayoutInfer {
       // For bias
       if (input_tensors.size() == 3) {
         if (input_tensors[2]->IsConstTensor()) {
+          std::vector<uint8_t> dataRef(input_tensors[2]->GetSpec().GetByteSize());
+          input_tensors[2]->CopyDataFromTensor(dataRef.data());
           infer_bias = context_->infer_graph_->CreateTensor(
-              input_tensors[2]->GetSpec(), input_tensors[2]->GetDataRef());
+              input_tensors[2]->GetSpec(), (const void*)dataRef.data());
         } else {
           infer_bias = context_->GetMapedTensor(input_tensors[2]);
         }


@@ -197,18 +197,22 @@ OpLayoutInfer::AlignPermuteVectorForMutilInputs() {
   if (!required_pv) {
     // all inputs are constant tensors
     for (const auto& i_src : src_inputs) {
+      std::vector<uint8_t> dataRef(i_src->GetSpec().GetByteSize());
+      i_src->CopyDataFromTensor(dataRef.data());
       context_->UpdateTensorMap(
           i_src, context_->infer_graph_->CreateTensor(i_src->GetSpec(),
-                                                      i_src->GetDataRef()));
+                                                      (const void*)dataRef.data()));
       context_->SetPermuteVector(i_src, MakeShared(i_src->GetShape().size()));
     }
   } else {
     for (const auto& i_src : src_inputs) {
       std::shared_ptr<vx::Tensor> perm_out;
       if (i_src->IsConstTensor()) {
+        std::vector<uint8_t> dataRef(i_src->GetSpec().GetByteSize());
+        i_src->CopyDataFromTensor(dataRef.data());
         required_pv->IsAligned()
             ? perm_out = context_->infer_graph_->CreateTensor(
-                  i_src->GetSpec(), i_src->GetDataRef())
+                  i_src->GetSpec(), (const void*)dataRef.data())
             : perm_out = PermuteConstTensor(i_src, required_pv);
       } else {
         auto final_pv =
@@ -241,8 +245,10 @@ OpLayoutInfer::AlignPermuteVectorForElementWise() {
     std::shared_ptr<vx::Tensor> perm_out;
     if (i_src->IsConstTensor()) {
       if (required_pv->IsAligned()) {
+        std::vector<uint8_t> dataRef(i_src->GetSpec().GetByteSize());
+        i_src->CopyDataFromTensor(dataRef.data());
         perm_out = context_->infer_graph_->CreateTensor(i_src->GetSpec(),
-                                                        i_src->GetDataRef());
+                                                        (const void*)dataRef.data());
       } else if (i_src->GetShape().size() == required_pv->Rank()) {
         perm_out = PermuteConstTensor(i_src, required_pv);
         // need shape expansion
@@ -272,8 +278,10 @@ void OpLayoutInfer::ReverseInputsPermuteVector() {
     std::shared_ptr<IPermuteVector> input_pv;
     if (i_src->GetId() != (uint32_t)-1) {
       if (i_src->IsConstTensor()) {
+        std::vector<uint8_t> dataRef(i_src->GetSpec().GetByteSize());
+        i_src->CopyDataFromTensor(dataRef.data());
        perm_out = context_->infer_graph_->CreateTensor(i_src->GetSpec(),
-                                                        i_src->GetDataRef());
+                                                        (const void*)dataRef.data());
         input_pv = MakeShared(i_src->GetShape().size());
       } else {
         perm_out = context_->GetMapedTensor(i_src);
@@ -312,7 +320,7 @@ bool OpLayoutInfer::TransposeConstTensorData(
   for (const auto& s : input->GetShape()) out_size *= s;
   out_size *= type_size;
   out_data.resize(out_size);
-  if (!input->GetDataRef()) {
+  if (!input->IsConstTensor()) {
     return false;
   }
@@ -339,7 +347,9 @@ bool OpLayoutInfer::TransposeConstTensorData(
                  [](const uint32_t& i) { return i; });
   std::transform(perm.begin(), perm.end(), std::back_inserter(native_perm),
                  [](const uint32_t& i) { return i; });
-  vsi_nn_Transpose(out_data.data(), (uint8_t*)(input->GetDataRef()),
+  std::vector<uint8_t> dataRef(input->GetSpec().GetByteSize());
+  input->CopyDataFromTensor(dataRef.data());
+  vsi_nn_Transpose(out_data.data(), (uint8_t*)(dataRef.data()),
                    native_shape_array.data(),
                    static_cast<uint32_t>(input->GetShape().size()),
                    native_perm.data(), vx_type);
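TransposeConstTensorData follows the same copy-out pattern with one extra change: the early return that used to test GetDataRef() for null now tests IsConstTensor(), since there is no longer a raw pointer to check. A small sketch of that reworked guard; the types below are hypothetical stubs, and the final transpose is reduced to a placeholder for the real vsi_nn_Transpose call:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical, minimal stand-ins for the tim::vx tensor interfaces.
struct TensorSpec {
  size_t GetByteSize() const { return 4; }
};

struct Tensor {
  bool is_const = true;
  std::vector<uint8_t> storage{1, 2, 3, 4};
  TensorSpec spec;
  bool IsConstTensor() const { return is_const; }
  const TensorSpec& GetSpec() const { return spec; }
  void CopyDataFromTensor(void* dst) const {
    std::memcpy(dst, storage.data(), storage.size());
  }
};

bool TransposeConstTensorData(const Tensor* input, std::vector<uint8_t>& out_data) {
  // The old guard tested input->GetDataRef() for null; with that pointer gone,
  // eligibility is expressed directly as "is this a constant tensor".
  if (!input->IsConstTensor()) {
    return false;
  }
  // Copy-out replaces the borrowed pointer; the transpose then reads dataRef.
  std::vector<uint8_t> dataRef(input->GetSpec().GetByteSize());
  input->CopyDataFromTensor(dataRef.data());
  out_data = dataRef;  // placeholder for the vsi_nn_Transpose call in the real code
  return true;
}

int main() {
  Tensor t;
  std::vector<uint8_t> out;
  return TransposeConstTensorData(&t, out) ? 0 : 1;
}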


@@ -56,8 +56,10 @@ class UnidirectionalLstmLayoutInfer : public OpLayoutInfer {
       std::shared_ptr<IPermuteVector> required_pv;
       if ((i_src->IsConstTensor() &&
            !(i_src->GetSpec().attr_ & vx::TensorAttribute::INPUT))) {
+        std::vector<uint8_t> dataRef(i_src->GetSpec().GetByteSize());
+        i_src->CopyDataFromTensor(dataRef.data());
         infer_tensor = context_->infer_graph_->CreateTensor(
-            i_src->GetSpec(), i_src->GetDataRef());
+            i_src->GetSpec(), (const void*)dataRef.data());
         context_->UpdateTensorMap(i_src, infer_tensor);
       }
       if (i_src->GetId() == (uint32_t)-1) {


@@ -56,8 +56,10 @@ class UnidirectionalRnnLayoutInfer : public OpLayoutInfer {
       std::shared_ptr<IPermuteVector> required_pv;
       if ((i_src->IsConstTensor() &&
            !(i_src->GetSpec().attr_ & vx::TensorAttribute::INPUT))) {
+        std::vector<uint8_t> dataRef(i_src->GetSpec().GetByteSize());
+        i_src->CopyDataFromTensor(dataRef.data());
         infer_tensor = context_->infer_graph_->CreateTensor(
-            i_src->GetSpec(), i_src->GetDataRef());
+            i_src->GetSpec(), (const void*)dataRef.data());
         context_->UpdateTensorMap(i_src, infer_tensor);
       }
       if (i_src->GetId() == (uint32_t)-1) {


@@ -57,8 +57,6 @@ class TensorImpl : public Tensor {
     return spec_.attr_ == tim::vx::TensorAttribute::CONSTANT;
   }
-  const void* GetDataRef() const { return data_; }
   GraphImpl* graph_;
   vsi_nn_tensor_id_t id_;
   TensorSpec spec_;
@@ -96,8 +94,6 @@ class TensorPlaceholder : public Tensor {
     return spec_.attr_ == tim::vx::TensorAttribute::CONSTANT;
   }
-  const void* GetDataRef() const { return nullptr; }
   vsi_nn_tensor_id_t id_;
   TensorSpec spec_;
 };