support DMAbuffer (#214)

Signed-off-by: Chen Xin <jack.chen@verisilicon.com>
Author: chxin66
Date: 2021-11-21 22:46:20 +08:00 (committed by GitHub)
Parent: 8ea83f137c
Commit: 8b1ec74f07
6 changed files with 30 additions and 5 deletions

View File

@@ -26,13 +26,12 @@
#include <memory>
#include <vector>
namespace tim {
namespace vx {
class Tensor;
struct TensorSpec;
struct DmaBufferDesc;
class Operation;
class Graph {
@@ -43,6 +42,8 @@ class Graph {
virtual std::shared_ptr<Tensor> CreateTensor(const TensorSpec& spec,
const void* data = nullptr) = 0;
virtual std::shared_ptr<Tensor> CreateTensor(const TensorSpec& spec,
const DmaBufferDesc& dmafd) = 0;
/// Create a placeholder tensor for optional inputs of operations
virtual std::shared_ptr<Tensor> CreateTensorPlaceHolder() = 0;
@@ -70,7 +71,7 @@ class Graph {
virtual const std::vector<std::shared_ptr<Operation>> GetConsumersOp(
std::shared_ptr<Tensor> tensor) const = 0;
virtual void PrintGraph() const = 0;
protected:
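
For context, a minimal usage sketch of the new overload follows, assuming the usual TIM-VX context/graph setup. The shape, data type, and the dmabuf_fd parameter are illustrative placeholders, not part of this commit, and the DMA-BUF fd itself is expected to come from an external exporter (a dma-heap sketch appears under the DmaBufferDesc definition below).

#include "tim/vx/context.h"
#include "tim/vx/graph.h"
#include "tim/vx/tensor.h"

// Sketch only: wrap an already-exported DMA-BUF fd as a graph input tensor.
// dmabuf_fd, the shape, and the data type are placeholders.
void BuildGraphWithDmaInput(int64_t dmabuf_fd) {
  auto context = tim::vx::Context::Create();
  auto graph = context->CreateGraph();

  tim::vx::ShapeType input_shape({224, 224, 3, 1});  // placeholder shape
  tim::vx::TensorSpec input_spec(tim::vx::DataType::UINT8, input_shape,
                                 tim::vx::TensorAttribute::INPUT);

  tim::vx::DmaBufferDesc desc{dmabuf_fd};              // struct added by this commit
  auto input = graph->CreateTensor(input_spec, desc);  // overload added by this commit

  // ... bind input to operations, then graph->Compile() and graph->Run().
}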

View File

@@ -140,6 +140,10 @@ struct TensorSpec {
Quantization quantization_;
};
struct DmaBufferDesc {
int64_t fd;
};
class Tensor {
public:
virtual ~Tensor() {}
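
DmaBufferDesc carries only the exporter's file descriptor; producing that fd is up to the caller. One possible source, sketched below, is the Linux dma-heap interface (kernel 5.6+). The heap path and buffer size are assumptions for illustration; any DMA-BUF exporter (ION, DRM/GBM, V4L2) would work the same way as long as it yields a DMA-BUF fd.

#include <cstddef>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dma-heap.h>

// Sketch only: allocate a buffer from the generic "system" dma-heap and return
// its DMA-BUF fd, ready to be placed in tim::vx::DmaBufferDesc. Returns -1 on failure.
int64_t AllocDmaBuf(size_t size_in_bytes) {
  int heap = open("/dev/dma_heap/system", O_RDWR | O_CLOEXEC);
  if (heap < 0) return -1;

  struct dma_heap_allocation_data alloc = {};
  alloc.len = size_in_bytes;
  alloc.fd_flags = O_RDWR | O_CLOEXEC;

  int ret = ioctl(heap, DMA_HEAP_IOCTL_ALLOC, &alloc);
  close(heap);
  return ret < 0 ? -1 : static_cast<int64_t>(alloc.fd);
}

The returned fd can then be wrapped as DmaBufferDesc{fd} and handed to the CreateTensor overload above; keeping the fd open for the tensor's lifetime is the safe assumption, since the tensor keeps referring to the same underlying buffer.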

View File

@@ -105,6 +105,11 @@ std::shared_ptr<Tensor> GraphImpl::CreateTensor(const TensorSpec& spec,
return std::make_shared<TensorImpl>(this, spec, data);
}
std::shared_ptr<Tensor> GraphImpl::CreateTensor(const TensorSpec& spec,
const DmaBufferDesc& dmafd) {
return std::make_shared<TensorImpl>(this, spec, dmafd);
}
std::shared_ptr<Tensor> GraphImpl::CreateTensorPlaceHolder() {
if (!tensor_placeholder_) {
tensor_placeholder_ = std::make_shared<TensorPlaceholder>(this);

View File

@@ -62,6 +62,8 @@ class GraphImpl : public Graph {
/// Implement parents' virtual functions
std::shared_ptr<Tensor> CreateTensor(const TensorSpec& spec,
const void* data = nullptr) override;
std::shared_ptr<Tensor> CreateTensor(const TensorSpec& spec,
const DmaBufferDesc& dmafd) override;
std::shared_ptr<Tensor> CreateTensorPlaceHolder() override;
bool Compile() override;

View File

@@ -85,6 +85,14 @@ TensorImpl::TensorImpl(Graph* graph, const TensorSpec& spec, const void* data)
Init();
}
TensorImpl::TensorImpl(Graph* graph, const TensorSpec& spec, const DmaBufferDesc& dmafd)
: graph_(reinterpret_cast<GraphImpl*>(graph)),
id_(VSI_NN_TENSOR_ID_NA),
spec_(spec),
fd_(dmafd.fd) {
Init();
}
TensorImpl::~TensorImpl() {}
bool TensorImpl::CopyDataToTensor(const void* data, uint32_t size_in_bytes) {
@@ -183,8 +191,11 @@ bool TensorImpl::Init() {
if ((spec_.attr_ & TensorAttribute::INPUT) ||
(spec_.attr_ & TensorAttribute::OUTPUT)) {
id_ = vsi_nn_AddTensorFromHandle(graph_->graph(), VSI_NN_TENSOR_ID_AUTO,
&attr, nullptr);
id_ = vsi_nn_AddTensorFromHandle(graph_->graph(), VSI_NN_TENSOR_ID_AUTO, // For INPUT/OUTPUT tensors the DMA-BUF fd is registered as the tensor handle,
&attr, fd_ != -1 ? (uint8_t*)fd_ : nullptr); // so it is passed as a non-const handle rather than const data.
if (fd_ != -1) {
attr.vsi_memory_type = VSI_MEMORY_TYPE_DMABUF;
}
} else {
id_ = vsi_nn_AddTensor(graph_->graph(), VSI_NN_TENSOR_ID_AUTO, &attr,
nullptr);
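
Since Init() registers the fd itself as the tensor handle and tags the attribute with VSI_MEMORY_TYPE_DMABUF, an application that also needs CPU access to the same buffer typically maps the fd directly instead of copying through the tensor. The sketch below shows one conventional way to do that, bracketing the access with DMA_BUF_IOCTL_SYNC for cache coherence; this is standard Linux DMA-BUF usage, not an API added by this commit, and the fd/size parameters are assumed to describe the buffer behind DmaBufferDesc.

#include <cstring>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

// Sketch only: fill a DMA-BUF backed input tensor from the CPU side.
bool FillDmaBuf(int fd, size_t size, const void* src) {
  void* va = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (va == MAP_FAILED) return false;

  struct dma_buf_sync sync = {};
  sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
  ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);  // begin CPU write access

  std::memcpy(va, src, size);

  sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
  ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);  // end CPU access so the device sees the data

  munmap(va, size);
  return true;
}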

View File

@@ -33,6 +33,7 @@ namespace vx {
class TensorImpl : public Tensor {
public:
TensorImpl(Graph* graph, const TensorSpec& spec, const void* data = nullptr);
TensorImpl(Graph* graph, const TensorSpec& spec, const DmaBufferDesc& dmafd);
~TensorImpl();
bool Init();
@@ -56,6 +57,7 @@ class TensorImpl : public Tensor {
vsi_nn_tensor_id_t id_;
TensorSpec spec_;
const void* data_;
int64_t fd_{-1};
};
class TensorPlaceholder : public Tensor {