Fix warnings relating to inheritance (#256)

* Remove unnecessary compiler flags

* Refactor CMakeLists.txt

* Tweak CMakeLists.txt for libtim_internal

* Tweak CMakeLists.txt for libtim-vx

* Make TIM_VX_ENABLE_TEST default to OFF

* Eliminate usage of include_directories

* Fix CI unit test

* Fix warnings relating to inheritance
Authored by Goose Bomb on 2022-01-04 14:35:17 +08:00; committed by GitHub
parent eecbe264b6
commit 8e4ab68213
6 changed files with 59 additions and 58 deletions
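For context, the inheritance warnings addressed by this commit are the usual diagnostics that GCC/Clang emit for polymorphic class hierarchies (e.g. -Wdelete-non-virtual-dtor, and the -Wsuggest-override / -Winconsistent-missing-override family): a base class with virtual functions but no virtual destructor, and overriding member functions that are not marked override. The two fixes applied throughout the diff below look roughly like this minimal sketch (illustrative names only, not the TIM-VX classes):

#include <memory>
#include <vector>

struct Op {
  virtual std::vector<int> Outputs() = 0;
  virtual ~Op() = default;  // fix 1: virtual destructor on the polymorphic base
};

struct AddOp : Op {
  // fix 2: 'override' documents the intent and turns signature drift into a hard error
  std::vector<int> Outputs() override { return {42}; }
};

int main() {
  std::unique_ptr<Op> op = std::make_unique<AddOp>();  // destroyed through Op*
  return op->Outputs().front() == 42 ? 0 : 1;
}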


@@ -41,7 +41,8 @@ class RNNCell : public Operation {
     kHARDSIGMOID = 31, /* temporary use 31 */
   };
   RNNCell(Graph* graph, ActivationType activation);
-  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;
+  std::shared_ptr<Operation> Clone(
+      std::shared_ptr<Graph>& graph) const override;
  protected:
   const ActivationType activation_;


@@ -36,7 +36,8 @@ DirectMapOpImpl::DirectMapOpImpl(Graph* graph, uint32_t kind, int input_cnt,
   node_->uid = graph_->graph()->cur_nid;
 }
-DirectMapOpImpl& DirectMapOpImpl::BindInput(const std::shared_ptr<Tensor>& tensor) {
+DirectMapOpImpl& DirectMapOpImpl::BindInput(
+    const std::shared_ptr<Tensor>& tensor) {
   inputs_tensor_.push_back(tensor);
   uint32_t tensor_id = tensor->GetId();
   node_->input.tensors[input_tensor_index++] = tensor_id;
@@ -59,8 +60,7 @@ DirectMapOpImpl& DirectMapOpImpl::BindOutput(
   return *this;
 }
-void DirectMapOpImpl::SetRoundingPolicy(
-    OverflowPolicy overflow_policy,
+void DirectMapOpImpl::SetRoundingPolicy(OverflowPolicy overflow_policy,
                                         RoundingPolicy rounding_policy,
                                         RoundType down_scale_size_rounding,
                                         uint32_t accumulator_bits) {
@@ -71,5 +71,5 @@ void DirectMapOpImpl::SetRoundingPolicy(
   node_->vx_param.accumulator_bits = accumulator_bits;
 }
-}
-}
+}  // namespace vx
+}  // namespace tim


@@ -24,7 +24,6 @@
 #ifndef TIM_VX_DIRECT_MAP_OP_IMPL_H_
 #define TIM_VX_DIRECT_MAP_OP_IMPL_H_
 #include "vsi_nn_pub.h"
 #include "graph_private.h"
@@ -52,10 +51,10 @@ class DirectMapOpImpl : public OpImpl {
                          RoundType down_scale_size_rounding = RoundType::FLOOR,
                          uint32_t accumulator_bits = 0);
-  std::vector<std::shared_ptr<Tensor>> InputsTensor() {
+  std::vector<std::shared_ptr<Tensor>> InputsTensor() override {
     return inputs_tensor_;
   }
-  std::vector<std::shared_ptr<Tensor>> OutputsTensor() {
+  std::vector<std::shared_ptr<Tensor>> OutputsTensor() override {
     return outputs_tensor_;
   }
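Adding override to InputsTensor()/OutputsTensor() here makes the compiler verify that each function really implements the pure virtual declared in OpImpl; without the keyword, a small signature difference silently declares a brand-new, unrelated function. A sketch of the failure mode it guards against, using simplified stand-in types rather than the real TIM-VX declarations:

#include <memory>
#include <vector>

struct Tensor {};  // stand-in for tim::vx::Tensor

struct OpImplLike {
  virtual ~OpImplLike() = default;
  virtual std::vector<std::shared_ptr<Tensor>> InputsTensor() = 0;
};

struct DirectMapLike : OpImplLike {
  // With 'override', a mismatch such as
  //   std::vector<std::shared_ptr<Tensor>> InputsTensor() const override { ... }
  // is rejected at compile time, because the const version does not match the
  // base signature; without it, the class would silently stay abstract.
  std::vector<std::shared_ptr<Tensor>> InputsTensor() override {
    return inputs_tensor_;
  }

  std::vector<std::shared_ptr<Tensor>> inputs_tensor_;
};

int main() {
  DirectMapLike op;
  return static_cast<int>(op.InputsTensor().size());  // empty vector -> 0
}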


@@ -33,5 +33,5 @@ OpImpl::OpImpl(Graph* graph, uint32_t kind, int input_cnt, int output_cnt,
       input_cnt_(input_cnt),
       output_cnt_(output_cnt),
       layout_(layout) {}
-}
-}
+}  // namespace vx
+}  // namespace tim


@@ -35,13 +35,14 @@ class OpImpl {
  public:
   OpImpl(Graph* graph, uint32_t kind, int input_cnt, int output_cnt,
          DataLayout layout);
+  virtual ~OpImpl() = default;
   virtual OpImpl& BindInput(const std::shared_ptr<Tensor>& tensor) = 0;
   virtual OpImpl& BindOutput(const std::shared_ptr<Tensor>& tensor) = 0;
   virtual std::vector<std::shared_ptr<Tensor>> InputsTensor() = 0;
   virtual std::vector<std::shared_ptr<Tensor>> OutputsTensor() = 0;
   virtual vsi_nn_node_t* node() = 0;
-  GraphImpl* graph_;
+  GraphImpl* graph_{nullptr};
   uint32_t kind_{0};
   int32_t input_cnt_{0};
   int32_t output_cnt_{0};
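The new virtual ~OpImpl() = default; matters because concrete implementations such as RNNCellImpl are created with std::make_unique and, judging by the assignment to impl_ in the rnn_cell.cc diff below, destroyed through a pointer to the OpImpl base. A minimal sketch of the problem the virtual destructor prevents, with illustrative names rather than the real classes:

#include <cstdio>
#include <memory>

struct BaseImpl {
  // Without 'virtual' here, deleting a derived object through a BaseImpl*
  // runs only ~BaseImpl(): the derived part is never destroyed, which is
  // undefined behavior and what -Wdelete-non-virtual-dtor style warnings flag.
  virtual ~BaseImpl() = default;
};

struct CellImpl : BaseImpl {
  ~CellImpl() override { std::puts("CellImpl resources released"); }
};

int main() {
  std::unique_ptr<BaseImpl> impl = std::make_unique<CellImpl>();
  // With the virtual destructor, destroying through the base pointer
  // correctly runs ~CellImpl() first, then ~BaseImpl().
  impl.reset();
  return 0;
}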


@@ -32,7 +32,6 @@ namespace ops {
 class RNNCellImpl : public OpImpl {
  public:
   enum {
     // signature
     FULLY_CONNECTED_0_IN = 0,
@@ -49,8 +48,8 @@ class RNNCellImpl : public OpImpl {
     // signature end
   };
-  RNNCellImpl(Graph* graph, int input_cnt,
-              int output_cnt, DataLayout layout = DataLayout::ANY)
+  RNNCellImpl(Graph* graph, int input_cnt, int output_cnt,
+              DataLayout layout = DataLayout::ANY)
       : OpImpl(graph, -1, input_cnt, output_cnt, layout) {
     fc0_ = graph->CreateOperation<tim::vx::ops::FullyConnected>(0, 4);
     fc1_ = graph->CreateOperation<tim::vx::ops::FullyConnected>(0, 4);
@@ -61,8 +60,7 @@ class RNNCellImpl : public OpImpl {
   ~RNNCellImpl() {}
-  RNNCellImpl& BindInput(const std::shared_ptr<Tensor>& tensor) override
-  {
+  RNNCellImpl& BindInput(const std::shared_ptr<Tensor>& tensor) override {
     in_tensors_[input_tensor_index] = tensor;
     if (this->input_tensor_index == INPUT_CNT - 1) {
@@ -75,7 +73,6 @@ class RNNCellImpl : public OpImpl {
     tim::vx::TensorSpec add_spec(tim::vx::DataType::FLOAT32, shape,
                                  tim::vx::TensorAttribute::TRANSIENT);
     auto FC0_tensor = graph_->CreateTensor(FC0_spec);
     auto FC1_tensor = graph_->CreateTensor(FC1_spec);
     auto add_tensor = graph_->CreateTensor(add_spec);
@@ -113,8 +110,10 @@ class RNNCellImpl : public OpImpl {
   vsi_nn_node_t* node() override { return nullptr; }
-  std::vector<std::shared_ptr<Tensor>> InputsTensor() { return inputs_tensor_; }
-  std::vector<std::shared_ptr<Tensor>> OutputsTensor() {
+  std::vector<std::shared_ptr<Tensor>> InputsTensor() override {
+    return inputs_tensor_;
+  }
+  std::vector<std::shared_ptr<Tensor>> OutputsTensor() override {
     return outputs_tensor_;
   }
@@ -129,7 +128,8 @@ class RNNCellImpl : public OpImpl {
   std::array<std::shared_ptr<tim::vx::Tensor>, OUT_CNT> out_tensors_;
 };
-RNNCell::RNNCell(Graph* graph, ActivationType activation) : activation_(activation){
+RNNCell::RNNCell(Graph* graph, ActivationType activation)
+    : activation_(activation) {
   impl_ = std::make_unique<RNNCellImpl>(graph, 0, 0, DataLayout::ANY);
 }