Fix warnings relating to inheritance (#256)
* Remove unnecessary compiler flags
* Refactor CMakeLists.txt
* Tweak CMakeLists.txt for libtim_internal
* Tweak CMakeLists.txt for libtim-vx
* Make TIM_VX_ENABLE_TEST default to OFF
* Eliminate usage of include_directories
* Fix CI unit test
* Fix warnings relating to inheritance
parent eecbe264b6
commit 8e4ab68213
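The inheritance warnings come from the OpImpl hierarchy: the abstract base class had no virtual destructor, and several overriding functions in the derived implementations were not marked override (both are fixed in the hunks below). As a minimal, self-contained sketch of the destructor half of the problem (the class and function names here are illustrative stand-ins, not the real TIM-VX types): deleting a derived object through a base-class pointer is undefined behaviour unless the base destructor is virtual, which is exactly what -Wdelete-non-virtual-dtor / -Wnon-virtual-dtor warn about.

#include <memory>

// Illustrative stand-ins for OpImpl and a concrete implementation.
struct BaseImpl {
  virtual void Run() = 0;
  virtual ~BaseImpl() = default;  // without this, the delete below is undefined behaviour
};

struct ConcreteImpl : BaseImpl {
  void Run() override {}
};

int main() {
  // Ownership through the base-class pointer, the same shape as an op
  // holding its implementation behind the OpImpl interface.
  std::unique_ptr<BaseImpl> impl = std::make_unique<ConcreteImpl>();
  impl->Run();
  return 0;
}  // ConcreteImpl is destroyed correctly only because ~BaseImpl() is virtual.

With the virtual ~OpImpl() = default; added below, destroying a DirectMapOpImpl or RNNCellImpl through an OpImpl pointer is well defined and the warning goes away.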
@@ -29,19 +29,20 @@ namespace tim {
 namespace vx {
 namespace ops {
 
-class RNNCell : public Operation{
+class RNNCell : public Operation {
  public:
   enum ActivationType {
     kNONE = 0,
     kRELU = 1,
     kRELU1 = 2,
     kRELU6 = 3,
     kTANH = 4,
     kSIGMOID = 6,
-    kHARDSIGMOID = 31, /* temporary use 31*/
+    kHARDSIGMOID = 31, /* temporary use 31 */
   };
   RNNCell(Graph* graph, ActivationType activation);
-  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;
+  std::shared_ptr<Operation> Clone(
+      std::shared_ptr<Graph>& graph) const override;
 
  protected:
   const ActivationType activation_;
@@ -24,11 +24,11 @@
 #include "direct_map_op_impl.h"
 #include "type_utils.h"
 
-namespace tim{
-namespace vx{
+namespace tim {
+namespace vx {
 
 DirectMapOpImpl::DirectMapOpImpl(Graph* graph, uint32_t kind, int input_cnt,
                                  int output_cnt, DataLayout layout)
     : OpImpl(graph, kind, input_cnt, output_cnt, layout),
       node_(vsi_nn_AddNode(graph_->graph(), kind_, input_cnt_, output_cnt_,
                            NULL)) {
@@ -36,7 +36,8 @@ DirectMapOpImpl::DirectMapOpImpl(Graph* graph, uint32_t kind, int input_cnt,
   node_->uid = graph_->graph()->cur_nid;
 }
 
-DirectMapOpImpl& DirectMapOpImpl::BindInput(const std::shared_ptr<Tensor>& tensor) {
+DirectMapOpImpl& DirectMapOpImpl::BindInput(
+    const std::shared_ptr<Tensor>& tensor) {
   inputs_tensor_.push_back(tensor);
   uint32_t tensor_id = tensor->GetId();
   node_->input.tensors[input_tensor_index++] = tensor_id;
@@ -59,17 +60,16 @@ DirectMapOpImpl& DirectMapOpImpl::BindOutput(
   return *this;
 }
 
-void DirectMapOpImpl::SetRoundingPolicy(
-    OverflowPolicy overflow_policy,
-    RoundingPolicy rounding_policy,
-    RoundType down_scale_size_rounding,
-    uint32_t accumulator_bits) {
+void DirectMapOpImpl::SetRoundingPolicy(OverflowPolicy overflow_policy,
+                                        RoundingPolicy rounding_policy,
+                                        RoundType down_scale_size_rounding,
+                                        uint32_t accumulator_bits) {
   node_->vx_param.overflow_policy = TranslateOverflowPolicy(overflow_policy);
   node_->vx_param.rounding_policy = TranslateRoundingPolicy(rounding_policy);
   node_->vx_param.down_scale_size_rounding =
       TranslateDownScaleSizeRounding(down_scale_size_rounding);
   node_->vx_param.accumulator_bits = accumulator_bits;
 }
 
-}
-}
+}  // namespace vx
+}  // namespace tim
@@ -24,7 +24,6 @@
 #ifndef TIM_VX_DIRECT_MAP_OP_IMPL_H_
 #define TIM_VX_DIRECT_MAP_OP_IMPL_H_
 
-
 #include "vsi_nn_pub.h"
 #include "graph_private.h"
 
@@ -38,7 +37,7 @@ class DirectMapOpImpl : public OpImpl {
   // DirectMapOpImpl(Graph* graph, uint32_t kind, int input_cnt = 0,
   //                 int output_cnt = 0);
   DirectMapOpImpl(Graph* graph, uint32_t kind, int input_cnt = 0,
                   int output_cnt = 0, DataLayout layout = DataLayout::ANY);
   ~DirectMapOpImpl() {}
 
   DirectMapOpImpl& BindInput(const std::shared_ptr<Tensor>& tensor) override;
@@ -50,12 +49,12 @@ class DirectMapOpImpl : public OpImpl {
       OverflowPolicy overflow_policy = OverflowPolicy::SATURATE,
       RoundingPolicy rounding_policy = RoundingPolicy::RTNE,
       RoundType down_scale_size_rounding = RoundType::FLOOR,
-      uint32_t accumulator_bits =0);
+      uint32_t accumulator_bits = 0);
 
-  std::vector<std::shared_ptr<Tensor>> InputsTensor() {
+  std::vector<std::shared_ptr<Tensor>> InputsTensor() override {
     return inputs_tensor_;
   }
-  std::vector<std::shared_ptr<Tensor>> OutputsTensor() {
+  std::vector<std::shared_ptr<Tensor>> OutputsTensor() override {
     return outputs_tensor_;
   }
 
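The override specifiers added above (and in the RNNCellImpl hunks further down) make the compiler verify that each function really overrides a virtual member of OpImpl, which is what Clang's -Winconsistent-missing-override and GCC's -Wsuggest-override point out. A small sketch with illustrative names rather than the real TIM-VX classes:

#include <vector>

struct Op {
  virtual std::vector<int> InputsTensor() = 0;
  virtual ~Op() = default;
};

struct MyOp : Op {
  // Matches the base signature; 'override' both documents and enforces it.
  std::vector<int> InputsTensor() override { return {}; }

  // A mismatched signature would now be a compile error instead of silently
  // declaring a brand-new, unrelated function, e.g.:
  //   std::vector<int> InputsTensor() const override;  // error: does not override
};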
@@ -23,15 +23,15 @@
 *****************************************************************************/
 #include "op_impl.h"
 
-namespace tim{
-namespace vx{
+namespace tim {
+namespace vx {
 
 OpImpl::OpImpl(Graph* graph, uint32_t kind, int input_cnt, int output_cnt,
                DataLayout layout)
     : graph_(reinterpret_cast<GraphImpl*>(graph)),
       kind_(kind),
       input_cnt_(input_cnt),
       output_cnt_(output_cnt),
       layout_(layout) {}
-}
-}
+}  // namespace vx
+}  // namespace tim
@@ -34,14 +34,15 @@ namespace vx {
 class OpImpl {
  public:
   OpImpl(Graph* graph, uint32_t kind, int input_cnt, int output_cnt,
          DataLayout layout);
+  virtual ~OpImpl() = default;
   virtual OpImpl& BindInput(const std::shared_ptr<Tensor>& tensor) = 0;
   virtual OpImpl& BindOutput(const std::shared_ptr<Tensor>& tensor) = 0;
   virtual std::vector<std::shared_ptr<Tensor>> InputsTensor() = 0;
   virtual std::vector<std::shared_ptr<Tensor>> OutputsTensor() = 0;
   virtual vsi_nn_node_t* node() = 0;
 
-  GraphImpl* graph_;
+  GraphImpl* graph_{nullptr};
   uint32_t kind_{0};
   int32_t input_cnt_{0};
   int32_t output_cnt_{0};
@@ -30,9 +30,8 @@ namespace tim {
 namespace vx {
 namespace ops {
 
-class RNNCellImpl : public OpImpl{
+class RNNCellImpl : public OpImpl {
  public:
-
   enum {
     // signature
     FULLY_CONNECTED_0_IN = 0,
@@ -49,20 +48,19 @@ class RNNCellImpl : public OpImpl{
     // signature end
   };
 
-  RNNCellImpl(Graph* graph, int input_cnt,
-              int output_cnt, DataLayout layout = DataLayout::ANY)
-      : OpImpl(graph, -1, input_cnt, output_cnt, layout){
+  RNNCellImpl(Graph* graph, int input_cnt, int output_cnt,
+              DataLayout layout = DataLayout::ANY)
+      : OpImpl(graph, -1, input_cnt, output_cnt, layout) {
     fc0_ = graph->CreateOperation<tim::vx::ops::FullyConnected>(0, 4);
     fc1_ = graph->CreateOperation<tim::vx::ops::FullyConnected>(0, 4);
     add_ = graph->CreateOperation<tim::vx::ops::Add>();
     tanh_ = graph->CreateOperation<tim::vx::ops::Tanh>();
     data_convert_ = graph->CreateOperation<tim::vx::ops::DataConvert>();
   }
 
   ~RNNCellImpl() {}
 
-  RNNCellImpl& BindInput(const std::shared_ptr<Tensor>& tensor) override
-  {
+  RNNCellImpl& BindInput(const std::shared_ptr<Tensor>& tensor) override {
     in_tensors_[input_tensor_index] = tensor;
 
     if (this->input_tensor_index == INPUT_CNT - 1) {
@@ -75,7 +73,6 @@ class RNNCellImpl : public OpImpl{
     tim::vx::TensorSpec add_spec(tim::vx::DataType::FLOAT32, shape,
                                  tim::vx::TensorAttribute::TRANSIENT);
 
-
     auto FC0_tensor = graph_->CreateTensor(FC0_spec);
     auto FC1_tensor = graph_->CreateTensor(FC1_spec);
     auto add_tensor = graph_->CreateTensor(add_spec);
@@ -99,22 +96,24 @@ class RNNCellImpl : public OpImpl{
     return *this;
   }
 
-  RNNCellImpl& BindOutput(const std::shared_ptr<Tensor>& tensor) override{
+  RNNCellImpl& BindOutput(const std::shared_ptr<Tensor>& tensor) override {
     out_tensors_[output_tensor_index] = tensor;
 
     tanh_->BindOutput(out_tensors_[OUT]);
     data_convert_->BindInput(out_tensors_[OUT]);
-    if (this->output_tensor_index == OUT_CNT - 1){
+    if (this->output_tensor_index == OUT_CNT - 1) {
       data_convert_->BindOutput(out_tensors_[STATE_OUT]);
     }
     this->output_tensor_index++;
     return *this;
   }
 
-  vsi_nn_node_t* node() override{ return nullptr; }
+  vsi_nn_node_t* node() override { return nullptr; }
 
-  std::vector<std::shared_ptr<Tensor>> InputsTensor() { return inputs_tensor_; }
-  std::vector<std::shared_ptr<Tensor>> OutputsTensor() {
+  std::vector<std::shared_ptr<Tensor>> InputsTensor() override {
+    return inputs_tensor_;
+  }
+  std::vector<std::shared_ptr<Tensor>> OutputsTensor() override {
     return outputs_tensor_;
   }
 
@@ -129,8 +128,9 @@ class RNNCellImpl : public OpImpl{
   std::array<std::shared_ptr<tim::vx::Tensor>, OUT_CNT> out_tensors_;
 };
 
-RNNCell::RNNCell(Graph* graph, ActivationType activation) : activation_(activation){
+RNNCell::RNNCell(Graph* graph, ActivationType activation)
+    : activation_(activation) {
   impl_ = std::make_unique<RNNCellImpl>(graph, 0, 0, DataLayout::ANY);
 }
 
 std::shared_ptr<Operation> RNNCell::Clone(std::shared_ptr<Graph>& graph) const {