Fix warnings relating to inheritance (#256)
* Remove unnecessary compiler flags
* Refactor CMakeLists.txt
* Tweak CMakeLists.txt for libtim_internal
* Tweak CMakeLists.txt for libtim-vx
* Make TIM_VX_ENABLE_TEST default to OFF
* Eliminate usage of include_directories
* Fix CI unit test
* Fix warnings relating to inheritance
parent eecbe264b6
commit 8e4ab68213
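
The recurring change in the hunks below is to mark virtual functions in the derived Impl classes with the override specifier and to give the OpImpl base class a virtual destructor, which is what the inheritance-related warnings were about. As a rough orientation, here is a minimal standalone sketch of that pattern; the class names OpBase and OpDerived are illustrative only and are not TIM-VX's actual API:

#include <memory>
#include <vector>

// Base class in the style of OpImpl: a pure virtual interface plus a
// virtual destructor so derived objects can be destroyed through a base pointer.
class OpBase {
 public:
  virtual ~OpBase() = default;
  virtual std::vector<int> InputsTensor() = 0;
};

// Derived class in the style of DirectMapOpImpl / RNNCellImpl: 'override'
// makes the compiler verify that the signature really overrides the base
// declaration, so a silent mismatch becomes a compile error instead of a warning.
class OpDerived : public OpBase {
 public:
  std::vector<int> InputsTensor() override { return {1, 2, 3}; }
};

int main() {
  std::unique_ptr<OpBase> op = std::make_unique<OpDerived>();  // destroyed correctly via the virtual dtor
  return op->InputsTensor().size() == 3 ? 0 : 1;
}

With warnings such as Clang's -Winconsistent-missing-override or GCC's -Wsuggest-override enabled, the missing override specifiers in the original headers are exactly what the compiler reports.
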
@@ -41,7 +41,8 @@ class RNNCell : public Operation{
     kHARDSIGMOID = 31, /* temporary use 31 */
   };
   RNNCell(Graph* graph, ActivationType activation);
-  std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;
+  std::shared_ptr<Operation> Clone(
+      std::shared_ptr<Graph>& graph) const override;
 
  protected:
   const ActivationType activation_;

@@ -36,7 +36,8 @@ DirectMapOpImpl::DirectMapOpImpl(Graph* graph, uint32_t kind, int input_cnt,
   node_->uid = graph_->graph()->cur_nid;
 }
 
-DirectMapOpImpl& DirectMapOpImpl::BindInput(const std::shared_ptr<Tensor>& tensor) {
+DirectMapOpImpl& DirectMapOpImpl::BindInput(
+    const std::shared_ptr<Tensor>& tensor) {
   inputs_tensor_.push_back(tensor);
   uint32_t tensor_id = tensor->GetId();
   node_->input.tensors[input_tensor_index++] = tensor_id;

@@ -59,8 +60,7 @@ DirectMapOpImpl& DirectMapOpImpl::BindOutput(
   return *this;
 }
 
-void DirectMapOpImpl::SetRoundingPolicy(
-    OverflowPolicy overflow_policy,
+void DirectMapOpImpl::SetRoundingPolicy(OverflowPolicy overflow_policy,
                                         RoundingPolicy rounding_policy,
                                         RoundType down_scale_size_rounding,
                                         uint32_t accumulator_bits) {

@@ -71,5 +71,5 @@ void DirectMapOpImpl::SetRoundingPolicy(
   node_->vx_param.accumulator_bits = accumulator_bits;
 }
 
-}
-}
+}  // namespace vx
+}  // namespace tim

@@ -24,7 +24,6 @@
 #ifndef TIM_VX_DIRECT_MAP_OP_IMPL_H_
 #define TIM_VX_DIRECT_MAP_OP_IMPL_H_
 
-
 #include "vsi_nn_pub.h"
 #include "graph_private.h"
 

@@ -52,10 +51,10 @@ class DirectMapOpImpl : public OpImpl {
                          RoundType down_scale_size_rounding = RoundType::FLOOR,
                          uint32_t accumulator_bits = 0);
 
-  std::vector<std::shared_ptr<Tensor>> InputsTensor() {
+  std::vector<std::shared_ptr<Tensor>> InputsTensor() override {
     return inputs_tensor_;
   }
-  std::vector<std::shared_ptr<Tensor>> OutputsTensor() {
+  std::vector<std::shared_ptr<Tensor>> OutputsTensor() override {
     return outputs_tensor_;
   }
 

@@ -33,5 +33,5 @@ OpImpl::OpImpl(Graph* graph, uint32_t kind, int input_cnt, int output_cnt,
       input_cnt_(input_cnt),
       output_cnt_(output_cnt),
       layout_(layout) {}
-}
-}
+}  // namespace vx
+}  // namespace tim

@@ -35,13 +35,14 @@ class OpImpl {
  public:
   OpImpl(Graph* graph, uint32_t kind, int input_cnt, int output_cnt,
          DataLayout layout);
+  virtual ~OpImpl() = default;
   virtual OpImpl& BindInput(const std::shared_ptr<Tensor>& tensor) = 0;
   virtual OpImpl& BindOutput(const std::shared_ptr<Tensor>& tensor) = 0;
   virtual std::vector<std::shared_ptr<Tensor>> InputsTensor() = 0;
   virtual std::vector<std::shared_ptr<Tensor>> OutputsTensor() = 0;
   virtual vsi_nn_node_t* node() = 0;
 
-  GraphImpl* graph_;
+  GraphImpl* graph_{nullptr};
   uint32_t kind_{0};
   int32_t input_cnt_{0};
   int32_t output_cnt_{0};

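This hunk carries the core inheritance fix: OpImpl now declares virtual ~OpImpl() = default;. Without a virtual destructor, destroying a derived object such as DirectMapOpImpl or RNNCellImpl through an OpImpl pointer is undefined behavior, which compilers flag with warnings like -Wdelete-non-virtual-dtor. The GraphImpl* graph_{nullptr} change is a default member initializer so the pointer can never be read uninitialized. A small hedged sketch of the destructor issue, using illustrative names rather than the project's classes:

#include <memory>

struct BaseWithVirtualDtor {
  // The pattern applied to OpImpl: a defaulted virtual destructor makes
  // deleting a derived object through a base pointer well-defined.
  virtual ~BaseWithVirtualDtor() = default;
};

struct DerivedImpl : BaseWithVirtualDtor {
  int payload[16] = {};  // derived state that must be destroyed too
};

int main() {
  // Safe: the virtual destructor dispatches to ~DerivedImpl first.
  // Had the base destructor been non-virtual, this reset/destruction
  // through the base type would be undefined behavior.
  std::unique_ptr<BaseWithVirtualDtor> op = std::make_unique<DerivedImpl>();
  op.reset();
  return 0;
}
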
@@ -32,7 +32,6 @@ namespace ops {
 
 class RNNCellImpl : public OpImpl {
  public:
-
   enum {
     // signature
     FULLY_CONNECTED_0_IN = 0,

@@ -49,8 +48,8 @@ class RNNCellImpl : public OpImpl{
     // signature end
   };
 
-  RNNCellImpl(Graph* graph, int input_cnt,
-              int output_cnt, DataLayout layout = DataLayout::ANY)
+  RNNCellImpl(Graph* graph, int input_cnt, int output_cnt,
+              DataLayout layout = DataLayout::ANY)
       : OpImpl(graph, -1, input_cnt, output_cnt, layout) {
     fc0_ = graph->CreateOperation<tim::vx::ops::FullyConnected>(0, 4);
     fc1_ = graph->CreateOperation<tim::vx::ops::FullyConnected>(0, 4);

@@ -61,8 +60,7 @@ class RNNCellImpl : public OpImpl{
 
   ~RNNCellImpl() {}
 
-  RNNCellImpl& BindInput(const std::shared_ptr<Tensor>& tensor) override
-  {
+  RNNCellImpl& BindInput(const std::shared_ptr<Tensor>& tensor) override {
     in_tensors_[input_tensor_index] = tensor;
 
     if (this->input_tensor_index == INPUT_CNT - 1) {

@@ -75,7 +73,6 @@ class RNNCellImpl : public OpImpl{
     tim::vx::TensorSpec add_spec(tim::vx::DataType::FLOAT32, shape,
                                  tim::vx::TensorAttribute::TRANSIENT);
 
-
     auto FC0_tensor = graph_->CreateTensor(FC0_spec);
     auto FC1_tensor = graph_->CreateTensor(FC1_spec);
     auto add_tensor = graph_->CreateTensor(add_spec);

@@ -113,8 +110,10 @@ class RNNCellImpl : public OpImpl{
 
   vsi_nn_node_t* node() override { return nullptr; }
 
-  std::vector<std::shared_ptr<Tensor>> InputsTensor() { return inputs_tensor_; }
-  std::vector<std::shared_ptr<Tensor>> OutputsTensor() {
+  std::vector<std::shared_ptr<Tensor>> InputsTensor() override {
+    return inputs_tensor_;
+  }
+  std::vector<std::shared_ptr<Tensor>> OutputsTensor() override {
     return outputs_tensor_;
   }
 

@@ -129,7 +128,8 @@ class RNNCellImpl : public OpImpl{
   std::array<std::shared_ptr<tim::vx::Tensor>, OUT_CNT> out_tensors_;
 };
 
-RNNCell::RNNCell(Graph* graph, ActivationType activation) : activation_(activation){
+RNNCell::RNNCell(Graph* graph, ActivationType activation)
+    : activation_(activation) {
   impl_ = std::make_unique<RNNCellImpl>(graph, 0, 0, DataLayout::ANY);
 }
 