Refine the cmake build (#63)
Signed-off-by: xiang.zhang <xiang.zhang@verisilicon.com>
parent 31af329b69
commit 410cd8e516
@@ -6,8 +6,7 @@ OPTION(TIM_VX_ENABLE_LAYOUT_INFER "Enable layout inference support" ON)
 
 set(CMAKE_CXX_STANDARD 14)
 set(CMAKE_POSITION_INDEPENDENT_CODE ON)
-set(CMAKE_C_FLAGS "-Wall -Wextra -Wno-unused-parameter -Wno-sign-compare -Werror -Wno-enum-conversion")
-set(CMAKE_CXX_FLAGS "-Wall -Wextra -Wno-unused-parameter -Wno-sign-compare -Werror")
+set(CMAKE_CXX_FLAGS "-Wall -Wextra -Werror")
 
 if(NOT DEFINED CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
   set(CMAKE_INSTALL_PREFIX "${CMAKE_BINARY_DIR}/install" CACHE PATH "..." FORCE)
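The hunk above drops the blanket -Wno-unused-parameter / -Wno-sign-compare suppressions, so under -Wall -Wextra -Werror an untouched parameter now fails the build; the later hunks fix the affected call sites with explicit (void) casts. A minimal illustration (not TIM-VX code, names are made up for the example):

    // Minimal sketch, assuming g++ or clang++ with -Wall -Wextra -Werror.
    // Without the cast below, 'height' would trigger -Wunused-parameter and,
    // because of -Werror, stop the build.
    int RowStrideBytes(int width, int height) {
      (void)height;       // parameter kept for the interface, deliberately unused
      return width * 4;   // assume 4 bytes per pixel for this example
    }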
@@ -74,7 +74,7 @@ class Quantization {
 
  protected:
   QuantType type_{QuantType::NONE};
-  int32_t channel_dim_;
+  int32_t channel_dim_{-1};
   std::vector<float> scales_;
   std::vector<int32_t> zero_points_;
 };
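Giving channel_dim_ a default member initializer means every constructor starts from a defined value, so a quantization setup that never touches the channel dimension no longer leaves the field indeterminate. A minimal sketch of the idea, using a hypothetical class rather than the TIM-VX one:

    #include <cstdint>
    #include <vector>

    class QuantParams {
     protected:
      int32_t channel_dim_{-1};    // defined even if a constructor never sets it
      std::vector<float> scales_;  // default-constructed empty
    };
    // The -1 sentinel lets later code distinguish per-channel quantization
    // (channel_dim_ >= 0) from per-tensor quantization in this sketch.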
@@ -113,6 +113,7 @@ static void printTopN(const T* prob, int outputCount, int topNum) {
 }
 
 int main(int argc, char** argv) {
+  (void) argc, (void) argv;
   auto context = tim::vx::Context::Create();
   auto graph = context->CreateGraph();
 
@@ -79,6 +79,8 @@ const std::shared_ptr<IPermuteVector> LayoutInferContext::GetPermuteVector(
     VSILOGE("Tensor PermuteVecor has not beed setted.");
+    assert(false);
   }
+
   return nullptr;
 }
 
 void LayoutInferContext::MarkVisited(const std::shared_ptr<vx::Operation>& op) {
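Both lookup helpers in this file now pair the error log with assert(false), so a missing entry aborts debug builds at the failure point, while release builds (where NDEBUG removes the assert) still return the nullptr the caller is expected to check. A generic sketch of the pattern with illustrative names, not the TIM-VX code:

    #include <cassert>
    #include <map>
    #include <memory>

    template <typename K, typename V>
    std::shared_ptr<V> FindOrNull(const std::map<K, std::shared_ptr<V>>& table,
                                  const K& key) {
      auto it = table.find(key);
      if (it != table.end()) {
        return it->second;
      }
      // Log-and-assert: debug builds stop here; release builds fall through
      // and hand the caller a nullptr it must check.
      assert(false);
      return nullptr;
    }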
@@ -125,6 +127,8 @@ std::shared_ptr<vx::Tensor> LayoutInferContext::GetMapedTensor(
     VSILOGE("Tensor has not beed inserted in tensor map.");
+    assert(false);
   }
+
   return nullptr;
 }
 
 void LayoutInferContext::UpdateGraphInputMap(const std::shared_ptr<vx::Tensor>& i_src,
@@ -247,7 +247,10 @@ std::shared_ptr<vx::Tensor> OpLayoutInfer::PermuteConstTensor(
     const std::shared_ptr<IPermuteVector>& pv) {
   std::vector<uint8_t> data;
   bool is_ok = TransposeConstTensorData(input, pv, data);
-  assert(is_ok);
+  if (!is_ok) {
+    assert(is_ok);
+    return nullptr;
+  }
   auto src_shape = input->GetShape();
   auto dst_spec = input->GetSpec();
   vx::ShapeType dst_shape;
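Here a bare assert, which disappears under NDEBUG, becomes an explicit check with an early return, so release builds no longer continue with data that was never transposed. A self-contained sketch of the same control flow; TransposeData and PermuteCopy are stand-ins for the real helpers:

    #include <cassert>
    #include <cstdint>
    #include <memory>
    #include <vector>

    bool TransposeData(const std::vector<uint8_t>& in, std::vector<uint8_t>& out) {
      out = in;            // placeholder body for the sketch
      return !in.empty();  // report failure on empty input
    }

    std::shared_ptr<std::vector<uint8_t>> PermuteCopy(const std::vector<uint8_t>& in) {
      std::vector<uint8_t> data;
      bool is_ok = TransposeData(in, data);
      if (!is_ok) {
        assert(is_ok);   // stop debug builds at the failure point
        return nullptr;  // release builds report the failure to the caller
      }
      return std::make_shared<std::vector<uint8_t>>(std::move(data));
    }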
@@ -3,6 +3,7 @@ message("src/tim/vx/internal")
 set(lib_name "tim_internal")
 set(OVXLIB_API_ATTR "__attribute__\(\(visibility\(\"default\"\)\)\)")
 add_definitions(-DOVXLIB_API=${OVXLIB_API_ATTR})
+add_compile_options(-Wno-strict-aliasing -Wno-unused-but-set-variable -Wno-maybe-uninitialized)
 
 aux_source_directory(src INTERNAL_SRC)
 aux_source_directory(src/kernel INTERNAL_KERNEL)
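Rather than relaxing the project-wide -Wall -Wextra -Werror flags, the bundled ovxlib sources get their own -Wno- options in this directory. The snippet below only illustrates the kind of constructs those options tolerate (type punning and set-but-unread variables), it is not ovxlib code; -Wmaybe-uninitialized is suppressed for similar legacy-code reasons.

    #include <cstdint>

    float BitsToFloat(uint32_t bits) {
      // Type punning through a pointer cast: typically reported by -Wstrict-aliasing
      // (depending on compiler and optimization level).
      return *reinterpret_cast<const float*>(&bits);
    }

    int Consume(int value) {
      int last = 0;   // assigned below but never read again:
      last = value;   // reported by -Wunused-but-set-variable
      return value * 2;
    }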
@@ -41,4 +42,4 @@ include_directories(include)
 include_directories(${OVXDRV_INCLUDE_DIRS})
 
 add_library(${lib_name} ${SRC})
-target_link_libraries(${lib_name} PRIVATE ${OVXDRV_LIBRARIES})
+target_link_libraries(${lib_name} PRIVATE ${OVXDRV_LIBRARIES})
@@ -88,6 +88,7 @@ TensorImpl::TensorImpl(Graph* graph, const TensorSpec& spec, const void* data)
 TensorImpl::~TensorImpl() {}
 
 bool TensorImpl::CopyDataToTensor(const void* data, uint32_t size_in_bytes) {
+  (void)size_in_bytes;
   if (!IsWriteable()) {
     return false;
   }
@@ -60,7 +60,7 @@ class TensorImpl : public Tensor {
 
 class TensorPlaceholder : public Tensor {
  public:
-  TensorPlaceholder(Graph* graph) : id_(VSI_NN_TENSOR_ID_NA) {}
+  TensorPlaceholder(Graph* graph) : id_(VSI_NN_TENSOR_ID_NA) {(void)(graph);}
  ~TensorPlaceholder(){};
 
  const ShapeType& GetShape() { return spec_.shape_; }
@@ -68,8 +68,14 @@ class TensorPlaceholder : public Tensor {
   const Quantization& GetQuantization() { return spec_.quantization_; }
   const TensorSpec& GetSpec() { return spec_; }
   uint32_t GetId() { return id_; };
-  bool CopyDataToTensor(const void* data, uint32_t size = 0) { return false; }
-  bool CopyDataFromTensor(void* data) { return false; }
+  bool CopyDataToTensor(const void* data, uint32_t size = 0) {
+    (void)data, void(size);
+    return false;
+  }
+  bool CopyDataFromTensor(void* data) {
+    (void)data;
+    return false;
+  }
   bool IsPlaceHolder() { return true; }
   bool IsConstTensor() {
     return spec_.attr_ == tim::vx::TensorAttribute::CONSTANT;
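A hedged sketch of the same stub pattern outside TIM-VX. Both the C-style cast `(void)x` and the functional cast `void(x)` discard a value, which is why the hunk above mixes them; C++17 code could use [[maybe_unused]] on the parameters instead, but the project pins CMAKE_CXX_STANDARD to 14.

    struct NullSink {
      bool Write(const void* data, unsigned size = 0) {
        (void)data, (void)size;  // keep the interface, ignore the arguments
        return false;            // a placeholder never accepts data
      }
    };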