add test demo for multi_device (#371)

This commit is contained in:
lucklee 2022-04-29 22:54:03 +08:00 committed by GitHub
parent d108661a03
commit 4bd0ce943b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 5072 additions and 1 deletions

View File

@ -18,4 +18,5 @@ endif()
# Platform-API samples: only built when TIM_VX_ENABLE_PLATFORM is ON.
if(TIM_VX_ENABLE_PLATFORM)
add_subdirectory("lenet_multi_device")
add_subdirectory("multi_device")
endif()

View File

@ -0,0 +1,14 @@
message("samples/multi_device")
# Demo that runs several networks across multiple NPU devices.
set(TARGET_NAME "multi_device")
# main.cc spawns std::thread workers, so the platform thread library is required.
find_package(Threads REQUIRED)
# Pick up every source file in this directory.
aux_source_directory(. ${TARGET_NAME}_SRCS)
add_executable(${TARGET_NAME} ${${TARGET_NAME}_SRCS})
target_link_libraries(${TARGET_NAME} PRIVATE tim-vx Threads::Threads)
target_include_directories(${TARGET_NAME} PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
${PROJECT_SOURCE_DIR}/include
)

View File

@ -0,0 +1,16 @@
## brief
The multi_device demo uses several Acuity-exported TIM-VX networks and runs them on 4 NPU devices using the platform API.
## environment
export VSIMULATOR_CONFIG=VIP9400O_PID0XD9
export VIV_MGPU_AFFINITY="1:0"
export VIV_OVX_USE_MULTI_DEVICE="1:1"
export TIM_VX_ROOT="${workspaceFolder}/tim-vx"
## build
cd build
cmake .. -DCMAKE_BUILD_TYPE=Debug -DTIM_VX_BUILD_EXAMPLES=ON -DTIM_VX_ENABLE_PLATFORM=ON
## run
cd build
./samples/multi_device/multi_device

Binary file not shown.

Binary file not shown.

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,187 @@
/****************************************************************************
*
* Copyright (c) 2020 Vivante Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************/
#include <algorithm>
#include <iomanip>
#include <iostream>
#include <fstream>
#include <cstring>
#include <tuple>
#include <vector>
#include <assert.h>
#include <chrono>
#include <thread>
#include "tim/vx/context.h"
#include "tim/vx/graph.h"
#include "tim/vx/platform/platform.h"
#include "tim/vx/platform/native.h"
#include "vx_lenet.h"
#include "vx_mobilenet.h"
#include "vx_resnet50.h"
// Print the `topNum` highest-scoring entries of `prob` as "index: value",
// in descending score order.
//
// prob        : array of `outputCount` scores.
// outputCount : number of valid entries in `prob`.
// topNum      : how many entries to print; clamped to `outputCount` so a
//               request for more entries than exist cannot read out of range
//               (the original indexed data[i] for i < topNum unconditionally).
template <typename T>
static void printTopN(const T* prob, int outputCount, int topNum) {
  std::vector<std::tuple<int, T>> data;
  data.reserve(outputCount);
  for (int i = 0; i < outputCount; i++) {
    data.push_back(std::make_tuple(i, prob[i]));
  }
  topNum = std::min(topNum, outputCount);  // guard against out-of-range access
  // Only the first `topNum` entries need to be ordered.
  std::partial_sort(data.begin(), data.begin() + topNum, data.end(),
                    [](auto& a, auto& b) { return std::get<1>(a) > std::get<1>(b); });
  std::cout << " --- Top" << topNum << " ---" << std::endl;
  for (int i = 0; i < topNum; i++) {
    std::cout << std::setw(3) << std::get<0>(data[i]) << ": " << std::fixed
              << std::setprecision(6) << std::get<1>(data[i]) << std::endl;
  }
}
// Copy `size` elements of type T out of `handle` and print the top-5 scores.
// On copy failure a diagnostic is printed; the (value-initialized) buffer is
// still passed on, matching the original behavior.
template <typename T>
void print_topN(std::size_t size, std::shared_ptr<tim::vx::platform::ITensorHandle> handle) {
  std::vector<T> buffer(size);
  const bool copied = handle->CopyDataFromTensor(buffer.data());
  if (!copied) {
    std::cout << "Copy output data fail." << std::endl;
  }
  printTopN(buffer.data(), buffer.size(), 5);
}
// Load one binary blob per file name and return the blobs in input order.
//
// filenames        : paths of the raw input files.
// input_size_bytes : number of bytes to keep from each file (parallel to
//                    `filenames`).
//
// Files that cannot be opened are skipped with a diagnostic, so the result
// may contain fewer entries than `filenames` — callers must check.
// Fixes vs. original: the buffer was allocated with `new char[]` but released
// with free() (undefined behavior), and `buffer + input_size_bytes[i]` could
// read past the end of the buffer when the file was smaller than requested.
std::vector<std::vector<char>> load_input_data(std::vector<std::string> filenames, std::vector<uint32_t> input_size_bytes) {
  std::vector<std::vector<char>> Data;
  for (std::size_t i = 0; i < filenames.size(); i++) {
    std::ifstream fin(filenames[i], std::ios::in | std::ios::binary);
    if (!fin) {
      std::cout << "Load file " << filenames[i] << " failed" << std::endl;
      continue;
    }
    fin.seekg(0, std::ios::end);
    const std::streamoff size = fin.tellg();
    fin.seekg(0, std::ios::beg);
    std::cout << "File " << filenames[i] << " size:" << size << std::endl;
    // Keep at most the requested byte count, and never more than the file has.
    const std::size_t want =
        std::min<std::size_t>(static_cast<std::size_t>(size), input_size_bytes[i]);
    std::vector<char> input_data(want);
    fin.read(input_data.data(), static_cast<std::streamsize>(want));
    Data.push_back(std::move(input_data));
  }
  return Data;
}
void executor_trigger(std::shared_ptr<tim::vx::platform::IExecutor> executor) {
executor->Trigger();
}
// Shared TIM-VX context for every graph built in this demo.
// NOTE(review): non-local static with dynamic initialization — fine for a
// single-TU sample, but watch initialization order if this file grows.
auto context = tim::vx::Context::Create();
// Build a graph via `construct_func`, compile it for `executor`'s device,
// allocate and bind input/output tensor handles, and upload the first input
// blob. Returns {executable, output handle}.
// NOTE(review): `input_size_bytes` is declared as tim::vx::ShapeType but is
// passed where load_input_data() takes std::vector<uint32_t> — presumably
// ShapeType is an alias for that; verify.
// NOTE(review): input_data[0] is accessed without checking that
// load_input_data() actually returned a blob; a missing input file would be
// undefined behavior here.
std::pair<std::shared_ptr<tim::vx::platform::IExecutable>, std::shared_ptr<tim::vx::platform::ITensorHandle>> generate_executable(
std::shared_ptr<tim::vx::platform::IExecutor> executor,
std::function<void(std::shared_ptr<tim::vx::Graph>, const char*)> construct_func,
std::string weight_file,
std::vector<std::string> input_files, tim::vx::ShapeType input_size_bytes) {
auto graph = context->CreateGraph();
const char* weight_file_c = weight_file.c_str();
construct_func(graph, weight_file_c);
auto input_data = load_input_data(input_files, input_size_bytes);
auto executable = tim::vx::platform::Compile(graph, executor); // compile to nbg
// Handles are allocated from the specs of the graph's first input/output.
auto input_handle = executable->AllocateTensor(graph->InputsTensor()[0]->GetSpec());
auto output_handle = executable->AllocateTensor(graph->OutputsTensor()[0]->GetSpec());
executable->SetInput(input_handle);
executable->SetOutput(output_handle);
input_handle->CopyDataToTensor(input_data[0].data(), input_data[0].size());
return std::make_pair(executable, output_handle);
}
// Demo entry point: distributes lenet / mobilenet / resnet50 executables over
// four NPU devices, triggers each device's executor on its own thread, then
// prints the top-5 results of every output.
int main(int argc, char** argv) {
  (void) argc, (void) argv;
  auto devices = tim::vx::platform::NativeDevice::Enumerate();
  // Fix: the code below indexes devices[0..3] unconditionally; fail loudly
  // instead of reading past the end on hosts exposing fewer than 4 devices.
  assert(devices.size() >= 4);
  // One executor per device; each executor owns its own submission queue.
  auto device0 = devices[0];
  std::shared_ptr<tim::vx::platform::IExecutor> executor0 = std::make_shared<tim::vx::platform::NativeExecutor> (device0);
  auto device1 = devices[1];
  std::shared_ptr<tim::vx::platform::IExecutor> executor1 = std::make_shared<tim::vx::platform::NativeExecutor> (device1);
  auto device2 = devices[2];
  std::shared_ptr<tim::vx::platform::IExecutor> executor2 = std::make_shared<tim::vx::platform::NativeExecutor> (device2);
  auto device3 = devices[3];
  std::shared_ptr<tim::vx::platform::IExecutor> executor3 = std::make_shared<tim::vx::platform::NativeExecutor> (device3);
  // All model assets are resolved relative to $TIM_VX_ROOT.
  auto root = std::getenv("TIM_VX_ROOT");
  assert(root != NULL);
  std::string ROOT(root);
  // Per-model assets: input blob, byte counts, weight file, graph builder.
  std::vector<std::string> lenet_input_files = {ROOT + "/samples/multi_device/lenet/lenet_input_1_1_28_28_uint8.bin"};
  auto lenet_input_bytes = acuitylite::lenet::input_bytes_list;
  auto lenet_weight_file = ROOT + "/samples/multi_device/lenet/lenet.export.data";
  std::function<void(std::shared_ptr<tim::vx::Graph>, const char*)> lenet_construct_func = acuitylite::lenet::construct_graph;
  std::vector<std::string> mobilenet_input_files = {ROOT + "/samples/multi_device/mobilenet/mobilenet_1_224_224_3_uint8.bin"};
  auto mobilenet_input_bytes = acuitylite::mobilenet::input_bytes_list;
  auto mobilenet_weight_file = ROOT + "/samples/multi_device/mobilenet/mobilenet.export.data";
  std::function<void(std::shared_ptr<tim::vx::Graph>, const char*)> mobilenet_construct_func = acuitylite::mobilenet::construct_graph;
  std::vector<std::string> resnet50_input_files = {ROOT + "/samples/multi_device/resnet50/resnet50_1_3_224_224_uint8.bin"};
  auto resnet50_input_bytes = acuitylite::resnet50::input_bytes_list;
  auto resnet50_weight_file = ROOT + "/samples/multi_device/resnet50/resnet50.export.data";
  std::function<void(std::shared_ptr<tim::vx::Graph>, const char*)> resnet50_construct_func = acuitylite::resnet50::construct_graph;
  std::shared_ptr<tim::vx::platform::IExecutable> lenet_0, lenet_2, lenet_3, mobilenet_1, mobilenet_2, mobilenet_3, resnet50_0, resnet50_1;
  std::shared_ptr<tim::vx::platform::ITensorHandle> lenet_0_outhandle, lenet_2_outhandle, lenet_3_outhandle, mobilenet_1_outhandle, mobilenet_2_outhandle, mobilenet_3_outhandle,
      resnet50_0_outhandle, resnet50_1_outhandle;
  // Device 0: individually submitted executables.
  // NOTE(review): Submit()'s second argument appears to be a reference
  // executable used for ordering — confirm the API contract.
  std::tie(lenet_0, lenet_0_outhandle) = generate_executable(executor0, lenet_construct_func, lenet_weight_file, lenet_input_files, lenet_input_bytes);
  std::tie(resnet50_0, resnet50_0_outhandle) = generate_executable(executor0, resnet50_construct_func, resnet50_weight_file, resnet50_input_files, resnet50_input_bytes);
  executor0->Submit(lenet_0, lenet_0);
  executor0->Submit(resnet50_0, lenet_0);
  // Devices 1-3: executables grouped into executable sets before submission.
  std::tie(mobilenet_1, mobilenet_1_outhandle) = generate_executable(executor1, mobilenet_construct_func, mobilenet_weight_file, mobilenet_input_files, mobilenet_input_bytes);
  std::tie(resnet50_1, resnet50_1_outhandle) = generate_executable(executor1, resnet50_construct_func, resnet50_weight_file, resnet50_input_files, resnet50_input_bytes);
  auto executable_set1 = tim::vx::platform::CreateExecutableSet({mobilenet_1, resnet50_1});
  executor1->Submit(executable_set1, executable_set1);
  std::tie(lenet_2, lenet_2_outhandle) = generate_executable(executor2, lenet_construct_func, lenet_weight_file, lenet_input_files, lenet_input_bytes);
  std::tie(mobilenet_2, mobilenet_2_outhandle) = generate_executable(executor2, mobilenet_construct_func, mobilenet_weight_file, mobilenet_input_files, mobilenet_input_bytes);
  auto executable_set2 = tim::vx::platform::CreateExecutableSet({lenet_2, mobilenet_2});
  executor2->Submit(executable_set2, executable_set2);
  std::tie(lenet_3, lenet_3_outhandle) = generate_executable(executor3, lenet_construct_func, lenet_weight_file, lenet_input_files, lenet_input_bytes);
  std::tie(mobilenet_3, mobilenet_3_outhandle) = generate_executable(executor3, mobilenet_construct_func, mobilenet_weight_file, mobilenet_input_files, mobilenet_input_bytes);
  auto executable_set3 = tim::vx::platform::CreateExecutableSet({lenet_3, mobilenet_3});
  executor3->Submit(executable_set3, executable_set3);
  // Run all four devices concurrently, one trigger thread per executor.
  std::thread t0(executor_trigger, executor0);
  std::thread t1(executor_trigger, executor1);
  std::thread t2(executor_trigger, executor2);
  std::thread t3(executor_trigger, executor3);
  t0.join();
  t1.join();
  t2.join();
  t3.join();
  // Element counts match each model's output tensor (10 / 1001 / 1000).
  print_topN<float>(1 * 10, lenet_0_outhandle);
  print_topN<float>(1 * 10, lenet_2_outhandle);
  print_topN<float>(1 * 10, lenet_3_outhandle);
  print_topN<float>(1 * 1001, mobilenet_1_outhandle);
  print_topN<float>(1 * 1001, mobilenet_2_outhandle);
  print_topN<float>(1 * 1001, mobilenet_3_outhandle);
  print_topN<uint16_t>(1 * 1000, resnet50_0_outhandle);
  print_topN<uint16_t>(1 * 1000, resnet50_1_outhandle);
  return 0;
}

View File

@ -1,3 +1,26 @@
/****************************************************************************
*
* Copyright (c) 2020 Vivante Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************/
#include <algorithm>
#include <iostream>
#include <vector>
@ -11,7 +34,7 @@
static void printTopN() {
}
int main(int argc, char** argv) {
int demo(int argc, char** argv) {
(void) argc, (void) argv;
std::vector<uint8_t> input_data = {};
auto context = tim::vx::Context::Create();

Binary file not shown.

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,235 @@
/****************************************************************************
* Generated by ACUITY 6.6.0
* Match timvx 1.1.30
*
* Neural Network application network definition source file
****************************************************************************/
#include "vx_lenet.h"
#include <iostream>
#include <fstream>
#include <vector>
namespace
{
// Read the entire file into a malloc()'d buffer and return it; the caller
// owns the buffer and releases it with free(). Returns NULL (after printing
// a diagnostic) when the file cannot be opened, the allocation fails, or the
// read is short.
// Fix: the buffer was allocated with `new char[]` while construct_graph()
// releases it with free(), which is undefined behavior; allocate with
// std::malloc so allocator and deallocator match.
char *get_const_data(const char *data_file_name)
{
    std::ifstream fin(data_file_name, std::ios::in | std::ios::binary);
    if (!fin)
    {
        std::cout<<"Load file "<<data_file_name <<" failed"<<std::endl;
        return NULL;
    }
    fin.seekg(0, std::ios::end);
    std::streamoff size = fin.tellg();
    fin.seekg(0, std::ios::beg);
    char *buffer = static_cast<char *>(std::malloc(static_cast<std::size_t>(size)));
    if (buffer == NULL)
    {
        std::cout<<"Alloc "<<size<<" bytes for "<<data_file_name<<" failed"<<std::endl;
        return NULL;
    }
    std::cout<<"File "<<data_file_name <<" size:"<<size<<std::endl;
    fin.read(buffer, size);
    if (!fin)
    {
        std::cout<<"Read file "<<data_file_name<<" failed"<<std::endl;
        std::free(buffer);
        return NULL;
    }
    fin.close();
    return buffer;
}
} // namespace
namespace acuitylite
{
// Static model metadata: tensor shapes exactly as passed to CreateTensor
// below, plus the raw byte count of each input blob.
std::vector<std::vector<uint32_t>> lenet::input_size_list = {{28 , 28 , 1 , 1}};
std::vector<uint32_t> lenet::input_bytes_list = {28 * 28 * 1 * 1 * sizeof(input_0_type)};
std::vector<std::vector<uint32_t>> lenet::output_size_list = {{10 , 1}};
// Filled in by construct_graph() with the graph's I/O tensors.
std::vector<std::shared_ptr<tim::vx::Tensor>> lenet::inputs_tensor;
std::vector<std::shared_ptr<tim::vx::Tensor>> lenet::outputs_tensor;
// Build the quantized LeNet graph (conv/pool x2, FC, relu, FC, softmax)
// into `graph`, loading all constant tensors from the weight blob at
// `data_file_name`. Generated by ACUITY — the coef_data_ptr byte offsets
// are fixed by the exported data layout; do not edit them by hand.
void lenet::construct_graph
(
std::shared_ptr<tim::vx::Graph> graph,
const char *data_file_name
)
{
char *coef_data_ptr = get_const_data(data_file_name);
// --- intermediate (TRANSIENT) and constant (weight/bias) tensors ---
tim::vx::Quantization convolution_1_out0_quant(tim::vx::QuantType::ASYMMETRIC, 5.209146976470947, 131);
tim::vx::TensorSpec convolution_1_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_1_out0_quant);
auto convolution_1_out0 = graph->CreateTensor(convolution_1_out0_spec);
tim::vx::ShapeType convolution_1_weight_shape({5,5,1,20});
tim::vx::Quantization convolution_1_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.0033623368944972754, 119);
tim::vx::TensorSpec convolution_1_weight_spec(tim::vx::DataType::UINT8, convolution_1_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_1_weight_quant);
auto convolution_1_weight = graph->CreateTensor(convolution_1_weight_spec, coef_data_ptr + 80);
tim::vx::ShapeType convolution_1_bias_shape({20});
tim::vx::Quantization convolution_1_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.0033623368944972754, 0);
tim::vx::TensorSpec convolution_1_bias_spec(tim::vx::DataType::INT32, convolution_1_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_1_bias_quant);
auto convolution_1_bias = graph->CreateTensor(convolution_1_bias_spec, coef_data_ptr + 0);
tim::vx::Quantization pooling_2_out0_quant(tim::vx::QuantType::ASYMMETRIC, 5.209146976470947, 131);
tim::vx::TensorSpec pooling_2_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, pooling_2_out0_quant);
auto pooling_2_out0 = graph->CreateTensor(pooling_2_out0_spec);
tim::vx::Quantization convolution_3_out0_quant(tim::vx::QuantType::ASYMMETRIC, 10.594023704528809, 145);
tim::vx::TensorSpec convolution_3_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_3_out0_quant);
auto convolution_3_out0 = graph->CreateTensor(convolution_3_out0_spec);
tim::vx::ShapeType convolution_3_weight_shape({5,5,20,50});
tim::vx::Quantization convolution_3_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.0011482049012556672, 128);
tim::vx::TensorSpec convolution_3_weight_spec(tim::vx::DataType::UINT8, convolution_3_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_3_weight_quant);
auto convolution_3_weight = graph->CreateTensor(convolution_3_weight_spec, coef_data_ptr + 780);
tim::vx::ShapeType convolution_3_bias_shape({50});
tim::vx::Quantization convolution_3_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.005981168244034052, 0);
tim::vx::TensorSpec convolution_3_bias_spec(tim::vx::DataType::INT32, convolution_3_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_3_bias_quant);
auto convolution_3_bias = graph->CreateTensor(convolution_3_bias_spec, coef_data_ptr + 580);
tim::vx::Quantization pooling_4_out0_quant(tim::vx::QuantType::ASYMMETRIC, 10.594023704528809, 145);
tim::vx::TensorSpec pooling_4_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, pooling_4_out0_quant);
auto pooling_4_out0 = graph->CreateTensor(pooling_4_out0_spec);
tim::vx::Quantization fullconnect_5_out0_quant(tim::vx::QuantType::ASYMMETRIC, 4.961546421051025, 0);
tim::vx::TensorSpec fullconnect_5_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, fullconnect_5_out0_quant);
auto fullconnect_5_out0 = graph->CreateTensor(fullconnect_5_out0_spec);
tim::vx::ShapeType fullconnect_5_weight_shape({800,500});
tim::vx::Quantization fullconnect_5_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.0007354848785325885, 130);
tim::vx::TensorSpec fullconnect_5_weight_spec(tim::vx::DataType::UINT8, fullconnect_5_weight_shape,
tim::vx::TensorAttribute::CONSTANT, fullconnect_5_weight_quant);
auto fullconnect_5_weight = graph->CreateTensor(fullconnect_5_weight_spec, coef_data_ptr + 27780);
tim::vx::ShapeType fullconnect_5_bias_shape({500});
tim::vx::Quantization fullconnect_5_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.007791744079440832, 0);
tim::vx::TensorSpec fullconnect_5_bias_spec(tim::vx::DataType::INT32, fullconnect_5_bias_shape,
tim::vx::TensorAttribute::CONSTANT, fullconnect_5_bias_quant);
auto fullconnect_5_bias = graph->CreateTensor(fullconnect_5_bias_spec, coef_data_ptr + 25780);
tim::vx::Quantization relu_6_out0_quant(tim::vx::QuantType::ASYMMETRIC, 4.961546421051025, 0);
tim::vx::TensorSpec relu_6_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, relu_6_out0_quant);
auto relu_6_out0 = graph->CreateTensor(relu_6_out0_spec);
tim::vx::Quantization fullconnect_7_out0_quant(tim::vx::QuantType::ASYMMETRIC, 16.404624938964844, 81);
tim::vx::TensorSpec fullconnect_7_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, fullconnect_7_out0_quant);
auto fullconnect_7_out0 = graph->CreateTensor(fullconnect_7_out0_spec);
tim::vx::ShapeType fullconnect_7_weight_shape({500,10});
tim::vx::Quantization fullconnect_7_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.0015804264694452286, 135);
tim::vx::TensorSpec fullconnect_7_weight_spec(tim::vx::DataType::UINT8, fullconnect_7_weight_shape,
tim::vx::TensorAttribute::CONSTANT, fullconnect_7_weight_quant);
auto fullconnect_7_weight = graph->CreateTensor(fullconnect_7_weight_spec, coef_data_ptr + 427820);
tim::vx::ShapeType fullconnect_7_bias_shape({10});
tim::vx::Quantization fullconnect_7_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.00784135889261961, 0);
tim::vx::TensorSpec fullconnect_7_bias_spec(tim::vx::DataType::INT32, fullconnect_7_bias_shape,
tim::vx::TensorAttribute::CONSTANT, fullconnect_7_bias_quant);
auto fullconnect_7_bias = graph->CreateTensor(fullconnect_7_bias_spec, coef_data_ptr + 427780);
// --- graph I/O tensors ---
tim::vx::ShapeType input_0_shape({28,28,1,1});
tim::vx::Quantization input_0_quant(tim::vx::QuantType::ASYMMETRIC, 1.0, 0);
tim::vx::TensorSpec input_0_spec(tim::vx::DataType::UINT8, input_0_shape,
tim::vx::TensorAttribute::INPUT, input_0_quant);
auto input_0 = graph->CreateTensor(input_0_spec);
tim::vx::ShapeType output_9_shape({10,1});
tim::vx::TensorSpec output_9_spec(tim::vx::DataType::FLOAT32, output_9_shape,
tim::vx::TensorAttribute::OUTPUT);
auto output_9 = graph->CreateTensor(output_9_spec);
lenet::inputs_tensor.push_back(input_0);
lenet::outputs_tensor.push_back(output_9);
// --- operations ---
auto convolution_1 = graph->CreateOperation <tim::vx::ops::Conv2d>(
20, // weights
tim::vx::PadType::NONE, // padding
std::array<uint32_t, 2>({5,5}), // ksize
std::array<uint32_t, 2>({1,1}), // stride
std::array<uint32_t, 2>({1,1}), // dilation
std::array<uint32_t, 4>({0,0,0,0}), // pad
0); // multiplier
auto pooling_2 = graph->CreateOperation <tim::vx::ops::Pool2d>(
tim::vx::PoolType::MAX, // type
std::array<uint32_t, 4>({0,0,0,0}), // pad
std::array<uint32_t, 2>({2,2}), // ksize
std::array<uint32_t, 2>({2,2}), // stride
tim::vx::RoundType::CEILING); // round_type
auto convolution_3 = graph->CreateOperation <tim::vx::ops::Conv2d>(
50, // weights
tim::vx::PadType::NONE, // padding
std::array<uint32_t, 2>({5,5}), // ksize
std::array<uint32_t, 2>({1,1}), // stride
std::array<uint32_t, 2>({1,1}), // dilation
std::array<uint32_t, 4>({0,0,0,0}), // pad
0); // multiplier
auto pooling_4 = graph->CreateOperation <tim::vx::ops::Pool2d>(
tim::vx::PoolType::MAX, // type
std::array<uint32_t, 4>({0,0,0,0}), // pad
std::array<uint32_t, 2>({2,2}), // ksize
std::array<uint32_t, 2>({2,2}), // stride
tim::vx::RoundType::CEILING); // round_type
auto fullconnect_5 = graph->CreateOperation <tim::vx::ops::FullyConnected>(
2, // axis
500); // weights
auto relu_6 = graph->CreateOperation <tim::vx::ops::Relu>();
auto fullconnect_7 = graph->CreateOperation <tim::vx::ops::FullyConnected>(
0, // axis
10); // weights
auto softmax_8 = graph->CreateOperation <tim::vx::ops::Softmax>(
1.0, // beta
0); // axis
// --- wiring: bind each op's inputs/outputs in topological order ---
(*convolution_1)
.BindInputs({input_0, convolution_1_weight, convolution_1_bias})
.BindOutputs({convolution_1_out0});
(*pooling_2)
.BindInputs({convolution_1_out0})
.BindOutputs({pooling_2_out0});
(*convolution_3)
.BindInputs({pooling_2_out0, convolution_3_weight, convolution_3_bias})
.BindOutputs({convolution_3_out0});
(*pooling_4)
.BindInputs({convolution_3_out0})
.BindOutputs({pooling_4_out0});
(*fullconnect_5)
.BindInputs({pooling_4_out0, fullconnect_5_weight, fullconnect_5_bias})
.BindOutputs({fullconnect_5_out0});
(*relu_6)
.BindInputs({fullconnect_5_out0})
.BindOutputs({relu_6_out0});
(*fullconnect_7)
.BindInputs({relu_6_out0, fullconnect_7_weight, fullconnect_7_bias})
.BindOutputs({fullconnect_7_out0});
(*softmax_8)
.BindInputs({fullconnect_7_out0})
.BindOutputs({output_9});
// Presumably CreateTensor copied the constant data, so the blob can be
// released here — confirm against the TIM-VX tensor API.
// NOTE(review): this free() must match the allocator used in
// get_const_data(); verify they agree (a new[] allocation would require
// delete[] instead).
free(coef_data_ptr);
}
} // namespace acuitylite

View File

@ -0,0 +1,34 @@
/****************************************************************************
* Generated by ACUITY 6.6.0
* Match timvx 1.1.30
*
* Neural Network application network definition header file
****************************************************************************/
#ifndef _VX_LENET_H
#define _VX_LENET_H
#include "tim/vx/operation.h"
#include "tim/vx/tensor.h"
#include "tim/vx/graph.h"
#include "tim/vx/ops.h"
namespace acuitylite
{
// Generated wrapper for the quantized LeNet graph. All members are static:
// the class is used as a namespace and never instantiated.
class lenet
{
public:
// Element type of input tensor 0 (UINT8, asymmetric quantization — see
// construct_graph() in vx_lenet.cc).
using input_0_type = uint8_t;
// NOTE(review): construct_graph() creates the output tensor as FLOAT32;
// confirm this uint16_t alias is intentional before relying on it.
using output_0_type = uint16_t;
// Tensor shapes and per-input byte counts; defined in vx_lenet.cc.
static std::vector<std::vector<uint32_t>> input_size_list;
static std::vector<uint32_t> input_bytes_list;
static std::vector<std::vector<uint32_t>> output_size_list;
// Populated by construct_graph() with the graph's I/O tensors.
static std::vector<std::shared_ptr<tim::vx::Tensor>> inputs_tensor;
static std::vector<std::shared_ptr<tim::vx::Tensor>> outputs_tensor;
// Builds the LeNet graph into `graph`, loading constant data from the
// weight blob at `data_file_name`.
static void construct_graph(std::shared_ptr<tim::vx::Graph> graph, const char *data_file_name);
};
} // namespace acuitylite
#endif

View File

@ -0,0 +1,977 @@
/****************************************************************************
* Generated by ACUITY 6.6.0
* Match timvx 1.1.30
*
* Neural Network application network definition source file
****************************************************************************/
#include "vx_mobilenet.h"
#include <iostream>
#include <fstream>
#include <vector>
namespace
{
// Read the entire file into a malloc()'d buffer and return it; the caller
// owns the buffer and releases it with free(). Returns NULL (after printing
// a diagnostic) when the file cannot be opened, the allocation fails, or the
// read is short.
// Fix: the buffer was allocated with `new char[]` while the caller releases
// it with free(), which is undefined behavior; allocate with std::malloc so
// allocator and deallocator match.
char *get_const_data(const char *data_file_name)
{
    std::ifstream fin(data_file_name, std::ios::in | std::ios::binary);
    if (!fin)
    {
        std::cout<<"Load file "<<data_file_name <<" failed"<<std::endl;
        return NULL;
    }
    fin.seekg(0, std::ios::end);
    std::streamoff size = fin.tellg();
    fin.seekg(0, std::ios::beg);
    char *buffer = static_cast<char *>(std::malloc(static_cast<std::size_t>(size)));
    if (buffer == NULL)
    {
        std::cout<<"Alloc "<<size<<" bytes for "<<data_file_name<<" failed"<<std::endl;
        return NULL;
    }
    std::cout<<"File "<<data_file_name <<" size:"<<size<<std::endl;
    fin.read(buffer, size);
    if (!fin)
    {
        std::cout<<"Read file "<<data_file_name<<" failed"<<std::endl;
        std::free(buffer);
        return NULL;
    }
    fin.close();
    return buffer;
}
} // namespace
namespace acuitylite
{
// Static model metadata for the generated MobileNet graph: tensor shapes as
// used by construct_graph() and the raw byte count of each input blob.
std::vector<std::vector<uint32_t>> mobilenet::input_size_list = {{3 , 224 , 224 , 1}};
std::vector<uint32_t> mobilenet::input_bytes_list = {3 * 224 * 224 * 1 * sizeof(input_0_type)};
std::vector<std::vector<uint32_t>> mobilenet::output_size_list = {{1001 , 1}};
// Populated by construct_graph() with the graph's I/O tensors.
std::vector<std::shared_ptr<tim::vx::Tensor>> mobilenet::inputs_tensor;
std::vector<std::shared_ptr<tim::vx::Tensor>> mobilenet::outputs_tensor;
void mobilenet::construct_graph
(
std::shared_ptr<tim::vx::Graph> graph,
const char *data_file_name
)
{
char *coef_data_ptr = get_const_data(data_file_name);
tim::vx::Quantization permute_33_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.0078125, 128);
tim::vx::TensorSpec permute_33_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, permute_33_out0_quant);
auto permute_33_out0 = graph->CreateTensor(permute_33_out0_spec);
tim::vx::Quantization convolution_1_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_1_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_1_out0_quant);
auto convolution_1_out0 = graph->CreateTensor(convolution_1_out0_spec);
tim::vx::ShapeType convolution_1_weight_shape({3,3,3,32});
tim::vx::Quantization convolution_1_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.02182667888700962, 151);
tim::vx::TensorSpec convolution_1_weight_spec(tim::vx::DataType::UINT8, convolution_1_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_1_weight_quant);
auto convolution_1_weight = graph->CreateTensor(convolution_1_weight_spec, coef_data_ptr + 1029156);
tim::vx::ShapeType convolution_1_bias_shape({32});
tim::vx::Quantization convolution_1_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.00017052092880476266, 0);
tim::vx::TensorSpec convolution_1_bias_spec(tim::vx::DataType::INT32, convolution_1_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_1_bias_quant);
auto convolution_1_bias = graph->CreateTensor(convolution_1_bias_spec, coef_data_ptr + 1029028);
tim::vx::Quantization convolution_2_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_2_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_2_out0_quant);
auto convolution_2_out0 = graph->CreateTensor(convolution_2_out0_spec);
tim::vx::ShapeType convolution_2_weight_shape({3,3,32,1});
tim::vx::Quantization convolution_2_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.29219913482666016, 110);
tim::vx::TensorSpec convolution_2_weight_spec(tim::vx::DataType::UINT8, convolution_2_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_2_weight_quant);
auto convolution_2_weight = graph->CreateTensor(convolution_2_weight_spec, coef_data_ptr + 3172868);
tim::vx::ShapeType convolution_2_bias_shape({32});
tim::vx::Quantization convolution_2_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.006875000894069672, 0);
tim::vx::TensorSpec convolution_2_bias_spec(tim::vx::DataType::INT32, convolution_2_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_2_bias_quant);
auto convolution_2_bias = graph->CreateTensor(convolution_2_bias_spec, coef_data_ptr + 3172740);
tim::vx::Quantization convolution_3_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_3_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_3_out0_quant);
auto convolution_3_out0 = graph->CreateTensor(convolution_3_out0_spec);
tim::vx::ShapeType convolution_3_weight_shape({1,1,32,64});
tim::vx::Quantization convolution_3_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.030420949682593346, 121);
tim::vx::TensorSpec convolution_3_weight_spec(tim::vx::DataType::UINT8, convolution_3_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_3_weight_quant);
auto convolution_3_weight = graph->CreateTensor(convolution_3_weight_spec, coef_data_ptr + 3173412);
tim::vx::ShapeType convolution_3_bias_shape({64});
tim::vx::Quantization convolution_3_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.0007157585932873189, 0);
tim::vx::TensorSpec convolution_3_bias_spec(tim::vx::DataType::INT32, convolution_3_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_3_bias_quant);
auto convolution_3_bias = graph->CreateTensor(convolution_3_bias_spec, coef_data_ptr + 3173156);
tim::vx::Quantization convolution_4_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_4_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_4_out0_quant);
auto convolution_4_out0 = graph->CreateTensor(convolution_4_out0_spec);
tim::vx::ShapeType convolution_4_weight_shape({3,3,64,1});
tim::vx::Quantization convolution_4_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.40277284383773804, 130);
tim::vx::TensorSpec convolution_4_weight_spec(tim::vx::DataType::UINT8, convolution_4_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_4_weight_quant);
auto convolution_4_weight = graph->CreateTensor(convolution_4_weight_spec, coef_data_ptr + 3175716);
tim::vx::ShapeType convolution_4_bias_shape({64});
tim::vx::Quantization convolution_4_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.009476631879806519, 0);
tim::vx::TensorSpec convolution_4_bias_spec(tim::vx::DataType::INT32, convolution_4_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_4_bias_quant);
auto convolution_4_bias = graph->CreateTensor(convolution_4_bias_spec, coef_data_ptr + 3175460);
tim::vx::Quantization convolution_5_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_5_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_5_out0_quant);
auto convolution_5_out0 = graph->CreateTensor(convolution_5_out0_spec);
tim::vx::ShapeType convolution_5_weight_shape({1,1,64,128});
tim::vx::Quantization convolution_5_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.015148180536925793, 104);
// ==== Tensor definitions (auto-generated acuity export) ====
// Repeating pattern for every convolution layer k:
//   * convolution_k_out0  : TRANSIENT UINT8 activation tensor; shape {} is left
//                           empty so the runtime infers it. Asymmetric
//                           quantization is given as (scale, zero_point).
//   * convolution_k_weight: CONSTANT UINT8 tensor whose raw data is read from
//                           the coefficient blob at coef_data_ptr + <offset>.
//                           Shapes {3,3,C,1} are 3x3 depthwise kernels;
//                           {1,1,Cin,Cout} are 1x1 pointwise kernels.
//   * convolution_k_bias  : CONSTANT INT32 tensor, also read from the blob.
// NOTE(review): the blob byte offsets are not monotonic across layers (e.g.
// convolution_20 jumps back to 1032068, the classifier sits at offset 0/4004);
// assumed to match the exporter's coefficient-file layout — verify against the
// generator before editing any offset.
tim::vx::TensorSpec convolution_5_weight_spec(tim::vx::DataType::UINT8, convolution_5_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_5_weight_quant);
auto convolution_5_weight = graph->CreateTensor(convolution_5_weight_spec, coef_data_ptr + 3176804);
tim::vx::ShapeType convolution_5_bias_shape({128});
tim::vx::Quantization convolution_5_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.00035641362774185836, 0);
tim::vx::TensorSpec convolution_5_bias_spec(tim::vx::DataType::INT32, convolution_5_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_5_bias_quant);
auto convolution_5_bias = graph->CreateTensor(convolution_5_bias_spec, coef_data_ptr + 3176292);
tim::vx::Quantization convolution_6_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_6_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_6_out0_quant);
auto convolution_6_out0 = graph->CreateTensor(convolution_6_out0_spec);
tim::vx::ShapeType convolution_6_weight_shape({3,3,128,1});
tim::vx::Quantization convolution_6_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.06053730100393295, 160);
tim::vx::TensorSpec convolution_6_weight_spec(tim::vx::DataType::UINT8, convolution_6_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_6_weight_quant);
auto convolution_6_weight = graph->CreateTensor(convolution_6_weight_spec, coef_data_ptr + 3185508);
tim::vx::ShapeType convolution_6_bias_shape({128});
tim::vx::Quantization convolution_6_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.00142435054294765, 0);
tim::vx::TensorSpec convolution_6_bias_spec(tim::vx::DataType::INT32, convolution_6_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_6_bias_quant);
auto convolution_6_bias = graph->CreateTensor(convolution_6_bias_spec, coef_data_ptr + 3184996);
tim::vx::Quantization convolution_7_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_7_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_7_out0_quant);
auto convolution_7_out0 = graph->CreateTensor(convolution_7_out0_spec);
tim::vx::ShapeType convolution_7_weight_shape({1,1,128,128});
tim::vx::Quantization convolution_7_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.013755458407104015, 94);
tim::vx::TensorSpec convolution_7_weight_spec(tim::vx::DataType::UINT8, convolution_7_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_7_weight_quant);
auto convolution_7_weight = graph->CreateTensor(convolution_7_weight_spec, coef_data_ptr + 3187172);
tim::vx::ShapeType convolution_7_bias_shape({128});
tim::vx::Quantization convolution_7_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.00032364498474635184, 0);
tim::vx::TensorSpec convolution_7_bias_spec(tim::vx::DataType::INT32, convolution_7_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_7_bias_quant);
auto convolution_7_bias = graph->CreateTensor(convolution_7_bias_spec, coef_data_ptr + 3186660);
tim::vx::Quantization convolution_8_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_8_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_8_out0_quant);
auto convolution_8_out0 = graph->CreateTensor(convolution_8_out0_spec);
tim::vx::ShapeType convolution_8_weight_shape({3,3,128,1});
tim::vx::Quantization convolution_8_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.01675807684659958, 123);
tim::vx::TensorSpec convolution_8_weight_spec(tim::vx::DataType::UINT8, convolution_8_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_8_weight_quant);
auto convolution_8_weight = graph->CreateTensor(convolution_8_weight_spec, coef_data_ptr + 3204068);
tim::vx::ShapeType convolution_8_bias_shape({128});
tim::vx::Quantization convolution_8_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.0003942920302506536, 0);
tim::vx::TensorSpec convolution_8_bias_spec(tim::vx::DataType::INT32, convolution_8_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_8_bias_quant);
auto convolution_8_bias = graph->CreateTensor(convolution_8_bias_spec, coef_data_ptr + 3203556);
tim::vx::Quantization convolution_9_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_9_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_9_out0_quant);
auto convolution_9_out0 = graph->CreateTensor(convolution_9_out0_spec);
tim::vx::ShapeType convolution_9_weight_shape({1,1,128,256});
tim::vx::Quantization convolution_9_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.007601846940815449, 151);
tim::vx::TensorSpec convolution_9_weight_spec(tim::vx::DataType::UINT8, convolution_9_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_9_weight_quant);
auto convolution_9_weight = graph->CreateTensor(convolution_9_weight_spec, coef_data_ptr + 3206244);
tim::vx::ShapeType convolution_9_bias_shape({256});
tim::vx::Quantization convolution_9_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.00017885988927446306, 0);
tim::vx::TensorSpec convolution_9_bias_spec(tim::vx::DataType::INT32, convolution_9_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_9_bias_quant);
auto convolution_9_bias = graph->CreateTensor(convolution_9_bias_spec, coef_data_ptr + 3205220);
tim::vx::Quantization convolution_10_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_10_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_10_out0_quant);
auto convolution_10_out0 = graph->CreateTensor(convolution_10_out0_spec);
tim::vx::ShapeType convolution_10_weight_shape({3,3,256,1});
tim::vx::Quantization convolution_10_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.04105526953935623, 129);
tim::vx::TensorSpec convolution_10_weight_spec(tim::vx::DataType::UINT8, convolution_10_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_10_weight_quant);
auto convolution_10_weight = graph->CreateTensor(convolution_10_weight_spec, coef_data_ptr + 3240036);
tim::vx::ShapeType convolution_10_bias_shape({256});
tim::vx::Quantization convolution_10_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.0009659679490141571, 0);
tim::vx::TensorSpec convolution_10_bias_spec(tim::vx::DataType::INT32, convolution_10_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_10_bias_quant);
auto convolution_10_bias = graph->CreateTensor(convolution_10_bias_spec, coef_data_ptr + 3239012);
tim::vx::Quantization convolution_11_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_11_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_11_out0_quant);
auto convolution_11_out0 = graph->CreateTensor(convolution_11_out0_spec);
tim::vx::ShapeType convolution_11_weight_shape({1,1,256,256});
tim::vx::Quantization convolution_11_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.006431614048779011, 122);
tim::vx::TensorSpec convolution_11_weight_spec(tim::vx::DataType::UINT8, convolution_11_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_11_weight_quant);
auto convolution_11_weight = graph->CreateTensor(convolution_11_weight_spec, coef_data_ptr + 3243364);
tim::vx::ShapeType convolution_11_bias_shape({256});
tim::vx::Quantization convolution_11_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.00015132607950363308, 0);
tim::vx::TensorSpec convolution_11_bias_spec(tim::vx::DataType::INT32, convolution_11_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_11_bias_quant);
auto convolution_11_bias = graph->CreateTensor(convolution_11_bias_spec, coef_data_ptr + 3242340);
tim::vx::Quantization convolution_12_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_12_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_12_out0_quant);
auto convolution_12_out0 = graph->CreateTensor(convolution_12_out0_spec);
tim::vx::ShapeType convolution_12_weight_shape({3,3,256,1});
tim::vx::Quantization convolution_12_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.013460792601108551, 122);
tim::vx::TensorSpec convolution_12_weight_spec(tim::vx::DataType::UINT8, convolution_12_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_12_weight_quant);
auto convolution_12_weight = graph->CreateTensor(convolution_12_weight_spec, coef_data_ptr + 3309924);
tim::vx::ShapeType convolution_12_bias_shape({256});
tim::vx::Quantization convolution_12_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.0003167119575664401, 0);
tim::vx::TensorSpec convolution_12_bias_spec(tim::vx::DataType::INT32, convolution_12_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_12_bias_quant);
auto convolution_12_bias = graph->CreateTensor(convolution_12_bias_spec, coef_data_ptr + 3308900);
tim::vx::Quantization convolution_13_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_13_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_13_out0_quant);
auto convolution_13_out0 = graph->CreateTensor(convolution_13_out0_spec);
tim::vx::ShapeType convolution_13_weight_shape({1,1,256,512});
tim::vx::Quantization convolution_13_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.00917122047394514, 109);
tim::vx::TensorSpec convolution_13_weight_spec(tim::vx::DataType::UINT8, convolution_13_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_13_weight_quant);
auto convolution_13_weight = graph->CreateTensor(convolution_13_weight_spec, coef_data_ptr + 3314276);
tim::vx::ShapeType convolution_13_bias_shape({512});
tim::vx::Quantization convolution_13_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.00021578485029749572, 0);
tim::vx::TensorSpec convolution_13_bias_spec(tim::vx::DataType::INT32, convolution_13_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_13_bias_quant);
auto convolution_13_bias = graph->CreateTensor(convolution_13_bias_spec, coef_data_ptr + 3312228);
tim::vx::Quantization convolution_14_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_14_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_14_out0_quant);
auto convolution_14_out0 = graph->CreateTensor(convolution_14_out0_spec);
tim::vx::ShapeType convolution_14_weight_shape({3,3,512,1});
tim::vx::Quantization convolution_14_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.036934755742549896, 132);
tim::vx::TensorSpec convolution_14_weight_spec(tim::vx::DataType::UINT8, convolution_14_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_14_weight_quant);
auto convolution_14_weight = graph->CreateTensor(convolution_14_weight_spec, coef_data_ptr + 3447396);
tim::vx::ShapeType convolution_14_bias_shape({512});
tim::vx::Quantization convolution_14_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.0008690185495652258, 0);
tim::vx::TensorSpec convolution_14_bias_spec(tim::vx::DataType::INT32, convolution_14_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_14_bias_quant);
auto convolution_14_bias = graph->CreateTensor(convolution_14_bias_spec, coef_data_ptr + 3445348);
tim::vx::Quantization convolution_15_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_15_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_15_out0_quant);
auto convolution_15_out0 = graph->CreateTensor(convolution_15_out0_spec);
tim::vx::ShapeType convolution_15_weight_shape({1,1,512,512});
tim::vx::Quantization convolution_15_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.005300046876072884, 140);
tim::vx::TensorSpec convolution_15_weight_spec(tim::vx::DataType::UINT8, convolution_15_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_15_weight_quant);
auto convolution_15_weight = graph->CreateTensor(convolution_15_weight_spec, coef_data_ptr + 3454052);
tim::vx::ShapeType convolution_15_bias_shape({512});
tim::vx::Quantization convolution_15_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.00012470202636905015, 0);
tim::vx::TensorSpec convolution_15_bias_spec(tim::vx::DataType::INT32, convolution_15_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_15_bias_quant);
auto convolution_15_bias = graph->CreateTensor(convolution_15_bias_spec, coef_data_ptr + 3452004);
tim::vx::Quantization convolution_16_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_16_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_16_out0_quant);
auto convolution_16_out0 = graph->CreateTensor(convolution_16_out0_spec);
tim::vx::ShapeType convolution_16_weight_shape({3,3,512,1});
tim::vx::Quantization convolution_16_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.042609862983226776, 94);
tim::vx::TensorSpec convolution_16_weight_spec(tim::vx::DataType::UINT8, convolution_16_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_16_weight_quant);
auto convolution_16_weight = graph->CreateTensor(convolution_16_weight_spec, coef_data_ptr + 3718244);
tim::vx::ShapeType convolution_16_bias_shape({512});
tim::vx::Quantization convolution_16_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.0010025452356785536, 0);
tim::vx::TensorSpec convolution_16_bias_spec(tim::vx::DataType::INT32, convolution_16_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_16_bias_quant);
auto convolution_16_bias = graph->CreateTensor(convolution_16_bias_spec, coef_data_ptr + 3716196);
tim::vx::Quantization convolution_17_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_17_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_17_out0_quant);
auto convolution_17_out0 = graph->CreateTensor(convolution_17_out0_spec);
tim::vx::ShapeType convolution_17_weight_shape({1,1,512,512});
tim::vx::Quantization convolution_17_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.0049632852897048, 127);
tim::vx::TensorSpec convolution_17_weight_spec(tim::vx::DataType::UINT8, convolution_17_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_17_weight_quant);
auto convolution_17_weight = graph->CreateTensor(convolution_17_weight_spec, coef_data_ptr + 3724900);
tim::vx::ShapeType convolution_17_bias_shape({512});
tim::vx::Quantization convolution_17_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.00011677854490699247, 0);
tim::vx::TensorSpec convolution_17_bias_spec(tim::vx::DataType::INT32, convolution_17_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_17_bias_quant);
auto convolution_17_bias = graph->CreateTensor(convolution_17_bias_spec, coef_data_ptr + 3722852);
tim::vx::Quantization convolution_18_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_18_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_18_out0_quant);
auto convolution_18_out0 = graph->CreateTensor(convolution_18_out0_spec);
tim::vx::ShapeType convolution_18_weight_shape({3,3,512,1});
tim::vx::Quantization convolution_18_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.028358859941363335, 127);
tim::vx::TensorSpec convolution_18_weight_spec(tim::vx::DataType::UINT8, convolution_18_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_18_weight_quant);
auto convolution_18_weight = graph->CreateTensor(convolution_18_weight_spec, coef_data_ptr + 3989092);
tim::vx::ShapeType convolution_18_bias_shape({512});
tim::vx::Quantization convolution_18_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.0006672407616861165, 0);
tim::vx::TensorSpec convolution_18_bias_spec(tim::vx::DataType::INT32, convolution_18_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_18_bias_quant);
auto convolution_18_bias = graph->CreateTensor(convolution_18_bias_spec, coef_data_ptr + 3987044);
tim::vx::Quantization convolution_19_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_19_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_19_out0_quant);
auto convolution_19_out0 = graph->CreateTensor(convolution_19_out0_spec);
tim::vx::ShapeType convolution_19_weight_shape({1,1,512,512});
tim::vx::Quantization convolution_19_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.007770895957946777, 89);
tim::vx::TensorSpec convolution_19_weight_spec(tim::vx::DataType::UINT8, convolution_19_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_19_weight_quant);
auto convolution_19_weight = graph->CreateTensor(convolution_19_weight_spec, coef_data_ptr + 3995748);
tim::vx::ShapeType convolution_19_bias_shape({512});
tim::vx::Quantization convolution_19_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.00018283734971191734, 0);
tim::vx::TensorSpec convolution_19_bias_spec(tim::vx::DataType::INT32, convolution_19_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_19_bias_quant);
auto convolution_19_bias = graph->CreateTensor(convolution_19_bias_spec, coef_data_ptr + 3993700);
tim::vx::Quantization convolution_20_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_20_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_20_out0_quant);
auto convolution_20_out0 = graph->CreateTensor(convolution_20_out0_spec);
tim::vx::ShapeType convolution_20_weight_shape({3,3,512,1});
tim::vx::Quantization convolution_20_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.024329448118805885, 134);
tim::vx::TensorSpec convolution_20_weight_spec(tim::vx::DataType::UINT8, convolution_20_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_20_weight_quant);
auto convolution_20_weight = graph->CreateTensor(convolution_20_weight_spec, coef_data_ptr + 1032068);
tim::vx::ShapeType convolution_20_bias_shape({512});
tim::vx::Quantization convolution_20_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.0005724348593503237, 0);
tim::vx::TensorSpec convolution_20_bias_spec(tim::vx::DataType::INT32, convolution_20_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_20_bias_quant);
auto convolution_20_bias = graph->CreateTensor(convolution_20_bias_spec, coef_data_ptr + 1030020);
tim::vx::Quantization convolution_21_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_21_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_21_out0_quant);
auto convolution_21_out0 = graph->CreateTensor(convolution_21_out0_spec);
tim::vx::ShapeType convolution_21_weight_shape({1,1,512,512});
tim::vx::Quantization convolution_21_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.009658650495111942, 99);
tim::vx::TensorSpec convolution_21_weight_spec(tim::vx::DataType::UINT8, convolution_21_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_21_weight_quant);
auto convolution_21_weight = graph->CreateTensor(convolution_21_weight_spec, coef_data_ptr + 1038724);
tim::vx::ShapeType convolution_21_bias_shape({512});
tim::vx::Quantization convolution_21_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.00022725333110429347, 0);
tim::vx::TensorSpec convolution_21_bias_spec(tim::vx::DataType::INT32, convolution_21_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_21_bias_quant);
auto convolution_21_bias = graph->CreateTensor(convolution_21_bias_spec, coef_data_ptr + 1036676);
tim::vx::Quantization convolution_22_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_22_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_22_out0_quant);
auto convolution_22_out0 = graph->CreateTensor(convolution_22_out0_spec);
tim::vx::ShapeType convolution_22_weight_shape({3,3,512,1});
tim::vx::Quantization convolution_22_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.019366811960935593, 106);
tim::vx::TensorSpec convolution_22_weight_spec(tim::vx::DataType::UINT8, convolution_22_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_22_weight_quant);
auto convolution_22_weight = graph->CreateTensor(convolution_22_weight_spec, coef_data_ptr + 1302916);
tim::vx::ShapeType convolution_22_bias_shape({512});
tim::vx::Quantization convolution_22_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.0004556716012302786, 0);
tim::vx::TensorSpec convolution_22_bias_spec(tim::vx::DataType::INT32, convolution_22_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_22_bias_quant);
auto convolution_22_bias = graph->CreateTensor(convolution_22_bias_spec, coef_data_ptr + 1300868);
tim::vx::Quantization convolution_23_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_23_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_23_out0_quant);
auto convolution_23_out0 = graph->CreateTensor(convolution_23_out0_spec);
tim::vx::ShapeType convolution_23_weight_shape({1,1,512,512});
tim::vx::Quantization convolution_23_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.005446993745863438, 153);
tim::vx::TensorSpec convolution_23_weight_spec(tim::vx::DataType::UINT8, convolution_23_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_23_weight_quant);
auto convolution_23_weight = graph->CreateTensor(convolution_23_weight_spec, coef_data_ptr + 1309572);
tim::vx::ShapeType convolution_23_bias_shape({512});
tim::vx::Quantization convolution_23_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.00012815947411581874, 0);
tim::vx::TensorSpec convolution_23_bias_spec(tim::vx::DataType::INT32, convolution_23_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_23_bias_quant);
auto convolution_23_bias = graph->CreateTensor(convolution_23_bias_spec, coef_data_ptr + 1307524);
tim::vx::Quantization convolution_24_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_24_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_24_out0_quant);
auto convolution_24_out0 = graph->CreateTensor(convolution_24_out0_spec);
tim::vx::ShapeType convolution_24_weight_shape({3,3,512,1});
tim::vx::Quantization convolution_24_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.007835594937205315, 126);
tim::vx::TensorSpec convolution_24_weight_spec(tim::vx::DataType::UINT8, convolution_24_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_24_weight_quant);
auto convolution_24_weight = graph->CreateTensor(convolution_24_weight_spec, coef_data_ptr + 1573764);
tim::vx::ShapeType convolution_24_bias_shape({512});
tim::vx::Quantization convolution_24_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.00018435961101204157, 0);
tim::vx::TensorSpec convolution_24_bias_spec(tim::vx::DataType::INT32, convolution_24_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_24_bias_quant);
auto convolution_24_bias = graph->CreateTensor(convolution_24_bias_spec, coef_data_ptr + 1571716);
tim::vx::Quantization convolution_25_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_25_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_25_out0_quant);
auto convolution_25_out0 = graph->CreateTensor(convolution_25_out0_spec);
tim::vx::ShapeType convolution_25_weight_shape({1,1,512,1024});
tim::vx::Quantization convolution_25_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.00817922968417406, 130);
tim::vx::TensorSpec convolution_25_weight_spec(tim::vx::DataType::UINT8, convolution_25_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_25_weight_quant);
auto convolution_25_weight = graph->CreateTensor(convolution_25_weight_spec, coef_data_ptr + 1582468);
tim::vx::ShapeType convolution_25_bias_shape({1024});
tim::vx::Quantization convolution_25_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.0001924448151839897, 0);
tim::vx::TensorSpec convolution_25_bias_spec(tim::vx::DataType::INT32, convolution_25_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_25_bias_quant);
auto convolution_25_bias = graph->CreateTensor(convolution_25_bias_spec, coef_data_ptr + 1578372);
tim::vx::Quantization convolution_26_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_26_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_26_out0_quant);
auto convolution_26_out0 = graph->CreateTensor(convolution_26_out0_spec);
tim::vx::ShapeType convolution_26_weight_shape({3,3,1024,1});
tim::vx::Quantization convolution_26_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.12616927921772003, 211);
tim::vx::TensorSpec convolution_26_weight_spec(tim::vx::DataType::UINT8, convolution_26_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_26_weight_quant);
auto convolution_26_weight = graph->CreateTensor(convolution_26_weight_spec, coef_data_ptr + 2110852);
tim::vx::ShapeType convolution_26_bias_shape({1024});
tim::vx::Quantization convolution_26_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.002968570915982127, 0);
tim::vx::TensorSpec convolution_26_bias_spec(tim::vx::DataType::INT32, convolution_26_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_26_bias_quant);
auto convolution_26_bias = graph->CreateTensor(convolution_26_bias_spec, coef_data_ptr + 2106756);
tim::vx::Quantization convolution_27_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec convolution_27_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_27_out0_quant);
auto convolution_27_out0 = graph->CreateTensor(convolution_27_out0_spec);
tim::vx::ShapeType convolution_27_weight_shape({1,1,1024,1024});
tim::vx::Quantization convolution_27_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.018048152327537537, 95);
tim::vx::TensorSpec convolution_27_weight_spec(tim::vx::DataType::UINT8, convolution_27_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_27_weight_quant);
auto convolution_27_weight = graph->CreateTensor(convolution_27_weight_spec, coef_data_ptr + 2124164);
tim::vx::ShapeType convolution_27_bias_shape({1024});
tim::vx::Quantization convolution_27_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.000424645550083369, 0);
tim::vx::TensorSpec convolution_27_bias_spec(tim::vx::DataType::INT32, convolution_27_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_27_bias_quant);
auto convolution_27_bias = graph->CreateTensor(convolution_27_bias_spec, coef_data_ptr + 2120068);
// pooling_28: TRANSIENT activation tensor used as the output of the pooling op.
tim::vx::Quantization pooling_28_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.023528477177023888, 0);
tim::vx::TensorSpec pooling_28_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, pooling_28_out0_quant);
auto pooling_28_out0 = graph->CreateTensor(pooling_28_out0_spec);
// Classifier head: 1x1 convolution producing 1001 channels; its weight/bias
// data sit at the very beginning of the coefficient blob (offsets 4004 and 0).
tim::vx::Quantization convolution_29_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.16609922051429749, 66);
tim::vx::TensorSpec convolution_29_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, convolution_29_out0_quant);
auto convolution_29_out0 = graph->CreateTensor(convolution_29_out0_spec);
tim::vx::ShapeType convolution_29_weight_shape({1,1,1024,1001});
tim::vx::Quantization convolution_29_weight_quant(tim::vx::QuantType::ASYMMETRIC, 0.004986600950360298, 74);
tim::vx::TensorSpec convolution_29_weight_spec(tim::vx::DataType::UINT8, convolution_29_weight_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_29_weight_quant);
auto convolution_29_weight = graph->CreateTensor(convolution_29_weight_spec, coef_data_ptr + 4004);
tim::vx::ShapeType convolution_29_bias_shape({1001});
tim::vx::Quantization convolution_29_bias_quant(tim::vx::QuantType::ASYMMETRIC, 0.00011732713028322905, 0);
tim::vx::TensorSpec convolution_29_bias_spec(tim::vx::DataType::INT32, convolution_29_bias_shape,
tim::vx::TensorAttribute::CONSTANT, convolution_29_bias_quant);
auto convolution_29_bias = graph->CreateTensor(convolution_29_bias_spec, coef_data_ptr + 0);
// Intermediate tensors for the trailing permute/reshape stage; they inherit
// the classifier's quantization parameters.
tim::vx::Quantization permute_34_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.16609922051429749, 66);
tim::vx::TensorSpec permute_34_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, permute_34_out0_quant);
auto permute_34_out0 = graph->CreateTensor(permute_34_out0_spec);
tim::vx::Quantization reshape_30_out0_quant(tim::vx::QuantType::ASYMMETRIC, 0.16609922051429749, 66);
tim::vx::TensorSpec reshape_30_out0_spec(tim::vx::DataType::UINT8, {},
tim::vx::TensorAttribute::TRANSIENT, reshape_30_out0_quant);
auto reshape_30_out0 = graph->CreateTensor(reshape_30_out0_spec);
// Graph input: quantized UINT8 image tensor, shape {3,224,224,1}
// (channel-first per-dimension order as exported; scale 1/128, zp 128).
tim::vx::ShapeType input_0_shape({3,224,224,1});
tim::vx::Quantization input_0_quant(tim::vx::QuantType::ASYMMETRIC, 0.0078125, 128);
tim::vx::TensorSpec input_0_spec(tim::vx::DataType::UINT8, input_0_shape,
tim::vx::TensorAttribute::INPUT, input_0_quant);
auto input_0 = graph->CreateTensor(input_0_spec);
// Graph output: 1001 class scores. The quantized UINT8 spec is intentionally
// kept commented out; the active spec declares FLOAT32 so the runtime
// dequantizes the scores before handing them back.
tim::vx::ShapeType output_32_shape({1001,1});
// tim::vx::Quantization output_32_quant(tim::vx::QuantType::ASYMMETRIC, 0.00390625, 0);
// tim::vx::TensorSpec output_32_spec(tim::vx::DataType::UINT8, output_32_shape,
// tim::vx::TensorAttribute::OUTPUT, output_32_quant);
tim::vx::TensorSpec output_32_spec(tim::vx::DataType::FLOAT32, output_32_shape,
tim::vx::TensorAttribute::OUTPUT);
auto output_32 = graph->CreateTensor(output_32_spec);
// Register the graph's I/O tensors in the sample's module-level lists so the
// demo driver can feed inputs and fetch results later.
mobilenet::inputs_tensor.push_back(input_0);
mobilenet::outputs_tensor.push_back(output_32);
// ==== Operation definitions ====
// Conv2d constructor arguments, in order:
//   (out_channels, pad_type, ksize{w,h}, stride{w,h}, dilation{w,h},
//    pad{left,right,top,bottom}, multiplier)
// multiplier == 0 selects an ordinary convolution; multiplier == 1 selects a
// depthwise convolution with channel multiplier 1 (matching the {3,3,C,1}
// weight tensors defined above).
// FIX: the generator emitted the multiplier as the floating-point literal
// `1.0` even though the parameter is integral (int32_t); use the integer
// literal `1` so no implicit double->int conversion takes place. The
// converted value is identical, so runtime behavior is unchanged.
auto permute_33 = graph->CreateOperation<tim::vx::ops::Transpose>(
    std::vector<uint32_t>({1,2,0,3}));  // permutation order from the exporter
auto convolution_1 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    32,                                  // out channels
    tim::vx::PadType::NONE,              // explicit pad values below
    std::array<uint32_t, 2>({3,3}),      // ksize
    std::array<uint32_t, 2>({2,2}),      // stride
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({0,1,0,1}),  // pad
    0);                                  // multiplier: regular conv
auto convolution_2 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    32,                                  // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({3,3}),      // ksize
    std::array<uint32_t, 2>({1,1}),      // stride
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({1,1,1,1}),  // pad
    1);                                  // multiplier: depthwise
auto convolution_3 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    64,                                  // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({1,1}),      // ksize (pointwise)
    std::array<uint32_t, 2>({1,1}),      // stride
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({0,0,0,0}),  // pad
    0);                                  // multiplier: regular conv
auto convolution_4 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    64,                                  // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({3,3}),      // ksize
    std::array<uint32_t, 2>({2,2}),      // stride (downsample)
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({0,1,0,1}),  // pad
    1);                                  // multiplier: depthwise
auto convolution_5 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    128,                                 // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({1,1}),      // ksize (pointwise)
    std::array<uint32_t, 2>({1,1}),      // stride
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({0,0,0,0}),  // pad
    0);                                  // multiplier: regular conv
auto convolution_6 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    128,                                 // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({3,3}),      // ksize
    std::array<uint32_t, 2>({1,1}),      // stride
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({1,1,1,1}),  // pad
    1);                                  // multiplier: depthwise
auto convolution_7 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    128,                                 // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({1,1}),      // ksize (pointwise)
    std::array<uint32_t, 2>({1,1}),      // stride
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({0,0,0,0}),  // pad
    0);                                  // multiplier: regular conv
auto convolution_8 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    128,                                 // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({3,3}),      // ksize
    std::array<uint32_t, 2>({2,2}),      // stride (downsample)
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({0,1,0,1}),  // pad
    1);                                  // multiplier: depthwise
auto convolution_9 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    256,                                 // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({1,1}),      // ksize (pointwise)
    std::array<uint32_t, 2>({1,1}),      // stride
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({0,0,0,0}),  // pad
    0);                                  // multiplier: regular conv
auto convolution_10 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    256,                                 // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({3,3}),      // ksize
    std::array<uint32_t, 2>({1,1}),      // stride
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({1,1,1,1}),  // pad
    1);                                  // multiplier: depthwise
auto convolution_11 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    256,                                 // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({1,1}),      // ksize (pointwise)
    std::array<uint32_t, 2>({1,1}),      // stride
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({0,0,0,0}),  // pad
    0);                                  // multiplier: regular conv
auto convolution_12 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    256,                                 // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({3,3}),      // ksize
    std::array<uint32_t, 2>({2,2}),      // stride (downsample)
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({0,1,0,1}),  // pad
    1);                                  // multiplier: depthwise
auto convolution_13 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    512,                                 // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({1,1}),      // ksize (pointwise)
    std::array<uint32_t, 2>({1,1}),      // stride
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({0,0,0,0}),  // pad
    0);                                  // multiplier: regular conv
auto convolution_14 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    512,                                 // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({3,3}),      // ksize
    std::array<uint32_t, 2>({1,1}),      // stride
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({1,1,1,1}),  // pad
    1);                                  // multiplier: depthwise
auto convolution_15 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    512,                                 // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({1,1}),      // ksize (pointwise)
    std::array<uint32_t, 2>({1,1}),      // stride
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({0,0,0,0}),  // pad
    0);                                  // multiplier: regular conv
auto convolution_16 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    512,                                 // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({3,3}),      // ksize
    std::array<uint32_t, 2>({1,1}),      // stride
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({1,1,1,1}),  // pad
    1);                                  // multiplier: depthwise
auto convolution_17 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    512,                                 // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({1,1}),      // ksize (pointwise)
    std::array<uint32_t, 2>({1,1}),      // stride
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({0,0,0,0}),  // pad
    0);                                  // multiplier: regular conv
auto convolution_18 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    512,                                 // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({3,3}),      // ksize
    std::array<uint32_t, 2>({1,1}),      // stride
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({1,1,1,1}),  // pad
    1);                                  // multiplier: depthwise
auto convolution_19 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    512,                                 // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({1,1}),      // ksize (pointwise)
    std::array<uint32_t, 2>({1,1}),      // stride
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({0,0,0,0}),  // pad
    0);                                  // multiplier: regular conv
auto convolution_20 = graph->CreateOperation<tim::vx::ops::Conv2d>(
    512,                                 // out channels
    tim::vx::PadType::NONE,
    std::array<uint32_t, 2>({3,3}),      // ksize
    std::array<uint32_t, 2>({1,1}),      // stride
    std::array<uint32_t, 2>({1,1}),      // dilation
    std::array<uint32_t, 4>({1,1,1,1}),  // pad
    1);                                  // multiplier: depthwise
auto convolution_21 = graph->CreateOperation <tim::vx::ops::Conv2d>(
512, // weights
tim::vx::PadType::NONE, // padding
std::array<uint32_t, 2>({1,1}), // ksize
std::array<uint32_t, 2>({1,1}), // stride
std::array<uint32_t, 2>({1,1}), // dilation
std::array<uint32_t, 4>({0,0,0,0}), // pad
0); // multiplier
auto convolution_22 = graph->CreateOperation <tim::vx::ops::Conv2d>(
512, // weights
tim::vx::PadType::NONE, // padding
std::array<uint32_t, 2>({3,3}), // ksize
std::array<uint32_t, 2>({1,1}), // stride
std::array<uint32_t, 2>({1,1}), // dilation
std::array<uint32_t, 4>({1,1,1,1}), // pad
1.0); // multiplier
auto convolution_23 = graph->CreateOperation <tim::vx::ops::Conv2d>(
512, // weights
tim::vx::PadType::NONE, // padding
std::array<uint32_t, 2>({1,1}), // ksize
std::array<uint32_t, 2>({1,1}), // stride
std::array<uint32_t, 2>({1,1}), // dilation
std::array<uint32_t, 4>({0,0,0,0}), // pad
0); // multiplier
auto convolution_24 = graph->CreateOperation <tim::vx::ops::Conv2d>(
512, // weights
tim::vx::PadType::NONE, // padding
std::array<uint32_t, 2>({3,3}), // ksize
std::array<uint32_t, 2>({2,2}), // stride
std::array<uint32_t, 2>({1,1}), // dilation
std::array<uint32_t, 4>({0,1,0,1}), // pad
1.0); // multiplier
auto convolution_25 = graph->CreateOperation <tim::vx::ops::Conv2d>(
1024, // weights
tim::vx::PadType::NONE, // padding
std::array<uint32_t, 2>({1,1}), // ksize
std::array<uint32_t, 2>({1,1}), // stride
std::array<uint32_t, 2>({1,1}), // dilation
std::array<uint32_t, 4>({0,0,0,0}), // pad
0); // multiplier
auto convolution_26 = graph->CreateOperation <tim::vx::ops::Conv2d>(
1024, // weights
tim::vx::PadType::NONE, // padding
std::array<uint32_t, 2>({3,3}), // ksize
std::array<uint32_t, 2>({1,1}), // stride
std::array<uint32_t, 2>({1,1}), // dilation
std::array<uint32_t, 4>({1,1,1,1}), // pad
1.0); // multiplier
auto convolution_27 = graph->CreateOperation <tim::vx::ops::Conv2d>(
1024, // weights
tim::vx::PadType::NONE, // padding
std::array<uint32_t, 2>({1,1}), // ksize
std::array<uint32_t, 2>({1,1}), // stride
std::array<uint32_t, 2>({1,1}), // dilation
std::array<uint32_t, 4>({0,0,0,0}), // pad
0); // multiplier
auto pooling_28 = graph->CreateOperation <tim::vx::ops::Pool2d>(
tim::vx::PoolType::AVG, // type
std::array<uint32_t, 4>({0,0,0,0}), // pad
std::array<uint32_t, 2>({7,7}), // ksize
std::array<uint32_t, 2>({2,2}), // stride
tim::vx::RoundType::FLOOR); // round_type
auto convolution_29 = graph->CreateOperation <tim::vx::ops::Conv2d>(
1001, // weights
tim::vx::PadType::NONE, // padding
std::array<uint32_t, 2>({1,1}), // ksize
std::array<uint32_t, 2>({1,1}), // stride
std::array<uint32_t, 2>({1,1}), // dilation
std::array<uint32_t, 4>({0,0,0,0}), // pad
0); // multiplier
auto permute_34 = graph->CreateOperation <tim::vx::ops::Transpose>(
std::vector<uint32_t>({2,0,1,3})); // perm
auto reshape_30 = graph->CreateOperation <tim::vx::ops::Reshape>(
std::vector<uint32_t>({1001,1})); // size
auto softmax_31 = graph->CreateOperation <tim::vx::ops::Softmax>(
1.0, // beta
0); // axis
(*permute_33)
.BindInputs({input_0})
.BindOutputs({permute_33_out0});
(*convolution_1)
.BindInputs({permute_33_out0, convolution_1_weight, convolution_1_bias})
.BindOutputs({convolution_1_out0});
(*convolution_2)
.BindInputs({convolution_1_out0, convolution_2_weight, convolution_2_bias})
.BindOutputs({convolution_2_out0});
(*convolution_3)
.BindInputs({convolution_2_out0, convolution_3_weight, convolution_3_bias})
.BindOutputs({convolution_3_out0});
(*convolution_4)
.BindInputs({convolution_3_out0, convolution_4_weight, convolution_4_bias})
.BindOutputs({convolution_4_out0});
(*convolution_5)
.BindInputs({convolution_4_out0, convolution_5_weight, convolution_5_bias})
.BindOutputs({convolution_5_out0});
(*convolution_6)
.BindInputs({convolution_5_out0, convolution_6_weight, convolution_6_bias})
.BindOutputs({convolution_6_out0});
(*convolution_7)
.BindInputs({convolution_6_out0, convolution_7_weight, convolution_7_bias})
.BindOutputs({convolution_7_out0});
(*convolution_8)
.BindInputs({convolution_7_out0, convolution_8_weight, convolution_8_bias})
.BindOutputs({convolution_8_out0});
(*convolution_9)
.BindInputs({convolution_8_out0, convolution_9_weight, convolution_9_bias})
.BindOutputs({convolution_9_out0});
(*convolution_10)
.BindInputs({convolution_9_out0, convolution_10_weight, convolution_10_bias})
.BindOutputs({convolution_10_out0});
(*convolution_11)
.BindInputs({convolution_10_out0, convolution_11_weight, convolution_11_bias})
.BindOutputs({convolution_11_out0});
(*convolution_12)
.BindInputs({convolution_11_out0, convolution_12_weight, convolution_12_bias})
.BindOutputs({convolution_12_out0});
(*convolution_13)
.BindInputs({convolution_12_out0, convolution_13_weight, convolution_13_bias})
.BindOutputs({convolution_13_out0});
(*convolution_14)
.BindInputs({convolution_13_out0, convolution_14_weight, convolution_14_bias})
.BindOutputs({convolution_14_out0});
(*convolution_15)
.BindInputs({convolution_14_out0, convolution_15_weight, convolution_15_bias})
.BindOutputs({convolution_15_out0});
(*convolution_16)
.BindInputs({convolution_15_out0, convolution_16_weight, convolution_16_bias})
.BindOutputs({convolution_16_out0});
(*convolution_17)
.BindInputs({convolution_16_out0, convolution_17_weight, convolution_17_bias})
.BindOutputs({convolution_17_out0});
(*convolution_18)
.BindInputs({convolution_17_out0, convolution_18_weight, convolution_18_bias})
.BindOutputs({convolution_18_out0});
(*convolution_19)
.BindInputs({convolution_18_out0, convolution_19_weight, convolution_19_bias})
.BindOutputs({convolution_19_out0});
(*convolution_20)
.BindInputs({convolution_19_out0, convolution_20_weight, convolution_20_bias})
.BindOutputs({convolution_20_out0});
(*convolution_21)
.BindInputs({convolution_20_out0, convolution_21_weight, convolution_21_bias})
.BindOutputs({convolution_21_out0});
(*convolution_22)
.BindInputs({convolution_21_out0, convolution_22_weight, convolution_22_bias})
.BindOutputs({convolution_22_out0});
(*convolution_23)
.BindInputs({convolution_22_out0, convolution_23_weight, convolution_23_bias})
.BindOutputs({convolution_23_out0});
(*convolution_24)
.BindInputs({convolution_23_out0, convolution_24_weight, convolution_24_bias})
.BindOutputs({convolution_24_out0});
(*convolution_25)
.BindInputs({convolution_24_out0, convolution_25_weight, convolution_25_bias})
.BindOutputs({convolution_25_out0});
(*convolution_26)
.BindInputs({convolution_25_out0, convolution_26_weight, convolution_26_bias})
.BindOutputs({convolution_26_out0});
(*convolution_27)
.BindInputs({convolution_26_out0, convolution_27_weight, convolution_27_bias})
.BindOutputs({convolution_27_out0});
(*pooling_28)
.BindInputs({convolution_27_out0})
.BindOutputs({pooling_28_out0});
(*convolution_29)
.BindInputs({pooling_28_out0, convolution_29_weight, convolution_29_bias})
.BindOutputs({convolution_29_out0});
(*permute_34)
.BindInputs({convolution_29_out0})
.BindOutputs({permute_34_out0});
(*reshape_30)
.BindInputs({permute_34_out0})
.BindOutputs({reshape_30_out0});
(*softmax_31)
.BindInputs({reshape_30_out0})
.BindOutputs({output_32});
free(coef_data_ptr);
}
} // namespace acuitylite

View File

@ -0,0 +1,34 @@
/****************************************************************************
 * Generated by ACUITY 6.6.0
 * Match timvx 1.1.30
 *
 * Neural Network application network definition header file
 ****************************************************************************/
// NOTE: guard renamed from _VX_MOBILENET_H — identifiers beginning with an
// underscore followed by an uppercase letter are reserved to the
// implementation in C++ ([lex.name]).
#ifndef VX_MOBILENET_H
#define VX_MOBILENET_H
#include <cstdint>   // uint8_t (used directly below; do not rely on transitive includes)
#include <memory>    // std::shared_ptr
#include <vector>    // std::vector
#include "tim/vx/operation.h"
#include "tim/vx/tensor.h"
#include "tim/vx/graph.h"
#include "tim/vx/ops.h"
namespace acuitylite
{
// Acuity-exported MobileNet network description.
// All members are static: the exporter emits exactly one network per class,
// and construct_graph() populates the tensor lists as a side effect.
class mobilenet
{
public:
    using input_0_type = uint8_t;   // element type of the graph input
    using output_0_type = uint8_t;  // element type of the graph output
    // Dimensions of each graph input tensor.
    static std::vector<std::vector<uint32_t>> input_size_list;
    // Byte size of each input buffer.
    static std::vector<uint32_t> input_bytes_list;
    // Dimensions of each graph output tensor.
    static std::vector<std::vector<uint32_t>> output_size_list;
    // Tensors bound as graph inputs/outputs; filled by construct_graph().
    static std::vector<std::shared_ptr<tim::vx::Tensor>> inputs_tensor;
    static std::vector<std::shared_ptr<tim::vx::Tensor>> outputs_tensor;
    // Builds the network into `graph`; `data_file_name` names the
    // exported coefficient data file read during construction
    // (presumably the ACUITY *.export.data blob — confirm with exporter docs).
    static void construct_graph(std::shared_ptr<tim::vx::Graph> graph, const char *data_file_name);
};
} // namespace acuitylite
#endif  // VX_MOBILENET_H

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,34 @@
/****************************************************************************
 * Generated by ACUITY 6.6.0
 * Match timvx 1.1.30
 *
 * Neural Network application network definition header file
 ****************************************************************************/
// NOTE: guard renamed from _VX_RESNET50_H — identifiers beginning with an
// underscore followed by an uppercase letter are reserved to the
// implementation in C++ ([lex.name]).
#ifndef VX_RESNET50_H
#define VX_RESNET50_H
#include <cstdint>   // uint8_t / uint16_t (used directly below)
#include <memory>    // std::shared_ptr
#include <vector>    // std::vector
#include "tim/vx/operation.h"
#include "tim/vx/tensor.h"
#include "tim/vx/graph.h"
#include "tim/vx/ops.h"
namespace acuitylite
{
// Acuity-exported ResNet-50 network description.
// All members are static: the exporter emits exactly one network per class,
// and construct_graph() populates the tensor lists as a side effect.
class resnet50
{
public:
    using input_0_type = uint8_t;    // element type of the graph input
    using output_0_type = uint16_t;  // element type of the graph output
    // Dimensions of each graph input tensor.
    static std::vector<std::vector<uint32_t>> input_size_list;
    // Byte size of each input buffer.
    static std::vector<uint32_t> input_bytes_list;
    // Dimensions of each graph output tensor.
    static std::vector<std::vector<uint32_t>> output_size_list;
    // Tensors bound as graph inputs/outputs; filled by construct_graph().
    static std::vector<std::shared_ptr<tim::vx::Tensor>> inputs_tensor;
    static std::vector<std::shared_ptr<tim::vx::Tensor>> outputs_tensor;
    // Builds the network into `graph`; `data_file_name` names the
    // exported coefficient data file read during construction
    // (presumably the ACUITY *.export.data blob — confirm with exporter docs).
    static void construct_graph(std::shared_ptr<tim::vx::Graph> graph, const char *data_file_name);
};
} // namespace acuitylite
#endif  // VX_RESNET50_H