[NFC] Change ONNF based names to ONNX-MLIR (#32)

* Rename onnf to onnx-mlir.
* Change workspace name.

parent c25831094e
commit 1622b9f161
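Most of this diff is a mechanical, case-aware substitution. As a rough sketch (not the procedure the authors actually used), the bulk of it could be replayed with git grep and GNU sed; note that the commit also distinguishes hyphenated target and path names (onnx-mlir) from underscored identifiers (onnx_mlir), which a blind substitution cannot do, so manual review would still be needed:

```bash
#!/usr/bin/env bash
# Illustrative only: replay the bulk of the ONNF -> ONNX-MLIR rename.
# The three case variants mirror the substitutions visible in this commit;
# hyphenated names (the onnx-mlir binary target) need a manual pass afterwards.
set -e
git grep -lE 'ONNF|Onnf|onnf' | while read -r f; do
  sed -i \
    -e 's/ONNF/ONNX_MLIR/g' \
    -e 's/Onnf/OnnxMlir/g' \
    -e 's/onnf/onnx_mlir/g' \
    "$f"
done
# Anything left over is a case the table above did not anticipate.
git grep -nE 'ONNF|Onnf|onnf' || echo "rename complete"
```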
@@ -3,9 +3,9 @@
 # Exit on error:
 set -e
 
-# Check for required env variables ONNF_DEP_DIR, LLVM_PROJECT_ROOT
-if [[ -z "${ONNF_DEP_DIR}" ]]; then
-echo "ONNF_DEP_DIR env var is missing."
+# Check for required env variables ONNX_MLIR_DEP_DIR, LLVM_PROJECT_ROOT
+if [[ -z "${ONNX_MLIR_DEP_DIR}" ]]; then
+echo "ONNX_MLIR_DEP_DIR env var is missing."
 exit 1
 fi
 
@@ -14,11 +14,11 @@ if [[ -z "${LLVM_PROJECT_ROOT}" ]]; then
 exit 1
 fi
 
-# Set up env variables to expose onnf dependencies:
-export PATH=$ONNF_DEP_DIR/bin:$PATH
-export LD_LIBRARY_PATH=$ONNF_DEP_DIR/lib:$ONNF_DEP_DIR/lib64:
-export CPATH=$ONNF_DEP_DIR/include:$CPATH
-export PATH=$ONNF_DEP_DIR/bin:$PATH
+# Set up env variables to expose onnx-mlir dependencies:
+export PATH=$ONNX_MLIR_DEP_DIR/bin:$PATH
+export LD_LIBRARY_PATH=$ONNX_MLIR_DEP_DIR/lib:$ONNX_MLIR_DEP_DIR/lib64:
+export CPATH=$ONNX_MLIR_DEP_DIR/include:$CPATH
+export PATH=$ONNX_MLIR_DEP_DIR/bin:$PATH
 
 # Set up mock installation path within current workspace:
 export INSTALL_PATH=$WORKSPACE/INSTALL_PATH
@@ -28,19 +28,19 @@ export LD_LIBRARY_PATH=$INSTALL_PATH/lib:$INSTALL_PATH/lib64:$LD_LIBRARY_PATH
 export CPATH=$INSTALL_PATH/include:$CPATH
 
 # Create virtual environment specific to the current build instance:
-conda create -n onnf_conda_workspace_"${BUILD_NUMBER}" python=3.7 numpy
-source activate onnf_conda_workspace_"${BUILD_NUMBER}"
+conda create -n onnx_mlir_conda_workspace_"${BUILD_NUMBER}" python=3.7 numpy
+source activate onnx_mlir_conda_workspace_"${BUILD_NUMBER}"
 
 # Create build directory and generate make files:
 mkdir build && cd build
-CC=$ONNF_DEP_DIR/bin/gcc \
-CXX=$ONNF_DEP_DIR/bin/g++ \
-BOOST_ROOT=$ONNF_DEP_DIR \
+CC=$ONNX_MLIR_DEP_DIR/bin/gcc \
+CXX=$ONNX_MLIR_DEP_DIR/bin/g++ \
+BOOST_ROOT=$ONNX_MLIR_DEP_DIR \
 LLVM_SRC=$LLVM_PROJECT_ROOT/llvm \
 LLVM_BUILD=$LLVM_PROJECT_ROOT/build \
-cmake3 -DONNF_ENABLE_MODEL_TEST_CPP=ON \
--DONNF_ENABLE_BENCHMARK=ON \
+cmake3 -DONNX_MLIR_ENABLE_MODEL_TEST_CPP=ON \
+-DONNX_MLIR_ENABLE_BENCHMARK=ON \
 -DCMAKE_INSTALL_PREFIX="$INSTALL_PATH" \
 ..
 
 # Build and test:
@@ -34,10 +34,10 @@ cmake -DCMAKE_C_COMPILER=$CC \
 -DCMAKE_CXX_COMPILER=$CXX \
 -DCMAKE_VERBOSE_MAKEFILE=ON \
 -DCMAKE_INSTALL_PREFIX=$INSTALL_PATH \
--DONNF_ENABLE_NODE_TEST_JAVA=ON \
--DONNF_ENABLE_NODE_TEST_JNI=ON \
--DONNF_ENABLE_NODE_TEST_CPP=OFF \
--DONNF_TARGET_ARCH=z13 ..
+-DONNX_MLIR_ENABLE_NODE_TEST_JAVA=ON \
+-DONNX_MLIR_ENABLE_NODE_TEST_JNI=ON \
+-DONNX_MLIR_ENABLE_NODE_TEST_CPP=OFF \
+-DONNX_MLIR_TARGET_ARCH=z13 ..
 
 make -j "$(nproc)" install
 ctest -j "$(nproc)"
@@ -10,11 +10,11 @@ jobs:
 name: Installing GCC, CMake, Ninja, Protobuf
 command: sudo apt-get update && sudo apt-get install -y gcc g++ cmake ninja-build protobuf-compiler
 - checkout:
-path: ONNF
+path: onnx-mlir
 - run:
 name: Pull Submodules
 command: |
-cd ONNF
+cd onnx-mlir
 git submodule update --init --recursive
 # Use cached mlir installation if possible.
 - restore_cache:
@@ -26,29 +26,29 @@ jobs:
 # mlir-opt executable exists.
 if [ ! -f llvm-project/build/bin/mlir-opt ]; then
 export MAKEFLAGS=-j4
-source ONNF/utils/install-mlir.sh
+source onnx-mlir/utils/install-mlir.sh
 fi
 - save_cache:
 key: V9-LLVM-PROJECT-{{ arch }}
 paths:
 - llvm-project
 - run:
-name: Install ONNF
-command: source ONNF/utils/install-onnf.sh
+name: Install ONNX MLIR
+command: source onnx-mlir/utils/install-onnx-mlir.sh
 - run:
 name: Run End-To-End Tests
 command: |
-sudo pip install -q -e ./ONNF/third_party/onnx
-cd ONNF/build
+sudo pip install -q -e ./onnx-mlir/third_party/onnx
+cd onnx-mlir/build
 cmake --build . --target run-onnx-backend-test
 - run:
 name: Run DocCheck
-command: cd ONNF/build && cmake --build . --target check-doc
+command: cd onnx-mlir/build && cmake --build . --target check-doc
 - run:
 name: Ensure tablegen documentation is up-to-date
 command: |
-cd ONNF/build
-cmake --build . --target onnf-doc
+cd onnx-mlir/build
+cmake --build . --target onnx-mlir-doc
 # Check whether dialect documentation is up-to-date.
 diff doc/Dialects ../doc/Dialects
 - run:
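Taken together, the renamed CI steps can be replayed outside CircleCI. A rough local equivalent, sketched under the assumption that the repository's new home is the onnx/onnx-mlir URL the README advertises and that MLIR has not been built yet; this is illustrative, not a script the project ships:

```bash
#!/usr/bin/env bash
# Illustrative replay of the CI job above on a local machine.
set -e
git clone --recursive git@github.com:onnx/onnx-mlir.git

export MAKEFLAGS=-j4
source onnx-mlir/utils/install-mlir.sh       # builds llvm-project/build/bin/mlir-opt
source onnx-mlir/utils/install-onnx-mlir.sh  # configures and builds onnx-mlir

pip install -q -e ./onnx-mlir/third_party/onnx
cd onnx-mlir/build
cmake --build . --target run-onnx-backend-test  # end-to-end backend tests
cmake --build . --target check-doc              # DocCheck
```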
@@ -6,13 +6,13 @@ endif()
 # Require 3.3 and set policy CMP0057 for IN_LIST operator support
 cmake_minimum_required(VERSION 3.3)
 cmake_policy(SET CMP0057 NEW)
-project(onnf)
+project(onnx-mlir)
 
 set(CMAKE_CXX_FLAGS_DEBUG "-g")
 set(CMAKE_CXX_FLAGS_RELEASE "-O2 -DNDEBUG")
 
-set(ONNF_SRC_ROOT "${CMAKE_CURRENT_SOURCE_DIR}")
-set(ONNF_BIN_ROOT "${CMAKE_CURRENT_BINARY_DIR}")
+set(ONNX_MLIR_SRC_ROOT "${CMAKE_CURRENT_SOURCE_DIR}")
+set(ONNX_MLIR_BIN_ROOT "${CMAKE_CURRENT_BINARY_DIR}")
 set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
 set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)

MLIR.cmake (24 lines changed)
@@ -34,9 +34,9 @@ set(MLIR_SRC_INCLUDE_PATH ${LLVM_PROJ_SRC}/mlir/include)
 set(MLIR_BIN_INCLUDE_PATH ${LLVM_PROJ_BUILD}/tools/mlir/include)
 set(MLIR_TOOLS_DIR ${LLVM_PROJ_BUILD}/bin)
 
-set(ONNF_TOOLS_DIR ${ONNF_BIN_ROOT}/bin)
-set(ONNF_LIT_TEST_SRC_DIR ${CMAKE_SOURCE_DIR}/test/mlir)
-set(ONNF_LIT_TEST_BUILD_DIR ${CMAKE_BINARY_DIR}/test/mlir)
+set(ONNX_MLIR_TOOLS_DIR ${ONNX_MLIR_BIN_ROOT}/bin)
+set(ONNX_MLIR_LIT_TEST_SRC_DIR ${CMAKE_SOURCE_DIR}/test/mlir)
+set(ONNX_MLIR_LIT_TEST_BUILD_DIR ${CMAKE_BINARY_DIR}/test/mlir)
 
 set(
 MLIR_INCLUDE_PATHS
@@ -182,12 +182,12 @@ function(whole_archive_link_mlir target)
 whole_archive_link(${target} ${LLVM_PROJ_BUILD}/lib ${ARGN})
 endfunction(whole_archive_link_mlir)
 
-function(whole_archive_link_onnf target)
+function(whole_archive_link_onnx_mlir target)
 foreach(lib_target ${ARGN})
 add_dependencies(${target} ${lib_target})
 endforeach(lib_target)
 whole_archive_link(${target} ${CMAKE_BINARY_DIR}/lib ${ARGN})
-endfunction(whole_archive_link_onnf)
+endfunction(whole_archive_link_onnx_mlir)
 
 set(LLVM_CMAKE_DIR
 "${LLVM_PROJ_BUILD}/lib/cmake/llvm"
@@ -196,7 +196,7 @@ list(APPEND CMAKE_MODULE_PATH "${LLVM_CMAKE_DIR}")
 include(AddLLVM)
 include(TableGen)
 
-function(onnf_tablegen ofn)
+function(onnx_mlir_tablegen ofn)
 tablegen(MLIR
 ${ARGV}
 "-I${MLIR_SRC_INCLUDE_PATH}"
@@ -214,14 +214,14 @@ set_property(TARGET mlir-tblgen
 PROPERTY IMPORTED_LOCATION ${LLVM_PROJ_BUILD}/bin/mlir-tblgen)
 set(MLIR_TABLEGEN_EXE mlir-tblgen)
 
-# Add a dialect used by ONNF and copy the generated operation
+# Add a dialect used by ONNX MLIR and copy the generated operation
 # documentation to the desired places.
 # c.f. https://github.com/llvm/llvm-project/blob/e298e216501abf38b44e690d2b28fc788ffc96cf/mlir/CMakeLists.txt#L11
-function(add_onnf_dialect_doc dialect dialect_tablegen_file)
+function(add_onnx_mlir_dialect_doc dialect dialect_tablegen_file)
 # Generate Dialect Documentation
 set(LLVM_TARGET_DEFINITIONS ${dialect_tablegen_file})
-onnf_tablegen(${dialect}.md -gen-op-doc)
-set(GEN_DOC_FILE ${ONNF_BIN_ROOT}/doc/Dialects/${dialect}.md)
+onnx_mlir_tablegen(${dialect}.md -gen-op-doc)
+set(GEN_DOC_FILE ${ONNX_MLIR_BIN_ROOT}/doc/Dialects/${dialect}.md)
 add_custom_command(
 OUTPUT ${GEN_DOC_FILE}
 COMMAND ${CMAKE_COMMAND} -E copy
@@ -229,7 +229,7 @@ function(add_onnf_dialect_doc dialect dialect_tablegen_file)
 ${GEN_DOC_FILE}
 DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${dialect}.md)
 add_custom_target(${dialect}DocGen DEPENDS ${GEN_DOC_FILE})
-add_dependencies(onnf-doc ${dialect}DocGen)
+add_dependencies(onnx-mlir-doc ${dialect}DocGen)
 endfunction()
 
-add_custom_target(onnf-doc)
+add_custom_target(onnx-mlir-doc)

README.md (30 lines changed)
@@ -1,5 +1,5 @@
-# ONNF
-Open Neural Network Frontend : an ONNX frontend for MLIR.
+# ONNX MLIR
+The Open Neural Network Exchange implementation in MLIR.
 
 [![CircleCI](https://circleci.com/gh/onnx/onnx-mlir/tree/master.svg?style=svg)](https://circleci.com/gh/onnx/onnx-mlir/tree/master)
 
@@ -18,7 +18,7 @@ Firstly, install MLIR (as a part of LLVM-Project):
 [same-as-file]: <> (utils/install-mlir.sh)
 ``` bash
 git clone https://github.com/llvm/llvm-project.git
-# Check out a specific branch that is known to work with ONNF.
+# Check out a specific branch that is known to work with ONNX MLIR.
 cd llvm-project && git checkout 076475713c236081a3247a53e9dbab9043c3eac2 && cd ..
 mkdir llvm-project/build
 cd llvm-project/build
@@ -38,34 +38,34 @@ Two environment variables need to be set:
 - LLVM_PROJ_SRC should point to the llvm-project src directory (e.g., llvm-project/).
 - LLVM_PROJ_BUILD should point to the llvm-project build directory (e.g., llvm-project/build).
 
-To build ONNF, use the following command:
+To build ONNX-MLIR, use the following command:
 
-[same-as-file]: <> ({"ref": "utils/install-onnf.sh", "skip-doc": 2})
+[same-as-file]: <> ({"ref": "utils/install-onnx-mlir.sh", "skip-doc": 2})
 ```
-git clone --recursive git@github.com:clang-ykt/ONNF.git
+git clone --recursive git@github.com:onnx/onnx-mlir.git
 
 # Export environment variables pointing to LLVM-Projects.
 export LLVM_PROJ_SRC=$(pwd)/llvm-project/
 export LLVM_PROJ_BUILD=$(pwd)/llvm-project/build
 
-mkdir ONNF/build && cd ONNF/build
+mkdir onnx-mlir/build && cd onnx-mlir/build
 cmake ..
-cmake --build . --target onnf
+cmake --build . --target onnx-mlir
 
 # Run FileCheck tests:
 export LIT_OPTS=-v
 cmake --build . --target check-mlir-lit
 ```
 
-After the above commands succeed, an `onnf` executable should appear in the `bin` directory.
+After the above commands succeed, an `onnx-mlir` executable should appear in the `bin` directory.
 
-## Using ONNF
+## Using ONNX MLIR
 
-The usage of `onnf` is as such:
+The usage of `onnx-mlir` is as such:
 ```
-OVERVIEW: ONNF MLIR modular optimizer driver
+OVERVIEW: ONNX MLIR modular optimizer driver
 
-USAGE: onnf [options] <input file>
+USAGE: onnx-mlir [options] <input file>
 
 OPTIONS:
 
@@ -75,7 +75,7 @@ Generic Options:
 --help-list - Display list of available options (--help-list-hidden for more)
 --version - Display the version of this program
 
-ONNF Options:
+ONNX MLIR Options:
 These are frontend options.
 
 Choose target to emit:
@@ -89,7 +89,7 @@ These are frontend options.
 
 For example, to lower an ONNX model (e.g., add.onnx) to ONNX dialect, use the following command:
 ```
-./onnf --EmitONNXIR add.onnx
+./onnx-mlir --EmitONNXIR add.onnx
 ```
 The output should look like:
 ```
@@ -1,23 +1,23 @@
-# Import ONNX specifications into ONNF
+# Import ONNX specifications into ONNX MLIR
 The specifications of ONNX are defined under onnx/defs directory in ONNX projects.
 There is a python script onnx/defs/gen_doc.py that automatically generate documents about operations in ONNX (docs/Operations.md).
-ONNF modified this script to import ONNX specifications into ONNF. There are two files generated for ONNF with the modified gen_doc.py:
+ONNX MLIR modified this script to import ONNX specifications into ONNX MLIR. There are two files generated for ONNX MLIR with the modified gen_doc.py:
 1. src/dialect/onnx/onnxop.inc: Operation defintion for MLIR tablegen. Will be included in src/dialect/onnx/onnx.td
-2. src/builder/op_build_table.inc: c code for ONNF frontend to import operation nodes from ONNX model. Will be included in src/builder/frontend_dialect_transformer.cpp
+2. src/builder/op_build_table.inc: c code for ONNX MLIR frontend to import operation nodes from ONNX model. Will be included in src/builder/frontend_dialect_transformer.cpp
 
 ## How to use the script
-1. Get ONNX. You can use ONNF/third_party/onnx
-2. In your ONNX directory, copy the script docs/gen_doc.py in your ONNF to onnx/defs in ONNX
+1. Get ONNX. You can use onnx-mlir/third_party/onnx
+2. In your ONNX directory, copy the script docs/gen_doc.py in your ONNX MLIR to onnx/defs in ONNX
 3. Run the script: python onnx/defs/gen_doc.py
 4. Two files, onnxop.inc and op_buid_table.inc should be generated in current directory
-5. copy the two file into your ONNF: cp onnxop.inc your_ONNF/src/dialect/onnx/onnxop.inc; cp op_build_table.inc your_ONNF/src/builder
-6. go to your ONNF and build
+5. copy the two file into your ONNX MLIR: cp onnxop.inc your_onnx-mlir/src/dialect/onnx/onnxop.inc; cp op_build_table.inc your_onnx-mlir/src/builder
+6. go to your ONNX MLIR and build
 
 ## Consistency
 The Operators.md generated by gen_doc.py is copied into doc. Please refer to this specification, not the one in onnx github, to make sure operators are consistent in version with onnxop.inc.
 
 ## Customization
-In addition to following the ONNF specification, the modified gen_doc.py provides some mechanism for you to customize the output.
+In addition to following the ONNX specification, the modified gen_doc.py provides some mechanism for you to customize the output.
 Several tables are defined at the beginning of the script:
 1. special_attr_defaults: gives attribute special default value.
 2. special_op_handler: creates special import function in frontend_dialect_transformer.cpp. Currently special handler is used for operations with oprational arguments
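The numbered how-to in the renamed document maps directly onto a handful of shell commands. A sketch, under the assumption that the ONNX checkout is the bundled third_party/onnx and that the onnx-mlir tree sits in the current directory; the two variables are placeholders, not names the project defines:

```bash
#!/usr/bin/env bash
# Illustrative sketch of the spec-import workflow described above.
set -e
ONNX_MLIR_DIR=$PWD/onnx-mlir                # placeholder path to the onnx-mlir tree
ONNX_DIR=$ONNX_MLIR_DIR/third_party/onnx    # step 1: use the bundled ONNX

# Steps 2-3: copy the modified generator into ONNX and run it there.
cp "$ONNX_MLIR_DIR/docs/gen_doc.py" "$ONNX_DIR/onnx/defs/"
cd "$ONNX_DIR" && python onnx/defs/gen_doc.py

# Step 5: copy the generated tables back into onnx-mlir.
cp onnxop.inc "$ONNX_MLIR_DIR/src/dialect/onnx/onnxop.inc"
cp op_build_table.inc "$ONNX_MLIR_DIR/src/builder/"

# Step 6: rebuild so the regenerated tables are compiled in.
cd "$ONNX_MLIR_DIR/build" && cmake --build . --target onnx-mlir
```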
@@ -15,10 +15,10 @@ add_library(compiler
 pass/passes.hpp)
 
 # Include root src directory.
-target_include_directories(compiler PRIVATE ${ONNF_SRC_ROOT})
+target_include_directories(compiler PRIVATE ${ONNX_MLIR_SRC_ROOT})
 
 # Include tablegen generated header files.
-target_include_directories(compiler PRIVATE ${ONNF_BIN_ROOT})
+target_include_directories(compiler PRIVATE ${ONNX_MLIR_BIN_ROOT})
 
 target_link_libraries(compiler
 ${CMAKE_THREAD_LIBS_INIT}
@@ -27,55 +27,55 @@ target_link_libraries(compiler
 curses)
 
 set(LLVM_TARGET_DEFINITIONS pass/onnx_decompose.td)
-onnf_tablegen(onnx_decompose.inc -gen-rewriters)
+onnx_mlir_tablegen(onnx_decompose.inc -gen-rewriters)
 add_public_tablegen_target(gen_onnx_decompose)
 add_dependencies(compiler gen_onnx_decompose)
 
 set(LLVM_TARGET_DEFINITIONS pass/shape_inference_interface.td)
-onnf_tablegen(shape_inference.hpp.inc -gen-op-interface-decls)
-onnf_tablegen(shape_inference.cpp.inc -gen-op-interface-defs)
+onnx_mlir_tablegen(shape_inference.hpp.inc -gen-op-interface-decls)
+onnx_mlir_tablegen(shape_inference.cpp.inc -gen-op-interface-defs)
 add_public_tablegen_target(gen_shape_inference)
 add_dependencies(compiler gen_shape_inference)
 
 set(LLVM_TARGET_DEFINITIONS pass/onnx_combine.td)
-onnf_tablegen(onnx_combine.inc -gen-rewriters)
+onnx_mlir_tablegen(onnx_combine.inc -gen-rewriters)
 add_public_tablegen_target(gen_onnx_combine)
 add_dependencies(compiler gen_onnx_combine)
 
 set(LLVM_TARGET_DEFINITIONS pass/onnx_rewrite.td)
-onnf_tablegen(onnx_rewrite.inc -gen-rewriters)
+onnx_mlir_tablegen(onnx_rewrite.inc -gen-rewriters)
 add_public_tablegen_target(gen_onnx_rewrite)
 add_dependencies(compiler gen_onnx_rewrite)
 
 set(LLVM_TARGET_DEFINITIONS dialect/onnx/onnx.td)
-onnf_tablegen(onnx.hpp.inc -gen-op-decls "-I${CMAKE_SOURCE_DIR}/compiler/pass")
-onnf_tablegen(onnx.cpp.inc -gen-op-defs "-I${CMAKE_SOURCE_DIR}/compiler/pass")
+onnx_mlir_tablegen(onnx.hpp.inc -gen-op-decls "-I${CMAKE_SOURCE_DIR}/compiler/pass")
+onnx_mlir_tablegen(onnx.cpp.inc -gen-op-defs "-I${CMAKE_SOURCE_DIR}/compiler/pass")
 set(GEN_DOC_FILE ${CMAKE_BINARY_DIR}/docs/Dialects/onnx.md)
 add_public_tablegen_target(gen_onnx)
 add_dependencies(compiler gen_onnx)
-add_onnf_dialect_doc(onnx dialect/onnx/onnx.td)
+add_onnx_mlir_dialect_doc(onnx dialect/onnx/onnx.td)
 
 set(LLVM_TARGET_DEFINITIONS dialect/krnl/krnl_ops.td)
-onnf_tablegen(krnl.hpp.inc -gen-op-decls)
-onnf_tablegen(krnl.cpp.inc -gen-op-defs)
+onnx_mlir_tablegen(krnl.hpp.inc -gen-op-decls)
+onnx_mlir_tablegen(krnl.cpp.inc -gen-op-defs)
 add_public_tablegen_target(gen_krnl_ops)
 add_dependencies(compiler gen_krnl_ops)
 
-add_library(onnf_onnx_decompose pass/onnx_decompose.cpp)
-target_include_directories(onnf_onnx_decompose
-PRIVATE ${ONNF_SRC_ROOT} ${ONNF_BIN_ROOT}
-${ONNF_SRC_ROOT})
-target_link_libraries(onnf_onnx_decompose ${MLIRLibs})
-add_dependencies(onnf_onnx_decompose gen_krnl_ops)
+add_library(onnx_mlir_onnx_decompose pass/onnx_decompose.cpp)
+target_include_directories(onnx_mlir_onnx_decompose
+PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT}
+${ONNX_MLIR_SRC_ROOT})
+target_link_libraries(onnx_mlir_onnx_decompose ${MLIRLibs})
+add_dependencies(onnx_mlir_onnx_decompose gen_krnl_ops)
 
-add_library(onnf_shape_inference pass/shape_inference_pass.cpp)
-target_include_directories(onnf_shape_inference
-PRIVATE ${ONNF_SRC_ROOT} ${ONNF_BIN_ROOT}
-${ONNF_SRC_ROOT})
-target_link_libraries(onnf_shape_inference ${MLIRLibs})
-add_dependencies(onnf_shape_inference gen_krnl_ops)
+add_library(onnx_mlir_shape_inference pass/shape_inference_pass.cpp)
+target_include_directories(onnx_mlir_shape_inference
+PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT}
+${ONNX_MLIR_SRC_ROOT})
+target_link_libraries(onnx_mlir_shape_inference ${MLIRLibs})
+add_dependencies(onnx_mlir_shape_inference gen_krnl_ops)
 
-add_library(onnf_lower_frontend
+add_library(onnx_mlir_lower_frontend
 conversion/onnx_to_krnl/onnx_to_krnl_common.cpp
 conversion/onnx_to_krnl/onnx_to_krnl_common.hpp
 conversion/onnx_to_krnl/math/elementwise.cpp
@@ -93,25 +93,25 @@ add_library(onnf_lower_frontend
 conversion/onnx_to_krnl/tensor/unsqueeze.cpp
 conversion/onnx_to_krnl/tensor/constant.cpp
 conversion/onnx_to_krnl/convert_onnx_to_krnl.cpp)
-target_include_directories(onnf_lower_frontend
-PRIVATE ${ONNF_SRC_ROOT} ${ONNF_BIN_ROOT}
-${ONNF_SRC_ROOT})
-target_link_libraries(onnf_lower_frontend ${MLIRLibs})
-add_dependencies(onnf_lower_frontend gen_krnl_ops)
+target_include_directories(onnx_mlir_lower_frontend
+PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT}
+${ONNX_MLIR_SRC_ROOT})
+target_link_libraries(onnx_mlir_lower_frontend ${MLIRLibs})
+add_dependencies(onnx_mlir_lower_frontend gen_krnl_ops)
 
 add_subdirectory(transform)
 add_subdirectory(tool)
 add_subdirectory(builder)
 add_subdirectory(runtime)
 
-add_executable(onnf main.cpp)
+add_executable(onnx-mlir main.cpp)
 
-target_link_libraries(onnf builder ${MLIRLibs} onnf_transform onnf_onnx_decompose onnf_shape_inference onnf_lower_frontend)
-whole_archive_link_mlir(onnf ${MLIRWholeArchiveLibs})
+target_link_libraries(onnx-mlir builder ${MLIRLibs} onnx_mlir_transform onnx_mlir_onnx_decompose onnx_mlir_shape_inference onnx_mlir_lower_frontend)
+whole_archive_link_mlir(onnx-mlir ${MLIRWholeArchiveLibs})
 find_package(ZLIB REQUIRED)
-target_link_libraries(onnf ${ZLIB_LIBRARIES})
+target_link_libraries(onnx-mlir ${ZLIB_LIBRARIES})
 
-target_include_directories(onnf PRIVATE ${CMAKE_SOURCE_DIR})
-target_include_directories(onnf PRIVATE ${CMAKE_BINARY_DIR})
+target_include_directories(onnx-mlir PRIVATE ${CMAKE_SOURCE_DIR})
+target_include_directories(onnx-mlir PRIVATE ${CMAKE_BINARY_DIR})
 
-install(TARGETS onnf DESTINATION bin)
+install(TARGETS onnx-mlir DESTINATION bin)
@@ -10,7 +10,7 @@
 
 #include "src/builder/frontend_dialect_helper.hpp"
 
-namespace onnf {
+namespace onnx_mlir {
 
 void replaceAll(std::string &str, const std::string &from,
 const std::string &to) {
@@ -35,23 +35,23 @@ std::string legalize_name(std::string name) {
 return name;
 }
 
-mlir::Value OnnxOnnfSymbolMapping::GetTensorByOnnxName(
+mlir::Value OnnxMlirSymbolMapping::GetTensorByOnnxName(
 const std::string &name) {
-assert(onnx_name2onnf_tensor.find(legalize_name(name)) !=
-onnx_name2onnf_tensor.end() &&
+assert(onnx_name2onnx_mlir_tensor.find(legalize_name(name)) !=
+onnx_name2onnx_mlir_tensor.end() &&
 "Tensor not found");
-return onnx_name2onnf_tensor.at(legalize_name(name));
+return onnx_name2onnx_mlir_tensor.at(legalize_name(name));
 }
 
-void OnnxOnnfSymbolMapping::AddMapping(
+void OnnxMlirSymbolMapping::AddMapping(
 const std::string &name, mlir::Value tensor) {
-assert(onnx_name2onnf_tensor.count(legalize_name(name)) == 0 &&
+assert(onnx_name2onnx_mlir_tensor.count(legalize_name(name)) == 0 &&
 "Tensor already exists.");
-onnx_name2onnf_tensor.emplace(legalize_name(name), tensor);
+onnx_name2onnx_mlir_tensor.emplace(legalize_name(name), tensor);
 }
 
-bool OnnxOnnfSymbolMapping::ContainKey(std::string name) {
-return onnx_name2onnf_tensor.count(name) != 0;
+bool OnnxMlirSymbolMapping::ContainKey(std::string name) {
+return onnx_name2onnx_mlir_tensor.count(name) != 0;
 }
 
 template <typename T>
@@ -181,4 +181,4 @@ mlir::Value InitializedTensorMapping::EmitInitializerForInputTensor(
 loc, tensorType, nullptr, constantDenseAttribute);
 }
 
-} // namespace onnf
+} // namespace onnx_mlir
@@ -34,18 +34,18 @@
 #include "src/dialect/onnx/onnx_ops.hpp"
 #include "onnx/onnx_pb.h"
 
-namespace onnf {
+namespace onnx_mlir {
 
 void replaceAll(std::string &str, const std::string &from,
 const std::string &to);
 
 std::string legalize_name(std::string name);
 
-struct OnnxOnnfSymbolMapping {
+struct OnnxMlirSymbolMapping {
 /*!
 * Get MLIR tensor by onnx tensor name.
 * @param name onnx tensor name.
-* @return onnf tensor corresponding to `name`.
+* @return onnx mlir tensor corresponding to `name`.
 */
 mlir::Value GetTensorByOnnxName(const std::string &name);
 
@@ -62,7 +62,7 @@ private:
 /*!
 * mapping from onnx tensor names to MLIR tensor.
 */
-std::map<std::string, mlir::Value> onnx_name2onnf_tensor;
+std::map<std::string, mlir::Value> onnx_name2onnx_mlir_tensor;
 };
 
 struct InitializedTensorMapping {
@@ -98,4 +98,4 @@ private:
 std::map<std::string, onnx::TensorProto> nameToInitializedTensor;
 };
 
-} // namespace onnf
+} // namespace onnx_mlir
@@ -21,7 +21,7 @@ namespace bstd = mpark;
 
 #include "frontend_dialect_transformer.hpp"
 
-namespace onnf {
+namespace onnx_mlir {
 namespace {
 
 /*!
@@ -47,13 +47,13 @@ private:
 mlir::OpBuilder builder_;
 mlir::Value none_;
 // mapping between string name and symbol
-OnnxOnnfSymbolMapping frontend_symbols_;
+OnnxMlirSymbolMapping frontend_symbols_;
 
 mlir::Location UnknownLoc() { return mlir::UnknownLoc::get(&context_); }
 
 // Convert type to MLIR type.
 // A complete list of types can be found in:
-// <onnf-build-folder>/third_party/onnx/onnx/onnx.pb.h
+// <onnx-mlir-build-folder>/third_party/onnx/onnx/onnx.pb.h
 mlir::Type convertONNXTypeToMLIRType(onnx::TensorProto_DataType onnxType) {
 switch (onnxType) {
 case onnx::TensorProto_DataType::TensorProto_DataType_FLOAT16:
@@ -473,9 +473,9 @@ private:
 }
 }; // FrontendGenImpl class
 } // namespace
-} // namespace onnf
+} // namespace onnx_mlir
 
-namespace onnf {
+namespace onnx_mlir {
 
 void ImportFrontendModelFile(std::string model_fname,
 mlir::MLIRContext &context,
@@ -489,4 +489,4 @@ void ImportFrontendModelFile(std::string model_fname,
 FrontendGenImpl myONNXGen(context);
 module = myONNXGen.ImportONNXModel(model);
 }
-} // namespace onnf
+} // namespace onnx_mlir
@@ -26,12 +26,12 @@ class OwningModuleRef;
 } // namespace mlir
 
 //===----------------------------------------------------------------------===//
-// Import a model into one of ONNF's frontend models.
+// Import a model into the ONNX MLIR dialect.
 //===----------------------------------------------------------------------===//
 
-namespace onnf {
+namespace onnx_mlir {
 /*!
-* Import an ONNX model file into ONNF's ONNX Dialect.
+* Import an ONNX model file into the ONNX Dialect.
 * @param model_fname file name pointing to the onnx model protobuf.
 * @return MLIR::module generated for the ONNX model.
 */
@@ -43,4 +43,4 @@ void ImportFrontendModelFile(std::string model_fname,
 * TODO: Import models into other extension dialects that cover the
 * operations specific to other frameworks such as Tensorflow or Pytorch.
 */
-} // namespace onnf
+} // namespace onnx_mlir
@@ -6,7 +6,7 @@
 
 #include "krnl_helper.hpp"
 
-namespace onnf {
+namespace onnx_mlir {
 
 using namespace mlir;
 
@@ -120,7 +120,7 @@ void printBound(AffineMapAttr boundMap,
 printDimAndSymbolList(
 boundOperandsBeg, map.getNumDims(), map.getNumSymbols(), p);
 }
-} // namespace onnf
+} // namespace onnx_mlir
 
 namespace mlir {
 
@@ -10,7 +10,7 @@
 #include "mlir/IR/StandardTypes.h"
 #include "mlir/Transforms/DialectConversion.h"
 
-namespace onnf {
+namespace onnx_mlir {
 
 class KrnlDialectOperandParser {
 public:
@@ -59,7 +59,7 @@ void printDimAndSymbolList(mlir::Operation::operand_iterator &begin,
 void printBound(mlir::AffineMapAttr boundMap,
 mlir::Operation::operand_iterator &boundOperandsBeg, const char *prefix,
 mlir::OpAsmPrinter &p);
-} // namespace onnf
+} // namespace onnx_mlir
 
 namespace mlir {
 
@@ -188,9 +188,9 @@ void print(OpAsmPrinter &p, KrnlIterateOp &op) {
 p << " -> ";
 p.printOperand(var);
 p << " = ";
-onnf::printBound((*boundItr++).cast<AffineMapAttr>(), operandItr, "max", p);
+onnx_mlir::printBound((*boundItr++).cast<AffineMapAttr>(), operandItr, "max", p);
 p << " to ";
-onnf::printBound((*boundItr++).cast<AffineMapAttr>(), operandItr, "min", p);
+onnx_mlir::printBound((*boundItr++).cast<AffineMapAttr>(), operandItr, "min", p);
 delimiter = ", ";
 }
 
@@ -202,7 +202,7 @@ void print(OpAsmPrinter &p, KrnlIterateOp &op) {
 ParseResult parseKrnlIterateOp(OpAsmParser &parser, OperationState &result) {
 auto builder = parser.getBuilder();
 auto context = builder.getContext();
-onnf::KrnlDialectOperandParser operandParser(parser);
+onnx_mlir::KrnlDialectOperandParser operandParser(parser);
 
 // Parse optimized loops:
 SmallVector<OpAsmParser::OperandType, 4> optimizedLoopRefs;
@@ -42,7 +42,7 @@ class ONNX_Op<string mnemonic, list<OpTrait> traits = []> :
 //the tablegen code onnxop.in is generated with gen_doc.py
 //clone and install onnx
 // git clone --recursive https://github.com/onnx/onnx.git
-// set up env for anaconda3 and for ONNF (BOOSTROOT, cmake, gcc ...)
+// set up env for anaconda3 and for ONNX MLIR (BOOSTROOT, cmake, gcc ...)
 // cd onnx
 //install onnx
 // CC=gcc CXX=g++ pip install -e .
@@ -36,4 +36,4 @@ class ONNXOpsDialect : public Dialect {
 
 } // end namespace mlir
 
-namespace onnf {}
+namespace onnx_mlir {}

src/main.cpp (14 lines changed)
@@ -36,11 +36,11 @@
 void EmitLLVMBitCode(const mlir::OwningModuleRef &module);
 
 using namespace std;
-using namespace onnf;
+using namespace onnx_mlir;
 
 void LoadMLIR(string inputFilename, mlir::MLIRContext &context,
 mlir::OwningModuleRef &module) {
-// Handle '.mlir' input to the ONNF frontend.
+// Handle '.mlir' input to the ONNX MLIR frontend.
 // The mlir format indicates that one or more of the supported
 // representations are used in the file.
 llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
@@ -77,11 +77,11 @@ int main(int argc, char *argv[]) {
 mlir::registerDialect<mlir::ONNXOpsDialect>();
 mlir::registerDialect<mlir::KrnlOpsDialect>();
 
-llvm::cl::OptionCategory OnnfOptions("ONNF Options",
+llvm::cl::OptionCategory OnnxMlirOptions("ONNX MLIR Options",
 "These are frontend options.");
 llvm::cl::opt<string> inputFilename(
 llvm::cl::Positional, llvm::cl::desc("<input file>"), llvm::cl::init("-"),
-llvm::cl::cat(OnnfOptions));
+llvm::cl::cat(OnnxMlirOptions));
 
 enum EmissionTargetType {
 EmitONNXIR,
@@ -99,11 +99,11 @@ int main(int argc, char *argv[]) {
 clEnumVal(EmitLLVMIR, "Lower model to LLVM IR (LLVM dialect)."),
 clEnumVal(EmitLLVMBC, "Lower model to LLVM IR and emit (to file) "
 "LLVM bitcode for model.")),
-llvm::cl::init(EmitLLVMBC), llvm::cl::cat(OnnfOptions));
+llvm::cl::init(EmitLLVMBC), llvm::cl::cat(OnnxMlirOptions));
 
-llvm::cl::HideUnrelatedOptions(OnnfOptions);
+llvm::cl::HideUnrelatedOptions(OnnxMlirOptions);
 llvm::cl::ParseCommandLineOptions(argc, argv,
-"ONNF MLIR modular optimizer driver\n");
+"ONNX MLIR modular optimizer driver\n");
 
 // Decide if the input file is an ONNX model or a model specified
 // in MLIR. The extension of the file is the decider.
@@ -1,10 +1,10 @@
-//===- passes.hpp - ONNF Passes Definition --------------------------------===//
+//===- passes.hpp - ONNX MLIR Passes Definition ---------------------------===//
 //
 // Copyright 2019 The IBM Research Authors.
 //
 // =============================================================================
 //
-// This file exposes the entry points to create compiler passes for ONNF.
+// This file exposes the entry points to create compiler passes for ONNX MLIR.
 //
 //===----------------------------------------------------------------------===//
 
@@ -3,8 +3,8 @@ add_library(cruntime
 dyn_memref.h
 data_type.h)
 target_include_directories(cruntime
-PRIVATE ${ONNF_SRC_ROOT} ${ONNF_BIN_ROOT}
-${ONNF_SRC_ROOT})
+PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT}
+${ONNX_MLIR_SRC_ROOT})
 
 pybind11_add_module(pyruntime
 dyn_memref.cpp
@@ -13,6 +13,6 @@ pybind11_add_module(pyruntime
 runtime.hpp)
 target_link_libraries(pyruntime PRIVATE ${CMAKE_DL_LIBS})
 target_include_directories(pyruntime
-PRIVATE ${ONNF_SRC_ROOT} ${ONNF_BIN_ROOT}
-${ONNF_SRC_ROOT})
+PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT}
+${ONNX_MLIR_SRC_ROOT})
 add_dependencies(pyruntime cruntime)
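The pyruntime module built here is what the backend tests import (see test.py further down, which adds the build lib directory to sys.path before importing ExecutionSession). A quick smoke test of the renamed build output, sketched under the assumption that you configured in onnx-mlir/build:

```bash
# Sketch: check that the freshly built pyruntime module is importable.
cd onnx-mlir/build
PYTHONPATH=lib python -c "from pyruntime import ExecutionSession; print('pyruntime OK')"
```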
@@ -1 +1 @@
-add_subdirectory(onnf_opt)
+add_subdirectory(onnx_mlir_opt)
@@ -1,9 +0,0 @@
-add_executable(onnf-opt onnf_opt.cpp)
-add_dependencies(onnf-opt gen_krnl_ops)
-
-target_include_directories(onnf-opt PRIVATE ${ONNF_SRC_ROOT})
-target_include_directories(onnf-opt PRIVATE ${ONNF_BIN_ROOT})
-
-target_link_libraries(onnf-opt builder ${MLIRLibs} onnf_transform onnf_shape_inference onnf_lower_frontend curses)
-whole_archive_link_mlir(onnf-opt ${MLIRWholeArchiveLibs})
-whole_archive_link_onnf(onnf-opt compiler onnf_transform onnf_lower_frontend onnf_shape_inference)
@@ -0,0 +1,9 @@
+add_executable(onnx-mlir-opt onnx_mlir_opt.cpp)
+add_dependencies(onnx-mlir-opt gen_krnl_ops)
+
+target_include_directories(onnx-mlir-opt PRIVATE ${ONNX_MLIR_SRC_ROOT})
+target_include_directories(onnx-mlir-opt PRIVATE ${ONNX_MLIR_BIN_ROOT})
+
+target_link_libraries(onnx-mlir-opt builder ${MLIRLibs} onnx_mlir_transform onnx_mlir_shape_inference onnx_mlir_lower_frontend curses)
+whole_archive_link_mlir(onnx-mlir-opt ${MLIRWholeArchiveLibs})
+whole_archive_link_onnx_mlir(onnx-mlir-opt compiler onnx_mlir_transform onnx_mlir_lower_frontend onnx_mlir_shape_inference)
@@ -1,4 +1,4 @@
-//===--------------------- onnf_opt.cpp - MLIR Operations -----------------===//
+//===---------------- onnx_mlir_opt.cpp - MLIR Operations -----------------===//
 //
 // Copyright 2019 The IBM Research Authors.
 //
@@ -19,7 +19,7 @@
 #include "src/dialect/onnx/onnx_ops.hpp"
 #include "src/pass/passes.hpp"
 
-using namespace onnf;
+using namespace onnx_mlir;
 
 static llvm::cl::opt<std::string> input_filename(llvm::cl::Positional,
 llvm::cl::desc("<input file>"),
@@ -60,7 +60,7 @@ int main(int argc, char **argv) {
 mlir::registerPassManagerCLOptions();
 mlir::PassPipelineCLParser passPipeline("", "Compiler passes to run");
 llvm::cl::ParseCommandLineOptions(argc, argv,
-"ONNF MLIR modular optimizer driver\n");
+"ONNX MLIR modular optimizer driver\n");
 
 // Set up the input file.
 std::string error_message;
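The renamed onnx-mlir-opt driver keeps the familiar mlir-opt interface, so the lit RUN lines later in this diff double as usage documentation. Invoking it by hand might look like the following (the .mlir file name is a placeholder, not a path this commit introduces):

```bash
# Sketch: run the renamed optimizer driver over a single test file,
# mirroring the RUN lines in the lit tests below.
cd onnx-mlir/build
bin/onnx-mlir-opt --shape-inference --lower-frontend some_test.mlir
```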
@@ -1,9 +1,9 @@
-add_library(onnf_transform
+add_library(onnx_mlir_transform
 lower_krnl.cpp
 lower_to_llvm.cpp)
 
-target_include_directories(onnf_transform
-PRIVATE ${ONNF_SRC_ROOT} ${ONNF_BIN_ROOT}
-${ONNF_SRC_ROOT})
-target_link_libraries(onnf_transform ${MLIRLibs})
-add_dependencies(onnf_transform gen_krnl_ops)
+target_include_directories(onnx_mlir_transform
+PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT}
+${ONNX_MLIR_SRC_ROOT})
+target_link_libraries(onnx_mlir_transform ${MLIRLibs})
+add_dependencies(onnx_mlir_transform gen_krnl_ops)
@@ -6,5 +6,5 @@ add_custom_target(run-onnx-backend-test
 COMMAND ${PYTHON_EXECUTABLE}
 ${CMAKE_CURRENT_BINARY_DIR}/test.py)
 
-add_dependencies(run-onnx-backend-test onnf)
+add_dependencies(run-onnx-backend-test onnx-mlir)
 add_dependencies(run-onnx-backend-test pyruntime)
@@ -16,12 +16,12 @@ import test_config
 VERBOSE = bool(os.environ.get("VERBOSE"))
 
 CXX = test_config.CXX_PATH
-ONNF = os.path.join(test_config.ONNF_BUILD_PATH, "bin/onnf")
+ONNX_MLIR = os.path.join(test_config.ONNX_MLIR_BUILD_PATH, "bin/onnx-mlir")
 LLC = os.path.join(test_config.LLVM_PROJ_BUILD_PATH, "bin/llc")
 
 # Make lib folder under build directory visible in PYTHONPATH
 doc_check_base_dir = os.path.dirname(os.path.realpath(__file__))
-RUNTIME_DIR = os.path.join(test_config.ONNF_BUILD_PATH, "lib")
+RUNTIME_DIR = os.path.join(test_config.ONNX_MLIR_BUILD_PATH, "lib")
 sys.path.append(RUNTIME_DIR)
 from pyruntime import ExecutionSession
 
@@ -39,7 +39,7 @@ class DummyBackend(onnx.backend.base.Backend):
 # Save model to disk as temp_model.onnx.
 onnx.save(model, "temp_model.onnx")
 # Call frontend to process temp_model.onnx, bit code will be generated.
-execute_commands([ONNF, "temp_model.onnx"])
+execute_commands([ONNX_MLIR, "temp_model.onnx"])
 # Call llc to generate object file from bitcode.
 execute_commands(
 [LLC, "-filetype=obj", "-relocation-model=pic", "model.bc"])
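Outside the harness, the pipeline that test.py drives can be reproduced by hand: compile the model to LLVM bitcode with the renamed driver, then turn the bitcode into an object file with llc. A sketch using the file names from the test above (run from the build directory; add.onnx stands in for any model, and LLVM_PROJ_BUILD is the variable the README exports):

```bash
# Sketch of the flow test.py exercises: ONNX model -> LLVM bitcode -> object file.
cd onnx-mlir/build
bin/onnx-mlir add.onnx   # EmitLLVMBC is the default target, producing model.bc
"$LLVM_PROJ_BUILD"/bin/llc -filetype=obj -relocation-model=pic model.bc
```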
@@ -1,3 +1,3 @@
-ONNF_BUILD_PATH = "@CMAKE_BINARY_DIR@"
+ONNX_MLIR_BUILD_PATH = "@CMAKE_BINARY_DIR@"
 LLVM_PROJ_BUILD_PATH = "@LLVM_PROJ_BUILD@"
 CXX_PATH = "@CMAKE_CXX_COMPILER@"
@@ -6,16 +6,16 @@ configure_lit_site_cfg(${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.py.in
 MAIN_CONFIG
 ${CMAKE_CURRENT_SOURCE_DIR}/lit.cfg.py)
 
-set(ONNF_MLIR_TEST_DEPENDS onnf-opt)
+set(ONNX_MLIR_TEST_DEPENDS onnx-mlir-opt)
 
 add_lit_testsuite(check-mlir-lit
-"Running the ONNF MLIR regression tests"
+"Running the ONNX MLIR regression tests"
 ${CMAKE_CURRENT_BINARY_DIR}
 DEPENDS
-${ONNF_MLIR_TEST_DEPENDS})
+${ONNX_MLIR_TEST_DEPENDS})
 set_target_properties(check-mlir-lit PROPERTIES FOLDER "Tests")
 
-add_lit_testsuites(ONNF_MLIR
+add_lit_testsuites(ONNX_MLIR
 ${CMAKE_CURRENT_SOURCE_DIR}
 DEPENDS
-${ONNF_MLIR_TEST_DEPS})
+${ONNX_MLIR_TEST_DEPS})
@@ -1,5 +1,5 @@
-// RUN: onnf-opt %s -mlir-print-op-generic | FileCheck -check-prefix=GENERIC %s
-// RUN: onnf-opt %s | FileCheck %s
+// RUN: onnx-mlir-opt %s -mlir-print-op-generic | FileCheck -check-prefix=GENERIC %s
+// RUN: onnx-mlir-opt %s | FileCheck %s
 
 // GENERIC-DAG: #{{.*}} = affine_map<() -> (0)>
 // GENERIC-DAG: #{{.*}} = affine_map<() -> (10)>
@@ -1,4 +1,4 @@
-// RUN: onnf-opt --shape-inference --lower-frontend --lower-krnl --lower-all-llvm %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --shape-inference --lower-frontend --lower-krnl --lower-all-llvm %s -split-input-file | FileCheck %s
 
 func @test_reshape(%arg0 : tensor<?x10xf32>, %arg1 : tensor<4xi32>) -> tensor<*xf32> {
 %0 = "onnx.Reshape"(%arg0, %arg1) : (tensor<?x10xf32>, tensor<4xi32>) -> tensor<*xf32>
@@ -17,10 +17,10 @@ config.name = 'Open Neural Network Frontend'
 config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
 
 # test_source_root: The root path where tests are located.
-config.test_source_root = config.onnf_mlir_test_src_dir
+config.test_source_root = config.onnx_mlir_test_src_dir
 
 # test_exec_root: The root path where tests should be run.
-config.test_exec_root = config.onnf_mlir_test_build_dir
+config.test_exec_root = config.onnx_mlir_test_build_dir
 
 llvm_config.use_default_substitutions()
 
@@ -28,10 +28,10 @@ llvm_config.use_default_substitutions()
 llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
 
 tool_dirs = [
-config.onnf_mlir_tools_dir, config.mlir_tools_dir, config.llvm_tools_dir
+config.onnx_mlir_tools_dir, config.mlir_tools_dir, config.llvm_tools_dir
 ]
 tool_names = [
-'onnf-opt', 'mlir-opt', 'mlir-translate'
+'onnx-mlir-opt', 'mlir-opt', 'mlir-translate'
 ]
 tools = [ToolSubst(s, unresolved='ignore') for s in tool_names]
 llvm_config.add_tool_substitutions(tools, tool_dirs)
@@ -6,11 +6,11 @@ config.mlir_obj_root = "@LLVM_PROJ_BUILD@"
 config.mlir_tools_dir = "@MLIR_TOOLS_DIR@"
 config.suffixes = ['.mlir']
 
-config.onnf_mlir_tools_dir = "@ONNF_TOOLS_DIR@"
-config.onnf_mlir_test_src_dir = "@ONNF_LIT_TEST_SRC_DIR@"
-config.onnf_mlir_test_build_dir = "@ONNF_LIT_TEST_BUILD_DIR@"
+config.onnx_mlir_tools_dir = "@ONNX_MLIR_TOOLS_DIR@"
+config.onnx_mlir_test_src_dir = "@ONNX_MLIR_LIT_TEST_SRC_DIR@"
+config.onnx_mlir_test_build_dir = "@ONNX_MLIR_LIT_TEST_BUILD_DIR@"
 
 lit.llvm.initialize(lit_config, config)
 
 # Let the main config do the real work.
-lit_config.load_config(config, "@ONNF_LIT_TEST_SRC_DIR@/lit.cfg.py")
+lit_config.load_config(config, "@ONNX_MLIR_LIT_TEST_SRC_DIR@/lit.cfg.py")
@@ -1,4 +1,4 @@
-// RUN: onnf-opt --canonicalize %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --canonicalize %s -split-input-file | FileCheck %s
 
 // CHECK-LABEL: func @test_matmul_add_fused(%{{.*}}: tensor<10x10xf32>, %{{.*}}: tensor<10x10xf32>, %{{.*}}: tensor<10x10xf32>) -> tensor<10x10xf32> {
 func @test_matmul_add_fused(%a0: tensor<10x10xf32>, %a1: tensor<10x10xf32>, %a2: tensor<10x10xf32>) -> tensor<10x10xf32> {
@@ -1,4 +1,4 @@
-// RUN: onnf-opt --decompose-onnx %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --decompose-onnx %s -split-input-file | FileCheck %s
 
 // CHECK-LABEL: @test_reducel1(%{{.*}}: tensor<?x?x?xf32>) -> tensor<*xf32>
 func @test_reducel1(%arg0 : tensor<?x?x?xf32>) -> tensor<*xf32> {
@@ -1,4 +1,4 @@
-// RUN: onnf-opt --shape-inference --lower-frontend %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --shape-inference --lower-frontend %s -split-input-file | FileCheck %s
 
 func @test_add(%arg0 : tensor<10x10xf32>, %arg1 : tensor<10x10xf32>) -> tensor<*xf32> {
 %0 = "onnx.Add"(%arg0, %arg1) : (tensor<10x10xf32>, tensor<10x10xf32>) -> tensor<*xf32>
@@ -1,4 +1,4 @@
-// RUN: onnf-opt --shape-inference --lower-frontend %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --shape-inference --lower-frontend %s -split-input-file | FileCheck %s
 
 func @test_add_add(%arg0 : tensor<10x10xf32>, %arg1 : tensor<10x10xf32>) -> tensor<*xf32> {
 %0 = "onnx.Add"(%arg0, %arg1) : (tensor<10x10xf32>, tensor<10x10xf32>) -> tensor<*xf32>
@@ -1,4 +1,4 @@
-// RUN: onnf-opt --shape-inference %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --shape-inference %s -split-input-file | FileCheck %s
 
 //===----------------------------------------------------------------------===//
 /// Test the default behavior of transpose when no information for the
@@ -1,4 +1,4 @@
-// RUN: onnf-opt --shape-inference %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --shape-inference %s -split-input-file | FileCheck %s
 
 /// Test the default behavior of Max Pool with no padding (pad are set but shoudl be ignored)
 func @test_default_maxpoolsingleout(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
@@ -1,5 +1,5 @@
 git clone https://github.com/llvm/llvm-project.git
-# Check out a specific branch that is known to work with ONNF.
+# Check out a specific branch that is known to work with ONNX MLIR.
 cd llvm-project && git checkout 076475713c236081a3247a53e9dbab9043c3eac2 && cd ..
 mkdir llvm-project/build
 cd llvm-project/build
|
@ -2,9 +2,9 @@
|
||||||
export LLVM_PROJ_SRC=$(pwd)/llvm-project/
|
export LLVM_PROJ_SRC=$(pwd)/llvm-project/
|
||||||
export LLVM_PROJ_BUILD=$(pwd)/llvm-project/build
|
export LLVM_PROJ_BUILD=$(pwd)/llvm-project/build
|
||||||
|
|
||||||
mkdir ONNF/build && cd ONNF/build
|
mkdir onnx-mlir/build && cd onnx-mlir/build
|
||||||
cmake ..
|
cmake ..
|
||||||
cmake --build . --target onnf
|
cmake --build . --target onnx-mlir
|
||||||
|
|
||||||
# Run FileCheck tests:
|
# Run FileCheck tests:
|
||||||
export LIT_OPTS=-v
|
export LIT_OPTS=-v
|
Loading…
Reference in New Issue