diff --git a/.buildbot/p9.sh b/.buildbot/p9.sh
index a014d3c..99c2938 100644
--- a/.buildbot/p9.sh
+++ b/.buildbot/p9.sh
@@ -3,9 +3,9 @@
 # Exit on error:
 set -e
 
-# Check for required env variables ONNF_DEP_DIR, LLVM_PROJECT_ROOT
-if [[ -z "${ONNF_DEP_DIR}" ]]; then
-  echo "ONNF_DEP_DIR env var is missing."
+# Check for required env variables ONNX_MLIR_DEP_DIR, LLVM_PROJECT_ROOT
+if [[ -z "${ONNX_MLIR_DEP_DIR}" ]]; then
+  echo "ONNX_MLIR_DEP_DIR env var is missing."
   exit 1
 fi
 
@@ -14,11 +14,11 @@ if [[ -z "${LLVM_PROJECT_ROOT}" ]]; then
   exit 1
 fi
 
-# Set up env variables to expose onnf dependencies:
-export PATH=$ONNF_DEP_DIR/bin:$PATH
-export LD_LIBRARY_PATH=$ONNF_DEP_DIR/lib:$ONNF_DEP_DIR/lib64:
-export CPATH=$ONNF_DEP_DIR/include:$CPATH
-export PATH=$ONNF_DEP_DIR/bin:$PATH
+# Set up env variables to expose onnx-mlir dependencies:
+export PATH=$ONNX_MLIR_DEP_DIR/bin:$PATH
+export LD_LIBRARY_PATH=$ONNX_MLIR_DEP_DIR/lib:$ONNX_MLIR_DEP_DIR/lib64:
+export CPATH=$ONNX_MLIR_DEP_DIR/include:$CPATH
+export PATH=$ONNX_MLIR_DEP_DIR/bin:$PATH
 
 # Set up mock installation path within current workspace:
 export INSTALL_PATH=$WORKSPACE/INSTALL_PATH
@@ -28,19 +28,19 @@ export LD_LIBRARY_PATH=$INSTALL_PATH/lib:$INSTALL_PATH/lib64:$LD_LIBRARY_PATH
 export CPATH=$INSTALL_PATH/include:$CPATH
 
 # Create virtual environment specific to the current build instance:
-conda create -n onnf_conda_workspace_"${BUILD_NUMBER}" python=3.7 numpy
-source activate onnf_conda_workspace_"${BUILD_NUMBER}"
+conda create -n onnx_mlir_conda_workspace_"${BUILD_NUMBER}" python=3.7 numpy
+source activate onnx_mlir_conda_workspace_"${BUILD_NUMBER}"
 
 # Create build directory and generate make files:
 mkdir build && cd build
-CC=$ONNF_DEP_DIR/bin/gcc \
-CXX=$ONNF_DEP_DIR/bin/g++ \
-BOOST_ROOT=$ONNF_DEP_DIR \
-LLVM_SRC=$LLVM_PROJECT_ROOT/llvm \
-LLVM_BUILD=$LLVM_PROJECT_ROOT/build \
-cmake3 -DONNF_ENABLE_MODEL_TEST_CPP=ON \
-       -DONNF_ENABLE_BENCHMARK=ON \
-       -DCMAKE_INSTALL_PREFIX="$INSTALL_PATH" \
+CC=$ONNX_MLIR_DEP_DIR/bin/gcc \
+CXX=$ONNX_MLIR_DEP_DIR/bin/g++ \
+BOOST_ROOT=$ONNX_MLIR_DEP_DIR \
+LLVM_SRC=$LLVM_PROJECT_ROOT/llvm \
+LLVM_BUILD=$LLVM_PROJECT_ROOT/build \
+cmake3 -DONNX_MLIR_ENABLE_MODEL_TEST_CPP=ON \
+       -DONNX_MLIR_ENABLE_BENCHMARK=ON \
+       -DCMAKE_INSTALL_PREFIX="$INSTALL_PATH" \
       ..
 
 # Build and test:
diff --git a/.buildbot/z13.sh b/.buildbot/z13.sh
index 412224a..5782f24 100644
--- a/.buildbot/z13.sh
+++ b/.buildbot/z13.sh
@@ -34,10 +34,10 @@ cmake -DCMAKE_C_COMPILER=$CC \
       -DCMAKE_CXX_COMPILER=$CXX \
       -DCMAKE_VERBOSE_MAKEFILE=ON \
       -DCMAKE_INSTALL_PREFIX=$INSTALL_PATH \
-      -DONNF_ENABLE_NODE_TEST_JAVA=ON \
-      -DONNF_ENABLE_NODE_TEST_JNI=ON \
-      -DONNF_ENABLE_NODE_TEST_CPP=OFF \
-      -DONNF_TARGET_ARCH=z13 ..
+      -DONNX_MLIR_ENABLE_NODE_TEST_JAVA=ON \
+      -DONNX_MLIR_ENABLE_NODE_TEST_JNI=ON \
+      -DONNX_MLIR_ENABLE_NODE_TEST_CPP=OFF \
+      -DONNX_MLIR_TARGET_ARCH=z13 ..
 
 make -j "$(nproc)" install
 ctest -j "$(nproc)"
diff --git a/.circleci/config.yml b/.circleci/config.yml
index e459df1..27651be 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -10,11 +10,11 @@ jobs:
           name: Installing GCC, CMake, Ninja, Protobuf
           command: sudo apt-get update && sudo apt-get install -y gcc g++ cmake ninja-build protobuf-compiler
       - checkout:
-          path: ONNF
+          path: onnx-mlir
       - run:
           name: Pull Submodules
           command: |
-            cd ONNF
+            cd onnx-mlir
             git submodule update --init --recursive
       # Use cached mlir installation if possible.
       - restore_cache:
          keys:
@@ -26,29 +26,29 @@ jobs:
             # mlir-opt executable exists.
            if [ ! -f llvm-project/build/bin/mlir-opt ]; then
              export MAKEFLAGS=-j4
-             source ONNF/utils/install-mlir.sh
+             source onnx-mlir/utils/install-mlir.sh
            fi
       - save_cache:
           key: V9-LLVM-PROJECT-{{ arch }}
           paths:
             - llvm-project
       - run:
-          name: Install ONNF
-          command: source ONNF/utils/install-onnf.sh
+          name: Install ONNX MLIR
+          command: source onnx-mlir/utils/install-onnx-mlir.sh
       - run:
           name: Run End-To-End Tests
           command: |
-            sudo pip install -q -e ./ONNF/third_party/onnx
-            cd ONNF/build
+            sudo pip install -q -e ./onnx-mlir/third_party/onnx
+            cd onnx-mlir/build
             cmake --build . --target run-onnx-backend-test
       - run:
           name: Run DocCheck
-          command: cd ONNF/build && cmake --build . --target check-doc
+          command: cd onnx-mlir/build && cmake --build . --target check-doc
       - run:
           name: Ensure tablegen documentation is up-to-date
           command: |
-            cd ONNF/build
-            cmake --build . --target onnf-doc
+            cd onnx-mlir/build
+            cmake --build . --target onnx-mlir-doc
             # Check whether dialect documentation is up-to-date.
             diff doc/Dialects ../doc/Dialects
       - run:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7ec7054..9610482 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -6,13 +6,13 @@ endif()
 # Require 3.3 and set policy CMP0057 for IN_LIST operator support
 cmake_minimum_required(VERSION 3.3)
 cmake_policy(SET CMP0057 NEW)
-project(onnf)
+project(onnx-mlir)
 
 set(CMAKE_CXX_FLAGS_DEBUG "-g")
 set(CMAKE_CXX_FLAGS_RELEASE "-O2 -DNDEBUG")
 
-set(ONNF_SRC_ROOT "${CMAKE_CURRENT_SOURCE_DIR}")
-set(ONNF_BIN_ROOT "${CMAKE_CURRENT_BINARY_DIR}")
+set(ONNX_MLIR_SRC_ROOT "${CMAKE_CURRENT_SOURCE_DIR}")
+set(ONNX_MLIR_BIN_ROOT "${CMAKE_CURRENT_BINARY_DIR}")
 
 set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
 set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
diff --git a/MLIR.cmake b/MLIR.cmake
index e330316..ee46e82 100644
--- a/MLIR.cmake
+++ b/MLIR.cmake
@@ -34,9 +34,9 @@ set(MLIR_SRC_INCLUDE_PATH ${LLVM_PROJ_SRC}/mlir/include)
 set(MLIR_BIN_INCLUDE_PATH ${LLVM_PROJ_BUILD}/tools/mlir/include)
 set(MLIR_TOOLS_DIR ${LLVM_PROJ_BUILD}/bin)
 
-set(ONNF_TOOLS_DIR ${ONNF_BIN_ROOT}/bin)
-set(ONNF_LIT_TEST_SRC_DIR ${CMAKE_SOURCE_DIR}/test/mlir)
-set(ONNF_LIT_TEST_BUILD_DIR ${CMAKE_BINARY_DIR}/test/mlir)
+set(ONNX_MLIR_TOOLS_DIR ${ONNX_MLIR_BIN_ROOT}/bin)
+set(ONNX_MLIR_LIT_TEST_SRC_DIR ${CMAKE_SOURCE_DIR}/test/mlir)
+set(ONNX_MLIR_LIT_TEST_BUILD_DIR ${CMAKE_BINARY_DIR}/test/mlir)
 
 set(
   MLIR_INCLUDE_PATHS
@@ -182,12 +182,12 @@ function(whole_archive_link_mlir target)
   whole_archive_link(${target} ${LLVM_PROJ_BUILD}/lib ${ARGN})
 endfunction(whole_archive_link_mlir)
 
-function(whole_archive_link_onnf target)
+function(whole_archive_link_onnx_mlir target)
   foreach(lib_target ${ARGN})
     add_dependencies(${target} ${lib_target})
   endforeach(lib_target)
   whole_archive_link(${target} ${CMAKE_BINARY_DIR}/lib ${ARGN})
-endfunction(whole_archive_link_onnf)
+endfunction(whole_archive_link_onnx_mlir)
 
 set(LLVM_CMAKE_DIR
     "${LLVM_PROJ_BUILD}/lib/cmake/llvm"
@@ -196,7 +196,7 @@ list(APPEND CMAKE_MODULE_PATH "${LLVM_CMAKE_DIR}")
 include(AddLLVM)
 include(TableGen)
 
-function(onnf_tablegen ofn)
+function(onnx_mlir_tablegen ofn)
   tablegen(MLIR
            ${ARGV}
            "-I${MLIR_SRC_INCLUDE_PATH}"
@@ -214,14 +214,14 @@ set_property(TARGET mlir-tblgen
              PROPERTY IMPORTED_LOCATION ${LLVM_PROJ_BUILD}/bin/mlir-tblgen)
 set(MLIR_TABLEGEN_EXE mlir-tblgen)
 
-# Add a dialect used by ONNF and copy the generated operation
+# Add a dialect used by ONNX MLIR and copy the generated operation
 # documentation to the desired places.
 # c.f. https://github.com/llvm/llvm-project/blob/e298e216501abf38b44e690d2b28fc788ffc96cf/mlir/CMakeLists.txt#L11
-function(add_onnf_dialect_doc dialect dialect_tablegen_file)
+function(add_onnx_mlir_dialect_doc dialect dialect_tablegen_file)
   # Generate Dialect Documentation
   set(LLVM_TARGET_DEFINITIONS ${dialect_tablegen_file})
-  onnf_tablegen(${dialect}.md -gen-op-doc)
-  set(GEN_DOC_FILE ${ONNF_BIN_ROOT}/doc/Dialects/${dialect}.md)
+  onnx_mlir_tablegen(${dialect}.md -gen-op-doc)
+  set(GEN_DOC_FILE ${ONNX_MLIR_BIN_ROOT}/doc/Dialects/${dialect}.md)
   add_custom_command(
           OUTPUT ${GEN_DOC_FILE}
           COMMAND ${CMAKE_COMMAND} -E copy
@@ -229,7 +229,7 @@ function(add_onnf_dialect_doc dialect dialect_tablegen_file)
                   ${GEN_DOC_FILE}
           DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${dialect}.md)
   add_custom_target(${dialect}DocGen DEPENDS ${GEN_DOC_FILE})
-  add_dependencies(onnf-doc ${dialect}DocGen)
+  add_dependencies(onnx-mlir-doc ${dialect}DocGen)
 endfunction()
 
-add_custom_target(onnf-doc)
+add_custom_target(onnx-mlir-doc)
diff --git a/README.md b/README.md
index b1266b9..4e54960 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-# ONNF
-Open Neural Network Frontend : an ONNX frontend for MLIR.
+# ONNX MLIR
+The Open Neural Network Exchange implementation in MLIR.
 
 [![CircleCI](https://circleci.com/gh/onnx/onnx-mlir/tree/master.svg?style=svg)](https://circleci.com/gh/onnx/onnx-mlir/tree/master)
 
@@ -18,7 +18,7 @@ Firstly, install MLIR (as a part of LLVM-Project):
 [same-as-file]: <> (utils/install-mlir.sh)
 ``` bash
 git clone https://github.com/llvm/llvm-project.git
-# Check out a specific branch that is known to work with ONNF.
+# Check out a specific branch that is known to work with ONNX MLIR.
 cd llvm-project && git checkout 076475713c236081a3247a53e9dbab9043c3eac2 && cd ..
 mkdir llvm-project/build
 cd llvm-project/build
@@ -38,34 +38,34 @@ Two environment variables need to be set:
 - LLVM_PROJ_SRC should point to the llvm-project src directory (e.g., llvm-project/).
 - LLVM_PROJ_BUILD should point to the llvm-project build directory (e.g., llvm-project/build).
 
-To build ONNF, use the following command:
+To build ONNX MLIR, use the following command:
 
-[same-as-file]: <> ({"ref": "utils/install-onnf.sh", "skip-doc": 2})
+[same-as-file]: <> ({"ref": "utils/install-onnx-mlir.sh", "skip-doc": 2})
 ```
-git clone --recursive git@github.com:clang-ykt/ONNF.git
+git clone --recursive git@github.com:onnx/onnx-mlir.git
 
 # Export environment variables pointing to LLVM-Projects.
 export LLVM_PROJ_SRC=$(pwd)/llvm-project/
 export LLVM_PROJ_BUILD=$(pwd)/llvm-project/build
 
-mkdir ONNF/build && cd ONNF/build
+mkdir onnx-mlir/build && cd onnx-mlir/build
 cmake ..
-cmake --build . --target onnf
+cmake --build . --target onnx-mlir
 
 # Run FileCheck tests:
 export LIT_OPTS=-v
 cmake --build . --target check-mlir-lit
 ```
 
-After the above commands succeed, an `onnf` executable should appear in the `bin` directory.
+After the above commands succeed, an `onnx-mlir` executable should appear in the `bin` directory.
 
-## Using ONNF
+## Using ONNX MLIR
 
-The usage of `onnf` is as such:
+The usage of `onnx-mlir` is as follows:
 ```
-OVERVIEW: ONNF MLIR modular optimizer driver
+OVERVIEW: ONNX MLIR modular optimizer driver
 
-USAGE: onnf [options] <input file>
+USAGE: onnx-mlir [options] <input file>
 
 OPTIONS:
 
 Generic Options:
 
   --help      - Display available options (--help-hidden for more)
@@ -75,7 +75,7 @@ Generic Options:
   --help-list - Display list of available options (--help-list-hidden for more)
   --version   - Display the version of this program
 
-ONNF Options:
+ONNX MLIR Options:
 These are frontend options.
 
 Choose target to emit:
@@ -89,7 +89,7 @@ These are frontend options.
 For example, to lower an ONNX model (e.g., add.onnx) to ONNX dialect, use the following command:
 ```
-./onnf --EmitONNXIR add.onnx
+./onnx-mlir --EmitONNXIR add.onnx
 ```
 The output should look like:
 ```
diff --git a/doc/ImportONNXDefs.md b/doc/ImportONNXDefs.md
index 20b74f2..1413341 100644
--- a/doc/ImportONNXDefs.md
+++ b/doc/ImportONNXDefs.md
@@ -1,23 +1,23 @@
-# Import ONNX specifications into ONNF
+# Import ONNX specifications into ONNX MLIR
 The specifications of ONNX are defined under onnx/defs directory in ONNX projects.
 There is a python script onnx/defs/gen_doc.py that automatically generate documents about operations in ONNX (docs/Operations.md).
-ONNF modified this script to import ONNX specifications into ONNF. There are two files generated for ONNF with the modified gen_doc.py:
+ONNX MLIR modified this script to import ONNX specifications into ONNX MLIR. There are two files generated for ONNX MLIR with the modified gen_doc.py:
 1. src/dialect/onnx/onnxop.inc: Operation defintion for MLIR tablegen. Will be included in src/dialect/onnx/onnx.td
-2. src/builder/op_build_table.inc: c code for ONNF frontend to import operation nodes from ONNX model. Will be included in src/builder/frontend_dialect_transformer.cpp
+2. src/builder/op_build_table.inc: C code for the ONNX MLIR frontend to import operation nodes from an ONNX model. Will be included in src/builder/frontend_dialect_transformer.cpp
 
 ## How to use the script
-1. Get ONNX. You can use ONNF/third_party/onnx
-2. In your ONNX directory, copy the script docs/gen_doc.py in your ONNF to onnx/defs in ONNX
+1. Get ONNX. You can use onnx-mlir/third_party/onnx
+2. In your ONNX directory, copy the script docs/gen_doc.py in your ONNX MLIR to onnx/defs in ONNX
 3. Run the script: python onnx/defs/gen_doc.py
 4. Two files, onnxop.inc and op_buid_table.inc should be generated in current directory
-5. copy the two file into your ONNF: cp onnxop.inc your_ONNF/src/dialect/onnx/onnxop.inc; cp op_build_table.inc your_ONNF/src/builder
-6. go to your ONNF and build
+5. copy the two files into your ONNX MLIR: cp onnxop.inc your_onnx-mlir/src/dialect/onnx/onnxop.inc; cp op_build_table.inc your_onnx-mlir/src/builder
+6. go to your ONNX MLIR and build
 
 ## Consistency
 The Operators.md generated by gen_doc.py is copied into doc. Please refer to this specification, not the one in onnx github, to make sure operators are consistent in version with onnxop.inc.
 
 ## Customization
-In addition to following the ONNF specification, the modified gen_doc.py provides some mechanism for you to customize the output.
+In addition to following the ONNX specification, the modified gen_doc.py provides some mechanisms for you to customize the output.
 Several tables are defined at the beginning of the script:
 1. special_attr_defaults: gives attribute special default value.
 2. special_op_handler: creates special import function in frontend_dialect_transformer.cpp. Currently special handler is used for operations with oprational arguments
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 488f77a..8196b2d 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -15,10 +15,10 @@ add_library(compiler
   pass/passes.hpp)
 
 # Include root src directory.
-target_include_directories(compiler PRIVATE ${ONNF_SRC_ROOT})
+target_include_directories(compiler PRIVATE ${ONNX_MLIR_SRC_ROOT})
 
 # Include tablegen generated header files.
-target_include_directories(compiler PRIVATE ${ONNF_BIN_ROOT})
+target_include_directories(compiler PRIVATE ${ONNX_MLIR_BIN_ROOT})
 
 target_link_libraries(compiler
   ${CMAKE_THREAD_LIBS_INIT}
@@ -27,55 +27,55 @@ target_link_libraries(compiler
   curses)
 
 set(LLVM_TARGET_DEFINITIONS pass/onnx_decompose.td)
-onnf_tablegen(onnx_decompose.inc -gen-rewriters)
+onnx_mlir_tablegen(onnx_decompose.inc -gen-rewriters)
 add_public_tablegen_target(gen_onnx_decompose)
 add_dependencies(compiler gen_onnx_decompose)
 
 set(LLVM_TARGET_DEFINITIONS pass/shape_inference_interface.td)
-onnf_tablegen(shape_inference.hpp.inc -gen-op-interface-decls)
-onnf_tablegen(shape_inference.cpp.inc -gen-op-interface-defs)
+onnx_mlir_tablegen(shape_inference.hpp.inc -gen-op-interface-decls)
+onnx_mlir_tablegen(shape_inference.cpp.inc -gen-op-interface-defs)
 add_public_tablegen_target(gen_shape_inference)
 add_dependencies(compiler gen_shape_inference)
 
 set(LLVM_TARGET_DEFINITIONS pass/onnx_combine.td)
-onnf_tablegen(onnx_combine.inc -gen-rewriters)
+onnx_mlir_tablegen(onnx_combine.inc -gen-rewriters)
 add_public_tablegen_target(gen_onnx_combine)
 add_dependencies(compiler gen_onnx_combine)
 
 set(LLVM_TARGET_DEFINITIONS pass/onnx_rewrite.td)
-onnf_tablegen(onnx_rewrite.inc -gen-rewriters)
+onnx_mlir_tablegen(onnx_rewrite.inc -gen-rewriters)
 add_public_tablegen_target(gen_onnx_rewrite)
 add_dependencies(compiler gen_onnx_rewrite)
 
 set(LLVM_TARGET_DEFINITIONS dialect/onnx/onnx.td)
-onnf_tablegen(onnx.hpp.inc -gen-op-decls "-I${CMAKE_SOURCE_DIR}/compiler/pass")
-onnf_tablegen(onnx.cpp.inc -gen-op-defs "-I${CMAKE_SOURCE_DIR}/compiler/pass")
+onnx_mlir_tablegen(onnx.hpp.inc -gen-op-decls "-I${CMAKE_SOURCE_DIR}/compiler/pass")
+onnx_mlir_tablegen(onnx.cpp.inc -gen-op-defs "-I${CMAKE_SOURCE_DIR}/compiler/pass")
 set(GEN_DOC_FILE ${CMAKE_BINARY_DIR}/docs/Dialects/onnx.md)
 add_public_tablegen_target(gen_onnx)
 add_dependencies(compiler gen_onnx)
-add_onnf_dialect_doc(onnx dialect/onnx/onnx.td)
+add_onnx_mlir_dialect_doc(onnx dialect/onnx/onnx.td)
 
 set(LLVM_TARGET_DEFINITIONS dialect/krnl/krnl_ops.td)
-onnf_tablegen(krnl.hpp.inc -gen-op-decls)
-onnf_tablegen(krnl.cpp.inc -gen-op-defs)
+onnx_mlir_tablegen(krnl.hpp.inc -gen-op-decls)
+onnx_mlir_tablegen(krnl.cpp.inc -gen-op-defs)
 add_public_tablegen_target(gen_krnl_ops)
 add_dependencies(compiler gen_krnl_ops)
 
-add_library(onnf_onnx_decompose pass/onnx_decompose.cpp)
-target_include_directories(onnf_onnx_decompose
-        PRIVATE ${ONNF_SRC_ROOT} ${ONNF_BIN_ROOT}
-        ${ONNF_SRC_ROOT})
-target_link_libraries(onnf_onnx_decompose ${MLIRLibs})
-add_dependencies(onnf_onnx_decompose gen_krnl_ops)
+add_library(onnx_mlir_onnx_decompose pass/onnx_decompose.cpp)
+target_include_directories(onnx_mlir_onnx_decompose
+        PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT}
+        ${ONNX_MLIR_SRC_ROOT})
+target_link_libraries(onnx_mlir_onnx_decompose ${MLIRLibs})
+add_dependencies(onnx_mlir_onnx_decompose gen_krnl_ops)
 
-add_library(onnf_shape_inference pass/shape_inference_pass.cpp)
-target_include_directories(onnf_shape_inference
-        PRIVATE ${ONNF_SRC_ROOT} ${ONNF_BIN_ROOT}
-        ${ONNF_SRC_ROOT})
-target_link_libraries(onnf_shape_inference ${MLIRLibs})
-add_dependencies(onnf_shape_inference gen_krnl_ops)
+add_library(onnx_mlir_shape_inference pass/shape_inference_pass.cpp)
+target_include_directories(onnx_mlir_shape_inference
+        PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT}
+        ${ONNX_MLIR_SRC_ROOT})
+target_link_libraries(onnx_mlir_shape_inference ${MLIRLibs})
+add_dependencies(onnx_mlir_shape_inference gen_krnl_ops)
-add_library(onnf_lower_frontend +add_library(onnx_mlir_lower_frontend conversion/onnx_to_krnl/onnx_to_krnl_common.cpp conversion/onnx_to_krnl/onnx_to_krnl_common.hpp conversion/onnx_to_krnl/math/elementwise.cpp @@ -93,25 +93,25 @@ add_library(onnf_lower_frontend conversion/onnx_to_krnl/tensor/unsqueeze.cpp conversion/onnx_to_krnl/tensor/constant.cpp conversion/onnx_to_krnl/convert_onnx_to_krnl.cpp) -target_include_directories(onnf_lower_frontend - PRIVATE ${ONNF_SRC_ROOT} ${ONNF_BIN_ROOT} - ${ONNF_SRC_ROOT}) -target_link_libraries(onnf_lower_frontend ${MLIRLibs}) -add_dependencies(onnf_lower_frontend gen_krnl_ops) +target_include_directories(onnx_mlir_lower_frontend + PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT} + ${ONNX_MLIR_SRC_ROOT}) +target_link_libraries(onnx_mlir_lower_frontend ${MLIRLibs}) +add_dependencies(onnx_mlir_lower_frontend gen_krnl_ops) add_subdirectory(transform) add_subdirectory(tool) add_subdirectory(builder) add_subdirectory(runtime) -add_executable(onnf main.cpp) +add_executable(onnx-mlir main.cpp) -target_link_libraries(onnf builder ${MLIRLibs} onnf_transform onnf_onnx_decompose onnf_shape_inference onnf_lower_frontend) -whole_archive_link_mlir(onnf ${MLIRWholeArchiveLibs}) +target_link_libraries(onnx-mlir builder ${MLIRLibs} onnx_mlir_transform onnx_mlir_onnx_decompose onnx_mlir_shape_inference onnx_mlir_lower_frontend) +whole_archive_link_mlir(onnx-mlir ${MLIRWholeArchiveLibs}) find_package(ZLIB REQUIRED) -target_link_libraries(onnf ${ZLIB_LIBRARIES}) +target_link_libraries(onnx-mlir ${ZLIB_LIBRARIES}) -target_include_directories(onnf PRIVATE ${CMAKE_SOURCE_DIR}) -target_include_directories(onnf PRIVATE ${CMAKE_BINARY_DIR}) +target_include_directories(onnx-mlir PRIVATE ${CMAKE_SOURCE_DIR}) +target_include_directories(onnx-mlir PRIVATE ${CMAKE_BINARY_DIR}) -install(TARGETS onnf DESTINATION bin) +install(TARGETS onnx-mlir DESTINATION bin) diff --git a/src/builder/frontend_dialect_helper.cpp b/src/builder/frontend_dialect_helper.cpp index 36e362a..41f244b 100644 --- a/src/builder/frontend_dialect_helper.cpp +++ b/src/builder/frontend_dialect_helper.cpp @@ -10,7 +10,7 @@ #include "src/builder/frontend_dialect_helper.hpp" -namespace onnf { +namespace onnx_mlir { void replaceAll(std::string &str, const std::string &from, const std::string &to) { @@ -35,23 +35,23 @@ std::string legalize_name(std::string name) { return name; } -mlir::Value OnnxOnnfSymbolMapping::GetTensorByOnnxName( +mlir::Value OnnxMlirSymbolMapping::GetTensorByOnnxName( const std::string &name) { - assert(onnx_name2onnf_tensor.find(legalize_name(name)) != - onnx_name2onnf_tensor.end() && + assert(onnx_name2onnx_mlir_tensor.find(legalize_name(name)) != + onnx_name2onnx_mlir_tensor.end() && "Tensor not found"); - return onnx_name2onnf_tensor.at(legalize_name(name)); + return onnx_name2onnx_mlir_tensor.at(legalize_name(name)); } -void OnnxOnnfSymbolMapping::AddMapping( +void OnnxMlirSymbolMapping::AddMapping( const std::string &name, mlir::Value tensor) { - assert(onnx_name2onnf_tensor.count(legalize_name(name)) == 0 && + assert(onnx_name2onnx_mlir_tensor.count(legalize_name(name)) == 0 && "Tensor already exists."); - onnx_name2onnf_tensor.emplace(legalize_name(name), tensor); + onnx_name2onnx_mlir_tensor.emplace(legalize_name(name), tensor); } -bool OnnxOnnfSymbolMapping::ContainKey(std::string name) { - return onnx_name2onnf_tensor.count(name) != 0; +bool OnnxMlirSymbolMapping::ContainKey(std::string name) { + return onnx_name2onnx_mlir_tensor.count(name) != 0; } template @@ -181,4 +181,4 @@ 
mlir::Value InitializedTensorMapping::EmitInitializerForInputTensor( loc, tensorType, nullptr, constantDenseAttribute); } -} // namespace onnf +} // namespace onnx_mlir diff --git a/src/builder/frontend_dialect_helper.hpp b/src/builder/frontend_dialect_helper.hpp index f47c685..fb1cdb1 100644 --- a/src/builder/frontend_dialect_helper.hpp +++ b/src/builder/frontend_dialect_helper.hpp @@ -34,18 +34,18 @@ #include "src/dialect/onnx/onnx_ops.hpp" #include "onnx/onnx_pb.h" -namespace onnf { +namespace onnx_mlir { void replaceAll(std::string &str, const std::string &from, const std::string &to); std::string legalize_name(std::string name); -struct OnnxOnnfSymbolMapping { +struct OnnxMlirSymbolMapping { /*! * Get MLIR tensor by onnx tensor name. * @param name onnx tensor name. - * @return onnf tensor corresponding to `name`. + * @return onnx mlir tensor corresponding to `name`. */ mlir::Value GetTensorByOnnxName(const std::string &name); @@ -62,7 +62,7 @@ private: /*! * mapping from onnx tensor names to MLIR tensor. */ - std::map onnx_name2onnf_tensor; + std::map onnx_name2onnx_mlir_tensor; }; struct InitializedTensorMapping { @@ -98,4 +98,4 @@ private: std::map nameToInitializedTensor; }; -} // namespace onnf \ No newline at end of file +} // namespace onnx_mlir \ No newline at end of file diff --git a/src/builder/frontend_dialect_transformer.cpp b/src/builder/frontend_dialect_transformer.cpp index bec4094..5fc12ac 100644 --- a/src/builder/frontend_dialect_transformer.cpp +++ b/src/builder/frontend_dialect_transformer.cpp @@ -21,7 +21,7 @@ namespace bstd = mpark; #include "frontend_dialect_transformer.hpp" -namespace onnf { +namespace onnx_mlir { namespace { /*! @@ -47,13 +47,13 @@ private: mlir::OpBuilder builder_; mlir::Value none_; // mapping between string name and symbol - OnnxOnnfSymbolMapping frontend_symbols_; + OnnxMlirSymbolMapping frontend_symbols_; mlir::Location UnknownLoc() { return mlir::UnknownLoc::get(&context_); } // Convert type to MLIR type. // A complete list of types can be found in: - // /third_party/onnx/onnx/onnx.pb.h + // /third_party/onnx/onnx/onnx.pb.h mlir::Type convertONNXTypeToMLIRType(onnx::TensorProto_DataType onnxType) { switch (onnxType) { case onnx::TensorProto_DataType::TensorProto_DataType_FLOAT16: @@ -473,9 +473,9 @@ private: } }; // FrontendGenImpl class } // namespace -} // namespace onnf +} // namespace onnx_mlir -namespace onnf { +namespace onnx_mlir { void ImportFrontendModelFile(std::string model_fname, mlir::MLIRContext &context, @@ -489,4 +489,4 @@ void ImportFrontendModelFile(std::string model_fname, FrontendGenImpl myONNXGen(context); module = myONNXGen.ImportONNXModel(model); } -} // namespace onnf +} // namespace onnx_mlir diff --git a/src/builder/frontend_dialect_transformer.hpp b/src/builder/frontend_dialect_transformer.hpp index 234415a..01c29b0 100644 --- a/src/builder/frontend_dialect_transformer.hpp +++ b/src/builder/frontend_dialect_transformer.hpp @@ -26,12 +26,12 @@ class OwningModuleRef; } // namespace mlir //===----------------------------------------------------------------------===// -// Import a model into one of ONNF's frontend models. +// Import a model into the ONNX MLIR dialect. //===----------------------------------------------------------------------===// -namespace onnf { +namespace onnx_mlir { /*! - * Import an ONNX model file into ONNF's ONNX Dialect. + * Import an ONNX model file into the ONNX Dialect. * @param model_fname file name pointing to the onnx model protobuf. * @return MLIR::module generated for the ONNX model. 
  */
@@ -43,4 +43,4 @@ void ImportFrontendModelFile(std::string model_fname,
  *  TODO: Import models into other extension dialects that cover the
  *  operations specific to other frameworks such as Tensorflow or Pytorch.
  */
-} // namespace onnf
+} // namespace onnx_mlir
diff --git a/src/dialect/krnl/krnl_helper.cpp b/src/dialect/krnl/krnl_helper.cpp
index 91e9825..c4f2d53 100644
--- a/src/dialect/krnl/krnl_helper.cpp
+++ b/src/dialect/krnl/krnl_helper.cpp
@@ -6,7 +6,7 @@
 
 #include "krnl_helper.hpp"
 
-namespace onnf {
+namespace onnx_mlir {
 
 using namespace mlir;
 
@@ -120,7 +120,7 @@ void printBound(AffineMapAttr boundMap,
   printDimAndSymbolList(
       boundOperandsBeg, map.getNumDims(), map.getNumSymbols(), p);
 }
-} // namespace onnf
+} // namespace onnx_mlir
 
 namespace mlir {
 
diff --git a/src/dialect/krnl/krnl_helper.hpp b/src/dialect/krnl/krnl_helper.hpp
index aebbe0b..cee6e18 100644
--- a/src/dialect/krnl/krnl_helper.hpp
+++ b/src/dialect/krnl/krnl_helper.hpp
@@ -10,7 +10,7 @@
 #include "mlir/IR/StandardTypes.h"
 #include "mlir/Transforms/DialectConversion.h"
 
-namespace onnf {
+namespace onnx_mlir {
 
 class KrnlDialectOperandParser {
 public:
@@ -59,7 +59,7 @@ void printDimAndSymbolList(mlir::Operation::operand_iterator &begin,
 void printBound(mlir::AffineMapAttr boundMap,
                 mlir::Operation::operand_iterator &boundOperandsBeg,
                 const char *prefix, mlir::OpAsmPrinter &p);
-} // namespace onnf
+} // namespace onnx_mlir
 
 namespace mlir {
 
diff --git a/src/dialect/krnl/krnl_ops.cpp b/src/dialect/krnl/krnl_ops.cpp
index b597494..871bdc3 100644
--- a/src/dialect/krnl/krnl_ops.cpp
+++ b/src/dialect/krnl/krnl_ops.cpp
@@ -188,9 +188,9 @@ void print(OpAsmPrinter &p, KrnlIterateOp &op) {
     p << " -> ";
     p.printOperand(var);
     p << " = ";
-    onnf::printBound((*boundItr++).cast<AffineMapAttr>(), operandItr, "max", p);
+    onnx_mlir::printBound((*boundItr++).cast<AffineMapAttr>(), operandItr, "max", p);
     p << " to ";
-    onnf::printBound((*boundItr++).cast<AffineMapAttr>(), operandItr, "min", p);
+    onnx_mlir::printBound((*boundItr++).cast<AffineMapAttr>(), operandItr, "min", p);
     delimiter = ", ";
   }
 
@@ -202,7 +202,7 @@ void print(OpAsmPrinter &p, KrnlIterateOp &op) {
 ParseResult parseKrnlIterateOp(OpAsmParser &parser, OperationState &result) {
   auto builder = parser.getBuilder();
   auto context = builder.getContext();
-  onnf::KrnlDialectOperandParser operandParser(parser);
+  onnx_mlir::KrnlDialectOperandParser operandParser(parser);
 
   // Parse optimized loops:
   SmallVector<OpAsmParser::OperandType, 4> optimizedLoopRefs;
diff --git a/src/dialect/onnx/onnx.td b/src/dialect/onnx/onnx.td
index 68dbbf6..218c573 100644
--- a/src/dialect/onnx/onnx.td
+++ b/src/dialect/onnx/onnx.td
@@ -42,7 +42,7 @@ class ONNX_Op<string mnemonic, list<OpTrait> traits = []> :
 //the tablegen code onnxop.in is generated with gen_doc.py
 //clone and install onnx
 //   git clone --recursive https://github.com/onnx/onnx.git
-// set up env for anaconda3 and for ONNF (BOOSTROOT, cmake, gcc ...)
+// set up env for anaconda3 and for ONNX MLIR (BOOSTROOT, cmake, gcc ...)
 //   cd onnx
 //install onnx
 //   CC=gcc CXX=g++ pip install -e .
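
The onnx.td comment above compresses the regeneration workflow into a few shorthand lines. Spelled out as a shell session it might look like the sketch below; this is a hedged illustration following the steps in doc/ImportONNXDefs.md, with the script location and copy destinations assumed from that document rather than taken from this patch.

``` bash
# Sketch: regenerate onnxop.inc and op_build_table.inc after the rename.
# Paths follow doc/ImportONNXDefs.md and are assumptions, not part of the patch.
git clone --recursive https://github.com/onnx/onnx.git
cp onnx-mlir/doc/gen_doc.py onnx/onnx/defs/   # modified script, assumed to live under doc/
(cd onnx && python onnx/defs/gen_doc.py)      # writes the two .inc files to the current directory
cp onnx/onnxop.inc onnx-mlir/src/dialect/onnx/onnxop.inc
cp onnx/op_build_table.inc onnx-mlir/src/builder/
```
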
diff --git a/src/dialect/onnx/onnx_ops.hpp b/src/dialect/onnx/onnx_ops.hpp
index 1ba9669..b981aed 100644
--- a/src/dialect/onnx/onnx_ops.hpp
+++ b/src/dialect/onnx/onnx_ops.hpp
@@ -36,4 +36,4 @@ class ONNXOpsDialect : public Dialect {
 
 } // end namespace mlir
 
-namespace onnf {}
+namespace onnx_mlir {}
diff --git a/src/main.cpp b/src/main.cpp
index 9500d31..0926f99 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -36,11 +36,11 @@ void EmitLLVMBitCode(const mlir::OwningModuleRef &module);
 
 using namespace std;
-using namespace onnf;
+using namespace onnx_mlir;
 
 void LoadMLIR(string inputFilename, mlir::MLIRContext &context,
               mlir::OwningModuleRef &module) {
-  // Handle '.mlir' input to the ONNF frontend.
+  // Handle '.mlir' input to the ONNX MLIR frontend.
   // The mlir format indicates that one or more of the supported
   // representations are used in the file.
   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
@@ -77,11 +77,11 @@ int main(int argc, char *argv[]) {
   mlir::registerDialect<mlir::ONNXOpsDialect>();
   mlir::registerDialect<mlir::KrnlOpsDialect>();
 
-  llvm::cl::OptionCategory OnnfOptions("ONNF Options",
+  llvm::cl::OptionCategory OnnxMlirOptions("ONNX MLIR Options",
                                        "These are frontend options.");
   llvm::cl::opt<string> inputFilename(
       llvm::cl::Positional, llvm::cl::desc("<input file>"), llvm::cl::init("-"),
-      llvm::cl::cat(OnnfOptions));
+      llvm::cl::cat(OnnxMlirOptions));
 
   enum EmissionTargetType {
     EmitONNXIR,
@@ -99,11 +99,11 @@ int main(int argc, char *argv[]) {
         clEnumVal(EmitLLVMIR, "Lower model to LLVM IR (LLVM dialect)."),
         clEnumVal(EmitLLVMBC, "Lower model to LLVM IR and emit (to file) "
                               "LLVM bitcode for model.")),
-      llvm::cl::init(EmitLLVMBC), llvm::cl::cat(OnnfOptions));
+      llvm::cl::init(EmitLLVMBC), llvm::cl::cat(OnnxMlirOptions));
 
-  llvm::cl::HideUnrelatedOptions(OnnfOptions);
+  llvm::cl::HideUnrelatedOptions(OnnxMlirOptions);
   llvm::cl::ParseCommandLineOptions(argc, argv,
-      "ONNF MLIR modular optimizer driver\n");
+      "ONNX MLIR modular optimizer driver\n");
 
   // Decide if the input file is an ONNX model or a model specified
   // in MLIR. The extension of the file is the decider.
diff --git a/src/pass/passes.hpp b/src/pass/passes.hpp
index b7bdc96..de8fe1d 100644
--- a/src/pass/passes.hpp
+++ b/src/pass/passes.hpp
@@ -1,10 +1,10 @@
-//===- passes.hpp - ONNF Passes Definition --------------------------------===//
+//===- passes.hpp - ONNX MLIR Passes Definition ---------------------------===//
 //
 // Copyright 2019 The IBM Research Authors.
 //
 // =============================================================================
 //
-// This file exposes the entry points to create compiler passes for ONNF.
+// This file exposes the entry points to create compiler passes for ONNX MLIR.
 //
 //===----------------------------------------------------------------------===//
diff --git a/src/runtime/CMakeLists.txt b/src/runtime/CMakeLists.txt
index 8be3cbb..fdc6278 100644
--- a/src/runtime/CMakeLists.txt
+++ b/src/runtime/CMakeLists.txt
@@ -3,8 +3,8 @@ add_library(cruntime
             dyn_memref.h
             data_type.h)
 target_include_directories(cruntime
-        PRIVATE ${ONNF_SRC_ROOT} ${ONNF_BIN_ROOT}
-        ${ONNF_SRC_ROOT})
+        PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT}
+        ${ONNX_MLIR_SRC_ROOT})
 
 pybind11_add_module(pyruntime
                     dyn_memref.cpp
@@ -13,6 +13,6 @@ pybind11_add_module(pyruntime
                     runtime.hpp)
 target_link_libraries(pyruntime PRIVATE ${CMAKE_DL_LIBS})
 target_include_directories(pyruntime
-        PRIVATE ${ONNF_SRC_ROOT} ${ONNF_BIN_ROOT}
-        ${ONNF_SRC_ROOT})
+        PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT}
+        ${ONNX_MLIR_SRC_ROOT})
 add_dependencies(pyruntime cruntime)
diff --git a/src/tool/CMakeLists.txt b/src/tool/CMakeLists.txt
index d1388d4..8e68c37 100644
--- a/src/tool/CMakeLists.txt
+++ b/src/tool/CMakeLists.txt
@@ -1 +1 @@
-add_subdirectory(onnf_opt)
\ No newline at end of file
+add_subdirectory(onnx_mlir_opt)
\ No newline at end of file
diff --git a/src/tool/onnf_opt/CMakeLists.txt b/src/tool/onnf_opt/CMakeLists.txt
deleted file mode 100644
index 5201ae4..0000000
--- a/src/tool/onnf_opt/CMakeLists.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-add_executable(onnf-opt onnf_opt.cpp)
-add_dependencies(onnf-opt gen_krnl_ops)
-
-target_include_directories(onnf-opt PRIVATE ${ONNF_SRC_ROOT})
-target_include_directories(onnf-opt PRIVATE ${ONNF_BIN_ROOT})
-
-target_link_libraries(onnf-opt builder ${MLIRLibs} onnf_transform onnf_shape_inference onnf_lower_frontend curses)
-whole_archive_link_mlir(onnf-opt ${MLIRWholeArchiveLibs})
-whole_archive_link_onnf(onnf-opt compiler onnf_transform onnf_lower_frontend onnf_shape_inference)
diff --git a/src/tool/onnx_mlir_opt/CMakeLists.txt b/src/tool/onnx_mlir_opt/CMakeLists.txt
new file mode 100644
index 0000000..3c97cc1
--- /dev/null
+++ b/src/tool/onnx_mlir_opt/CMakeLists.txt
@@ -0,0 +1,9 @@
+add_executable(onnx-mlir-opt onnx_mlir_opt.cpp)
+add_dependencies(onnx-mlir-opt gen_krnl_ops)
+
+target_include_directories(onnx-mlir-opt PRIVATE ${ONNX_MLIR_SRC_ROOT})
+target_include_directories(onnx-mlir-opt PRIVATE ${ONNX_MLIR_BIN_ROOT})
+
+target_link_libraries(onnx-mlir-opt builder ${MLIRLibs} onnx_mlir_transform onnx_mlir_shape_inference onnx_mlir_lower_frontend curses)
+whole_archive_link_mlir(onnx-mlir-opt ${MLIRWholeArchiveLibs})
+whole_archive_link_onnx_mlir(onnx-mlir-opt compiler onnx_mlir_transform onnx_mlir_lower_frontend onnx_mlir_shape_inference)
diff --git a/src/tool/onnf_opt/onnf_opt.cpp b/src/tool/onnx_mlir_opt/onnx_mlir_opt.cpp
similarity index 94%
rename from src/tool/onnf_opt/onnf_opt.cpp
rename to src/tool/onnx_mlir_opt/onnx_mlir_opt.cpp
index 597bfd4..21a8bcc 100644
--- a/src/tool/onnf_opt/onnf_opt.cpp
+++ b/src/tool/onnx_mlir_opt/onnx_mlir_opt.cpp
@@ -1,4 +1,4 @@
-//===--------------------- onnf_opt.cpp - MLIR Operations -----------------===//
+//===---------------- onnx_mlir_opt.cpp - MLIR Operations -----------------===//
 //
 // Copyright 2019 The IBM Research Authors.
 //
@@ -19,7 +19,7 @@
 #include "src/dialect/onnx/onnx_ops.hpp"
 #include "src/pass/passes.hpp"
 
-using namespace onnf;
+using namespace onnx_mlir;
 
 static llvm::cl::opt<std::string> input_filename(llvm::cl::Positional,
                                                  llvm::cl::desc("<input file>"),
@@ -60,7 +60,7 @@ int main(int argc, char **argv) {
   mlir::registerPassManagerCLOptions();
   mlir::PassPipelineCLParser passPipeline("", "Compiler passes to run");
   llvm::cl::ParseCommandLineOptions(argc, argv,
-                                    "ONNF MLIR modular optimizer driver\n");
+                                    "ONNX MLIR modular optimizer driver\n");
 
   // Set up the input file.
   std::string error_message;
diff --git a/src/transform/CMakeLists.txt b/src/transform/CMakeLists.txt
index 6975bab..1e89333 100644
--- a/src/transform/CMakeLists.txt
+++ b/src/transform/CMakeLists.txt
@@ -1,9 +1,9 @@
-add_library(onnf_transform
+add_library(onnx_mlir_transform
   lower_krnl.cpp
   lower_to_llvm.cpp)
 
-target_include_directories(onnf_transform
-        PRIVATE ${ONNF_SRC_ROOT} ${ONNF_BIN_ROOT}
-        ${ONNF_SRC_ROOT})
-target_link_libraries(onnf_transform ${MLIRLibs})
-add_dependencies(onnf_transform gen_krnl_ops)
+target_include_directories(onnx_mlir_transform
+        PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT}
+        ${ONNX_MLIR_SRC_ROOT})
+target_link_libraries(onnx_mlir_transform ${MLIRLibs})
+add_dependencies(onnx_mlir_transform gen_krnl_ops)
diff --git a/test/backend/CMakeLists.txt b/test/backend/CMakeLists.txt
index 641422c..a703319 100644
--- a/test/backend/CMakeLists.txt
+++ b/test/backend/CMakeLists.txt
@@ -6,5 +6,5 @@ add_custom_target(run-onnx-backend-test
         COMMAND ${PYTHON_EXECUTABLE}
                 ${CMAKE_CURRENT_BINARY_DIR}/test.py)
 
-add_dependencies(run-onnx-backend-test onnf)
+add_dependencies(run-onnx-backend-test onnx-mlir)
 add_dependencies(run-onnx-backend-test pyruntime)
diff --git a/test/backend/test.py b/test/backend/test.py
index 818332f..de856c1 100644
--- a/test/backend/test.py
+++ b/test/backend/test.py
@@ -16,12 +16,12 @@ import test_config
 
 VERBOSE = bool(os.environ.get("VERBOSE"))
 
 CXX = test_config.CXX_PATH
-ONNF = os.path.join(test_config.ONNF_BUILD_PATH, "bin/onnf")
+ONNX_MLIR = os.path.join(test_config.ONNX_MLIR_BUILD_PATH, "bin/onnx-mlir")
 LLC = os.path.join(test_config.LLVM_PROJ_BUILD_PATH, "bin/llc")
 
 # Make lib folder under build directory visible in PYTHONPATH
 doc_check_base_dir = os.path.dirname(os.path.realpath(__file__))
-RUNTIME_DIR = os.path.join(test_config.ONNF_BUILD_PATH, "lib")
+RUNTIME_DIR = os.path.join(test_config.ONNX_MLIR_BUILD_PATH, "lib")
 sys.path.append(RUNTIME_DIR)
 from pyruntime import ExecutionSession
 
@@ -39,7 +39,7 @@ class DummyBackend(onnx.backend.base.Backend):
         # Save model to disk as temp_model.onnx.
         onnx.save(model, "temp_model.onnx")
         # Call frontend to process temp_model.onnx, bit code will be generated.
-        execute_commands([ONNF, "temp_model.onnx"])
+        execute_commands([ONNX_MLIR, "temp_model.onnx"])
         # Call llc to generate object file from bitcode.
         execute_commands(
             [LLC, "-filetype=obj", "-relocation-model=pic", "model.bc"])
diff --git a/test/backend/test_config.py.in b/test/backend/test_config.py.in
index 571e35d..08bf8d3 100644
--- a/test/backend/test_config.py.in
+++ b/test/backend/test_config.py.in
@@ -1,3 +1,3 @@
-ONNF_BUILD_PATH = "@CMAKE_BINARY_DIR@"
+ONNX_MLIR_BUILD_PATH = "@CMAKE_BINARY_DIR@"
 LLVM_PROJ_BUILD_PATH = "@LLVM_PROJ_BUILD@"
 CXX_PATH = "@CMAKE_CXX_COMPILER@"
diff --git a/test/mlir/CMakeLists.txt b/test/mlir/CMakeLists.txt
index e8a4886..d4104f5 100644
--- a/test/mlir/CMakeLists.txt
+++ b/test/mlir/CMakeLists.txt
@@ -6,16 +6,16 @@ configure_lit_site_cfg(${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.py.in
                        MAIN_CONFIG
                        ${CMAKE_CURRENT_SOURCE_DIR}/lit.cfg.py)
 
-set(ONNF_MLIR_TEST_DEPENDS onnf-opt)
+set(ONNX_MLIR_TEST_DEPENDS onnx-mlir-opt)
 
 add_lit_testsuite(check-mlir-lit
-                  "Running the ONNF MLIR regression tests"
+                  "Running the ONNX MLIR regression tests"
                   ${CMAKE_CURRENT_BINARY_DIR}
                   DEPENDS
-                  ${ONNF_MLIR_TEST_DEPENDS})
+                  ${ONNX_MLIR_TEST_DEPENDS})
 set_target_properties(check-mlir-lit PROPERTIES FOLDER "Tests")
 
-add_lit_testsuites(ONNF_MLIR
+add_lit_testsuites(ONNX_MLIR
                    ${CMAKE_CURRENT_SOURCE_DIR}
                    DEPENDS
-                   ${ONNF_MLIR_TEST_DEPS})
+                   ${ONNX_MLIR_TEST_DEPS})
diff --git a/test/mlir/krnl/ops.mlir b/test/mlir/krnl/ops.mlir
index a098d66..11bbc70 100644
--- a/test/mlir/krnl/ops.mlir
+++ b/test/mlir/krnl/ops.mlir
@@ -1,5 +1,5 @@
-// RUN: onnf-opt %s -mlir-print-op-generic | FileCheck -check-prefix=GENERIC %s
-// RUN: onnf-opt %s | FileCheck %s
+// RUN: onnx-mlir-opt %s -mlir-print-op-generic | FileCheck -check-prefix=GENERIC %s
+// RUN: onnx-mlir-opt %s | FileCheck %s
 
 // GENERIC-DAG: #{{.*}} = affine_map<() -> (0)>
 // GENERIC-DAG: #{{.*}} = affine_map<() -> (10)>
diff --git a/test/mlir/krnl/reshape.mlir b/test/mlir/krnl/reshape.mlir
index d805166..ddc9883 100644
--- a/test/mlir/krnl/reshape.mlir
+++ b/test/mlir/krnl/reshape.mlir
@@ -1,4 +1,4 @@
-// RUN: onnf-opt --shape-inference --lower-frontend --lower-krnl --lower-all-llvm %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --shape-inference --lower-frontend --lower-krnl --lower-all-llvm %s -split-input-file | FileCheck %s
 
 func @test_reshape(%arg0 : tensor<?x10xf32>, %arg1 : tensor<4xi32>) -> tensor<*xf32> {
   %0 = "onnx.Reshape"(%arg0, %arg1) : (tensor<?x10xf32>, tensor<4xi32>) -> tensor<*xf32>
diff --git a/test/mlir/lit.cfg.py b/test/mlir/lit.cfg.py
index 86aab83..ac22dc2 100644
--- a/test/mlir/lit.cfg.py
+++ b/test/mlir/lit.cfg.py
@@ -17,10 +17,10 @@ config.name = 'Open Neural Network Frontend'
 config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
 
 # test_source_root: The root path where tests are located.
-config.test_source_root = config.onnf_mlir_test_src_dir
+config.test_source_root = config.onnx_mlir_test_src_dir
 
 # test_exec_root: The root path where tests should be run.
-config.test_exec_root = config.onnf_mlir_test_build_dir
+config.test_exec_root = config.onnx_mlir_test_build_dir
 
 llvm_config.use_default_substitutions()
 
@@ -28,10 +28,10 @@ llvm_config.use_default_substitutions()
 llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
 
 tool_dirs = [
-    config.onnf_mlir_tools_dir, config.mlir_tools_dir, config.llvm_tools_dir
+    config.onnx_mlir_tools_dir, config.mlir_tools_dir, config.llvm_tools_dir
 ]
 tool_names = [
-    'onnf-opt', 'mlir-opt', 'mlir-translate'
+    'onnx-mlir-opt', 'mlir-opt', 'mlir-translate'
 ]
 tools = [ToolSubst(s, unresolved='ignore') for s in tool_names]
 llvm_config.add_tool_substitutions(tools, tool_dirs)
\ No newline at end of file
diff --git a/test/mlir/lit.site.cfg.py.in b/test/mlir/lit.site.cfg.py.in
index 7acd351..652f8e3 100644
--- a/test/mlir/lit.site.cfg.py.in
+++ b/test/mlir/lit.site.cfg.py.in
@@ -6,11 +6,11 @@ config.mlir_obj_root = "@LLVM_PROJ_BUILD@"
 config.mlir_tools_dir = "@MLIR_TOOLS_DIR@"
 config.suffixes = ['.mlir']
 
-config.onnf_mlir_tools_dir = "@ONNF_TOOLS_DIR@"
-config.onnf_mlir_test_src_dir = "@ONNF_LIT_TEST_SRC_DIR@"
-config.onnf_mlir_test_build_dir = "@ONNF_LIT_TEST_BUILD_DIR@"
+config.onnx_mlir_tools_dir = "@ONNX_MLIR_TOOLS_DIR@"
+config.onnx_mlir_test_src_dir = "@ONNX_MLIR_LIT_TEST_SRC_DIR@"
+config.onnx_mlir_test_build_dir = "@ONNX_MLIR_LIT_TEST_BUILD_DIR@"
 
 lit.llvm.initialize(lit_config, config)
 
 # Let the main config do the real work.
-lit_config.load_config(config, "@ONNF_LIT_TEST_SRC_DIR@/lit.cfg.py")
\ No newline at end of file
+lit_config.load_config(config, "@ONNX_MLIR_LIT_TEST_SRC_DIR@/lit.cfg.py")
\ No newline at end of file
diff --git a/test/mlir/onnx/onnx_canonicalization.mlir b/test/mlir/onnx/onnx_canonicalization.mlir
index 8ad7690..1308567 100644
--- a/test/mlir/onnx/onnx_canonicalization.mlir
+++ b/test/mlir/onnx/onnx_canonicalization.mlir
@@ -1,4 +1,4 @@
-// RUN: onnf-opt --canonicalize %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --canonicalize %s -split-input-file | FileCheck %s
 
 // CHECK-LABEL: func @test_matmul_add_fused(%{{.*}}: tensor<10x10xf32>, %{{.*}}: tensor<10x10xf32>, %{{.*}}: tensor<10x10xf32>) -> tensor<10x10xf32> {
 func @test_matmul_add_fused(%a0: tensor<10x10xf32>, %a1: tensor<10x10xf32>, %a2: tensor<10x10xf32>) -> tensor<10x10xf32> {
diff --git a/test/mlir/onnx/onnx_decompose.mlir b/test/mlir/onnx/onnx_decompose.mlir
index f01001b..8d58663 100644
--- a/test/mlir/onnx/onnx_decompose.mlir
+++ b/test/mlir/onnx/onnx_decompose.mlir
@@ -1,4 +1,4 @@
-// RUN: onnf-opt --decompose-onnx %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --decompose-onnx %s -split-input-file | FileCheck %s
 
 // CHECK-LABEL: @test_reducel1(%{{.*}}: tensor<?x?x?xf32>) -> tensor<*xf32>
 func @test_reducel1(%arg0 : tensor<?x?x?xf32>) -> tensor<*xf32> {
diff --git a/test/mlir/onnx/onnx_lowering.mlir b/test/mlir/onnx/onnx_lowering.mlir
index 3d05789..5321191 100644
--- a/test/mlir/onnx/onnx_lowering.mlir
+++ b/test/mlir/onnx/onnx_lowering.mlir
@@ -1,4 +1,4 @@
-// RUN: onnf-opt --shape-inference --lower-frontend %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --shape-inference --lower-frontend %s -split-input-file | FileCheck %s
 
 func @test_add(%arg0 : tensor<10x10xf32>, %arg1 : tensor<10x10xf32>) -> tensor<*xf32> {
   %0 = "onnx.Add"(%arg0, %arg1) : (tensor<10x10xf32>, tensor<10x10xf32>) -> tensor<*xf32>
diff --git a/test/mlir/onnx/onnx_lowering_with_dealloc.mlir b/test/mlir/onnx/onnx_lowering_with_dealloc.mlir
index 1286041..3940643 100644
--- a/test/mlir/onnx/onnx_lowering_with_dealloc.mlir
+++ b/test/mlir/onnx/onnx_lowering_with_dealloc.mlir
@@ -1,4 +1,4 @@
-// RUN: onnf-opt --shape-inference --lower-frontend %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --shape-inference --lower-frontend %s -split-input-file | FileCheck %s
 
 func @test_add_add(%arg0 : tensor<10x10xf32>, %arg1 : tensor<10x10xf32>) -> tensor<*xf32> {
   %0 = "onnx.Add"(%arg0, %arg1) : (tensor<10x10xf32>, tensor<10x10xf32>) -> tensor<*xf32>
diff --git a/test/mlir/onnx/onnx_shape_inference.mlir b/test/mlir/onnx/onnx_shape_inference.mlir
index 55005c3..e9ece82 100644
--- a/test/mlir/onnx/onnx_shape_inference.mlir
+++ b/test/mlir/onnx/onnx_shape_inference.mlir
@@ -1,4 +1,4 @@
-// RUN: onnf-opt --shape-inference %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --shape-inference %s -split-input-file | FileCheck %s
 
 //===----------------------------------------------------------------------===//
 /// Test the default behavior of transpose when no information for the
diff --git a/test/mlir/onnx/onnx_shape_inference_maxpool.mlir b/test/mlir/onnx/onnx_shape_inference_maxpool.mlir
index 1d83b8b..1fd6328 100644
--- a/test/mlir/onnx/onnx_shape_inference_maxpool.mlir
+++ b/test/mlir/onnx/onnx_shape_inference_maxpool.mlir
@@ -1,4 +1,4 @@
-// RUN: onnf-opt --shape-inference %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --shape-inference %s -split-input-file | FileCheck %s
 
 /// Test the default behavior of Max Pool with no padding (pad are set but shoudl be ignored)
 func @test_default_maxpoolsingleout(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
diff --git a/utils/install-mlir.sh b/utils/install-mlir.sh
index b995b06..833d983 100644
--- a/utils/install-mlir.sh
+++ b/utils/install-mlir.sh
@@ -1,5 +1,5 @@
 git clone https://github.com/llvm/llvm-project.git
-# Check out a specific branch that is known to work with ONNF.
+# Check out a specific branch that is known to work with ONNX MLIR.
 cd llvm-project && git checkout 076475713c236081a3247a53e9dbab9043c3eac2 && cd ..
 mkdir llvm-project/build
 cd llvm-project/build
diff --git a/utils/install-onnf.sh b/utils/install-onnx-mlir.sh
similarity index 63%
rename from utils/install-onnf.sh
rename to utils/install-onnx-mlir.sh
index 5f2a98b..178b80c 100644
--- a/utils/install-onnf.sh
+++ b/utils/install-onnx-mlir.sh
@@ -2,9 +2,9 @@
 export LLVM_PROJ_SRC=$(pwd)/llvm-project/
 export LLVM_PROJ_BUILD=$(pwd)/llvm-project/build
 
-mkdir ONNF/build && cd ONNF/build
+mkdir onnx-mlir/build && cd onnx-mlir/build
 cmake ..
-cmake --build . --target onnf
+cmake --build . --target onnx-mlir
 
 # Run FileCheck tests:
 export LIT_OPTS=-v
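
A mechanical rename of this breadth is easy to leave incomplete, so a closing sanity check is worth the minute it takes. The sketch below is one hedged way to hunt for stragglers and exercise the renamed targets, assuming a POSIX shell at the repository root; the build commands simply mirror utils/install-onnx-mlir.sh above.

``` bash
# "onnf" is not a substring of "onnx-mlir", so any case-insensitive hit
# (outside git metadata) is a genuine leftover from before the rename.
grep -rIn --exclude-dir=.git -i 'onnf' . && echo "stale ONNF references remain"

# Rebuild and test with the renamed targets, as in utils/install-onnx-mlir.sh.
mkdir -p onnx-mlir/build && cd onnx-mlir/build
cmake ..
cmake --build . --target onnx-mlir        # driver binary, formerly `onnf`
cmake --build . --target onnx-mlir-opt    # optimizer tool, formerly `onnf-opt`
LIT_OPTS=-v cmake --build . --target check-mlir-lit
```

Note that a textual search only catches the identifier itself: the spelled-out project name survives this patch in at least one spot (`config.name = 'Open Neural Network Frontend'` in test/mlir/lit.cfg.py) and would need a follow-up change.
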