Merge pull request #7 from clang-ykt/setup-buildbot

Set up CI build process
Tian Jin 2019-12-24 04:07:15 -05:00 committed by GitHub
commit 5b389384ca
5 changed files with 119 additions and 16 deletions

.circleci/config.yml

@@ -2,15 +2,59 @@ version: 2
 jobs:
   build:
     docker:
-      - image: debian:stretch
+      - image: circleci/python
     steps:
       - checkout
       - run:
-          name: Greeting
-          command: echo Hello, world.
+          name: "Pull Submodules"
+          command: |
+            git submodule update --init --recursive
+      - run:
+          name: Check current directory
+          command: pwd
+      - run:
+          name: Check current directory content
+          command: ls
+      - run:
+          name: Installing GCC
+          command: 'sudo apt-get update && sudo apt-get install -y gcc g++'
+      - run:
+          name: Install CMAKE
+          command: 'sudo apt-get update && sudo apt-get install -y cmake ninja-build'
+      - run:
+          name: Install Protobuf
+          command: 'sudo apt-get update && sudo apt-get install -y protobuf-compiler'
+      - run:
+          name: Check gcc version
+          command: gcc --version
+      - restore_cache:
+          key: ONNF-MLIR-{{ arch }}
+      - run:
+          name: Install MLIR
+          command: |
+            if [ ! -f llvm-project/build/bin/mlir-opt ]; then
+              git clone https://github.com/llvm/llvm-project.git
+              cd llvm-project && git checkout 9b6ad8466bb8b97082b705270603ad7f4559e931 && cd ..
+              git clone https://github.com/tensorflow/mlir llvm-project/llvm/projects/mlir
+              cd llvm-project/llvm/projects/mlir && git checkout 0710266d0f56cf6ab0f437badbd7416b6cecdf5f && cd ../../../..
+              mkdir llvm-project/build
+              cd llvm-project/build
+              cmake -G Ninja ../llvm -DLLVM_ENABLE_RTTI=ON -DLLVM_BUILD_EXAMPLES=OFF -DLLVM_TARGETS_TO_BUILD="host" -DCMAKE_BUILD_TYPE=Release
+              CMAKE_EXE_LINKER_FLAGS="-Wl,--reduce-memory-overheads -Wl,--hash-size=512" cmake --build . --target check-mlir -- -j 4
+            fi
+      - save_cache:
+          key: ONNF-MLIR-{{ arch }}
+          paths:
+            - llvm-project
+      - run:
+          name: Install ONNF
+          command: |
+            mkdir build && cd build
+            LLVM_SRC=$(pwd)/../llvm-project/llvm LLVM_BUILD=$(pwd)/../llvm-project/build cmake ..
+            make all
+            LIT_OPTS=-v make check-mlir-lit
       - run:
           name: Print the Current Time
           command: date
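The interesting part of this config is the cache guard in the Install MLIR step: `restore_cache` brings back a previous `llvm-project` tree keyed on architecture, the expensive LLVM/MLIR build runs only when the restored cache lacks the `mlir-opt` binary, and `save_cache` then persists the whole tree for the next run. A minimal sketch of that guard as a standalone script, assuming the same directory layout as the config:

```
#!/bin/sh
# Cache-guard sketch (same layout as the CI config above): skip the
# expensive LLVM/MLIR build whenever a previous build output is present.
if [ ! -f llvm-project/build/bin/mlir-opt ]; then
  echo "mlir-opt not found; a full LLVM/MLIR build would run here"
  # clone llvm-project and tensorflow/mlir at the pinned commits, then
  # configure with CMake/Ninja and build check-mlir (see Install MLIR)
else
  echo "reusing cached MLIR build at llvm-project/build"
fi
```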

README.md

@@ -1,2 +1,66 @@
 # ONNF
-Open Neural Network Frontend
+Open Neural Network Frontend: an ONNX frontend for MLIR.
+
+[![CircleCI](https://circleci.com/gh/clang-ykt/ONNF.svg?style=svg)](https://circleci.com/gh/clang-ykt/ONNF)
+
+## Installation
+
+We assume an existing installation of MLIR. The llvm-project commit hash we test against is 9b6ad8466bb8b97082b705270603ad7f4559e931, and the MLIR commit hash is 0710266d0f56cf6ab0f437badbd7416b6cecdf5f.
+
+Two environment variables need to be set:
+- `LLVM_SRC` should point to the LLVM source directory (e.g., llvm-project/llvm).
+- `LLVM_BUILD` should point to the LLVM build directory (e.g., llvm-project/build).
+
+To build ONNF, use the following commands:
+```
+git clone --recursive git@github.com:clang-ykt/ONNF.git
+cd ONNF
+mkdir build
+cd build
+cmake ..
+cmake --build . --target all
+```
+After the above commands succeed, an `onnf` executable should appear in the `bin` directory.
+
+## Using ONNF
+
+The usage of `onnf` is as follows:
+```
+OVERVIEW: ONNF MLIR modular optimizer driver
+
+USAGE: onnf [options] <input file>
+
+OPTIONS:
+
+Generic Options:
+
+  --help      - Display available options (--help-hidden for more)
+  --help-list - Display list of available options (--help-list-hidden for more)
+  --version   - Display the version of this program
+
+ONNF Options:
+These are frontend options.
+
+  Choose target to emit:
+    --EmitONNXIR - Ingest ONNX and emit corresponding ONNX dialect.
+    --EmitMLIR   - Lower model to MLIR built-in transformation dialect.
+    --EmitLLVMIR - Lower model to LLVM IR (LLVM dialect).
+    --EmitLLVMBC - Lower model to LLVM IR and emit (to file) LLVM bitcode for model.
+```
+
+## Example
+
+For example, to lower an ONNX model (e.g., add.onnx) to the ONNX dialect, use the following command:
+```
+./onnf --EmitONNXIR add.onnx
+```
+The output should look like:
+```
+module {
+  func @main_graph(%arg0: tensor<10x10x10xf32>, %arg1: tensor<10x10x10xf32>) -> tensor<10x10x10xf32> {
+    %0 = "onnx.Add"(%arg0, %arg1) : (tensor<10x10x10xf32>, tensor<10x10x10xf32>) -> tensor<10x10x10xf32>
+    return %0 : tensor<10x10x10xf32>
+  }
+}
+```
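The Example section presumes an `add.onnx` model is already at hand. Stitching the Installation and Using ONNF sections together, a hedged end-to-end sketch (the `bin/` location of `onnf` inside the build directory and the model path are assumptions):

```
# End-to-end sketch: build ONNF and lower a user-supplied add.onnx.
# Assumes llvm-project was already built as described under Installation.
export LLVM_SRC=$PWD/llvm-project/llvm
export LLVM_BUILD=$PWD/llvm-project/build
git clone --recursive git@github.com:clang-ykt/ONNF.git
cd ONNF && mkdir build && cd build
cmake .. && cmake --build . --target all
./bin/onnf --EmitONNXIR path/to/add.onnx   # prints the onnx.Add module shown above
```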

CMakeLists.txt (compiler library)

@@ -7,11 +7,9 @@ add_library(compiler
         dialect/onnx/onnx_ops.hpp
         dialect/krnl/krnl_helper.cpp
         dialect/krnl/krnl_helper.hpp
-        pass/shape_inference_pass.cpp
         pass/shape_inference_interface.hpp
         dialect/onnx/onnxop.inc
         pass/onnx_combine.cpp
-        pass/lower_frontend_to_krnl.cpp
         pass/passes.hpp)

 # Include root src directory.
@@ -70,7 +68,7 @@ add_subdirectory(runtime)

 add_executable(onnf main.cpp)
-target_link_libraries(onnf builder ${MLIRLibs} onnf_transform)
+target_link_libraries(onnf builder ${MLIRLibs} onnf_transform onnf_shape_inference onnf_lower_frontend)
 set_target_properties(onnf PROPERTIES LINK_FLAGS "-lz")
 whole_archive_link_mlir(onnf ${MLIRWholeArchiveLibs})
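Note that the two source files dropped from the first hunk are not deleted from the tree: this change splits the shape-inference and frontend-lowering passes out of the monolithic `compiler` library into standalone libraries, `onnf_shape_inference` and `onnf_lower_frontend`, which the second hunk links into `onnf` and which the onnf-opt changes below can then whole-archive link individually.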

pass/lower_frontend_to_krnl.cpp

@@ -331,9 +331,9 @@ Value *mapToLowerScalarOp<ONNXTanhOp>(Operation *op,
   auto neg = rewriter.create<SubFOp>(loc, zero, operand);
   auto exp = rewriter.create<ExpOp>(loc, operand);
   auto negExp = rewriter.create<ExpOp>(loc, neg);
-  auto result =
-      rewriter.create<DivFOp>(loc, rewriter.create<SubFOp>(loc, exp, negExp),
-                              rewriter.create<AddFOp>(loc, exp, negExp));
+  auto diff = rewriter.create<SubFOp>(loc, exp, negExp);
+  auto sum = rewriter.create<AddFOp>(loc, exp, negExp);
+  auto result = rewriter.create<DivFOp>(loc, diff, sum);

   return result;
 }
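The refactor is behavior-preserving. With `exp` holding e^x and `negExp` holding e^(-x) (built from `neg = zero - operand`), the new `diff` and `sum` values simply name the numerator and denominator of the identity being lowered:

```
\tanh(x) = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}
```

The same SubFOp/AddFOp/DivFOp sequence is emitted as before; the operands are just bound to named values instead of being nested inside the `DivFOp` call.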

CMakeLists.txt (onnf-opt tool)

@@ -4,9 +4,6 @@ add_dependencies(onnf-opt gen_krnl_ops)

 target_include_directories(onnf-opt PRIVATE ${ONNF_SRC_ROOT})
 target_include_directories(onnf-opt PRIVATE ${ONNF_BIN_ROOT})
-target_link_libraries(onnf-opt compiler ${MLIRLibs})
+target_link_libraries(onnf-opt ${MLIRLibs} curses)
 whole_archive_link_mlir(onnf-opt ${MLIRWholeArchiveLibs})
-whole_archive_link_onnf(onnf-opt onnf_transform onnf_lower_frontend onnf_shape_inference)
-
-# TODO: need to investigate how to whole-archive link compiler pass to onnf-opt.
-target_link_libraries(onnf-opt compiler)
+whole_archive_link_onnf(onnf-opt compiler onnf_transform onnf_lower_frontend onnf_shape_inference)
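The deleted TODO explains this change: passes in `compiler` register themselves through static constructors, and linking the library with a plain `target_link_libraries` lets the linker discard those apparently unreferenced objects, silently dropping the registrations. Moving `compiler` into `whole_archive_link_onnf` forces every object file in. A hedged smoke test, noting that the exact pass flag names below are assumptions not confirmed by this diff:

```
# If whole-archive linking worked, onnf-opt's help output should list the
# statically registered ONNF passes (flag names here are assumed).
./bin/onnf-opt --help 2>&1 | grep -i -e shape-inference -e onnx
```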