Enable e2e tests (#29)

* Sync with latest MLIR.

* Enable ONNX backend tests as a means to test ONNF lowering end-to-end.

* Install ONNX using quiet mode.

* Remove debug comments.

* Install ONNX from third_party/onnx.

* Check python version and fix pip command for installing ONNX.

* Use the --user install option to prevent permission-denied errors.

* Remove unused imports.

* Try using the stock ONNX pip package, as it contains more tests.

* Pip got stuck building wheels; try sudo.

* Use verbose install to debug.

* Invalidate cache to build LLVM tools.

* Fix mlir installation script location.

* Debug to locate ONNF.

* Sanity check.

* Check out ONNF code first.

* Use verbose LIT output.

* 1. Update documentation to always use verbose LIT.
2. Update krnl ops to reflect new affine map attribute syntax.

* See if conda exists.

* Install ONNX by manually cloning the repo.

* Install cmake first.

* Use sudo privileges when installing.

* Limit build parallelism.

* Limit parallelism.

* Larger memory.

* Install onnx package with pip.

* Build MLIR tools.

* Invalidate cache.

* Compile model.so with -fPIC.

* Remove module dump to get concise debug output.

* Print command before executing.

* Use quiet install mode to reduce logging.

* Use -relocation-model=pic to generate position independent code.

* 1. Remove MAKEFLAGS because the buildbot now has enough memory.
2. Run DocCheck as a last step.

* Add verbose mode for the backend test.

* When dumping to LLVM bitcode, do not dump module IR, but print a message indicating that bitcode has been written to disk.

* Do not pass MakeFlags to CMake.

* Add more explanation of possible reasons for failing to identify tests.
Tian Jin 2020-01-20 12:30:08 -05:00 committed by GitHub
parent c2d31c0b78
commit 8665ecd998
11 changed files with 94 additions and 66 deletions

View File

@@ -3,27 +3,12 @@ jobs:
build:
docker:
- image: circleci/python
resource_class: medium+
steps:
- run:
name: Installing GCC, CMake, Ninja, Protobuf
command: sudo apt-get update && sudo apt-get install -y gcc g++ cmake ninja-build protobuf-compiler
# Use cached mlir installation if possible.
- restore_cache:
key: V2-LLVM-PROJECT-{{ arch }}
- run:
name: Install MLIR
command: |
# Check whether cache restoration succeeds by checking whether
# mlir-opt executable exists.
if [ ! -f llvm-project/build/bin/mlir-opt ]; then
export MAKEFLAGS=-j4
source utils/install-mlir.sh
fi
- save_cache:
key: V2-LLVM-PROJECT-{{ arch }}
paths:
- llvm-project
- checkout:
path: ONNF
- run:
@@ -31,9 +16,30 @@ jobs:
command: |
cd ONNF
git submodule update --init --recursive
# Use cached mlir installation if possible.
- restore_cache:
key: V4-LLVM-PROJECT-{{ arch }}
- run:
name: Install MLIR
command: |
# Check whether cache restoration succeeds by checking whether
# mlir-opt executable exists.
if [ ! -f llvm-project/build/bin/mlir-opt ]; then
source ONNF/utils/install-mlir.sh
fi
- save_cache:
key: V4-LLVM-PROJECT-{{ arch }}
paths:
- llvm-project
- run:
name: Install ONNF
command: source ONNF/utils/install-onnf.sh
- run:
name: Run End-To-End Tests
command: |
sudo pip install -q onnx
cd ONNF/build
cmake --build . --target run-onnx-backend-test
- run:
name: Run DocCheck
command: cd ONNF/build && cmake --build . --target check-doc

View File

@@ -27,4 +27,3 @@ set(CMAKE_CXX_STANDARD 14)
add_subdirectory(src)
add_subdirectory(doc)
add_subdirectory(test)

View File

@@ -20,7 +20,8 @@ cmake -G Ninja ../llvm \
-DLLVM_ENABLE_ASSERTIONS=ON \
-DLLVM_ENABLE_RTTI=ON
cmake --build . --target check-mlir -- ${MAKEFLAGS}
cmake --build . --target
cmake --build . --target check-mlir
```
Two environment variables need to be set:
@@ -42,6 +43,7 @@ cmake ..
cmake --build . --target onnf
# Run FileCheck tests:
export LIT_OPTS=-v
cmake --build . --target check-mlir-lit
```

View File

@@ -135,10 +135,13 @@ int main(int argc, char *argv[]) {
  if (mlir::failed(pm.run(*module)))
    return 4;
  module->dump();
  // Write LLVM bitcode to disk.
  if (emissionTarget == EmitLLVMBC)
    EmitLLVMBitCode(module);
  if (emissionTarget == EmitLLVMBC) {
    // Write LLVM bitcode to disk.
    EmitLLVMBitCode(module);
    printf("LLVM bitcode written to ./model.bc");
  } else
    module->dump();
  return 0;
}
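
Not part of the diff: one quick way to observe the new driver behavior is to check stdout after a plain invocation. The sketch below assumes an onnf binary at ./bin/onnf, an existing temp_model.onnx, and that a plain invocation emits LLVM bitcode by default (as the backend test further down suggests); all of these are assumptions, not taken from this commit.

```python
# Hypothetical smoke check, assuming ./bin/onnf exists, temp_model.onnx is a
# valid ONNX model, and the default emission target writes LLVM bitcode.
import subprocess

result = subprocess.run(["./bin/onnf", "temp_model.onnx"],
                        capture_output=True, text=True)
# After this change the module IR is no longer dumped when emitting bitcode;
# a short confirmation message is printed instead.
print("LLVM bitcode written" in result.stdout)
```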

View File

@@ -1 +1,2 @@
add_subdirectory(mlir)
add_subdirectory(backend)

View File

@@ -0,0 +1,10 @@
configure_file(test.py test.py COPYONLY)
configure_file(test_config.py.in test_config.py)
find_package(PythonInterp 3 REQUIRED)
add_custom_target(run-onnx-backend-test
COMMAND ${PYTHON_EXECUTABLE}
${CMAKE_CURRENT_BINARY_DIR}/test.py)
add_dependencies(run-onnx-backend-test onnf)
add_dependencies(run-onnx-backend-test pyruntime)

View File

@@ -3,46 +3,51 @@ from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools
import os
import sys
import unittest
import onnx.backend.base
import onnx.backend.test
from onnx.backend.base import Device, DeviceType
import onnx.shape_inference
import onnx.version_converter
import subprocess
import test_config
VERBOSE = bool(os.environ.get("VERBOSE"))
CXX = test_config.CXX_PATH
ONNF = os.path.join(test_config.ONNF_BUILD_PATH, "bin/onnf")
LLC = os.path.join(test_config.LLVM_PROJ_BUILD_PATH, "bin/llc")
# Make lib folder under build directory visible in PYTHONPATH
doc_check_base_dir = os.path.dirname(os.path.realpath(__file__))
RUNTIME_DIR = os.path.join(test_config.ONNF_BUILD_PATH, "lib")
sys.path.append(RUNTIME_DIR)
from pyruntime import ExecutionSession
CXX = os.getenv('CXX')
ONNF = os.getenv('ONNF')
LLC = os.getenv('LLC')
RT_DIR = os.getenv('RT_DIR')
assert CXX and ONNF and LLC and RT_DIR, "tools path not set"
def execute_commands(cmds):
if (VERBOSE):
print(" ".join(cmds))
subprocess.run(cmds, stdout=subprocess.PIPE)
class DummyBackend(onnx.backend.base.Backend):
@classmethod
def prepare(
cls,
model,
device='CPU',
**kwargs
):
def prepare(cls, model, device='CPU', **kwargs):
super(DummyBackend, cls).prepare(model, device, **kwargs)
# Save model to disk as temp_model.onnx.
onnx.save(model, "temp_model.onnx")
# Call frontend to process temp_model.onnx, bit code will be generated.
subprocess.run([ONNF, "temp_model.onnx"], stdout=subprocess.PIPE)
execute_commands([ONNF, "temp_model.onnx"])
# Call llc to generate object file from bitcode.
subprocess.run([LLC, "-filetype=obj", "model.bc"],
stdout=subprocess.PIPE)
execute_commands(
[LLC, "-filetype=obj", "-relocation-model=pic", "model.bc"])
# Generate shared library from object file, linking with c runtime.
subprocess.run([
CXX, "-shared", "model.o", "-o", "model.so", "-L" + RT_DIR,
"-lcruntime"
],
stdout=subprocess.PIPE)
execute_commands([
CXX, "-shared", "-fPIC", "model.o", "-o", "model.so",
"-L" + RUNTIME_DIR, "-lcruntime"
])
return ExecutionSession("./model.so", "_dyn_entry_point_main_graph")
@classmethod
@@ -140,18 +145,15 @@ import inspect
all_tests = inspect.getmembers(
backend_test.test_cases["OnnxBackendNodeModelTest"])
all_test_names = list(map(lambda x: x[0], all_tests))
# Ensure that test names specified in test_to_enable actually exist.
for test_name in test_to_enable:
assert test_name in all_test_names, "test name {} not found".format(test_name)
assert test_name in all_test_names, "test name {} not found, it is likely "
"that you may have misspelled the test name or the specified test does not "
"exist in the version of onnx package you installed.".format(
test_name)
backend_test.include(r"^{}$".format(test_name))
def tearDownModule():
print()
print("*" * 40)
print("A total of {} tests should have run".format(len(test_to_enable)))
print("*" * 40)
# import all test cases at global scope to make them visible to python.unittest
globals().update(backend_test.test_cases)
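
The prepare() flow above can also be reproduced by hand outside the test harness. The sketch below mirrors those steps; the tool locations, the c++ compiler name, the input shape, and the ExecutionSession.run call are illustrative assumptions and are not fixed by this diff.

```python
# Sketch of the compile-and-run pipeline DummyBackend.prepare() performs.
# Paths and the run() signature are assumptions, not taken from this commit.
import subprocess
import sys
import numpy as np

ONNF = "ONNF/build/bin/onnf"                 # assumed build layout
LLC = "llvm-project/build/bin/llc"
RUNTIME_DIR = "ONNF/build/lib"
sys.path.append(RUNTIME_DIR)                 # makes pyruntime importable
from pyruntime import ExecutionSession

# ONNX model -> LLVM bitcode (model.bc).
subprocess.run([ONNF, "temp_model.onnx"], check=True)
# Bitcode -> position-independent object file (model.o).
subprocess.run([LLC, "-filetype=obj", "-relocation-model=pic", "model.bc"],
               check=True)
# Object file -> shared library, linked against the C runtime.
subprocess.run(["c++", "-shared", "-fPIC", "model.o", "-o", "model.so",
                "-L" + RUNTIME_DIR, "-lcruntime"], check=True)

session = ExecutionSession("./model.so", "_dyn_entry_point_main_graph")
outputs = session.run([np.random.rand(2, 2).astype(np.float32)])  # shape assumed
```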

View File

@@ -0,0 +1,3 @@
ONNF_BUILD_PATH = "@CMAKE_BINARY_DIR@"
LLVM_PROJ_BUILD_PATH = "@LLVM_PROJ_BUILD@"
CXX_PATH = "@CMAKE_CXX_COMPILER@"
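
For context, the configure_file(test_config.py.in test_config.py) call in the backend CMakeLists above substitutes the @...@ placeholders at configure time; a generated test_config.py would look roughly like the following (paths are purely illustrative):

```python
# Illustrative result of CMake's configure_file substitution; actual values
# depend on the local build tree and compiler.
ONNF_BUILD_PATH = "/home/user/ONNF/build"
LLVM_PROJ_BUILD_PATH = "/home/user/llvm-project/build"
CXX_PATH = "/usr/bin/c++"
```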

View File

@@ -1,12 +1,12 @@
// RUN: onnf-opt %s -mlir-print-op-generic | FileCheck -check-prefix=GENERIC %s
// RUN: onnf-opt %s | FileCheck %s
// GENERIC-DAG: #{{.*}} = () -> (0)
// GENERIC-DAG: #{{.*}} = () -> (10)
// GENERIC-DAG: #{{.*}} = () -> (1)
// GENERIC-DAG: #{{.*}} = () -> (11)
// GENERIC-DAG: #{{.*}} = (d0, d1) -> (d0 - d1)
// GENERIC-DAG: #{{.*}} = (d0, d1) -> (d0 + d1)
// GENERIC-DAG: #{{.*}} = affine_map<() -> (0)>
// GENERIC-DAG: #{{.*}} = affine_map<() -> (10)>
// GENERIC-DAG: #{{.*}} = affine_map<() -> (1)>
// GENERIC-DAG: #{{.*}} = affine_map<() -> (11)>
// GENERIC-DAG: #{{.*}} = affine_map<(d0, d1) -> (d0 - d1)>
// GENERIC-DAG: #{{.*}} = affine_map<(d0, d1) -> (d0 + d1)>
func @simple_iterate(%N : index) {
%ii, %ij, %ik = krnl.define_loops 3
@@ -55,18 +55,18 @@ func @affine_map_bound(%N : index) {
// GENERIC: "krnl.iterate"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) ( {
// GENERIC-NEXT: ^bb0(%{{.*}}: index, %{{.*}}: index):
// CHECK: krnl.iterate(%{{.*}}, %{{.*}}) with (%{{.*}} -> %{{.*}} = 0 to 10, %{{.*}} -> %{{.*}} = 0 to 10) {
krnl.iterate(%oi, %oj) with (%ii -> %i = ()->(0)() to ()->(10)(), %ij -> %j = 0 to 10) {
krnl.iterate(%oi, %oj) with (%ii -> %i = affine_map<()->(0)>() to affine_map<()->(10)>(), %ij -> %j = 0 to 10) {
// GENERIC: "krnl.iterate"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) ( {
// GENERIC-NEXT: ^bb0(%{{.*}}: index):
// CHECK: krnl.iterate(%{{.*}}) with (%{{.*}} -> %{{.*}} = #{{.*}}(%{{.*}}, %{{.*}}) to #{{.*}}(%{{.*}}, %{{.*}})) {
krnl.iterate(%ok) with (%ik -> %k = (d0, d1)->(d0 - d1)(%i, %j) to (d0, d1)->(d0 + d1)(%i, %j)) {
krnl.iterate(%ok) with (%ik -> %k = affine_map<(d0, d1)->(d0 - d1)>(%i, %j) to affine_map<(d0, d1)->(d0 + d1)>(%i, %j)) {
}
// GENERIC: "krnl.iterate"(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) ( {
// GENERIC-NEXT: ^bb0(%{{.*}}: index):
// CHECK: krnl.iterate(%{{.*}}) with (%{{.*}} -> %{{.*}} = max #map{{.*}}(%{{.*}}, %{{.*}}) to min #map{{.*}}(%{{.*}}, %{{.*}})[%{{.*}}]) {
krnl.iterate(%ok) with (%ik -> %k = max (d0, d1)->(d0 - d1, 0)(%i, %j) to min (d0, d1)[s0]->(d0 + d1, s0)(%i, %j)[%N]) {
krnl.iterate(%ok) with (%ik -> %k = max affine_map<(d0, d1)->(d0 - d1, 0)>(%i, %j) to min affine_map<(d0, d1)[s0]->(d0 + d1, s0)>(%i, %j)[%N]) {
}
}

View File

@@ -9,4 +9,5 @@ cmake -G Ninja ../llvm \
-DLLVM_ENABLE_ASSERTIONS=ON \
-DLLVM_ENABLE_RTTI=ON
cmake --build . --target check-mlir -- ${MAKEFLAGS}
cmake --build . --target
cmake --build . --target check-mlir

View File

@@ -7,4 +7,5 @@ cmake ..
cmake --build . --target onnf
# Run FileCheck tests:
export LIT_OPTS=-v
cmake --build . --target check-mlir-lit