onnx-mlir/src/main.cpp

//===--------------------------- main.cpp ---------------------------------===//
//
// Copyright 2019 The IBM Research Authors.
//
// =============================================================================
//
// Main entry point for the onnx-mlir driver.
//
//===----------------------------------------------------------------------===//
#include "src/MainUtils.hpp"
using namespace std;
using namespace onnx_mlir;
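// Driver flow: parse command-line options, ingest the input model into an
// MLIR module, assemble the lowering pipeline for the requested emission
// target, run it, and emit the resulting files.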
int main(int argc, char *argv[]) {
  registerDialects();

  llvm::cl::OptionCategory OnnxMlirOptions(
      "ONNX MLIR Options", "These are frontend options.");
  llvm::cl::opt<string> inputFilename(llvm::cl::Positional,
      llvm::cl::desc("<input file>"), llvm::cl::init("-"),
      llvm::cl::cat(OnnxMlirOptions));
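
  // The emission targets form an ordered ladder; each later target also runs
  // every pass required by the earlier ones (see the >= checks below).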
  llvm::cl::opt<EmissionTargetType> emissionTarget(
      llvm::cl::desc("Choose target to emit:"),
      llvm::cl::values(
          clEnumVal(EmitONNXBasic,
              "Ingest ONNX and emit the basic ONNX operations without "
              "inferred shapes."),
          clEnumVal(
              EmitONNXIR, "Ingest ONNX and emit corresponding ONNX dialect."),
          clEnumVal(
              EmitMLIR, "Lower model to MLIR built-in transformation dialect."),
          clEnumVal(EmitLLVMIR, "Lower model to LLVM IR (LLVM dialect)."),
          clEnumVal(EmitLLVMBC, "Lower model to LLVM IR and emit (to file) "
                                "LLVM bitcode for model.")),
      llvm::cl::init(EmitLLVMBC), llvm::cl::cat(OnnxMlirOptions));
  llvm::cl::HideUnrelatedOptions(OnnxMlirOptions);
  llvm::cl::ParseCommandLineOptions(
      argc, argv, "ONNX MLIR modular optimizer driver\n");
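
  // Ingest the input file into an MLIR module owned by this context.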
  mlir::MLIRContext context;
  mlir::OwningModuleRef module;
  processInputFile(inputFilename, emissionTarget, context, module);

  // Input file base name, used to derive the output file names.
  string outputBaseName =
      inputFilename.substr(0, inputFilename.find_last_of("."));
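
  // Build the pass pipeline cumulatively: ONNX-level passes first, then
  // lowering through Krnl and affine, and finally to the LLVM dialect.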
  mlir::PassManager pm(&context);
  if (emissionTarget >= EmitONNXIR) {
    addONNXToMLIRPasses(pm);
  }
  if (emissionTarget >= EmitMLIR) {
    addONNXToKrnlPasses(pm);
    addKrnlToAffinePasses(pm);
  }
  if (emissionTarget >= EmitLLVMIR)
    addKrnlToLLVMPasses(pm);
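
  // Run the pipeline; bail out with a nonzero exit code on pass failure.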
  if (mlir::failed(pm.run(*module)))
    return 4;

  emitOutputFiles(outputBaseName, emissionTarget, context, module);

  return 0;
}