onnx-mlir/src/main.cpp
//===--------------------------- main.cpp ---------------------------------===//
//
// Copyright 2019 The IBM Research Authors.
//
// =============================================================================
//
//===----------------------------------------------------------------------===//
#include <cmath>
#include <cstdlib>
#include <iostream>
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FileUtilities.h"
#include "llvm/Support/InitLLVM.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/SourceMgr.h"
#include "src/builder/frontend_dialect_transformer.hpp"
#include "src/dialect/krnl/krnl_ops.hpp"
#include "src/dialect/onnx/onnx_ops.hpp"
#include "src/pass/passes.hpp"
#include "mlir/Conversion/LoopToStandard/ConvertLoopToStandard.h"
#include "mlir/ExecutionEngine/ExecutionEngine.h"
#include "mlir/ExecutionEngine/OptUtils.h"
#include "mlir/InitAllDialects.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Module.h"
#include "mlir/Parser.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Target/LLVMIR.h"
#include "mlir/Transforms/Passes.h"
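// Forward declaration; the definition follows LoadMLIR below.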
void EmitLLVMBitCode(const mlir::OwningModuleRef &module);
using namespace std;
using namespace onnf;
void LoadMLIR(string inputFilename, mlir::MLIRContext &context,
              mlir::OwningModuleRef &module) {
  // Handle '.mlir' input to the ONNF frontend. A '.mlir' file may contain
  // any combination of the dialects supported by this driver.
  llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileOrErr =
      llvm::MemoryBuffer::getFileOrSTDIN(inputFilename);
  if (std::error_code EC = fileOrErr.getError()) {
    llvm::errs() << "Could not open input file: " << EC.message() << "\n";
    return;
  }
  // Parse the input mlir.
  llvm::SourceMgr sourceMgr;
  sourceMgr.AddNewSourceBuffer(std::move(*fileOrErr), llvm::SMLoc());
  module = mlir::parseSourceFile(sourceMgr, &context);
  if (!module) {
    llvm::errs() << "Error: cannot load file " << inputFilename << "\n";
    return;
  }
}
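// Translate the MLIR module to LLVM IR and write it to ./model.bc as
// LLVM bitcode.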
void EmitLLVMBitCode(const mlir::OwningModuleRef &module) {
  error_code error;
  llvm::raw_fd_ostream moduleBitcodeStream("model.bc", error,
                                           llvm::sys::fs::F_None);
  if (error) {
    llvm::errs() << "Could not open model.bc: " << error.message() << "\n";
    exit(1);
  }
  llvm::WriteBitcodeToFile(*mlir::translateModuleToLLVMIR(*module),
                           moduleBitcodeStream);
  moduleBitcodeStream.flush();
}
int main(int argc, char *argv[]) {
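  // Register every dialect that may appear in the input or be produced by
  // the lowering passes below.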
  mlir::registerDialect<mlir::AffineOpsDialect>();
  mlir::registerDialect<mlir::LLVM::LLVMDialect>();
  mlir::registerDialect<mlir::loop::LoopOpsDialect>();
  mlir::registerDialect<mlir::StandardOpsDialect>();
  mlir::registerDialect<mlir::ONNXOpsDialect>();
  mlir::registerDialect<mlir::KrnlOpsDialect>();
  llvm::cl::OptionCategory OnnfOptions("ONNF Options",
                                       "These are frontend options.");
  llvm::cl::opt<string> inputFilename(
      llvm::cl::Positional, llvm::cl::desc("<input file>"), llvm::cl::init("-"),
      llvm::cl::cat(OnnfOptions));
  enum EmissionTargetType {
    EmitONNXIR,
    EmitMLIR,
    EmitLLVMIR,
    EmitLLVMBC,
  };
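  // Note: the declaration order of these targets matters; the >= comparisons
  // below rely on it running from least to most lowered.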
  llvm::cl::opt<EmissionTargetType> emissionTarget(
      llvm::cl::desc("Choose target to emit:"),
      llvm::cl::values(
          clEnumVal(EmitONNXIR,
                    "Ingest ONNX and emit the corresponding ONNX dialect."),
          clEnumVal(EmitMLIR,
                    "Lower model to MLIR built-in transformation dialect."),
          clEnumVal(EmitLLVMIR, "Lower model to LLVM IR (LLVM dialect)."),
          clEnumVal(EmitLLVMBC, "Lower model to LLVM IR and emit LLVM "
                                "bitcode to a file.")),
      llvm::cl::init(EmitLLVMBC), llvm::cl::cat(OnnfOptions));
  llvm::cl::HideUnrelatedOptions(OnnfOptions);
  llvm::cl::ParseCommandLineOptions(argc, argv,
                                    "ONNF MLIR modular optimizer driver\n");
  // Determine whether the input file is an ONNX model or a model specified
  // in MLIR, based on its file extension.
  string extension = inputFilename.substr(inputFilename.find_last_of(".") + 1);
  bool inputIsONNX = (extension == "onnx");
  bool inputIsMLIR = (extension == "mlir");
  assert(inputIsONNX != inputIsMLIR &&
         "Input must be either a '.onnx' model or a '.mlir' file.");
  mlir::MLIRContext context;
  mlir::OwningModuleRef module;
  if (inputIsONNX) {
    ImportFrontendModelFile(inputFilename, context, module);
  } else {
    LoadMLIR(inputFilename, context, module);
  }
  if (!module)
    return 1; // Loading failed; avoid running passes on a null module.

  mlir::PassManager pm(&context);
  pm.addPass(mlir::createCanonicalizerPass());
  pm.addPass(mlir::createShapeInferencePass());
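  // Canonicalization and shape inference run for every emission target; the
  // dialect lowerings below are added only for the deeper targets.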
  if (emissionTarget >= EmitMLIR) {
    pm.addPass(mlir::createLowerToKrnlPass());
    // An additional canonicalization pass is helpful because lowering out of
    // the ONNX dialect exposes further canonicalization opportunities.
    pm.addPass(mlir::createCanonicalizerPass());
    pm.addPass(mlir::createLowerKrnlPass());
  }
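  // Lower the remaining affine and loop constructs all the way down to the
  // LLVM dialect.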
  if (emissionTarget >= EmitLLVMIR) {
    pm.addPass(mlir::createLowerAffinePass());
    pm.addPass(mlir::createLowerToCFGPass());
    pm.addPass(mlir::createKrnlLowerToLLVMPass());
    pm.addPass(mlir::createCanonicalizerPass());
  }
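  // Run the pipeline; bail out with a distinct nonzero exit code if any
  // pass fails.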
  if (mlir::failed(pm.run(*module)))
    return 4;
  if (emissionTarget == EmitLLVMBC) {
    // Write LLVM bitcode to disk and report the location instead of dumping
    // the module IR.
    EmitLLVMBitCode(module);
    printf("LLVM bitcode written to ./model.bc\n");
  } else {
    module->dump();
  }
  return 0;
}