From ad61eee908a5a1bb34243e66dcf7d1962e9c77c9 Mon Sep 17 00:00:00 2001
From: "Tung D. Le"
Date: Wed, 20 May 2020 16:45:42 +0900
Subject: [PATCH] Move to more recent LLVM commit ID (#131)

* Move to more recent LLVM ID (May 15)

* clang-format

* Bump cache version up

* Update readme

* Fix doc check

* Move to a newer commit id

* Update LoopToStandard -> SCFToStandard

* Change MLIRSideEffects to MLIRSideEffectInterfaces

Co-authored-by: Tian Jin
---
 .circleci/config.yml                  |  4 ++--
 MLIR.cmake                            | 16 ++++++++--------
 README.md                             |  4 ++--
 docs/README.md                        |  4 ++--
 src/Builder/FrontendDialectHelper.hpp |  2 +-
 src/Dialect/Krnl/KrnlOps.cpp          | 23 +++++++++++------------
 src/Dialect/Krnl/KrnlOps.td           |  8 ++++----
 src/Dialect/MLONNX/MLONNXOps.td       |  2 +-
 src/Dialect/ONNX/ONNXOps.cpp          | 14 +++++++-------
 src/Dialect/ONNX/ONNXOps.td           |  6 +++---
 src/Dialect/ONNX/ONNXOps.td.inc       | 26 +++++++++++++-------------
 src/MainUtils.cpp                     |  2 +-
 src/MainUtils.hpp                     |  4 ++--
 src/Tool/ONNXMLIROpt/ONNXMLIROpt.cpp  |  2 +-
 src/Transform/LowerToLLVM.cpp         |  4 ++--
 utils/clone-mlir.sh                   |  2 +-
 utils/gen_doc.py                      | 10 +++++-----
 17 files changed, 66 insertions(+), 67 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 5a8c23f..4df5769 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -18,7 +18,7 @@ jobs:
             git submodule update --init --recursive
       # Use cached mlir installation if possible.
       - restore_cache:
-          key: V11-LLVM-PROJECT-{{ arch }}
+          key: V13-LLVM-PROJECT-{{ arch }}
       - run:
           name: Install MLIR
           command: |
@@ -29,7 +29,7 @@ jobs:
               source onnx-mlir/utils/install-mlir.sh
             fi
       - save_cache:
-          key: V11-LLVM-PROJECT-{{ arch }}
+          key: V13-LLVM-PROJECT-{{ arch }}
           paths:
             - llvm-project
       - run:
diff --git a/MLIR.cmake b/MLIR.cmake
index 2dbb7de..f189ea9 100644
--- a/MLIR.cmake
+++ b/MLIR.cmake
@@ -144,16 +144,16 @@ find_mlir_lib(MLIRExecutionEngine)
 find_mlir_lib(MLIRIR)
 find_mlir_lib(MLIRLLVMIR)
 find_mlir_lib(MLIRLoopAnalysis)
-find_mlir_lib(MLIRLoopToStandard)
-find_mlir_lib(MLIRLoopOps)
+find_mlir_lib(MLIRSCFToStandard)
 find_mlir_lib(MLIRLoopLikeInterface)
+find_mlir_lib(MLIRSCF)
 find_mlir_lib(MLIRLLVMIRTransforms)
 find_mlir_lib(MLIRMlirOptMain)
 find_mlir_lib(MLIRParser)
 find_mlir_lib(MLIRPass)
 find_mlir_lib(MLIRStandardOps)
 find_mlir_lib(MLIRStandardToLLVM)
-find_mlir_lib(MLIRSideEffects)
+find_mlir_lib(MLIRSideEffectInterfaces)
 find_mlir_lib(MLIRTargetLLVMIR)
 find_mlir_lib(MLIRTransforms)
 find_mlir_lib(MLIRTransformUtils)
@@ -205,13 +205,13 @@ set(MLIRLibs
         ${MLIRExecutionEngine}
         ${MLIRIR}
         ${MLIRLLVMIRTransforms}
-        ${MLIRLoopToStandard}
-        ${MLIRLoopOps}
+        ${MLIRSCFToStandard}
+        ${MLIRSCF}
         ${MLIRLoopAnalysis}
        ${MLIRLoopLikeInterface}
        ${MLIROpenMP}
        ${MLIRMlirOptMain}
-       ${MLIRSideEffects}
+       ${MLIRSideEffectInterfaces}
        ${MLIRStandardOps}
        ${MLIRStandardToLLVM}
        ${MLIRSupport}
@@ -251,9 +251,9 @@ set(MLIRWholeArchiveLibs
     MLIRStandardOps
     MLIRStandardToLLVM
     MLIRTransforms
-    MLIRLoopToStandard
+    MLIRSCFToStandard
     MLIRVector
-    MLIRLoopOps
+    MLIRSCF
     MLIRIR)

 # ONNX MLIR libraries that must be linked with --whole-archive for static build or
diff --git a/README.md b/README.md
index e3ce263..30cf4dc 100644
--- a/README.md
+++ b/README.md
@@ -24,7 +24,7 @@ Firstly, install MLIR (as a part of LLVM-Project):
 ``` bash
 git clone https://github.com/llvm/llvm-project.git
 # Check out a specific branch that is known to work with ONNX MLIR.
-cd llvm-project && git checkout 3ce0ad1b336e67a76d78ae7ff7d66fe127586620 && cd ..
+cd llvm-project && git checkout 0dc91bfd11e6cced0c46c1a25cc96edea0d8fc22 && cd ..
 ```

 [same-as-file]: <> (utils/build-mlir.sh)
@@ -114,7 +114,7 @@ Install MLIR (as a part of LLVM-Project):
 ```shell
 git clone https://github.com/llvm/llvm-project.git
 # Check out a specific branch that is known to work with ONNX MLIR.
-cd llvm-project && git checkout 3ce0ad1b336e67a76d78ae7ff7d66fe127586620 && cd ..
+cd llvm-project && git checkout 0dc91bfd11e6cced0c46c1a25cc96edea0d8fc22 && cd ..
 ```

 [same-as-file]: <> (utils/build-mlir.cmd)
diff --git a/docs/README.md b/docs/README.md
index d452d34..5d5b2d1 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -20,7 +20,7 @@ Firstly, install MLIR (as a part of LLVM-Project):
 ``` bash
 git clone https://github.com/llvm/llvm-project.git
 # Check out a specific branch that is known to work with ONNX MLIR.
-cd llvm-project && git checkout 3ce0ad1b336e67a76d78ae7ff7d66fe127586620 && cd ..
+cd llvm-project && git checkout 0dc91bfd11e6cced0c46c1a25cc96edea0d8fc22 && cd ..
 ```

 [same-as-file]: <> (utils/build-mlir.sh)
@@ -110,7 +110,7 @@ Install MLIR (as a part of LLVM-Project):
 ```shell
 git clone https://github.com/llvm/llvm-project.git
 # Check out a specific branch that is known to work with ONNX MLIR.
-cd llvm-project && git checkout 3ce0ad1b336e67a76d78ae7ff7d66fe127586620 && cd ..
+cd llvm-project && git checkout 0dc91bfd11e6cced0c46c1a25cc96edea0d8fc22 && cd ..
 ```

 [same-as-file]: <> (utils/build-mlir.cmd)
diff --git a/src/Builder/FrontendDialectHelper.hpp b/src/Builder/FrontendDialectHelper.hpp
index 584b2f3..aa348d7 100644
--- a/src/Builder/FrontendDialectHelper.hpp
+++ b/src/Builder/FrontendDialectHelper.hpp
@@ -14,7 +14,6 @@
 #include
 #include
-#include "mlir/Analysis/Verifier.h"
 #include "mlir/Dialect/StandardOps/IR/Ops.h"
 #include "mlir/IR/Attributes.h"
 #include "mlir/IR/Builders.h"
@@ -26,6 +25,7 @@
 #include "mlir/IR/PatternMatch.h"
 #include "mlir/IR/StandardTypes.h"
 #include "mlir/IR/Types.h"
+#include "mlir/IR/Verifier.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/ScopedHashTable.h"
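Note on the header move above: the verifier relocated from `mlir/Analysis/Verifier.h` to `mlir/IR/Verifier.h` at this LLVM commit, but `mlir::verify` itself is unchanged, so call sites only need the new include. A minimal sketch, assuming an already-constructed `ModuleOp` (the helper `moduleIsValid` is illustrative, not code from this patch):

```cpp
// Sketch only: verifying a module after the LLVM bump; the include now
// lives under mlir/IR/, not mlir/Analysis/.
#include "mlir/IR/Module.h"
#include "mlir/IR/Verifier.h"
#include "mlir/Support/LogicalResult.h"

static bool moduleIsValid(mlir::ModuleOp module) {
  // mlir::verify returns success() iff every op satisfies its invariants.
  return mlir::succeeded(mlir::verify(module));
}
```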
diff --git a/src/Dialect/Krnl/KrnlOps.cpp b/src/Dialect/Krnl/KrnlOps.cpp
index 6142701..ffa68c2 100644
--- a/src/Dialect/Krnl/KrnlOps.cpp
+++ b/src/Dialect/Krnl/KrnlOps.cpp
@@ -47,12 +47,12 @@ KrnlOpsDialect::KrnlOpsDialect(MLIRContext *context)
 //===----------------------------------------------------------------------===//

 void KrnlDefineLoopsOp::build(
-    Builder *builder, OperationState &result, int64_t num_loops) {
+    OpBuilder &builder, OperationState &result, int64_t num_loops) {
   // Create the same number of dimension handlers as the number of
   // dimensions in the associated integer set.
-  result.types.append(num_loops, LoopType::get(builder->getContext()));
+  result.types.append(num_loops, LoopType::get(builder.getContext()));
   result.addAttribute(
-      getNumLoopsAttrName(), builder->getI32IntegerAttr(num_loops));
+      getNumLoopsAttrName(), builder.getI32IntegerAttr(num_loops));
 }

 void print(OpAsmPrinter &p, KrnlDefineLoopsOp &op) {
@@ -83,9 +83,8 @@ ParseResult parseKrnlDefineLoopsOp(
 //===----------------------------------------------------------------------===//

 void KrnlOptimizeLoopsOp::build(
-    Builder *builder, OperationState &result, int num_optimized_loops) {
-  result.types.append(
-      num_optimized_loops, LoopType::get(builder->getContext()));
+    OpBuilder &builder, OperationState &result, int num_optimized_loops) {
+  result.types.append(num_optimized_loops, LoopType::get(builder.getContext()));
   // Create a region and a block for the body.
   // Schedule intrinsics will be placed into this region.
   Region *region = result.addRegion();
@@ -145,7 +144,7 @@ ParseResult parseKrnlOptimizeLoopsOp(
  * Then the bounds will be parsed as:
  *   %i0 = 10 to N : %i1 = M to 20
  */
-void KrnlIterateOp::build(Builder *builder, OperationState &result,
+void KrnlIterateOp::build(OpBuilder &builder, OperationState &result,
     KrnlIterateOperandPack operandPack) {
   // Record optimized loops and the number of such loops.
   result.addOperands(operandPack.getOperands());
@@ -153,7 +152,7 @@ void KrnlIterateOp::build(Builder *builder, OperationState &result,
       KrnlIterateOp::getBoundsAttrName(), operandPack.getAttributes());

   result.addAttribute(getNumOptimizedLoopsAttrName(),
-      builder->getI64IntegerAttr(operandPack.getNumOptimizedLoops()));
+      builder.getI64IntegerAttr(operandPack.getNumOptimizedLoops()));

   // Create a region and a block for the body. The arguments of the region are
   // the loop induction variables; there can be multiple induction variables
@@ -161,11 +160,11 @@ void KrnlIterateOp::build(Builder *builder, OperationState &result,
   Region *bodyRegion = result.addRegion();
   auto *body = new Block();
   auto body_args = llvm::SmallVector<Type, 4>(
-      operandPack.getNumInputLoops(), IndexType::get(builder->getContext()));
+      operandPack.getNumInputLoops(), IndexType::get(builder.getContext()));
   body->addArguments(body_args);
   bodyRegion->push_back(body);

-  ensureTerminator(*bodyRegion, *builder, result.location);
+  ensureTerminator(*bodyRegion, builder, result.location);
 }

 void print(OpAsmPrinter &p, KrnlIterateOp &op) {
@@ -247,7 +246,7 @@ ParseResult parseKrnlIterateOp(OpAsmParser &parser, OperationState &result) {
     // Get the attribute location.
     llvm::SMLoc attrLoc = parser.getCurrentLocation();
     Attribute boundAttr;
-    llvm::SmallVector<NamedAttribute, 2> tempBoundAttrContainer;
+    NamedAttrList tempBoundAttrContainer;
     if (parser.parseAttribute(
             boundAttr, builder.getIndexType(), "temp", tempBoundAttrContainer))
       return failure();
@@ -361,7 +360,7 @@ ParseResult parseKrnlReturnLoopsOp(
   return success();
 }

-void KrnlEntryPointOp::build(mlir::Builder *builder, OperationState &state,
+void KrnlEntryPointOp::build(mlir::OpBuilder &builder, OperationState &state,
     SymbolRefAttr funcAttr, IntegerAttr numInputs, IntegerAttr numOutputs) {
   state.addAttribute(KrnlEntryPointOp::getEntryPointFuncAttrName(), funcAttr);
   state.addAttribute(KrnlEntryPointOp::getNumInputsAttrName(), numInputs);
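The recurring change in this patch is upstream's switch of `build()` methods from `Builder *` to `OpBuilder &`. A hedged before/after sketch of the calling convention (the free function and attribute name below are illustrative, not code from this repository):

```cpp
#include "mlir/IR/Builders.h"
#include "mlir/IR/OperationSupport.h"

// After the bump, the builder arrives by reference: members are reached with
// '.' instead of '->', and helpers such as ensureTerminator take the builder
// directly rather than a dereferenced pointer.
static void addNumLoopsAttr(mlir::OpBuilder &builder,
                            mlir::OperationState &result, int64_t numLoops) {
  // Previously: builder->getI64IntegerAttr(numLoops) with Builder *builder.
  result.addAttribute("num_loops", builder.getI64IntegerAttr(numLoops));
}
```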
diff --git a/src/Dialect/Krnl/KrnlOps.td b/src/Dialect/Krnl/KrnlOps.td
index a2772f9..74d08dd 100644
--- a/src/Dialect/Krnl/KrnlOps.td
+++ b/src/Dialect/Krnl/KrnlOps.td
@@ -29,7 +29,7 @@ def KrnlDefineLoopsOp : Op<Krnl_Dialect, "define_loops"> {
   let arguments = (ins);
   let results = (outs Variadic<AnyType>);
   let skipDefaultBuilders = 1;
-  let builders = [ OpBuilder<"Builder *builder, OperationState &result,"
+  let builders = [ OpBuilder<"OpBuilder &builder, OperationState &result,"
                             "int64_t num_loops"> ];

   let printer = [{ return ::print(p, *this); }];
@@ -67,7 +67,7 @@ def KrnlOptimizeLoopsOp : Op<Krnl_Dialect, "optimize_loops"> {

   let skipDefaultBuilders = 1;
-  let builders = [ OpBuilder<"Builder *builder, OperationState &result, "
+  let builders = [ OpBuilder<"OpBuilder &builder, OperationState &result, "
                             "int timestamp_space_rank"> ];

   let printer = [{ return ::print(p, *this); }];
@@ -100,7 +100,7 @@ def KrnlIterateOp : Op<Krnl_Dialect, "iterate"> {
   let arguments = (ins Variadic<AnyType>);
   let regions = (region SizedRegion<1>:$bodyRegion);
   let skipDefaultBuilders = 1;
-  let builders = [ OpBuilder<"Builder *builder, OperationState &result, "
+  let builders = [ OpBuilder<"OpBuilder &builder, OperationState &result, "
                             "KrnlIterateOperandPack operandPack"> ];

   let extraClassDeclaration = [{
@@ -165,7 +165,7 @@ def KrnlEntryPointOp : Op<Krnl_Dialect, "entry_point"> {
   let summary = "Indicate ONNX entry point";
   let description = [{The "krnl.entry_point" function indicates the main entry point of ONNX model.}];

-  let builders = [ OpBuilder<"Builder *builder, OperationState &result, "
+  let builders = [ OpBuilder<"OpBuilder &builder, OperationState &result, "
                             "SymbolRefAttr funcAttr, IntegerAttr numInputs, "
                             "IntegerAttr numOutputs"> ];
diff --git a/src/Dialect/MLONNX/MLONNXOps.td b/src/Dialect/MLONNX/MLONNXOps.td
index f71fdd1..713881a 100644
--- a/src/Dialect/MLONNX/MLONNXOps.td
+++ b/src/Dialect/MLONNX/MLONNXOps.td
@@ -61,7 +61,7 @@ class MLONNX_Op<string mnemonic, list<OpTrait> traits = []> :
 // 4. type of string, complex64 and complex128 for input/output are ignored
 // 5. unsigned int are treated as signed one

-include "mlir/Interfaces/SideEffects.td"
+include "mlir/Interfaces/SideEffectInterfaces.td"
 include "src/Dialect/MLONNX/MLONNXOps.td.inc"

 #endif // MLONNX_OPS
diff --git a/src/Dialect/ONNX/ONNXOps.cpp b/src/Dialect/ONNX/ONNXOps.cpp
index ff733ca..6cedbae 100644
--- a/src/Dialect/ONNX/ONNXOps.cpp
+++ b/src/Dialect/ONNX/ONNXOps.cpp
@@ -438,22 +438,22 @@ ONNXOpsDialect::ONNXOpsDialect(mlir::MLIRContext *ctx)
       >();
 }

-void ONNXEntryPointOp::build(mlir::Builder *builder,
+void ONNXEntryPointOp::build(mlir::OpBuilder &builder,
     mlir::OperationState &state, mlir::FuncOp function, int numInputs,
     int numOutputs) {
   state.addAttribute(ONNXEntryPointOp::getEntryPointFuncAttrName(),
-      builder->getSymbolRefAttr(function));
+      builder.getSymbolRefAttr(function));
   state.addAttribute(ONNXEntryPointOp::getNumInputsAttrName(),
-      builder->getI32IntegerAttr(numInputs));
+      builder.getI32IntegerAttr(numInputs));
   state.addAttribute(ONNXEntryPointOp::getNumOutputsAttrName(),
-      builder->getI32IntegerAttr(numOutputs));
+      builder.getI32IntegerAttr(numOutputs));
 }

 ONNXEntryPointOp ONNXEntryPointOp::create(mlir::Location location,
     mlir::FuncOp &func, int numInputs, int numOutputs) {
   mlir::OperationState state(location, "onnx.EntryPoint");
-  Builder builder(location->getContext());
-  mlir::ONNXEntryPointOp::build(&builder, state, func, numInputs, numOutputs);
+  OpBuilder builder(location->getContext());
+  mlir::ONNXEntryPointOp::build(builder, state, func, numInputs, numOutputs);
   Operation *op = mlir::Operation::create(state);
   auto onnxEntryOp = llvm::cast<mlir::ONNXEntryPointOp>(op);
   return onnxEntryOp;
@@ -1573,7 +1573,7 @@ bool ONNXPadConstantValuePadOp::inferShapes() {
   return false;
 }

-void ONNXPadConstantValuePadOp::build(Builder *builder, OperationState &state,
+void ONNXPadConstantValuePadOp::build(OpBuilder &builder, OperationState &state,
     Value data, ArrayAttr pads, FloatAttr constant_value, StringAttr mode) {
   Type outputType = padShapeInferenceHelper(data, pads);
   if (!outputType) {
diff --git a/src/Dialect/ONNX/ONNXOps.td b/src/Dialect/ONNX/ONNXOps.td
index 9956f0d..1ae7791 100644
--- a/src/Dialect/ONNX/ONNXOps.td
+++ b/src/Dialect/ONNX/ONNXOps.td
@@ -61,7 +61,7 @@ class ONNX_Op<string mnemonic, list<OpTrait> traits = []> :
 // 4. type of string, complex64 and complex128 for input/output are ignored
 // 5. unsigned int are treated as signed one

-include "mlir/Interfaces/SideEffects.td"
+include "mlir/Interfaces/SideEffectInterfaces.td"
 include "src/Dialect/ONNX/ONNXOps.td.inc"

 // Indicate entry point functions of ONNX graph.
@@ -71,7 +71,7 @@ def ONNXEntryPointOp: ONNX_Op<"EntryPoint"> {
     The "onnx.EntryPoint" function indicates the main entry point
     of ONNX model.
   }];
-  let builders = [OpBuilder<[{Builder *builder, OperationState &state,
+  let builders = [OpBuilder<[{OpBuilder &builder, OperationState &state,
     FuncOp function, int numInputs, int numOutputs}]>];

   let extraClassDeclaration = [{
@@ -183,7 +183,7 @@ def ONNXPadConstantValuePadOp : ONNX_Op<"PadConstantValuePad",
     DefaultValuedAttr<StrAttr, "constant">:$mode);
   let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$output);
   // A build method with the result type deduction.
-  let builders = [OpBuilder<"Builder *builder, OperationState &state, "
+  let builders = [OpBuilder<"OpBuilder &builder, OperationState &state, "
                            "Value data, ArrayAttr pads, "
                            "FloatAttr constant_value, StringAttr mode">];
 }
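For context, the "result type deduction" builder declared above lets call sites create the op without spelling out `$output`'s type. A hedged usage sketch (the wrapper function is hypothetical; the values passed in are assumed to exist at the call site):

```cpp
#include "mlir/IR/Builders.h"
#include "src/Dialect/ONNX/ONNXOps.hpp"

// Sketch: OpBuilder::create<OpTy> forwards its arguments to the custom
// build() declared above, which computes the output type from `data` and
// `pads` via padShapeInferenceHelper.
static mlir::ONNXPadConstantValuePadOp createPad(mlir::OpBuilder &builder,
    mlir::Location loc, mlir::Value data, mlir::ArrayAttr pads,
    mlir::FloatAttr constantValue, mlir::StringAttr mode) {
  return builder.create<mlir::ONNXPadConstantValuePadOp>(
      loc, data, pads, constantValue, mode);
}
```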
diff --git a/src/Dialect/ONNX/ONNXOps.td.inc b/src/Dialect/ONNX/ONNXOps.td.inc
index 39a785f..7993e89 100644
--- a/src/Dialect/ONNX/ONNXOps.td.inc
+++ b/src/Dialect/ONNX/ONNXOps.td.inc
@@ -15,11 +15,11 @@ def ONNXAbsOp:ONNX_Op<"Abs",
   let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X);
   let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$Y);
   let builders = [
-  OpBuilder<"Builder *builder, OperationState &state, Value X", [{
+  OpBuilder<"OpBuilder &builder, OperationState &state, Value X", [{
     auto elementType = X.getType().cast<TensorType>().getElementType();
     build(builder, state, UnrankedTensorType::get(elementType), X);
   }]>,
-  OpBuilder<"Builder *builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
+  OpBuilder<"OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
     auto elementType = operands[0].getType().cast<TensorType>().getElementType();
     std::vector<mlir::Type> outputTypes;
     outputTypes.emplace_back(UnrankedTensorType::get(elementType));
@@ -349,7 +349,7 @@ def ONNXConstantOp:ONNX_Op<"Constant",
     OptionalAttr<AnyAttr>:$value);
   let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$output);
   let builders = [
-  OpBuilder<"Builder *builder, OperationState &state, Attribute sparse_value, Attribute value", [{
+  OpBuilder<"OpBuilder &builder, OperationState &state, Attribute sparse_value, Attribute value", [{
     if (value) {
       auto tensorType = value.getType();
       build(builder, state, tensorType, sparse_value, value);
@@ -673,11 +673,11 @@ def ONNXExpOp:ONNX_Op<"Exp",
   let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input);
   let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$output);
   let builders = [
-  OpBuilder<"Builder *builder, OperationState &state, Value input", [{
+  OpBuilder<"OpBuilder &builder, OperationState &state, Value input", [{
     auto elementType = input.getType().cast<TensorType>().getElementType();
     build(builder, state, UnrankedTensorType::get(elementType), input);
   }]>,
-  OpBuilder<"Builder *builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
+  OpBuilder<"OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
     auto elementType = operands[0].getType().cast<TensorType>().getElementType();
     std::vector<mlir::Type> outputTypes;
     outputTypes.emplace_back(UnrankedTensorType::get(elementType));
@@ -1780,11 +1780,11 @@ def ONNXMulOp:ONNX_Op<"Mul",
     AnyTypeOf<[AnyMemRef, AnyTensor]>:$B);
   let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$C);
   let builders = [
-  OpBuilder<"Builder *builder, OperationState &state, Value A, Value B", [{
+  OpBuilder<"OpBuilder &builder, OperationState &state, Value A, Value B", [{
     auto elementType = A.getType().cast<TensorType>().getElementType();
     build(builder, state, UnrankedTensorType::get(elementType), A, B);
   }]>,
-  OpBuilder<"Builder *builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
+  OpBuilder<"OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
     auto elementType = operands[0].getType().cast<TensorType>().getElementType();
     std::vector<mlir::Type> outputTypes;
     outputTypes.emplace_back(UnrankedTensorType::get(elementType));
@@ -2014,11 +2014,11 @@ def ONNXPadOp:ONNX_Op<"Pad",
     DefaultValuedAttr<StrAttr, "constant">:$mode);
   let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$output);
   let builders = [
-  OpBuilder<"Builder *builder, OperationState &state, Value data, Value pads, Value constant_value, StringAttr mode", [{
+  OpBuilder<"OpBuilder &builder, OperationState &state, Value data, Value pads, Value constant_value, StringAttr mode", [{
     auto elementType = data.getType().cast<TensorType>().getElementType();
     build(builder, state, UnrankedTensorType::get(elementType), data, pads, constant_value, mode);
   }]>,
-  OpBuilder<"Builder *builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
+  OpBuilder<"OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
     auto elementType = operands[0].getType().cast<TensorType>().getElementType();
     std::vector<mlir::Type> outputTypes;
     outputTypes.emplace_back(UnrankedTensorType::get(elementType));
@@ -2473,11 +2473,11 @@ def ONNXReduceSumOp:ONNX_Op<"ReduceSum",
     DefaultValuedAttr<I64Attr, "1">:$keepdims);
   let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$reduced);
   let builders = [
-  OpBuilder<"Builder *builder, OperationState &state, Value data, ArrayAttr axes, IntegerAttr keepdims", [{
+  OpBuilder<"OpBuilder &builder, OperationState &state, Value data, ArrayAttr axes, IntegerAttr keepdims", [{
     auto elementType = data.getType().cast<TensorType>().getElementType();
     build(builder, state, UnrankedTensorType::get(elementType), data, axes, keepdims);
   }]>,
-  OpBuilder<"Builder *builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
+  OpBuilder<"OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
     auto elementType = operands[0].getType().cast<TensorType>().getElementType();
     std::vector<mlir::Type> outputTypes;
     outputTypes.emplace_back(UnrankedTensorType::get(elementType));
@@ -2502,11 +2502,11 @@ def ONNXReduceSumSquareOp:ONNX_Op<"ReduceSumSquare",
     DefaultValuedAttr<I64Attr, "1">:$keepdims);
   let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$reduced);
   let builders = [
-  OpBuilder<"Builder *builder, OperationState &state, Value data, ArrayAttr axes, IntegerAttr keepdims", [{
+  OpBuilder<"OpBuilder &builder, OperationState &state, Value data, ArrayAttr axes, IntegerAttr keepdims", [{
     auto elementType = data.getType().cast<TensorType>().getElementType();
     build(builder, state, UnrankedTensorType::get(elementType), data, axes, keepdims);
   }]>,
-  OpBuilder<"Builder *builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
+  OpBuilder<"OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
     auto elementType = operands[0].getType().cast<TensorType>().getElementType();
     std::vector<mlir::Type> outputTypes;
     outputTypes.emplace_back(UnrankedTensorType::get(elementType));
diff --git a/src/MainUtils.cpp b/src/MainUtils.cpp
index 0679cca..68b46af 100644
--- a/src/MainUtils.cpp
+++ b/src/MainUtils.cpp
@@ -79,7 +79,7 @@ void compileModuleToSharedLibrary(
 void registerDialects() {
   mlir::registerDialect<mlir::AffineDialect>();
   mlir::registerDialect<mlir::LLVM::LLVMDialect>();
-  mlir::registerDialect<mlir::loop::LoopOpsDialect>();
+  mlir::registerDialect<mlir::scf::SCFDialect>();
   mlir::registerDialect<mlir::StandardOpsDialect>();
   mlir::registerDialect<mlir::ONNXOpsDialect>();
   mlir::registerDialect<mlir::KrnlOpsDialect>();
diff --git a/src/MainUtils.hpp b/src/MainUtils.hpp
index 9cc937f..c6c015d 100644
--- a/src/MainUtils.hpp
+++ b/src/MainUtils.hpp
@@ -25,7 +25,7 @@
 #include "src/Dialect/ONNX/ONNXOps.hpp"
 #include "src/Pass/Passes.hpp"

-#include "mlir/Conversion/LoopToStandard/ConvertLoopToStandard.h"
+#include "mlir/Conversion/SCFToStandard/SCFToStandard.h"
 #include "mlir/ExecutionEngine/ExecutionEngine.h"
 #include "mlir/ExecutionEngine/OptUtils.h"
 #include "mlir/IR/MLIRContext.h"
@@ -70,4 +70,4 @@ void outputCode(

 void emitOutputFiles(std::string outputBaseName,
     EmissionTargetType emissionTarget, mlir::MLIRContext &context,
-    mlir::OwningModuleRef &module);
\ No newline at end of file
+    mlir::OwningModuleRef &module);
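The `registerDialect` lines above change only their template argument: upstream renamed the loop dialect to SCF, so `mlir::loop::LoopOpsDialect` becomes `mlir::scf::SCFDialect`. A minimal sketch of the registration idiom as it stood at this LLVM commit (global dialect registration was still the current API then):

```cpp
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/IR/Dialect.h"

static void registerLoopDialect() {
  // Before this patch: mlir::registerDialect<mlir::loop::LoopOpsDialect>();
  mlir::registerDialect<mlir::scf::SCFDialect>();
}
```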
"src/Pass/Passes.hpp" -#include "mlir/Conversion/LoopToStandard/ConvertLoopToStandard.h" +#include "mlir/Conversion/SCFToStandard/SCFToStandard.h" #include "mlir/ExecutionEngine/ExecutionEngine.h" #include "mlir/ExecutionEngine/OptUtils.h" #include "mlir/IR/MLIRContext.h" @@ -70,4 +70,4 @@ void outputCode( void emitOutputFiles(std::string outputBaseName, EmissionTargetType emissionTarget, mlir::MLIRContext &context, - mlir::OwningModuleRef &module); \ No newline at end of file + mlir::OwningModuleRef &module); diff --git a/src/Tool/ONNXMLIROpt/ONNXMLIROpt.cpp b/src/Tool/ONNXMLIROpt/ONNXMLIROpt.cpp index 83f601f..ec35aed 100644 --- a/src/Tool/ONNXMLIROpt/ONNXMLIROpt.cpp +++ b/src/Tool/ONNXMLIROpt/ONNXMLIROpt.cpp @@ -57,7 +57,7 @@ static llvm::cl::opt allowUnregisteredDialects( int main(int argc, char **argv) { mlir::registerDialect(); mlir::registerDialect(); - mlir::registerDialect(); + mlir::registerDialect(); mlir::registerDialect(); // Register transformation passes. diff --git a/src/Transform/LowerToLLVM.cpp b/src/Transform/LowerToLLVM.cpp index 1a79a8c..b5be56c 100644 --- a/src/Transform/LowerToLLVM.cpp +++ b/src/Transform/LowerToLLVM.cpp @@ -9,12 +9,12 @@ //===----------------------------------------------------------------------===// #include "mlir/Conversion/AffineToStandard/AffineToStandard.h" -#include "mlir/Conversion/LoopToStandard/ConvertLoopToStandard.h" +#include "mlir/Conversion/SCFToStandard/SCFToStandard.h" #include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h" #include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h" #include "mlir/Dialect/Affine/IR/AffineOps.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" -#include "mlir/Dialect/LoopOps/LoopOps.h" +#include "mlir/Dialect/SCF/SCF.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/Pass/Pass.h" #include "mlir/Target/LLVMIR/ModuleTranslation.h" diff --git a/utils/clone-mlir.sh b/utils/clone-mlir.sh index 99858da..efd2fb5 100644 --- a/utils/clone-mlir.sh +++ b/utils/clone-mlir.sh @@ -1,3 +1,3 @@ git clone https://github.com/llvm/llvm-project.git # Check out a specific branch that is known to work with ONNX MLIR. -cd llvm-project && git checkout 3ce0ad1b336e67a76d78ae7ff7d66fe127586620 && cd .. +cd llvm-project && git checkout 0dc91bfd11e6cced0c46c1a25cc96edea0d8fc22 && cd .. diff --git a/utils/gen_doc.py b/utils/gen_doc.py index 0c55118..4c5a991 100644 --- a/utils/gen_doc.py +++ b/utils/gen_doc.py @@ -94,7 +94,7 @@ custom_builder_ops_list = ['Abs', 'Mul', 'Exp', 'ReduceSum', 'ReduceSumSquare', #a dictionary to add any special definition for an operation custom_definition_misc = dict([ ('Constant', ''' let builders = [ - OpBuilder<"Builder *builder, OperationState &state, Attribute sparse_value, Attribute value", [{ + OpBuilder<"OpBuilder &builder, OperationState &state, Attribute sparse_value, Attribute value", [{ if (value) { auto tensorType = value.getType(); build(builder, state, tensorType, sparse_value, value); @@ -430,9 +430,9 @@ def gen_op_def(schema): else: s += indent + 'let builders = [\n' # Custom builders with operands and attributes having a seperate parameter. - # E.g. OpBuilder<"Builder *builder, OperationState &state, Value X, Value, Y, Attribute A", [{}]> + # E.g. 
OpBuilder<"OpBuilder &builder, OperationState &state, Value X, Value, Y, Attribute A", [{}]> indent = inc_indent(indent) - s += indent + 'OpBuilder<"Builder *builder, OperationState &state' + s += indent + 'OpBuilder<"OpBuilder &builder, OperationState &state' operands_dict = get_operands_or_results(schema, is_input=True) for name, ty in operands_dict.items(): s += ', {} {}'.format(tblgen_operand_type_to_cpp_type(ty), @@ -454,8 +454,8 @@ def gen_op_def(schema): s += indent + '}]>,\n' # Custom builders with all operands and attributes having aggregate parameters. - # E.g. OpBuilder<"Builder *builder, OperationState &state, ValueRange operands, ArrayRef attributes", [{}]>' - s += indent + 'OpBuilder<"Builder *builder, OperationState &state, ValueRange operands, ArrayRef attributes", [{\n' + # E.g. OpBuilder<"OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef attributes", [{}]>' + s += indent + 'OpBuilder<"OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef attributes", [{\n' indent = inc_indent(indent) s += indent + 'auto elementType = operands[0].getType().cast().getElementType();\n' s += indent + 'std::vector outputTypes;\n'