diff --git a/docs/ErrorHandling.md b/docs/ErrorHandling.md new file mode 100644 index 0000000..1dc2d8c --- /dev/null +++ b/docs/ErrorHandling.md @@ -0,0 +1,41 @@ +# Handling errors in MLIR + +There are two different kinds of errors: errors that come from user inputs, and compiler errors. We should provide meaningful user feedback for user input errors and we should use the `emitError` functions. Compiler errors should be reported using `asserts` or `llvm_unreachable` calls. In practice, if there are functions where errors are checked, and there is the ability to return "failure," the preferred way is to use `emitError` and return failure. If, on the other hand, the function does not allow returning failure, then an assert or unreachable call should be used. Returning errors is important for passes that check user inputs, e.g. during the ingestion of the ONNX model. + +## User errors + +MLIR provides 3 distinct calls depending on the severity: `emitError`, `emitWarning`, and `emitRemark`. Errors should typically be reported to calling functions for proper handling. Typical use is as depicted below. + +```cpp + return op->emitError("message"); + return op->emitError() << "message"; +``` + +The above calls will include the location of the operation. They return a `LogicalResult` which can be set/tested as below. Note that the `emitError` calls return a `failure()` value: +```cpp + LogicalResult isEven(int i) { return (i%2 == 0) ? success() : failure(); } + + if (succeeded(isEven(0)) && failed(isEven(1))) printf("It is all good.\n"); +``` + +Errors can also be reported outside of the context of an operation. In this case, a location must be provided. To report a warning or a remark, just substitute "Warning" or "Remark" instead of "Error" in the above examples. 
+ +## Compiler errors + +Once an ONNX graph has been validated, every subsequent erroneous situation should be reported with an assert to stop the compilation, as this is a compiler error that needs to be properly handled. There are two calls that can be used: + +```cpp + assert(condition-that-should-hold-true && "error message"); + llvm_unreachable("error message"); +``` + +The unreachable call is useful in functions that should return a value, as the compiler will not report warnings if there is no dummy-value return statement along that path. Otherwise, in `void` functions, using an assert is perfectly fine. + + +## References + +Additional relevant information is found in the LLVM and MLIR documentation referenced below. + +* [LLVM Docs on assert](https://llvm.org/docs/CodingStandards.html#assert-liberally) +* [MLIR Docs on diagnostic](https://mlir.llvm.org/docs/Diagnostics/) + \ No newline at end of file diff --git a/src/Conversion/ONNXToKrnl/Math/Elementwise.cpp b/src/Conversion/ONNXToKrnl/Math/Elementwise.cpp index ffa63ba..f05be6a 100644 --- a/src/Conversion/ONNXToKrnl/Math/Elementwise.cpp +++ b/src/Conversion/ONNXToKrnl/Math/Elementwise.cpp @@ -411,8 +411,7 @@ Value emitScalarOpFor(ConversionPatternRewriter &rewriter, rewriter.create(loc, zeroPredicate, zero, plusSelect); return result; } else { - emitError(loc, "unsupported element type"); - return {}; + llvm_unreachable("unsupported element type"); } } @@ -469,8 +468,7 @@ Value emitScalarOpFor(ConversionPatternRewriter &rewriter, return rewriter.create( loc, lessThanZero, negativeOperand, operand); } else { - emitError(loc, "unsupported element type"); - return {}; + llvm_unreachable("unsupported element type"); } } @@ -489,8 +487,7 @@ Value emitScalarOpFor(ConversionPatternRewriter &rewriter, auto zero = emitConstantOp(rewriter, loc, elementType, 0); return rewriter.create(loc, zero, operand); // 0 - X = -X } else { - emitError(loc, "unsupported element type"); - return nullptr; + 
llvm_unreachable("unsupported element type"); } } diff --git a/src/Conversion/ONNXToKrnl/Math/MatMul.cpp b/src/Conversion/ONNXToKrnl/Math/MatMul.cpp index 286221b..8f57357 100644 --- a/src/Conversion/ONNXToKrnl/Math/MatMul.cpp +++ b/src/Conversion/ONNXToKrnl/Math/MatMul.cpp @@ -104,7 +104,7 @@ struct ONNXMatMulOpLowering : public ConversionPattern { allocOperands.emplace_back(dim); } } else { - emitError(loc, "Invalid shapes"); + return emitError(loc, "Invalid shapes"); } alloc = rewriter.create(loc, memRefType, allocOperands); diff --git a/src/Conversion/ONNXToKrnl/Math/Reduction.cpp b/src/Conversion/ONNXToKrnl/Math/Reduction.cpp index a0fe36b..db0a034 100644 --- a/src/Conversion/ONNXToKrnl/Math/Reduction.cpp +++ b/src/Conversion/ONNXToKrnl/Math/Reduction.cpp @@ -69,8 +69,7 @@ Value emitScalarOpFor(ConversionPatternRewriter &rewriter, auto result = rewriter.create(loc, max, lhs, rhs); return result; } else { - emitError(loc, "unsupported element type"); - return nullptr; + llvm_unreachable("unsupported element type"); } } @@ -92,8 +91,7 @@ Value emitScalarOpFor(ConversionPatternRewriter &rewriter, auto result = rewriter.create(loc, min, lhs, rhs); return result; } else { - emitError(loc, "unsupported element type"); - return nullptr; + llvm_unreachable("unsupported element type"); } } diff --git a/src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.cpp b/src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.cpp index 6487247..4822491 100644 --- a/src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.cpp +++ b/src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.cpp @@ -344,7 +344,7 @@ Value emitConstantOp(ConversionPatternRewriter &rewriter, Location loc, } else if (typeKind == StandardTypes::Index) { constantAttr = rewriter.getIntegerAttr(type, (int64_t)value); } else { - emitError(loc, "unsupported element type"); + llvm_unreachable("unsupported element type"); } return rewriter.create(loc, constantAttr); @@ -409,10 +409,10 @@ Value emitPositiveInfinityConstantOp( constantAttr = 
rewriter.getIntegerAttr(type, APInt(width, value)); } } else { - emitError(loc, "unsupported element type"); + llvm_unreachable("unsupported element type"); } } else { - emitError(loc, "unsupported element type"); + llvm_unreachable("unsupported element type"); } return rewriter.create(loc, constantAttr); @@ -477,10 +477,10 @@ Value emitNegativeInfinityConstantOp( constantAttr = rewriter.getIntegerAttr(type, APInt(width, value)); } } else { - emitError(loc, "unsupported element type"); + llvm_unreachable("unsupported element type"); } } else { - emitError(loc, "unsupported element type"); + llvm_unreachable("unsupported element type"); } return rewriter.create(loc, constantAttr); diff --git a/src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp b/src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp index b5079d5..69b7f17 100644 --- a/src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp +++ b/src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp @@ -155,8 +155,7 @@ Value emitScalarOpFor(ConversionPatternRewriter &rewriter, Location loc, return rewriter.create>( loc, elementType, scalarOperands, mlir::None); } else { - emitError(loc, "unsupported element type"); - return nullptr; + llvm_unreachable("unsupported element type"); } } diff --git a/src/Conversion/ONNXToKrnl/RNN/LSTM.cpp b/src/Conversion/ONNXToKrnl/RNN/LSTM.cpp index e4b985e..b6d55be 100644 --- a/src/Conversion/ONNXToKrnl/RNN/LSTM.cpp +++ b/src/Conversion/ONNXToKrnl/RNN/LSTM.cpp @@ -168,8 +168,9 @@ LstmState allocAndInitializeStates( if (hasAllConstantDimensions(yMemRefType)) state.allH = insertAllocAndDealloc(yMemRefType, loc, rewriter, checkInsertDealloc(op->getOperation(), 0)); - else - emitError(loc, "Unsupported dynamic dimensions."); + else { + llvm_unreachable("Unsupported dynamic dimensions."); + } } else { state.allH = op->Y(); } @@ -181,7 +182,7 @@ LstmState allocAndInitializeStates( state.ht = insertAllocAndDealloc(yhMemRefType, loc, rewriter, checkInsertDealloc(op->getOperation(), 1)); else - emitError(loc, 
"Unsupported dynamic dimensions."); + llvm_unreachable("Unsupported dynamic dimensions."); } else { auto yhMemRefType = MemRefType::get( {dimAt(operandAdaptor.W(), 0), dimAt(operandAdaptor.X(), 1), @@ -197,7 +198,7 @@ LstmState allocAndInitializeStates( state.ct = insertAllocAndDealloc(ycMemRefType, loc, rewriter, checkInsertDealloc(op->getOperation(), 2)); else - emitError(loc, "Unsupported dynamic dimensions."); + llvm_unreachable("Unsupported dynamic dimensions."); } else { auto ycMemRefType = MemRefType::get( {dimAt(operandAdaptor.W(), 0), dimAt(operandAdaptor.X(), 1), diff --git a/src/Conversion/ONNXToKrnl/RNN/RNNBase.cpp b/src/Conversion/ONNXToKrnl/RNN/RNNBase.cpp index 2ce79c2..5a0c8db 100644 --- a/src/Conversion/ONNXToKrnl/RNN/RNNBase.cpp +++ b/src/Conversion/ONNXToKrnl/RNN/RNNBase.cpp @@ -47,7 +47,7 @@ Value applyActivation(ConversionPatternRewriter &rewriter, Location loc, else if (activation.name.equals_lower("sigmoid")) res = rewriter.create(loc, scalarMemRefType, alloc); else if (activation.name.equals_lower("affine")) - emitError(loc, "Unsupported activation"); + llvm_unreachable("Unsupported activation"); else if (activation.name.equals_lower("leakyrelu")) res = rewriter.create( loc, scalarMemRefType, alloc, attributes); @@ -55,7 +55,7 @@ Value applyActivation(ConversionPatternRewriter &rewriter, Location loc, res = rewriter.create( loc, scalarMemRefType, alloc, attributes); else if (activation.name.equals_lower("scaledtanh")) - emitError(loc, "Unsupported activation"); + llvm_unreachable("Unsupported activation"); else if (activation.name.equals_lower("hardsigmoid")) res = rewriter.create( loc, scalarMemRefType, alloc, attributes); diff --git a/src/Conversion/ONNXToKrnl/Tensor/Constant.cpp b/src/Conversion/ONNXToKrnl/Tensor/Constant.cpp index 709bd56..3b68c09 100644 --- a/src/Conversion/ONNXToKrnl/Tensor/Constant.cpp +++ b/src/Conversion/ONNXToKrnl/Tensor/Constant.cpp @@ -25,9 +25,8 @@ struct ONNXConstantOpLowering : public ConversionPattern { auto 
loc = op->getLoc(); auto constantOp = llvm::dyn_cast(op); - if (constantOp.sparse_value().hasValue()) { - emitError(loc, "Only support dense values at this time"); - } + if (constantOp.sparse_value().hasValue()) + return emitError(loc, "Only support dense values at this time"); auto memRefType = convertToMemRefType(*op->result_type_begin()); diff --git a/src/Conversion/ONNXToKrnl/Tensor/Pad.cpp b/src/Conversion/ONNXToKrnl/Tensor/Pad.cpp index 7421225..811cb87 100644 --- a/src/Conversion/ONNXToKrnl/Tensor/Pad.cpp +++ b/src/Conversion/ONNXToKrnl/Tensor/Pad.cpp @@ -27,17 +27,17 @@ struct ONNXPadOpLowering : public ConversionPattern { // Only constant padding is supported now. auto padMode = myOp.mode(); if (padMode != "constant") - emitError(loc, "unsupported mode for Pad"); + return emitError(loc, "unsupported mode for Pad"); DenseElementsAttr constantValAttr = myOp.getAttr("constant_value") .dyn_cast_or_null(); if (!constantValAttr) - emitError(loc, "unsupported value"); + return emitError(loc, "unsupported value"); DenseElementsAttr padsAttributes = myOp.getAttr("pads").dyn_cast_or_null(); if (!padsAttributes) - emitError(loc, "Pad: unknown pads"); + return emitError(loc, "Pad: unknown pads"); auto memRefType = convertToMemRefType(tensorType); Value alloc; @@ -46,7 +46,7 @@ struct ONNXPadOpLowering : public ConversionPattern { if (hasAllConstantDimensions(memRefType)) alloc = insertAllocAndDealloc(memRefType, loc, rewriter, insertDealloc); else - emitError(loc, "unexpected output has non-Constant shape"); + return emitError(loc, "unexpected output has non-Constant shape"); // Number of loops auto memRefShape = memRefType.getShape(); diff --git a/src/Conversion/ONNXToKrnl/Tensor/PadConstantValuePad.cpp b/src/Conversion/ONNXToKrnl/Tensor/PadConstantValuePad.cpp index 0638171..25fe213 100644 --- a/src/Conversion/ONNXToKrnl/Tensor/PadConstantValuePad.cpp +++ b/src/Conversion/ONNXToKrnl/Tensor/PadConstantValuePad.cpp @@ -26,7 +26,7 @@ struct 
ONNXPadConstantValuePadOpLowering : public ConversionPattern { // Only constant padding is supported now. auto padMode = llvm::dyn_cast(op).mode(); if (padMode != "constant") - emitError(loc, "unsupported mode for PadConstantValuePad"); + return emitError(loc, "unsupported mode for PadConstantValuePad"); auto constantValAttr = llvm::dyn_cast(op).constant_valueAttr(); @@ -38,7 +38,7 @@ struct ONNXPadConstantValuePadOpLowering : public ConversionPattern { if (hasAllConstantDimensions(memRefType)) alloc = insertAllocAndDealloc(memRefType, loc, rewriter, insertDealloc); else - emitError(loc, "unexpected output has non-Constant shape"); + return emitError(loc, "unexpected output has non-Constant shape"); // Number of loops auto memRefShape = memRefType.getShape(); diff --git a/src/Dialect/Krnl/KrnlHelper.cpp b/src/Dialect/Krnl/KrnlHelper.cpp index 09af502..4a36618 100644 --- a/src/Dialect/Krnl/KrnlHelper.cpp +++ b/src/Dialect/Krnl/KrnlHelper.cpp @@ -233,8 +233,7 @@ int BuildKrnlLoop::pushBounds(int64_t lowerBound, Value upperBoundMemRefOperand, // are supported. auto shape = upperBoundMemRefOperand.getType().cast().getShape(); if (shape[upperBoundMemRefIndex] < 0) { - if (upperBoundMustBeConstant) - emitError(loc, "Bound expected to be constant."); + assert(!upperBoundMustBeConstant && "Bound expected to be constant."); pack->pushOperandBound( rewriter .create(loc, upperBoundMemRefOperand, upperBoundMemRefIndex) @@ -253,16 +252,14 @@ int BuildKrnlLoop::pushBounds(Value lowerBound, Value upperBound) { void BuildKrnlLoop::createIterateOp() { // Loop definition operation is mandatory. - if (!createdDefineOp) - emitError(loc, "Must create define op before iterate op."); + assert(createdDefineOp && "Must create define op before iterate op."); // Loop optimization operation is mandatory (for now). 
- if (!createdOptimizeOp) - emitError(loc, "Must create optimize op before iterate op."); + assert(createdOptimizeOp && "Must create optimize op before iterate op."); // Check if all bounds have been defined. - if (pushCount != originalLoopNum) - emitError(loc, "Must push bounds for all original loops."); + assert(pushCount == originalLoopNum && + "Must push bounds for all original loops."); // Emit iteration operation. auto iterateOp = rewriter.create(loc, *pack); @@ -274,8 +271,8 @@ void BuildKrnlLoop::createDefineOptimizeAndIterateOp( Value memRefOperand, bool withEmptyOptimization) { // Rank of the MemRef operand. We will emit a loop for each dimension. int loopNum = memRefOperand.getType().cast().getShape().size(); - if (originalLoopNum != loopNum) - emitError(loc, "Mismatch in loop numbers from constructor and define."); + assert(originalLoopNum == loopNum && + "Mismatch in loop numbers from constructor and define."); // Emit the definition and the optimization operations for the loop nest. createDefineAndOptimizeOp(withEmptyOptimization); @@ -291,8 +288,8 @@ void BuildKrnlLoop::createDefineOptimizeAndIterateOp( BlockArgument &BuildKrnlLoop::getInductionVar(int originalLoopIndex) { // Check if loop iteration variable is within bounds. - if (originalLoopIndex < 0 || originalLoopIndex >= originalLoopNum) - emitError(loc, "Original loop index is out of bounds."); + assert(originalLoopIndex >= 0 && originalLoopIndex < originalLoopNum && + "Original loop index is out of bounds."); return iterBlock->getArguments()[originalLoopIndex]; } diff --git a/src/Dialect/Krnl/KrnlOps.hpp b/src/Dialect/Krnl/KrnlOps.hpp index b287883..1d4cd81 100644 --- a/src/Dialect/Krnl/KrnlOps.hpp +++ b/src/Dialect/Krnl/KrnlOps.hpp @@ -32,7 +32,7 @@ public: return LoopType::get(parser.getBuilder().getContext()); parser.emitError(parser.getCurrentLocation(), "Unknown type"); - return nullptr; + return {}; } /// Print a type registered to this dialect. 
diff --git a/src/Transform/ONNX/ElideConstants.cpp b/src/Transform/ONNX/ElideConstants.cpp index 7f09a91..71c353d 100644 --- a/src/Transform/ONNX/ElideConstants.cpp +++ b/src/Transform/ONNX/ElideConstants.cpp @@ -43,7 +43,7 @@ public: auto constOp = llvm::dyn_cast(&op); if (constOp->sparse_value().hasValue()) - emitError(loc, "Only support dense values at this time"); + return emitError(loc, "Only support dense values at this time"); if (constOp->value().hasValue()) { auto newConstOp = rewriter.create(