Believe all errors now result in meaningful actions (#153)

* removed warnings: missing return, dangling else

* fixed errors, made sure to return false in all shape inference failures

* shape inference uses LogicalResult as return value

* format fixed

* format error

* additional error correction

* handle errors properly at all former emitError sites, using either emitError, assert, or llvm_unreachable

* help added

* fixes

* edit of doc

* doc edit
Alexandre Eichenberger 2020-06-01 13:55:19 -04:00 committed by GitHub
parent 536a20695f
commit 2a1fe9e1e7
14 changed files with 79 additions and 47 deletions

docs/ErrorHandling.md (new file)

@@ -0,0 +1,41 @@
# Handling errors in MLIR
There are two different kinds of errors: errors that come from user inputs, and compiler errors. We should provide meaningful feedback for user input errors, using the `emitError` functions. Compiler errors should be reported using `assert` or `llvm_unreachable` calls. In practice, if a function checks for errors and can return a "failure" status, the preferred way is to use `emitError` and return failure. If, on the other hand, the function cannot return a failure status, then an assert or an unreachable call should be used. Returning errors is important for passes that check user inputs, e.g. during the ingestion of the ONNX model.
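As a minimal sketch of this rule (the helper below is hypothetical, not actual onnx-mlir code), a check that can fail reports the user error with `emitError` and propagates failure; the `llvm_unreachable` counterpart is sketched in the compiler errors section below.
```cpp
#include "mlir/IR/Operation.h"
#include "mlir/IR/StandardTypes.h" // RankedTensorType (MLIR of this era)

// Sketch: a user-input check that can fail. The returned diagnostic
// converts to failure(), so the caller can test it with failed().
mlir::LogicalResult verifyRankedInput(mlir::Operation *op) {
  if (!op->getOperand(0).getType().isa<mlir::RankedTensorType>())
    return op->emitError("expected a ranked tensor as input");
  return mlir::success();
}
```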
## User errors
MLIR provides three distinct calls depending on the severity: `emitError`, `emitWarning`, and `emitRemark`. Errors should typically be propagated to calling functions for proper handling. Typical use is depicted below.
```cpp
return op->emitError("message");
return op->emitError() << "message";
```
The above calls include the location of the operation in the diagnostic. They return a `LogicalResult`, which can be set and tested as shown below. Note that the `emitError` calls return a `failure()` value.
```cpp
LogicalResult isEven(int i) { return (i % 2 == 0) ? success() : failure(); }
if (succeeded(isEven(0)) && failed(isEven(1))) printf("It is all good.\n");
```
Errors can also be reported outside of the context of an operation; in this case, a location must be provided explicitly. To report a warning or a remark, substitute "Warning" or "Remark" for "Error" in the above examples.
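For instance, here is a minimal sketch of a check that only has a `Location` at hand (the `checkAxis` helper and its rule are hypothetical):
```cpp
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"

mlir::LogicalResult checkAxis(mlir::Location loc, int64_t axis, int64_t rank) {
  // Hard error: report to the user and propagate failure.
  if (axis < -rank || axis >= rank)
    return mlir::emitError(loc, "axis value is out of range");
  // Soft issue: report a warning; compilation continues.
  if (axis < 0)
    mlir::emitWarning(loc, "negative axis will be normalized");
  return mlir::success();
}
```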
## Compiler errors
Once an ONNX graph has been validated, any subsequent erroneous situation should be reported with an assert that stops the compilation, as it indicates a compiler error that needs to be fixed. There are two calls that can be used:
```cpp
assert(condition-that-should-hold-true && "error message");
llvm_unreachable("error message");
```
The `llvm_unreachable` call is useful in functions that must return a value: since the compiler knows that path does not fall through, no dummy-value return statement is needed and no missing-return warning is issued. In `void` functions, using an assert is perfectly fine.
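For example, in the sketch below (the `bitWidthOf` helper is hypothetical), no dummy return statement is needed after the unreachable call:
```cpp
#include "llvm/Support/ErrorHandling.h"
#include "mlir/IR/StandardTypes.h"

unsigned bitWidthOf(mlir::Type elementType) {
  if (auto floatType = elementType.dyn_cast<mlir::FloatType>())
    return floatType.getWidth();
  if (auto intType = elementType.dyn_cast<mlir::IntegerType>())
    return intType.getWidth();
  // A compiler bug if reached; the call also tells the compiler this
  // path does not fall through, so no dummy return value is required.
  llvm_unreachable("unsupported element type");
}
```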
## References
Additional relevant information can be found in the LLVM and MLIR documentation referenced below.
* [LLVM Docs on assert](https://llvm.org/docs/CodingStandards.html#assert-liberally)
* [MLIR Docs on diagnostic](https://mlir.llvm.org/docs/Diagnostics/)


@@ -411,8 +411,7 @@ Value emitScalarOpFor<ONNXSignOp>(ConversionPatternRewriter &rewriter,
         rewriter.create<SelectOp>(loc, zeroPredicate, zero, plusSelect);
     return result;
   } else {
-    emitError(loc, "unsupported element type");
-    return {};
+    llvm_unreachable("unsupported element type");
   }
 }
@@ -469,8 +468,7 @@ Value emitScalarOpFor<ONNXAbsOp>(ConversionPatternRewriter &rewriter,
     return rewriter.create<SelectOp>(
         loc, lessThanZero, negativeOperand, operand);
   } else {
-    emitError(loc, "unsupported element type");
-    return {};
+    llvm_unreachable("unsupported element type");
   }
 }
@@ -489,8 +487,7 @@ Value emitScalarOpFor<ONNXNegOp>(ConversionPatternRewriter &rewriter,
     auto zero = emitConstantOp(rewriter, loc, elementType, 0);
     return rewriter.create<mlir::SubIOp>(loc, zero, operand); // 0 - X = -X
   } else {
-    emitError(loc, "unsupported element type");
-    return nullptr;
+    llvm_unreachable("unsupported element type");
   }
 }


@@ -104,7 +104,7 @@ struct ONNXMatMulOpLowering : public ConversionPattern {
         allocOperands.emplace_back(dim);
       }
     } else {
-      emitError(loc, "Invalid shapes");
+      return emitError(loc, "Invalid shapes");
     }
     alloc = rewriter.create<AllocOp>(loc, memRefType, allocOperands);


@@ -69,8 +69,7 @@ Value emitScalarOpFor<ONNXReduceMaxOp>(ConversionPatternRewriter &rewriter,
     auto result = rewriter.create<SelectOp>(loc, max, lhs, rhs);
     return result;
   } else {
-    emitError(loc, "unsupported element type");
-    return nullptr;
+    llvm_unreachable("unsupported element type");
   }
 }
@@ -92,8 +91,7 @@ Value emitScalarOpFor<ONNXReduceMinOp>(ConversionPatternRewriter &rewriter,
     auto result = rewriter.create<SelectOp>(loc, min, lhs, rhs);
     return result;
   } else {
-    emitError(loc, "unsupported element type");
-    return nullptr;
+    llvm_unreachable("unsupported element type");
   }
 }


@@ -344,7 +344,7 @@ Value emitConstantOp(ConversionPatternRewriter &rewriter, Location loc,
   } else if (typeKind == StandardTypes::Index) {
     constantAttr = rewriter.getIntegerAttr(type, (int64_t)value);
   } else {
-    emitError(loc, "unsupported element type");
+    llvm_unreachable("unsupported element type");
   }
   return rewriter.create<ConstantOp>(loc, constantAttr);
@@ -409,10 +409,10 @@ Value emitPositiveInfinityConstantOp(
         constantAttr = rewriter.getIntegerAttr(type, APInt(width, value));
       }
     } else {
-      emitError(loc, "unsupported element type");
+      llvm_unreachable("unsupported element type");
     }
   } else {
-    emitError(loc, "unsupported element type");
+    llvm_unreachable("unsupported element type");
   }
   return rewriter.create<ConstantOp>(loc, constantAttr);
@@ -477,10 +477,10 @@ Value emitNegativeInfinityConstantOp(
         constantAttr = rewriter.getIntegerAttr(type, APInt(width, value));
       }
     } else {
-      emitError(loc, "unsupported element type");
+      llvm_unreachable("unsupported element type");
     }
   } else {
-    emitError(loc, "unsupported element type");
+    llvm_unreachable("unsupported element type");
   }
   return rewriter.create<ConstantOp>(loc, constantAttr);


@@ -155,8 +155,7 @@ Value emitScalarOpFor(ConversionPatternRewriter &rewriter, Location loc,
     return rewriter.create<ScalarFOp<Op>>(
         loc, elementType, scalarOperands, mlir::None);
   } else {
-    emitError(loc, "unsupported element type");
-    return nullptr;
+    llvm_unreachable("unsupported element type");
   }
 }


@@ -168,8 +168,9 @@ LstmState allocAndInitializeStates<ONNXLSTMOp, LstmState>(
     if (hasAllConstantDimensions(yMemRefType))
       state.allH = insertAllocAndDealloc(yMemRefType, loc, rewriter,
           checkInsertDealloc(op->getOperation(), 0));
-    else
-      emitError(loc, "Unsupported dynamic dimensions.");
+    else {
+      llvm_unreachable("Unsupported dynamic dimensions.");
+    }
   } else {
     state.allH = op->Y();
   }
@@ -181,7 +182,7 @@ LstmState allocAndInitializeStates<ONNXLSTMOp, LstmState>(
       state.ht = insertAllocAndDealloc(yhMemRefType, loc, rewriter,
           checkInsertDealloc(op->getOperation(), 1));
     else
-      emitError(loc, "Unsupported dynamic dimensions.");
+      llvm_unreachable("Unsupported dynamic dimensions.");
   } else {
     auto yhMemRefType = MemRefType::get(
         {dimAt(operandAdaptor.W(), 0), dimAt(operandAdaptor.X(), 1),
@@ -197,7 +198,7 @@ LstmState allocAndInitializeStates<ONNXLSTMOp, LstmState>(
       state.ct = insertAllocAndDealloc(ycMemRefType, loc, rewriter,
           checkInsertDealloc(op->getOperation(), 2));
     else
-      emitError(loc, "Unsupported dynamic dimensions.");
+      llvm_unreachable("Unsupported dynamic dimensions.");
   } else {
     auto ycMemRefType = MemRefType::get(
         {dimAt(operandAdaptor.W(), 0), dimAt(operandAdaptor.X(), 1),


@@ -47,7 +47,7 @@ Value applyActivation(ConversionPatternRewriter &rewriter, Location loc,
   else if (activation.name.equals_lower("sigmoid"))
     res = rewriter.create<ONNXSigmoidOp>(loc, scalarMemRefType, alloc);
   else if (activation.name.equals_lower("affine"))
-    emitError(loc, "Unsupported activation");
+    llvm_unreachable("Unsupported activation");
   else if (activation.name.equals_lower("leakyrelu"))
     res = rewriter.create<ONNXLeakyReluOp>(
         loc, scalarMemRefType, alloc, attributes);
@@ -55,7 +55,7 @@
     res = rewriter.create<ONNXThresholdedReluOp>(
         loc, scalarMemRefType, alloc, attributes);
   else if (activation.name.equals_lower("scaledtanh"))
-    emitError(loc, "Unsupported activation");
+    llvm_unreachable("Unsupported activation");
   else if (activation.name.equals_lower("hardsigmoid"))
     res = rewriter.create<ONNXHardSigmoidOp>(
         loc, scalarMemRefType, alloc, attributes);


@@ -25,9 +25,8 @@ struct ONNXConstantOpLowering : public ConversionPattern {
     auto loc = op->getLoc();
     auto constantOp = llvm::dyn_cast<ONNXConstantOp>(op);
-    if (constantOp.sparse_value().hasValue()) {
-      emitError(loc, "Only support dense values at this time");
-    }
+    if (constantOp.sparse_value().hasValue())
+      return emitError(loc, "Only support dense values at this time");
     auto memRefType = convertToMemRefType(*op->result_type_begin());


@@ -27,17 +27,17 @@ struct ONNXPadOpLowering : public ConversionPattern {
     // Only constant padding is supported now.
     auto padMode = myOp.mode();
     if (padMode != "constant")
-      emitError(loc, "unsupported mode for Pad");
+      return emitError(loc, "unsupported mode for Pad");
     DenseElementsAttr constantValAttr =
         myOp.getAttr("constant_value")
             .dyn_cast_or_null<mlir::DenseElementsAttr>();
     if (!constantValAttr)
-      emitError(loc, "unsupported value");
+      return emitError(loc, "unsupported value");
     DenseElementsAttr padsAttributes =
         myOp.getAttr("pads").dyn_cast_or_null<mlir::DenseElementsAttr>();
     if (!padsAttributes)
-      emitError(loc, "Pad: unknown pads");
+      return emitError(loc, "Pad: unknown pads");
     auto memRefType = convertToMemRefType(tensorType);
     Value alloc;
@@ -46,7 +46,7 @@ struct ONNXPadOpLowering : public ConversionPattern {
     if (hasAllConstantDimensions(memRefType))
       alloc = insertAllocAndDealloc(memRefType, loc, rewriter, insertDealloc);
     else
-      emitError(loc, "unexpected output has non-Constant shape");
+      return emitError(loc, "unexpected output has non-Constant shape");
     // Number of loops
     auto memRefShape = memRefType.getShape();


@@ -26,7 +26,7 @@ struct ONNXPadConstantValuePadOpLowering : public ConversionPattern {
     // Only constant padding is supported now.
     auto padMode = llvm::dyn_cast<ONNXPadConstantValuePadOp>(op).mode();
     if (padMode != "constant")
-      emitError(loc, "unsupported mode for PadConstantValuePad");
+      return emitError(loc, "unsupported mode for PadConstantValuePad");
     auto constantValAttr =
         llvm::dyn_cast<ONNXPadConstantValuePadOp>(op).constant_valueAttr();
@@ -38,7 +38,7 @@ struct ONNXPadConstantValuePadOpLowering : public ConversionPattern {
     if (hasAllConstantDimensions(memRefType))
       alloc = insertAllocAndDealloc(memRefType, loc, rewriter, insertDealloc);
     else
-      emitError(loc, "unexpected output has non-Constant shape");
+      return emitError(loc, "unexpected output has non-Constant shape");
     // Number of loops
     auto memRefShape = memRefType.getShape();


@@ -233,8 +233,7 @@ int BuildKrnlLoop::pushBounds(int64_t lowerBound, Value upperBoundMemRefOperand,
   // are supported.
   auto shape = upperBoundMemRefOperand.getType().cast<MemRefType>().getShape();
   if (shape[upperBoundMemRefIndex] < 0) {
-    if (upperBoundMustBeConstant)
-      emitError(loc, "Bound expected to be constant.");
+    assert(!upperBoundMustBeConstant && "Bound expected to be constant.");
     pack->pushOperandBound(
         rewriter
             .create<DimOp>(loc, upperBoundMemRefOperand, upperBoundMemRefIndex)
@@ -253,16 +252,14 @@ int BuildKrnlLoop::pushBounds(Value lowerBound, Value upperBound) {
 void BuildKrnlLoop::createIterateOp() {
   // Loop definition operation is mandatory.
-  if (!createdDefineOp)
-    emitError(loc, "Must create define op before iterate op.");
+  assert(createdDefineOp && "Must create define op before iterate op.");
   // Loop optimization operation is mandatory (for now).
-  if (!createdOptimizeOp)
-    emitError(loc, "Must create optimize op before iterate op.");
+  assert(createdOptimizeOp && "Must create optimize op before iterate op.");
   // Check if all bounds have been defined.
-  if (pushCount != originalLoopNum)
-    emitError(loc, "Must push bounds for all original loops.");
+  assert(pushCount == originalLoopNum &&
+         "Must push bounds for all original loops.");
   // Emit iteration operation.
   auto iterateOp = rewriter.create<KrnlIterateOp>(loc, *pack);
@@ -274,8 +271,8 @@ void BuildKrnlLoop::createDefineOptimizeAndIterateOp(
     Value memRefOperand, bool withEmptyOptimization) {
   // Rank of the MemRef operand. We will emit a loop for each dimension.
   int loopNum = memRefOperand.getType().cast<MemRefType>().getShape().size();
-  if (originalLoopNum != loopNum)
-    emitError(loc, "Mismatch in loop numbers from constructor and define.");
+  assert(originalLoopNum == loopNum &&
+         "Mismatch in loop numbers from constructor and define.");
   // Emit the definition and the optimization operations for the loop nest.
   createDefineAndOptimizeOp(withEmptyOptimization);
@@ -291,8 +288,8 @@ void BuildKrnlLoop::createDefineOptimizeAndIterateOp(
 BlockArgument &BuildKrnlLoop::getInductionVar(int originalLoopIndex) {
   // Check if loop iteration variable is within bounds.
-  if (originalLoopIndex < 0 || originalLoopIndex >= originalLoopNum)
-    emitError(loc, "Original loop index is out of bounds.");
+  assert(originalLoopIndex >= 0 && originalLoopIndex < originalLoopNum &&
+         "Original loop index is out of bounds.");
   return iterBlock->getArguments()[originalLoopIndex];
 }


@@ -32,7 +32,7 @@
       return LoopType::get(parser.getBuilder().getContext());
     parser.emitError(parser.getCurrentLocation(), "Unknown type");
-    return nullptr;
+    return {};
   }
   /// Print a type registered to this dialect.


@@ -43,7 +43,7 @@
     auto constOp = llvm::dyn_cast<ONNXConstantOp>(&op);
     if (constOp->sparse_value().hasValue())
-      emitError(loc, "Only support dense values at this time");
+      return emitError(loc, "Only support dense values at this time");
     if (constOp->value().hasValue()) {
       auto newConstOp = rewriter.create<ONNXConstantOp>(