Lower ONNXAbsOp to Krnl dialect and enable e2e tests for ONNXReduceL1 (#18)
Co-authored-by: Gheorghe-Teodor Bercea <gt.bercea@gmail.com>
parent 1622b9f161
commit 4763e8a8bc
@@ -47,7 +47,7 @@ OpsWithShapeInference = [
     'LeakyRelu', 'Elu', 'Selu', 'HardSigmoid', 'Reshape', 'Reciprocal',
     'Identity', 'Cos', 'Log', 'Transpose', 'Softmax', 'ReduceMax', 'ReduceMin',
     'ReduceProd', 'ReduceSum', 'Softplus', 'Softsign', 'Sqrt', 'Unsqueeze',
-    'Sign', 'Constant', 'ONNXAveragePoolOp'
+    'Sign', 'Constant', 'ONNXAveragePoolOp', 'Abs'
 ]
 
 # Operations supporting canonicalization.
@@ -465,6 +465,30 @@ Value mapToLowerScalarOp<ONNXMinOp>(Operation *op, ArrayRef<Type> result_types,
   return result;
 }
 
+//===----------------------------------------------------------------------===//
+// Scalar unary ops for lowering ONNXAbsOp
+//===----------------------------------------------------------------------===//
+template <>
+Value mapToLowerScalarOp<ONNXAbsOp>(Operation *op, ArrayRef<Type> result_types,
+    ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) {
+  auto loc = op->getLoc();
+  Value operand = operands[0];
+  auto elementType = result_types[0];
+
+  if (elementType.isa<FloatType>()) {
+    return rewriter.create<AbsFOp>(loc, operand);
+  } else if (elementType.isa<IntegerType>()) {
+    auto zero = emitConstantOp(rewriter, loc, elementType, 0);
+    auto lessThanZero =
+        rewriter.create<CmpIOp>(loc, CmpIPredicate::slt, operand, zero);
+    auto negativeOperand = rewriter.create<SubIOp>(loc, zero, operand);
+    return rewriter.create<SelectOp>(
+        loc, lessThanZero, negativeOperand, operand);
+  } else {
+    emitError(loc, "unsupported element type");
+  }
+}
+
 // Element-wise unary ops lowering to Krnl dialect.
 //===----------------------------------------------------------------------===//
 template <typename ElementwiseUnaryOp>
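For reference, the integer branch above builds |x| from ops the standard dialect already provides: compare the operand with zero, compute 0 - x, and select between the two. A minimal standalone C++ sketch of the same select-based computation (absInt and the sample values are illustrative, not part of the patch):

#include <cstdint>
#include <cstdio>

// Mirrors the emitted sequence:
//   lessThanZero = (x < 0)                    -> CmpIOp slt
//   negative     = 0 - x                      -> SubIOp
//   result       = lessThanZero ? negative : x -> SelectOp
// As in the generated code, INT32_MIN simply wraps; there is no special case.
static int32_t absInt(int32_t x) {
  bool lessThanZero = x < 0;
  int32_t negative = 0 - x;
  return lessThanZero ? negative : x;
}

int main() {
  std::printf("%d %d %d\n", absInt(-7), absInt(0), absInt(42)); // prints: 7 0 42
  return 0;
}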
@@ -615,7 +639,8 @@ struct ONNXElementwiseVariadicOpLowering : public ConversionPattern {
 
 void populateLoweringONNXElementwiseOpPattern(
     OwningRewritePatternList &patterns, MLIRContext *ctx) {
-  patterns.insert<ONNXElementwiseVariadicOpLowering<mlir::ONNXAddOp>,
+  patterns.insert<ONNXElementwiseUnaryOpLowering<mlir::ONNXAbsOp>,
+                  ONNXElementwiseVariadicOpLowering<mlir::ONNXAddOp>,
                   ONNXElementwiseVariadicOpLowering<mlir::ONNXAndOp>,
                   ONNXElementwiseUnaryOpLowering<mlir::ONNXCosOp>,
                   ONNXElementwiseUnaryOpLowering<mlir::ONNXCoshOp>,
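The line added above registers the generic unary lowering, instantiated for ONNXAbsOp, alongside the existing patterns; per element it defers to the mapToLowerScalarOp<ONNXAbsOp> specialization from the previous hunk. A rough conceptual sketch in plain C++ (not the actual MLIR classes; elementwiseUnary and AbsScalar are illustrative names):

#include <cmath>
#include <cstdio>
#include <vector>

// Stand-in for mapToLowerScalarOp<ONNXAbsOp>: the per-element computation.
struct AbsScalar {
  float operator()(float x) const { return std::fabs(x); }
};

// Stand-in for ONNXElementwiseUnaryOpLowering<Op>: one loop nest that applies
// the scalar op to every element of the input.
template <typename ScalarOp>
std::vector<float> elementwiseUnary(const std::vector<float> &input) {
  ScalarOp scalar;
  std::vector<float> output(input.size());
  for (size_t i = 0; i < input.size(); ++i) // plays the role of krnl.iterate
    output[i] = scalar(input[i]);
  return output;
}

int main() {
  std::vector<float> out = elementwiseUnary<AbsScalar>({-1.5f, 0.0f, 2.0f});
  std::printf("%g %g %g\n", out[0], out[1], out[2]); // prints: 1.5 0 2
  return 0;
}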
@@ -458,6 +458,12 @@ void ONNXSqrtOp::inferShapes() { getResult().setType(getOperand().getType()); }
 /// the shape inference interface.
 void ONNXSignOp::inferShapes() { getResult().setType(getOperand().getType()); }
 
+//===----------------------------------------------------------------------===//
+// Abs
+/// Infer the output shape of the ONNXAbsOp. This method is required by the
+/// shape inference interface.
+void ONNXAbsOp::inferShapes() { getResult().setType(getOperand().getType()); }
+
 //===----------------------------------------------------------------------===//
 // Add
 /// Infer the output shape of the ONNXAddOp. This method is required by the
@@ -6,7 +6,7 @@
 //********************************************************
 
 def ONNXAbsOp:ONNX_Op<"Abs",
-    [NoSideEffect]> {
+    [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
   let summary = "ONNX Abs operation";
   let description = [{
   "Absolute takes one input data (Tensor<T>) and produces one output data"
@@ -121,6 +121,7 @@ public:
         op->getName().getStringRef() != "onnx.PadConstantPad" &&
         op->getName().getStringRef() != "onnx.PadConstantValuePad" &&
         op->getName().getStringRef() != "onnx.BatchNormalizationTestMode" &&
+        op->getName().getStringRef() != "onnx.Abs" &&
        op->getName().getStringRef() != "onnx.Constant" &&
        op->getName().getStringRef() != "onnx.Unsqueeze")
      return false;
@@ -64,6 +64,9 @@ backend_test = onnx.backend.test.BackendTest(DummyBackend, __name__)
 # https://github.com/onnx/onnx/tree/master/onnx/backend/test/data/node
 
 test_to_enable = [
+    # Abs Op:
+    "test_abs_cpu",
+
     # Add Op:
     "test_add_cpu",
     "test_add_bcast_cpu",
@@ -174,15 +177,15 @@ test_to_enable = [
     "test_reduce_sum_negative_axes_keepdims_example_cpu",
     "test_reduce_sum_negative_axes_keepdims_random_cpu",
 
-    # ReduceL1: this op depends on ONNXAbsOp so we will turn these tests on once ONNXAbsOp is implemented.
-    #"test_reduce_l1_default_axes_keepdims_example_cpu",
-    #"test_reduce_l1_default_axes_keepdims_random_cpu",
-    #"test_reduce_l1_do_not_keepdims_example_cpu",
-    #"test_reduce_l1_do_not_keepdims_random_cpu",
-    #"test_reduce_l1_keep_dims_example_cpu",
-    #"test_reduce_l1_keep_dims_random_cpu",
-    #"test_reduce_l1_negative_axes_keep_dims_example_cpu",
-    #"test_reduce_l1_negative_axes_keep_dims_random_cpu",
+    # ReduceL1
+    "test_reduce_l1_default_axes_keepdims_example_cpu",
+    "test_reduce_l1_default_axes_keepdims_random_cpu",
+    "test_reduce_l1_do_not_keepdims_example_cpu",
+    "test_reduce_l1_do_not_keepdims_random_cpu",
+    "test_reduce_l1_keep_dims_example_cpu",
+    "test_reduce_l1_keep_dims_random_cpu",
+    "test_reduce_l1_negative_axes_keep_dims_example_cpu",
+    "test_reduce_l1_negative_axes_keep_dims_random_cpu",
 
     # ReduceL2
     "test_reduce_l2_default_axes_keepdims_example_cpu",
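These tests could be switched on because, as the replaced comment said, ReduceL1 depends on Abs: the L1 reduction is the sum of absolute values over the reduced axes, sum_i |x_i|. A small C++ sketch of the full-tensor case (reduceL1 is an illustrative helper, not code from this repository; the ONNX op additionally supports axes and keepdims):

#include <cmath>
#include <cstdio>
#include <vector>

// L1 reduction over all elements: sum of |x_i|.
static float reduceL1(const std::vector<float> &x) {
  float sum = 0.0f;
  for (float v : x)
    sum += std::fabs(v); // the absolute value that ONNXAbsOp now provides
  return sum;
}

int main() {
  std::printf("%g\n", reduceL1({-1.0f, 2.0f, -3.0f})); // prints: 6
  return 0;
}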
@@ -1511,6 +1511,47 @@ func @test_maxpooling_singleout_no_pad_w_strides_w_ceil_mode_w_unknown_dims(%arg
   // CHECK: return [[RES]] : memref<?x3x?x16xf32>
 }
 
+func @test_abs_float(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
+  %0 = "onnx.Abs"(%arg0) : (tensor<?x10xf32>) -> tensor<*xf32>
+  "std.return"(%0) : (tensor<*xf32>) -> ()
+
+  // CHECK-LABEL: test_abs_float
+  // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref<?x10xf32>
+  // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
+  // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
+  // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
+  // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
+  // CHECK: } : () -> (!krnl.loop, !krnl.loop)
+  // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
+  // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
+  // CHECK: [[LOAD:%.+]] = load %arg0[%arg1, %arg2] : memref<?x10xf32>
+  // CHECK: [[ABS:%.+]] = absf [[LOAD]] : f32
+  // CHECK: store [[ABS]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
+  // CHECK: return [[RES]] : memref<?x10xf32>
+}
+
+func @test_abs_int(%arg0 : tensor<?x10xi32>) -> tensor<*xi32> {
+  %0 = "onnx.Abs"(%arg0) : (tensor<?x10xi32>) -> tensor<*xi32>
+  "std.return"(%0) : (tensor<*xi32>) -> ()
+
+  // CHECK-LABEL: test_abs_int
+  // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref<?x10xi32>
+  // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xi32>
+  // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
+  // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
+  // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
+  // CHECK: } : () -> (!krnl.loop, !krnl.loop)
+  // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xi32>
+  // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
+  // CHECK: [[LOAD:%.+]] = load %arg0[%arg1, %arg2] : memref<?x10xi32>
+  // CHECK: [[ZERO:%.+]] = constant 0 : i32
+  // CHECK: [[LESS_THAN_ZERO:%.+]] = cmpi "slt", [[LOAD]], [[ZERO]] : i32
+  // CHECK: [[NEGATIVE_LOAD:%.+]] = subi [[ZERO]], [[LOAD]] : i32
+  // CHECK: [[SELECT:%.+]] = select [[LESS_THAN_ZERO]], [[NEGATIVE_LOAD]], [[LOAD]] : i32
+  // CHECK: store [[SELECT]], [[RES]][%arg1, %arg2] : memref<?x10xi32>
+  // CHECK: return [[RES]] : memref<?x10xi32>
+}
+
 func @test_constant_pad1(%arg0: tensor<16x16xf32>) -> tensor<18x20xf32> {
   %0 = "onnx.PadConstantValuePad"(%arg0) {constant_value = 0.000000e+00 : f32, mode = "constant", pads = [0, 3, 2, 1]} : (tensor<16x16xf32>) -> tensor<18x20xf32>
   return %0 : tensor<18x20xf32>
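For reference, the IR checked in test_abs_float amounts to: read the dynamic first dimension, allocate a ?x10 result, and run a two-deep loop nest that loads each element, applies absf, and stores it. A rough C++ equivalent under those assumptions (absKernel, the flat row-major buffer, and the sample sizes are illustrative, not generated code):

#include <cmath>
#include <vector>

// Roughly what the lowered kernel does for a ?x10xf32 input.
std::vector<float> absKernel(const std::vector<float> &input, size_t rows) {
  const size_t cols = 10;                  // the static second dimension
  std::vector<float> result(rows * cols);  // alloc([[DIM_0]]) : memref<?x10xf32>
  for (size_t i = 0; i < rows; ++i)        // krnl.iterate over dim 0
    for (size_t j = 0; j < cols; ++j) {    // krnl.iterate over dim 1
      float v = input[i * cols + j];       // load
      result[i * cols + j] = std::fabs(v); // absf + store
    }
  return result;
}

int main() {
  std::vector<float> in(2 * 10, -1.0f);
  std::vector<float> out = absKernel(in, 2); // every element becomes 1.0f
  return out[0] == 1.0f ? 0 : 1;
}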