From 4763e8a8bc1bc6474bff6258028fd354f2c0a659 Mon Sep 17 00:00:00 2001
From: "Tung D. Le" <tung@jp.ibm.com>
Date: Wed, 18 Mar 2020 00:12:45 +0900
Subject: [PATCH] Lower ONNXAbsOp to Krnl dialect and enable e2e tests for
 ONNXReduceL1 (#18)

Co-authored-by: Gheorghe-Teodor Bercea <gt.bercea@gmail.com>
---
 doc/gen_doc.py                                |  2 +-
 .../onnx_to_krnl/math/elementwise.cpp         | 27 +++++++++++-
 src/dialect/onnx/onnx_ops.cpp                 |  6 +++
 src/dialect/onnx/onnxop.inc                   |  2 +-
 src/pass/shape_inference_pass.cpp             |  1 +
 test/backend/test.py                          | 21 ++++++----
 test/mlir/onnx/onnx_lowering.mlir             | 41 +++++++++++++++++++
 7 files changed, 88 insertions(+), 12 deletions(-)

diff --git a/doc/gen_doc.py b/doc/gen_doc.py
index bb180d2..0ad5db7 100644
--- a/doc/gen_doc.py
+++ b/doc/gen_doc.py
@@ -47,7 +47,7 @@ OpsWithShapeInference = [
     'LeakyRelu', 'Elu', 'Selu', 'HardSigmoid', 'Reshape', 'Reciprocal',
     'Identity', 'Cos', 'Log', 'Transpose', 'Softmax', 'ReduceMax', 'ReduceMin',
     'ReduceProd', 'ReduceSum', 'Softplus', 'Softsign', 'Sqrt', 'Unsqueeze',
-    'Sign', 'Constant', 'ONNXAveragePoolOp'
+    'Sign', 'Constant', 'ONNXAveragePoolOp', 'Abs'
 ]
 
 # Operations supporting canonicalization.
diff --git a/src/conversion/onnx_to_krnl/math/elementwise.cpp b/src/conversion/onnx_to_krnl/math/elementwise.cpp
index 7ab36af..200c6cc 100644
--- a/src/conversion/onnx_to_krnl/math/elementwise.cpp
+++ b/src/conversion/onnx_to_krnl/math/elementwise.cpp
@@ -465,6 +465,30 @@ Value mapToLowerScalarOp<ONNXSignOp>(Operation *op, ArrayRef<Type> result_types,
   return result;
 }
 
+//===----------------------------------------------------------------------===//
+// Scalar unary ops for lowering ONNXAbsOp
+//===----------------------------------------------------------------------===//
+template <>
+Value mapToLowerScalarOp<ONNXAbsOp>(Operation *op, ArrayRef<Type> result_types,
+    ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) {
+  auto loc = op->getLoc();
+  Value operand = operands[0];
+  auto elementType = result_types[0];
+
+  if (elementType.isa<FloatType>()) {
+    return rewriter.create<AbsFOp>(loc, operand);
+  } else if (elementType.isa<IntegerType>()) {
+    auto zero = emitConstantOp(rewriter, loc, elementType, 0);
+    auto lessThanZero =
+        rewriter.create<CmpIOp>(loc, CmpIPredicate::slt, operand, zero);
+    auto negativeOperand = rewriter.create<SubIOp>(loc, zero, operand);
+    return rewriter.create<SelectOp>(
+        loc, lessThanZero, negativeOperand, operand);
+  } else {
+    emitError(loc, "unsupported element type");
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // Element-wise unary ops lowering to Krnl dialect.
 //===----------------------------------------------------------------------===//
 template <typename ElementwiseUnaryOp>
@@ -615,7 +639,8 @@ struct ONNXElementwiseVariadicOpLowering : public ConversionPattern {
 
 void populateLoweringONNXElementwiseOpPattern(
     OwningRewritePatternList &patterns, MLIRContext *ctx) {
-  patterns.insert<ONNXElementwiseVariadicOpLowering<mlir::ONNXAddOp>,
+  patterns.insert<ONNXElementwiseUnaryOpLowering<mlir::ONNXAbsOp>,
+                  ONNXElementwiseVariadicOpLowering<mlir::ONNXAddOp>,
                   ONNXElementwiseVariadicOpLowering<mlir::ONNXAndOp>,
                   ONNXElementwiseUnaryOpLowering<mlir::ONNXCosOp>,
                   ONNXElementwiseUnaryOpLowering<mlir::ONNXCoshOp>,
diff --git a/src/dialect/onnx/onnx_ops.cpp b/src/dialect/onnx/onnx_ops.cpp
index 89e269d..3b3104a 100644
--- a/src/dialect/onnx/onnx_ops.cpp
+++ b/src/dialect/onnx/onnx_ops.cpp
@@ -458,6 +458,12 @@ void ONNXSqrtOp::inferShapes() { getResult().setType(getOperand().getType()); }
 /// the shape inference interface.
 void ONNXSignOp::inferShapes() { getResult().setType(getOperand().getType()); }
 
+//===----------------------------------------------------------------------===//
+// Abs
+/// Infer the output shape of the ONNXAbsOp. This method is required by the
+/// shape inference interface.
+void ONNXAbsOp::inferShapes() { getResult().setType(getOperand().getType()); }
+
 //===----------------------------------------------------------------------===//
 // Add
 /// Infer the output shape of the ONNXAddOp.
This method is required by the
diff --git a/src/dialect/onnx/onnxop.inc b/src/dialect/onnx/onnxop.inc
index c64e1e6..d70e1f9 100644
--- a/src/dialect/onnx/onnxop.inc
+++ b/src/dialect/onnx/onnxop.inc
@@ -6,7 +6,7 @@
 //********************************************************
 
 def ONNXAbsOp:ONNX_Op<"Abs",
-  [NoSideEffect]> {
+  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
   let summary = "ONNX Abs operation";
   let description = [{
     "Absolute takes one input data (Tensor<T>) and produces one output data"
diff --git a/src/pass/shape_inference_pass.cpp b/src/pass/shape_inference_pass.cpp
index 8df3d66..899bfb4 100644
--- a/src/pass/shape_inference_pass.cpp
+++ b/src/pass/shape_inference_pass.cpp
@@ -121,6 +121,7 @@ public:
         op->getName().getStringRef() != "onnx.PadConstantPad" &&
         op->getName().getStringRef() != "onnx.PadConstantValuePad" &&
         op->getName().getStringRef() != "onnx.BatchNormalizationTestMode" &&
+        op->getName().getStringRef() != "onnx.Abs" &&
         op->getName().getStringRef() != "onnx.Constant" &&
         op->getName().getStringRef() != "onnx.Unsqueeze")
       return false;
diff --git a/test/backend/test.py b/test/backend/test.py
index de856c1..95c29ce 100644
--- a/test/backend/test.py
+++ b/test/backend/test.py
@@ -64,6 +64,9 @@ backend_test = onnx.backend.test.BackendTest(DummyBackend, __name__)
 # https://github.com/onnx/onnx/tree/master/onnx/backend/test/data/node
 test_to_enable = [
 
+    # Abs Op:
+    "test_abs_cpu",
+
     # Add Op:
     "test_add_cpu",
     "test_add_bcast_cpu",
@@ -174,15 +177,15 @@
     "test_reduce_sum_negative_axes_keepdims_example_cpu",
     "test_reduce_sum_negative_axes_keepdims_random_cpu",
 
-    # ReduceL1: this op depends on ONNXAbsOp so we will turn these tests on once ONNXAbsOp is implemented.
-    #"test_reduce_l1_default_axes_keepdims_example_cpu",
-    #"test_reduce_l1_default_axes_keepdims_random_cpu",
-    #"test_reduce_l1_do_not_keepdims_example_cpu",
-    #"test_reduce_l1_do_not_keepdims_random_cpu",
-    #"test_reduce_l1_keep_dims_example_cpu",
-    #"test_reduce_l1_keep_dims_random_cpu",
-    #"test_reduce_l1_negative_axes_keep_dims_example_cpu",
-    #"test_reduce_l1_negative_axes_keep_dims_random_cpu",
+    # ReduceL1
+    "test_reduce_l1_default_axes_keepdims_example_cpu",
+    "test_reduce_l1_default_axes_keepdims_random_cpu",
+    "test_reduce_l1_do_not_keepdims_example_cpu",
+    "test_reduce_l1_do_not_keepdims_random_cpu",
+    "test_reduce_l1_keep_dims_example_cpu",
+    "test_reduce_l1_keep_dims_random_cpu",
+    "test_reduce_l1_negative_axes_keep_dims_example_cpu",
+    "test_reduce_l1_negative_axes_keep_dims_random_cpu",
 
     # ReduceL2
     "test_reduce_l2_default_axes_keepdims_example_cpu",
diff --git a/test/mlir/onnx/onnx_lowering.mlir b/test/mlir/onnx/onnx_lowering.mlir
index 5321191..3f70f26 100644
--- a/test/mlir/onnx/onnx_lowering.mlir
+++ b/test/mlir/onnx/onnx_lowering.mlir
@@ -1511,6 +1511,47 @@ func @test_maxpooling_singleout_no_pad_w_strides_w_ceil_mode_w_unknown_dims(%arg
   // CHECK: return [[RES]] : memref
 }
 
+func @test_abs_float(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
+  %0 = "onnx.Abs"(%arg0) : (tensor<?x10xf32>) -> tensor<*xf32>
+  "std.return"(%0) : (tensor<*xf32>) -> ()
+
+  // CHECK-LABEL: test_abs_float
+  // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref<?x10xf32>
+  // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
+  // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
+  // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
+  // CHECK:   krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
+  // CHECK: } : () -> (!krnl.loop, !krnl.loop)
+  // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
+  // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
+  // CHECK: [[LOAD:%.+]] = load %arg0[%arg1, %arg2] : memref<?x10xf32>
+  // CHECK: [[ABS:%.+]]
= absf [[LOAD]] : f32
+  // CHECK: store [[ABS]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
+  // CHECK: return [[RES]] : memref<?x10xf32>
+}
+
+func @test_abs_int(%arg0 : tensor<?x10xi32>) -> tensor<*xi32> {
+  %0 = "onnx.Abs"(%arg0) : (tensor<?x10xi32>) -> tensor<*xi32>
+  "std.return"(%0) : (tensor<*xi32>) -> ()
+
+  // CHECK-LABEL: test_abs_int
+  // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref<?x10xi32>
+  // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xi32>
+  // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
+  // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
+  // CHECK:   krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
+  // CHECK: } : () -> (!krnl.loop, !krnl.loop)
+  // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xi32>
+  // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
+  // CHECK: [[LOAD:%.+]] = load %arg0[%arg1, %arg2] : memref<?x10xi32>
+  // CHECK: [[ZERO:%.+]] = constant 0 : i32
+  // CHECK: [[LESS_THAN_ZERO:%.+]] = cmpi "slt", [[LOAD]], [[ZERO]] : i32
+  // CHECK: [[NEGATIVE_LOAD:%.+]] = subi [[ZERO]], [[LOAD]] : i32
+  // CHECK: [[SELECT:%.+]] = select [[LESS_THAN_ZERO]], [[NEGATIVE_LOAD]], [[LOAD]] : i32
+  // CHECK: store [[SELECT]], [[RES]][%arg1, %arg2] : memref<?x10xi32>
+  // CHECK: return [[RES]] : memref<?x10xi32>
+}
+
 func @test_constant_pad1(%arg0: tensor<16x16xf32>) -> tensor<18x20xf32> {
   %0 = "onnx.PadConstantValuePad"(%arg0) {constant_value = 0.000000e+00 : f32, mode = "constant", pads = [0, 3, 2, 1]} : (tensor<16x16xf32>) -> tensor<18x20xf32>
   return %0 : tensor<18x20xf32>