From f4fefcf7132264faa119866857a37611f7918a79 Mon Sep 17 00:00:00 2001
From: "Tung D. Le"
Date: Thu, 9 Apr 2020 15:22:36 +0900
Subject: [PATCH] Re-add tanh lowering (#75)

* Re-add tanh lowering

* Make the emission deterministic
---
 .../ONNXToKrnl/Math/Elementwise.cpp           | 30 +++++++++++++++----
 test/backend/test.py                          |  4 +++
 test/mlir/onnx/onnx_lowering.mlir             |  8 ++++-
 .../mlir/onnx/onnx_lowering_with_dealloc.mlir | 16 ++++++++--
 4 files changed, 49 insertions(+), 9 deletions(-)

diff --git a/src/Conversion/ONNXToKrnl/Math/Elementwise.cpp b/src/Conversion/ONNXToKrnl/Math/Elementwise.cpp
index 75990f0..222c538 100644
--- a/src/Conversion/ONNXToKrnl/Math/Elementwise.cpp
+++ b/src/Conversion/ONNXToKrnl/Math/Elementwise.cpp
@@ -66,12 +66,6 @@ struct ScalarOp {
   using IOp = AddIOp;
 };
 
-template <>
-struct ScalarOp<ONNXTanhOp> {
-  using FOp = TanhOp;
-  using IOp = TanhOp; // not use
-};
-
 template <>
 struct ScalarOp<ONNXCosOp> {
   using FOp = CosOp;
@@ -138,6 +132,30 @@ Value mapToLowerScalarOp(Operation *op, ArrayRef<Type> result_types,
   return result;
 }
 
+//===----------------------------------------------------------------------===//
+// Scalar unary ops for lowering ONNXTanhOp
+//===----------------------------------------------------------------------===//
+template <>
+Value mapToLowerScalarOp<ONNXTanhOp>(Operation *op, ArrayRef<Type> result_types,
+                                     ArrayRef<Value> operands,
+                                     ConversionPatternRewriter &rewriter) {
+  // ONNXTanhOp(%X) = DivFOp(SubFOp(ExpOp(%X), ExpOp(NegFOp(%X))),
+  //                         AddFOp(ExpOp(%X), ExpOp(NegFOp(%X))))
+  auto loc = op->getLoc();
+  Value operand = operands[0];
+  auto elementType = result_types[0];
+
+  auto zero = emitConstantOp(rewriter, loc, elementType, 0);
+  auto neg = rewriter.create<SubFOp>(loc, zero, operand);
+  auto exp = rewriter.create<ExpOp>(loc, operand);
+  auto negExp = rewriter.create<ExpOp>(loc, neg);
+  auto dividend = rewriter.create<SubFOp>(loc, exp, negExp);
+  auto divisor = rewriter.create<AddFOp>(loc, exp, negExp);
+  auto result = rewriter.create<DivFOp>(loc, dividend, divisor);
+
+  return result;
+}
+
 //===----------------------------------------------------------------------===//
 // Scalar unary ops for lowering ONNXSigmoidOp
 //===----------------------------------------------------------------------===//
diff --git a/test/backend/test.py b/test/backend/test.py
index 902f7e5..f47ae77 100644
--- a/test/backend/test.py
+++ b/test/backend/test.py
@@ -82,6 +82,10 @@ test_to_enable = [
     "test_cosh_cpu",
     "test_cosh_example_cpu",
 
+    # Tanh:
+    "test_tanh_cpu",
+    "test_tanh_example_cpu",
+
     # Div Op:
     "test_div_cpu",
     "test_div_bcast_cpu",
diff --git a/test/mlir/onnx/onnx_lowering.mlir b/test/mlir/onnx/onnx_lowering.mlir
index f1453bd..db42e01 100644
--- a/test/mlir/onnx/onnx_lowering.mlir
+++ b/test/mlir/onnx/onnx_lowering.mlir
@@ -159,7 +159,13 @@ func @test_tanh(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
   // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
   // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
   // CHECK: [[LOAD:%.+]] = load %arg0[%arg1, %arg2] : memref<?x10xf32>
-  // CHECK: [[TANH:%.+]] = tanh [[LOAD]] : f32
+  // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
+  // CHECK: [[NLOAD:%.+]] = subf [[ZERO]], [[LOAD]] : f32
+  // CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32
+  // CHECK: [[NEXP:%.+]] = exp [[NLOAD]] : f32
+  // CHECK: [[DIVIDEND:%.+]] = subf [[EXP]], [[NEXP]] : f32
+  // CHECK: [[DIVISOR:%.+]] = addf [[EXP]], [[NEXP]] : f32
+  // CHECK: [[TANH:%.+]] = divf [[DIVIDEND]], [[DIVISOR]] : f32
   // CHECK: store [[TANH]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
   // CHECK: return [[RES]] : memref<?x10xf32>
 }
diff --git a/test/mlir/onnx/onnx_lowering_with_dealloc.mlir b/test/mlir/onnx/onnx_lowering_with_dealloc.mlir
index 3940643..b81430d 100644
--- a/test/mlir/onnx/onnx_lowering_with_dealloc.mlir
+++ b/test/mlir/onnx/onnx_lowering_with_dealloc.mlir
@@ -315,7 +315,13 @@ func @test_tanh_tanh(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
   // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
   // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
   // CHECK: [[LOAD:%.+]] = load %arg0[%arg1, %arg2] : memref<?x10xf32>
-  // CHECK: [[TANH:%.+]] = tanh [[LOAD]] : f32
+  // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
+  // CHECK: [[NLOAD:%.+]] = subf [[ZERO]], [[LOAD]] : f32
+  // CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32
+  // CHECK: [[NEXP:%.+]] = exp [[NLOAD]] : f32
+  // CHECK: [[DIVIDEND:%.+]] = subf [[EXP]], [[NEXP]] : f32
+  // CHECK: [[DIVISOR:%.+]] = addf [[EXP]], [[NEXP]] : f32
+  // CHECK: [[TANH:%.+]] = divf [[DIVIDEND]], [[DIVISOR]] : f32
   // CHECK: store [[TANH]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
 
   /// Second Tanh
@@ -328,7 +334,13 @@ func @test_tanh_tanh(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
   // CHECK: [[DIM_2:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
   // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
   // CHECK: [[LOAD:%.+]] = load [[RES]][%arg1, %arg2] : memref<?x10xf32>
-  // CHECK: [[TANH_RES:%.+]] = tanh [[LOAD]] : f32
+  // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
+  // CHECK: [[NLOAD:%.+]] = subf [[ZERO]], [[LOAD]] : f32
+  // CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32
+  // CHECK: [[NEXP:%.+]] = exp [[NLOAD]] : f32
+  // CHECK: [[DIVIDEND:%.+]] = subf [[EXP]], [[NEXP]] : f32
+  // CHECK: [[DIVISOR:%.+]] = addf [[EXP]], [[NEXP]] : f32
+  // CHECK: [[TANH_RES:%.+]] = divf [[DIVIDEND]], [[DIVISOR]] : f32
   // CHECK: store [[TANH_RES]], [[RET_RES]][%arg1, %arg2] : memref<?x10xf32>
 
   /// Dealloc of first result.
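
Note (not part of the patch, added for illustration): the emitted lowering relies on the identity tanh(x) = (e^x - e^(-x)) / (e^x + e^(-x)), with the negation expressed as (0 - x) so that only SubFOp, ExpOp, AddFOp, and DivFOp are needed; that is exactly the scalar-op sequence the FileCheck patterns above expect (constant, subf, exp, exp, subf, addf, divf). A minimal standalone C++ sketch that checks this decomposition against std::tanh; the helper name tanhViaExp is hypothetical and not part of the onnx-mlir sources:

#include <cassert>
#include <cmath>
#include <cstdio>
#include <initializer_list>

// Mirrors the ops emitted by the lowering: negate via (0 - x), two exp
// calls, a subtraction, an addition, and a division.
static double tanhViaExp(double x) {
  double e = std::exp(x);
  double negExp = std::exp(0.0 - x);
  return (e - negExp) / (e + negExp);
}

int main() {
  for (double x : {-2.0, -0.5, 0.0, 0.5, 2.0}) {
    double diff = std::fabs(tanhViaExp(x) - std::tanh(x));
    std::printf("x = %5.2f  |tanhViaExp - std::tanh| = %.3e\n", x, diff);
    assert(diff < 1e-12); // decomposition agrees with std::tanh to rounding error
  }
  return 0;
}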