diff --git a/src/dialect/onnx/gen_doc.py b/src/dialect/onnx/gen_doc.py index ed99e57..bdf236a 100644 --- a/src/dialect/onnx/gen_doc.py +++ b/src/dialect/onnx/gen_doc.py @@ -267,7 +267,8 @@ def gen_schema(schema) : 'Add', 'Mul', 'Div', 'Sub', 'And', 'Or', 'Xor', 'Sum', 'Max', 'Min', 'MatMul', 'Gemm', 'LeakyRelu', 'Elu', 'Selu', 'HardSigmoid', 'Reshape', 'Reciprocal', - 'Identity', 'Cos', 'Log', 'Transpose', 'Softmax'] + 'Identity', 'Cos', 'Log', 'Transpose', 'Softmax', + 'Softplus', 'Softsign'] CanonicalList=['Add', 'Identity'] line_indent = ' ' diff --git a/src/dialect/onnx/onnx_ops.cpp b/src/dialect/onnx/onnx_ops.cpp index b36448d..6e6fc23 100644 --- a/src/dialect/onnx/onnx_ops.cpp +++ b/src/dialect/onnx/onnx_ops.cpp @@ -166,6 +166,22 @@ void ONNXSoftmaxOp::inferShapes() { getResult().setType(getOperand().getType()); } +//===----------------------------------------------------------------------===// +// Softplus +/// Infer the output shape of the ONNXSoftplusOp. This method is required by +/// the shape inference interface. +void ONNXSoftplusOp::inferShapes() { + getResult().setType(getOperand().getType()); +} + +//===----------------------------------------------------------------------===// +// Softsign +/// Infer the output shape of the ONNXSoftsignOp. This method is required by +/// the shape inference interface. +void ONNXSoftsignOp::inferShapes() { + getResult().setType(getOperand().getType()); +} + //===----------------------------------------------------------------------===// // Add /// Infer the output shape of the ONNXAddOp. 
This method is required by the
diff --git a/src/dialect/onnx/onnxop.inc b/src/dialect/onnx/onnxop.inc
index 3a54fa0..f377772 100644
--- a/src/dialect/onnx/onnxop.inc
+++ b/src/dialect/onnx/onnxop.inc
@@ -2863,7 +2863,7 @@ def ONNXSoftmaxOp:ONNX_Op<"Softmax",
 }
 
 def ONNXSoftplusOp:ONNX_Op<"Softplus",
-  [NoSideEffect]> {
+  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
   let summary = "ONNX Softplus operation";
   let description = [{
     "Softplus takes one input data (Tensor<T>) and produces one output data"
@@ -2875,7 +2875,7 @@ def ONNXSoftplusOp:ONNX_Op<"Softplus",
 }
 
 def ONNXSoftsignOp:ONNX_Op<"Softsign",
-  [NoSideEffect]> {
+  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
   let summary = "ONNX Softsign operation";
   let description = [{
     "Calculates the softsign (x/(1+|x|)) of the given input tensor element-wise."
diff --git a/src/pass/lower_frontend_to_krnl.cpp b/src/pass/lower_frontend_to_krnl.cpp
index 58d603a..020204a 100644
--- a/src/pass/lower_frontend_to_krnl.cpp
+++ b/src/pass/lower_frontend_to_krnl.cpp
@@ -570,6 +570,46 @@ Value mapToLowerScalarOp(
   return result;
 }
 
+//===----------------------------------------------------------------------===//
+// Scalar unary ops for lowering ONNXSoftplusOp
+//===----------------------------------------------------------------------===//
+template <>
+Value mapToLowerScalarOp<ONNXSoftplusOp>(
+    Operation *op, ArrayRef<Type> result_types, ArrayRef<Value> operands,
+    ConversionPatternRewriter &rewriter) {
+  // ONNXSoftplusOp(%X) = LogOp(AddFOp(ExpOp(%X), ConstantOp 1))
+  auto loc = op->getLoc();
+  Value operand = operands[0];
+  auto elementType = result_types[0];
+
+  auto exp = rewriter.create<ExpOp>(loc, operand);
+  auto one = rewriter.create<ConstantOp>(loc, FloatAttr::get(elementType, 1));
+  auto add = rewriter.create<AddFOp>(loc, exp, one);
+  auto result = rewriter.create<LogOp>(loc, add);
+
+  return result;
+}
+
+//===----------------------------------------------------------------------===//
+// Scalar unary ops for lowering ONNXSoftsignOp
+//===----------------------------------------------------------------------===//
+template <>
+Value mapToLowerScalarOp<ONNXSoftsignOp>(
+    Operation *op, ArrayRef<Type> result_types, ArrayRef<Value> operands,
+    ConversionPatternRewriter &rewriter) {
+  // ONNXSoftsignOp(%X) = DivFOp(%X, AddFOp(AbsFOp(%X), ConstantOp 1))
+  auto loc = op->getLoc();
+  Value operand = operands[0];
+  auto elementType = result_types[0];
+
+  auto abs = rewriter.create<AbsFOp>(loc, operand);
+  auto one = rewriter.create<ConstantOp>(loc, FloatAttr::get(elementType, 1));
+  auto add = rewriter.create<AddFOp>(loc, abs, one);
+  auto result = rewriter.create<DivFOp>(loc, operand, add);
+
+  return result;
+}
+
 //===----------------------------------------------------------------------===//
 // Scalar unary ops for lowering ONNXMaxOp
 //===----------------------------------------------------------------------===//
@@ -1214,6 +1254,8 @@ void FrontendToKrnlLoweringPass::runOnModule() {
                   ONNXElementwiseUnaryOpLowering,
                   ONNXElementwiseUnaryOpLowering,
                   ONNXElementwiseUnaryOpLowering,
+                  ONNXElementwiseUnaryOpLowering<mlir::ONNXSoftplusOp>,
+                  ONNXElementwiseUnaryOpLowering<mlir::ONNXSoftsignOp>,
                   ONNXElementwiseVariadicOpLowering,
                   ONNXElementwiseVariadicOpLowering,
                   ONNXElementwiseVariadicOpLowering,
diff --git a/src/pass/shape_inference_pass.cpp b/src/pass/shape_inference_pass.cpp
index 5239904..c95191f 100644
--- a/src/pass/shape_inference_pass.cpp
+++ b/src/pass/shape_inference_pass.cpp
@@ -101,6 +101,8 @@ public:
            op->getName().getStringRef() != "onnx.LeakyRelu" &&
            op->getName().getStringRef() != "onnx.Selu" &&
            op->getName().getStringRef() != "onnx.Reciprocal" &&
+           op->getName().getStringRef() != "onnx.Softplus" &&
+           op->getName().getStringRef() != "onnx.Softsign" &&
            op->getName().getStringRef() != "onnx.Mul" &&
            op->getName().getStringRef() != "onnx.Add" &&
            op->getName().getStringRef() != "onnx.Div" &&
diff --git a/test/backend/test.py b/test/backend/test.py
index 1b257c8..a369a5b 100644
--- a/test/backend/test.py
+++ b/test/backend/test.py
@@ -146,6 +146,14 @@ test_to_enable = [
     # Reciprocal Op:
     "test_reciprocal_cpu",
"test_reciprocal_example_cpu", + + # SoftplusOp: + "test_softplus_cpu", + "test_softplus_example_cpu", + + # SoftsignOp: + "test_softsign_cpu", + "test_softsign_example_cpu", ] # Extract name of all test cases. diff --git a/test/mlir/onnx/onnx_lowering.mlir b/test/mlir/onnx/onnx_lowering.mlir index c6c5927..65120b6 100644 --- a/test/mlir/onnx/onnx_lowering.mlir +++ b/test/mlir/onnx/onnx_lowering.mlir @@ -508,6 +508,50 @@ func @test_reciprocal(%arg0 : tensor) -> tensor<*xf32> { // CHECK: return [[RES]] : memref } +func @test_softplus(%arg0 : tensor) -> tensor<*xf32> { + %0 = "onnx.Softplus"(%arg0) : (tensor) -> tensor<*xf32> + "std.return"(%0) : (tensor<*xf32>) -> () + + // CHECK-LABEL: test_softplus + // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref + // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 + // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { + // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 + // CHECK: } : () -> (!krnl.loop, !krnl.loop) + // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { + // CHECK: [[LOAD:%.+]] = load %arg0[%arg1, %arg2] : memref + // CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32 + // CHECK: [[ONE:%.+]] = constant {{1.+}} : f32 + // CHECK: [[ADD:%.+]] = addf [[EXP]], [[ONE]] : f32 + // CHECK: [[SOFTPLUS_RES:%.+]] = log [[ADD]] : f32 + // CHECK: store [[SOFTPLUS_RES]], [[RES]][%arg1, %arg2] : memref + // CHECK: return [[RES]] : memref +} + +func @test_softsign(%arg0 : tensor) -> tensor<*xf32> { + %0 = "onnx.Softsign"(%arg0) : (tensor) -> tensor<*xf32> + "std.return"(%0) : (tensor<*xf32>) -> () + + // CHECK-LABEL: test_softsign + // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref + // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 + // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { 
+  // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
+  // CHECK: } : () -> (!krnl.loop, !krnl.loop)
+  // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
+  // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
+  // CHECK: [[LOAD:%.+]] = load %arg0[%arg1, %arg2] : memref<?x10xf32>
+  // CHECK: [[ABS:%.+]] = absf [[LOAD]] : f32
+  // CHECK: [[ONE:%.+]] = constant {{1.+}} : f32
+  // CHECK: [[ADD:%.+]] = addf [[ABS]], [[ONE]] : f32
+  // CHECK: [[SOFTSIGN_RES:%.+]] = divf [[LOAD]], [[ADD]] : f32
+  // CHECK: store [[SOFTSIGN_RES]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
+  // CHECK: return [[RES]] : memref<?x10xf32>
+}
+
 func @test_add_with_broadcasting(%arg0 : tensor<?xf32>, %arg1 : tensor<?x10xf32>) -> tensor<*xf32> {
   %0 = "onnx.Add"(%arg0, %arg1) : (tensor<?xf32>, tensor<?x10xf32>) -> tensor<*xf32>
   "std.return"(%0) : (tensor<*xf32>) -> ()