From 4b33c312d6d18de19f7615148e6e6f2616dcdc8a Mon Sep 17 00:00:00 2001
From: Anh Leu
Date: Fri, 17 Jul 2020 10:01:30 -0500
Subject: [PATCH] Add ONNXScalerOp pattern (#220)

* add ONNXScalerOp pattern

* move ScalerOp rewrite rule to Rewrite.cpp .td

* attempt to fix format issue

* fixing format issue

* fixing format issue2
---
 src/Dialect/ONNX/ONNXOps.td.inc           |  1 +
 src/Transform/ONNX/Rewrite.cpp            | 27 ++++++++
 src/Transform/ONNX/Rewrite.td             | 52 ++++++++++++++
 test/mlir/onnx/onnx_canonicalization.mlir | 83 +++++++++++++++++++++++
 utils/gen_onnx_mlir.py                    |  2 +-
 5 files changed, 164 insertions(+), 1 deletion(-)

diff --git a/src/Dialect/ONNX/ONNXOps.td.inc b/src/Dialect/ONNX/ONNXOps.td.inc
index eb183cd..5589f75 100644
--- a/src/Dialect/ONNX/ONNXOps.td.inc
+++ b/src/Dialect/ONNX/ONNXOps.td.inc
@@ -6090,6 +6090,7 @@ def ONNXSVMRegressorOp:ONNX_Op<"SVMRegressor",
 
 def ONNXScalerOp:ONNX_Op<"Scaler",
   [NoSideEffect]> {
+  let hasCanonicalizer = 1;
   let summary = "ONNX Scaler operation";
   let description = [{
   "Rescale input data, for example to standardize features by removing the mean and scaling to unit variance."
diff --git a/src/Transform/ONNX/Rewrite.cpp b/src/Transform/ONNX/Rewrite.cpp
index b390419..873ae4f 100644
--- a/src/Transform/ONNX/Rewrite.cpp
+++ b/src/Transform/ONNX/Rewrite.cpp
@@ -18,6 +18,23 @@ using namespace mlir;
 
 namespace {
 
+// Create a DenseElementsAttr from an ArrayAttr of FloatAttrs.
+// The Scaler patterns use this helper to materialize the op's float
+// array attributes as constant f32 tensor values.
+DenseElementsAttr createDenseArrayAttr(
+    PatternRewriter &rewriter, ArrayAttr origAttrs) {
+  assert(origAttrs && "expected a non-null ArrayAttr");
+  mlir::Type elementType = rewriter.getF32Type();
+  int nElements = origAttrs.getValue().size();
+  SmallVector<float, 4> wrapper(nElements, 0);
+  for (int i = 0; i < nElements; ++i) {
+    wrapper[i] = origAttrs.getValue()[i].cast<FloatAttr>().getValueAsDouble();
+  }
+  return DenseElementsAttr::get(
+      RankedTensorType::get(wrapper.size(), elementType),
+      llvm::makeArrayRef(wrapper));
+}
+
 // Check whether an ArrayAttr contains non-zero values or not.
 bool hasNonZeroInArrayAttr(ArrayAttr attrs) {
   bool allZeros = true;
@@ -92,3 +109,13 @@ void ONNXConvOp::getCanonicalizationPatterns(
     OwningRewritePatternList &results, MLIRContext *context) {
   results.insert<ConvOpPaddingPattern>(context);
 }
+
+/// Register the Scaler decomposition patterns on the ONNXScalerOp.
+void ONNXScalerOp::getCanonicalizationPatterns(
+    OwningRewritePatternList &results, MLIRContext *context) {
+  results.insert<ScalerNullPattern>(context);
+  results.insert<ScalerNullPattern2>(context);
+  results.insert<ScalerNoScalePattern>(context);
+  results.insert<ScalerNoOffsetPattern>(context);
+  results.insert<ScalerPattern>(context);
+}
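Aside (illustrative sketch, not part of the patch): taken together, the five
patterns registered above decompose onnx.Scaler into onnx.Sub and onnx.Mul
over constant tensors, with an onnx.Cast fallback for non-float inputs. A
minimal standalone C++ reference for the intended elementwise semantics
(function and variable names here are hypothetical):

    // Reference semantics of the Scaler decomposition:
    //   y[i] = (x[i] - offset[i]) * scale[i]
    // An absent offset behaves as 0 (Mul only) and an absent scale as 1
    // (Sub only), mirroring the ScalerNoOffset/ScalerNoScale/ScalerNull
    // special cases; empty vectors stand in for absent attributes here.
    #include <cstddef>
    #include <vector>

    std::vector<float> scalerReference(const std::vector<float> &x,
        const std::vector<float> &offset, const std::vector<float> &scale) {
      std::vector<float> y(x.size());
      for (std::size_t i = 0; i < x.size(); ++i) {
        float off = offset.empty() ? 0.0f : offset[i]; // no offset attribute
        float scl = scale.empty() ? 1.0f : scale[i];   // no scale attribute
        y[i] = (x[i] - off) * scl;                     // Sub, then Mul
      }
      return y;
    }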
diff --git a/src/Transform/ONNX/Rewrite.td b/src/Transform/ONNX/Rewrite.td
index 4857348..40294eb 100644
--- a/src/Transform/ONNX/Rewrite.td
+++ b/src/Transform/ONNX/Rewrite.td
@@ -100,4 +100,56 @@ def ConvOpPaddingPattern: Pat<
   [(HasNonZeroInArrayAttr:$pads), (IsNotStringAttrOfValue<"VALID"> $auto_pad)]
 >;
 
+//===----------------------------------------------------------------------===//
+// ONNXScalerOp %X, %Offset, %Scale
+// $x: input, $a: offset, $b: scale
+//===----------------------------------------------------------------------===//
+// Useful predicate and helper definitions.
+def AttributeIsNull :
+    Constraint<CPred<"! ($_self)">, "Attribute is null">;
+
+def HasFloatType : Constraint<CPred<"($_self.getType().dyn_cast<TensorType>().getElementType().isF32())">>;
+
+// Create a DenseElementsAttr from an ArrayAttr.
+def createDenseArrayAttr :
+    NativeCodeCall<"createDenseArrayAttr($_builder, $0)">;
+
+// "to" type attribute for the Cast fallback.
+def ScalerT : NativeCodeCall<"$_builder.getI64IntegerAttr(0)">;
+
+// Neither offset nor scale on a float tensor: Scaler is the identity.
+def ScalerNullPattern : Pat<
+    (ONNXScalerOp $x, $a, $b),
+    (replaceWithValue $x),
+    [(HasFloatType:$x), (AttributeIsNull:$a), (AttributeIsNull:$b)]>;
+
+// Neither offset nor scale, input x not float: cast the input.
+def ScalerNullPattern2 : Pat<
+    (ONNXScalerOp $x, $a, $b),
+    (ONNXCastOp $x, (ScalerT)),
+    [(AttributeIsNull:$a), (AttributeIsNull:$b)]>;
+
+// No scale: subtract the offset only.
+def ScalerNoScalePattern : Pat<
+    (ONNXScalerOp $x, $a, $b),
+    (ONNXSubOp $x,
+        (ONNXConstantOp (GetNullAttr), (createDenseArrayAttr $a))),
+    [(AttributeIsNull:$b)]>;
+
+// No offset: multiply by the scale only.
+def ScalerNoOffsetPattern : Pat<
+    (ONNXScalerOp $x, $a, $b),
+    (ONNXMulOp $x,
+        (ONNXConstantOp (GetNullAttr), (createDenseArrayAttr $b))),
+    [(AttributeIsNull:$a)]>;
+
+// General case: (x - offset) * scale.
+def ScalerPattern : Pat<
+    (ONNXScalerOp $x, $a, $b),
+    (ONNXMulOp
+        (ONNXSubOp $x,
+            (ONNXConstantOp (GetNullAttr), (createDenseArrayAttr $a))),
+        (ONNXConstantOp (GetNullAttr), (createDenseArrayAttr $b)))>;
+
+
 #endif // ONNX_REWRITE
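Aside (illustrative sketch, not part of the patch): each Pat<> record above
is shorthand for a C++ OpRewritePattern. ScalerNoOffsetPattern, for example,
expands to roughly the hand-written form below; the accessor and builder
names are approximated from the ODS definitions, so the actual tblgen output
may differ in detail:

    // Hand-written sketch of ScalerNoOffsetPattern: when only "scale" is
    // set, Scaler(x) is replaced by Mul(x, constant(scale)).
    struct ScalerNoOffsetPatternCpp : public OpRewritePattern<ONNXScalerOp> {
      using OpRewritePattern<ONNXScalerOp>::OpRewritePattern;

      LogicalResult matchAndRewrite(
          ONNXScalerOp op, PatternRewriter &rewriter) const override {
        // Match: "offset" absent (AttributeIsNull:$a), "scale" present.
        if (op.offsetAttr() || !op.scaleAttr())
          return failure();
        // Materialize "scale" as a constant f32 tensor, as the DRR form
        // does via the createDenseArrayAttr NativeCodeCall.
        DenseElementsAttr scale = createDenseArrayAttr(rewriter, op.scaleAttr());
        auto cst = rewriter.create<ONNXConstantOp>(
            op.getLoc(), scale.getType(), /*sparse_value=*/Attribute(), scale);
        // Replace Scaler(x) with Mul(x, scale).
        rewriter.replaceOpWithNewOp<ONNXMulOp>(
            op, op.getResult().getType(), op.X(), cst);
        return success();
      }
    };

The DRR form is the better fit here, since the five Scaler variants differ
only in their constraints and result DAGs.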
diff --git a/test/mlir/onnx/onnx_canonicalization.mlir b/test/mlir/onnx/onnx_canonicalization.mlir
index 3d37f5d..b2239ce 100644
--- a/test/mlir/onnx/onnx_canonicalization.mlir
+++ b/test/mlir/onnx/onnx_canonicalization.mlir
@@ -96,3 +96,86 @@ func @test_gemm_add_fusion_rank3(%arg0: tensor<128x128x256xf32>, %arg1: tensor<128x128x256xf32>, %arg2: tensor<256xf32>) -> tensor<*xf32> {
   // CHECK-NEXT: [[GEMM:%.+]] = "onnx.Gemm"(%{{.*}}, %{{.*}}, %{{.*}}) {alpha = 1.000000e+00 : f32, beta = 1.000000e+00 : f32, transA = 0 : i64, transB = 0 : i64} : (tensor<128x128x256xf32>, tensor<128x128x256xf32>, tensor<256xf32>) -> tensor<*xf32>
   // return [[GEMM]] : tensor<*xf32>
 }
+
+// -----
+// Scaler Pattern tests
+// -----
+
+// No offset, no scale, float input: Scaler folds to the identity.
+// CHECK-LABEL: func @test_scaler_null_float(%{{.*}}: tensor<3xf32>) -> tensor<3xf32> {
+func @test_scaler_null_float(%arg0: tensor<3xf32>) -> tensor<3xf32> {
+  %0 = "onnx.Scaler"(%arg0) : (tensor<3xf32>) -> tensor<3xf32>
+  return %0 : tensor<3xf32>
+
+  // CHECK-NEXT: return %arg0 : tensor<3xf32>
+}
+
+// -----
+
+// No offset, no scale, non-float input: lowered to a single Cast.
+// CHECK-LABEL: func @test_scaler_null(%{{.*}}: tensor<3xi32>) -> tensor<3xf32> {
+func @test_scaler_null(%arg0: tensor<3xi32>) -> tensor<3xf32> {
+  %0 = "onnx.Scaler"(%arg0) : (tensor<3xi32>) -> tensor<3xf32>
+  return %0 : tensor<3xf32>
+
+  // CHECK-NEXT: %0 = "onnx.Cast"(%arg0) {to = 0 : i64} : (tensor<3xi32>) -> tensor<3xf32>
+  // CHECK-NEXT: return %0 : tensor<3xf32>
+}
+
+// -----
+
+// Scale only: Scaler becomes a single Mul.
+// CHECK-LABEL: func @test_scaler_no_offset(%{{.*}}: tensor<3xf32>) -> tensor<3xf32> {
+func @test_scaler_no_offset(%arg0: tensor<3xf32>) -> tensor<3xf32> {
+  %0 = "onnx.Scaler"(%arg0) {scale = [3.125000e-02 : f32, 0.0909090936 : f32, 0.0333333351 : f32]} : (tensor<3xf32>) -> tensor<3xf32>
+  return %0 : tensor<3xf32>
+
+  // CHECK-NEXT: %0 = "onnx.Constant"() {value = dense<[3.125000e-02, 0.0909090936, 0.0333333351]> : tensor<3xf32>} : () -> tensor<3xf32>
+  // CHECK-NEXT: %1 = "onnx.Mul"(%arg0, %0) : (tensor<3xf32>, tensor<3xf32>) -> tensor<3xf32>
+  // CHECK-NEXT: return %1 : tensor<3xf32>
+}
+
+// -----
+
+// Offset only: Scaler becomes a single Sub.
+// CHECK-LABEL: func @test_scaler_no_scale(%{{.*}}: tensor<3xf32>) -> tensor<3xf32> {
+func @test_scaler_no_scale(%arg0: tensor<3xf32>) -> tensor<3xf32> {
+  %0 = "onnx.Scaler"(%arg0) {offset = [1986.99939 : f32, 0.99999988 : f32, 0.999999701 : f32]} : (tensor<3xf32>) -> tensor<3xf32>
+  return %0 : tensor<3xf32>
+
+  // CHECK-NEXT: %0 = "onnx.Constant"() {value = dense<[1986.99939, 0.99999988, 0.999999701]> : tensor<3xf32>} : () -> tensor<3xf32>
+  // CHECK-NEXT: %1 = "onnx.Sub"(%arg0, %0) : (tensor<3xf32>, tensor<3xf32>) -> tensor<3xf32>
+  // CHECK-NEXT: return %1 : tensor<3xf32>
+}
+
+// -----
+
+// Offset and scale: Sub followed by Mul.
+// CHECK-LABEL: func @test_scaler_normal(%{{.*}}: tensor<3xf32>) -> tensor<3xf32> {
+func @test_scaler_normal(%arg0: tensor<3xf32>) -> tensor<3xf32> {
+  %0 = "onnx.Scaler"(%arg0) {offset = [1986.99939 : f32, 0.99999988 : f32, 0.999999701 : f32], scale = [3.125000e-02 : f32, 0.0909090936 : f32, 0.0333333351 : f32]} : (tensor<3xf32>) -> tensor<3xf32>
+  return %0 : tensor<3xf32>
+
+  // CHECK-NEXT: %0 = "onnx.Constant"() {value = dense<[1986.99939, 0.99999988, 0.999999701]> : tensor<3xf32>} : () -> tensor<3xf32>
+  // CHECK-NEXT: %1 = "onnx.Sub"(%arg0, %0) : (tensor<3xf32>, tensor<3xf32>) -> tensor<3xf32>
+  // CHECK-NEXT: %2 = "onnx.Constant"() {value = dense<[3.125000e-02, 0.0909090936, 0.0333333351]> : tensor<3xf32>} : () -> tensor<3xf32>
+  // CHECK-NEXT: %3 = "onnx.Mul"(%1, %2) : (tensor<3xf32>, tensor<3xf32>) -> tensor<3xf32>
+  // CHECK-NEXT: return %3 : tensor<3xf32>
+}
+
+// -----
+
+// Single-element offset and scale broadcast over the 3-element input.
+// CHECK-LABEL: func @test_scaler_constant(%{{.*}}: tensor<3xf32>) -> tensor<3xf32> {
+func @test_scaler_constant(%arg0: tensor<3xf32>) -> tensor<3xf32> {
+  %0 = "onnx.Scaler"(%arg0) {offset = [1986.99939 : f32], scale = [3.125000e-02 : f32]} : (tensor<3xf32>) -> tensor<3xf32>
+  return %0 : tensor<3xf32>
+
+  // CHECK-NEXT: %0 = "onnx.Constant"() {value = dense<1986.99939> : tensor<1xf32>} : () -> tensor<1xf32>
+  // CHECK-NEXT: %1 = "onnx.Sub"(%arg0, %0) : (tensor<3xf32>, tensor<1xf32>) -> tensor<3xf32>
+  // CHECK-NEXT: %2 = "onnx.Constant"() {value = dense<3.125000e-02> : tensor<1xf32>} : () -> tensor<1xf32>
+  // CHECK-NEXT: %3 = "onnx.Mul"(%1, %2) : (tensor<3xf32>, tensor<1xf32>) -> tensor<3xf32>
+  // CHECK-NEXT: return %3 : tensor<3xf32>
+}
+
diff --git a/utils/gen_onnx_mlir.py b/utils/gen_onnx_mlir.py
index 7b0e620..da99203 100644
--- a/utils/gen_onnx_mlir.py
+++ b/utils/gen_onnx_mlir.py
@@ -256,7 +256,7 @@ OpsWithShapeInference = [
 ]
 
 # Operations supporting canonicalization.
-OpsWithCanonicalizer = ['Add', 'Identity', 'Gemm', 'Conv']
+OpsWithCanonicalizer = ['Add', 'Identity', 'Gemm', 'Conv', 'Scaler']
 
 # Operations who have operands that, if produced by constant operations, should
 # be promoted to become an attribute (via attribute promotion).
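Aside (illustrative sketch, not part of the patch): adding 'Scaler' to
OpsWithCanonicalizer is what makes gen_onnx_mlir.py emit the
"let hasCanonicalizer = 1;" line in ONNXOps.td.inc above. That flag, in turn,
makes ODS declare the hook whose definition this patch adds in Rewrite.cpp,
roughly:

    // Member declaration emitted on the generated ONNXScalerOp class by
    // "let hasCanonicalizer = 1;" (a sketch of the ODS output for the MLIR
    // version in use, not copied from generated code).
    static void getCanonicalizationPatterns(
        OwningRewritePatternList &results, MLIRContext *context);

The registered patterns then run as part of MLIR's standard canonicalization
pass, which is what the CHECK lines in onnx_canonicalization.mlir exercise.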