diff --git a/src/pass/onnx_rewrite.td b/src/pass/onnx_rewrite.td index c3b5490..22a3cd1 100644 --- a/src/pass/onnx_rewrite.td +++ b/src/pass/onnx_rewrite.td @@ -33,6 +33,11 @@ class StringAttrOfValue<string val>: class FloatAttrOfValue<int val>: NativeCodeCall<"FloatAttr::get($0.getType().cast<TensorType>().getElementType(), " # val # ")">; +// Create a FloatAttr for the negative infinity. +def FloatAttrOfNegativeInfinity: + NativeCodeCall<"FloatAttr::get($0.getType().cast<TensorType>().getElementType(), " + "-std::numeric_limits<float>::infinity())">; + // Create an ArrayAttr of IntergerAttr(s) of zero values. // This function is used for padding attribute in MaxPoolSingleOut. def createArrayAttrOfZerosFrom: @@ -80,7 +85,7 @@ def MaxPoolSingleOutOpPaddingPattern: Pat< (ONNXMaxPoolSingleOutOp (ONNXPadConstantValuePadOp $x, (insertZerosForNonPaddedDims<2> $pads), - (FloatAttrOfValue<0> $res), + (FloatAttrOfNegativeInfinity $res), (StringAttrOfValue<"constant">)), $auto_pad, $ceil_mode, $dilation, $kernel_shape, (createArrayAttrOfZerosFrom $pads), diff --git a/test/mlir/onnx/onnx_canonicalization.mlir b/test/mlir/onnx/onnx_canonicalization.mlir index 78193cb..3e27d7e 100644 --- a/test/mlir/onnx/onnx_canonicalization.mlir +++ b/test/mlir/onnx/onnx_canonicalization.mlir @@ -82,7 +82,7 @@ func @test_maxpoolsingleout_split(%arg0: tensor<5x5x32x32xf32>) -> tensor<5x8x32 %0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0, kernel_shape = [5,3], pads = [1, 2, 3, 4] } : (tensor<5x5x32x32xf32>) -> tensor<5x8x32x39xf32> "std.return"(%0) : (tensor<5x8x32x39xf32>) -> () - // CHECK-NEXT: %0 = "onnx.PadConstantValuePad"(%arg0) {constant_value = 0.000000e+00 : f32, mode = "constant", pads = [0, 0, 1, 2, 0, 0, 3, 4]} : (tensor<5x5x32x32xf32>) -> tensor<5x8x32x39xf32> + // CHECK-NEXT: %0 = "onnx.PadConstantValuePad"(%arg0) {constant_value = 0xFF800000 : f32, mode = "constant", pads = [0, 0, 1, 2, 0, 0, 3, 4]} : (tensor<5x5x32x32xf32>) -> tensor<5x8x32x39xf32> // CHECK-NEXT: %1 = "onnx.MaxPoolSingleOut"(%0) 
{auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [5, 3], pads = [0, 0, 0, 0], storage_order = 0 : i64} : (tensor<5x8x32x39xf32>) -> tensor<5x8x32x39xf32> // CHECK-NEXT: return %1 : tensor<5x8x32x39xf32> } @@ -92,7 +92,7 @@ func @test_maxpoolsingleout_split_unknown_dims(%arg0: tensor<*xf32>) -> tensor<* %0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0, kernel_shape = [5,3], pads = [1, 2, 3, 4] } : (tensor<*xf32>) -> tensor<*xf32> "std.return"(%0) : (tensor<*xf32>) -> () - // CHECK-NEXT: %0 = "onnx.PadConstantValuePad"(%arg0) {constant_value = 0.000000e+00 : f32, mode = "constant", pads = [0, 0, 1, 2, 0, 0, 3, 4]} : (tensor<*xf32>) -> tensor<*xf32> + // CHECK-NEXT: %0 = "onnx.PadConstantValuePad"(%arg0) {constant_value = 0xFF800000 : f32, mode = "constant", pads = [0, 0, 1, 2, 0, 0, 3, 4]} : (tensor<*xf32>) -> tensor<*xf32> // CHECK-NEXT: %1 = "onnx.MaxPoolSingleOut"(%0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [5, 3], pads = [0, 0, 0, 0], storage_order = 0 : i64} : (tensor<*xf32>) -> tensor<*xf32> // CHECK-NEXT: return %1 : tensor<*xf32> }