diff --git a/doc/Dialects/onnx.md b/doc/Dialects/onnx.md
index 69349aa..e4ca150 100644
--- a/doc/Dialects/onnx.md
+++ b/doc/Dialects/onnx.md
@@ -2848,6 +2848,32 @@ ONNX PRelu operation
 
 1. `Y`: memref of any type values or tensor of any type values
 
+### onnx.PadConstantPad (ONNXPadConstantPadOp)
+ONNX Pad operation with constant padding value
+
+#### Description:
+
+This operation is introduced to handle situations in which the padding value
+and the padding amounts are constants; they will become attributes.
+
+#### Operands:
+
+1. `data`: memref of any type values or tensor of any type values
+1. `constant_value`: memref of any type values or tensor of any type values
+
+#### Attributes:
+
+| Attribute | MLIR Type | Description |
+| :-------: | :-------: | ----------- |
+| `pads` | `ArrayAttr` | 64-bit integer array attribute |
+| `mode` | `StringAttr` | string attribute |
+
+#### Results:
+
+1. `output`: memref of any type values or tensor of any type values
+
 ### onnx.PadConstantValue (ONNXPadConstantValueOp)
 ONNX Pad operation with constant padding value
 
@@ -2876,7 +2902,7 @@ ONNX Pad operation with constant padding value
 
 1. `output`: memref of any type values or tensor of any type values
 
-### onnx.PadConstatValuePad (ONNXPadConstantValuePadOp)
+### onnx.PadConstantValuePad (ONNXPadConstantValuePadOp)
 ONNX Pad operation with constant padding value
 
 #### Description:
diff --git a/src/dialect/onnx/onnx.td b/src/dialect/onnx/onnx.td
index 1cc88c3..1dde6cc 100644
--- a/src/dialect/onnx/onnx.td
+++ b/src/dialect/onnx/onnx.td
@@ -169,8 +169,22 @@ def ONNXPadConstantValueOp : ONNX_Op<"PadConstantValue",
   let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$output);
 }
 
-def ONNXPadConstantValuePadOp : ONNX_Op<"PadConstatValuePad",
-                  [NoSideEffect ]> {
+def ONNXPadConstantPadOp : ONNX_Op<"PadConstantPad",
+                  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
+  let summary = "ONNX Pad operation with constant padding value";
+  let description = [{
+    This operation is introduced to handle situations in which the padding
+    value and the padding amounts are constants; they will become attributes.
+  }];
+  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data,
+                   AnyTypeOf<[AnyMemRef, AnyTensor]>:$constant_value,
+                   I64ArrayAttr:$pads,
+                   DefaultValuedAttr<StrAttr, "constant">:$mode);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$output);
+}
+
+def ONNXPadConstantValuePadOp : ONNX_Op<"PadConstantValuePad",
+                  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
   let summary = "ONNX Pad operation with constant padding value";
   let description = [{ "this operation is introduced to handle situation"
                        " in which the padding value and padding are constants"
diff --git a/src/dialect/onnx/onnx_ops.cpp b/src/dialect/onnx/onnx_ops.cpp
index ed18086..5474856 100644
--- a/src/dialect/onnx/onnx_ops.cpp
+++ b/src/dialect/onnx/onnx_ops.cpp
@@ -1045,6 +1045,57 @@ void ONNXMaxPoolSingleOutOp::inferShapes() {
 
 //===----------------------------------------------------------------------===//
 
+static Type padShapeInferenceHelper(Value data, ArrayAttr padsOpt) {
+  // Cannot infer shape if no shape exists.
+  if (!data.getType().isa<RankedTensorType>())
+    return (Type)NULL;
+  auto dataTy = data.getType().cast<RankedTensorType>();
+  auto dataShape = dataTy.getShape();
+  auto dataRank = dataShape.size();
+  SmallVector<int64_t, 4> outputShape(dataShape.begin(), dataShape.end());
+  if (padsOpt) {
+    auto padsArray = padsOpt.getValue();
+    // Pads consists of two values for each axis of data.
+    // The two values specify the number of elements padded before and
+    // after, respectively.
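+    // For example, pads = [0, 2, 3, 1] on a 16x13 input grows axis 0 by
+    // 0 + 2 and axis 1 by 3 + 1, giving an 18x17 result (see
+    // test_PadConstantPad_1 below).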
+    for (int i = 0; i < dataRank; ++i) {
+      int64_t p1 = (padsArray[2 * i]).cast<IntegerAttr>().getInt();
+      int64_t p2 = (padsArray[2 * i + 1]).cast<IntegerAttr>().getInt();
+      // Pads must be non-negative constants.
+      if (p1 < 0 || p2 < 0)
+        return (Type)NULL;
+      outputShape[i] += p1 + p2;
+    }
+
+    return (RankedTensorType::get(outputShape, dataTy.getElementType()));
+  } else {
+    return (Type)NULL;
+  }
+}
+
+// PadConstantPad
+
+void ONNXPadConstantPadOp::inferShapes() {
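+  // Derive the result type from the compile-time 'pads' attribute; if the
+  // helper cannot infer a shape (unranked input or a negative pad), the
+  // result type is left unchanged.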
+  auto outputType = padShapeInferenceHelper(data(), pads());
+  if (outputType) {
+    getResult().setType(outputType);
+  }
+  return;
+}
+
+//===----------------------------------------------------------------------===//
+
+// PadConstantValuePad
+
+void ONNXPadConstantValuePadOp::inferShapes() {
+  auto outputType = padShapeInferenceHelper(data(), pads());
+  if (outputType) {
+    getResult().setType(outputType);
+  }
+  return;
+}
+
+//===----------------------------------------------------------------------===//
+
 // Unsqueeze
 
 void ONNXUnsqueezeOp::inferShapes() {
diff --git a/src/pass/shape_inference_pass.cpp b/src/pass/shape_inference_pass.cpp
index 4038ec3..47826af 100644
--- a/src/pass/shape_inference_pass.cpp
+++ b/src/pass/shape_inference_pass.cpp
@@ -127,6 +127,8 @@ public:
            op->getName().getStringRef() != "onnx.Softmax" &&
            op->getName().getStringRef() != "onnx.Sqrt" &&
            op->getName().getStringRef() != "onnx.ConvNoBias" &&
+           op->getName().getStringRef() != "onnx.PadConstantPad" &&
+           op->getName().getStringRef() != "onnx.PadConstantValuePad" &&
            op->getName().getStringRef() != "onnx.BatchNormalizationTestMode" &&
            op->getName().getStringRef() != "onnx.Unsqueeze")
       return false;
diff --git a/test/mlir/onnx/onnx_canonicalization.mlir b/test/mlir/onnx/onnx_canonicalization.mlir
index 840cd7d..a535a33 100644
--- a/test/mlir/onnx/onnx_canonicalization.mlir
+++ b/test/mlir/onnx/onnx_canonicalization.mlir
@@ -87,7 +87,7 @@ func @test_reducesumsquare(%arg0 : tensor<?x?x?xf32>) -> tensor<*xf32> {
 
 // CHECK-LABEL: @test_constant_pad(%{{.*}}: tensor<?x?xf32>) -> tensor<*xf32> {
 func @test_constant_pad(%arg0 : tensor<?x?xf32>) -> tensor<*xf32> {
-  // CHECK-NEXT: [[SQUARE:%.+]] = "onnx.PadConstatValuePad"(%arg0) {constant_value = 0.000000e+00 : f32, mode = "constant", pads = [0, 2, 0, 0]} : (tensor<?x?xf32>) -> tensor<*xf32>
+  // CHECK-NEXT: [[SQUARE:%.+]] = "onnx.PadConstantValuePad"(%arg0) {constant_value = 0.000000e+00 : f32, mode = "constant", pads = [0, 2, 0, 0]} : (tensor<?x?xf32>) -> tensor<*xf32>
   %0 ="onnx.Constant"() {value=[0, 2, 0, 0]} : ()-> tensor<?xi64>
   %2 = "onnx.PadConstantValue"(%arg0, %0) {constant_value=0. : f32, mode = "constant"} : (tensor<?x?xf32>, tensor<?xi64>)-> tensor<*xf32>
   "std.return"(%2) : (tensor<*xf32>) -> ()
@@ -97,7 +97,7 @@ func @test_constant_pad(%arg0 : tensor<?x?xf32>) -> tensor<*xf32> {
 func @test_conv_split(%arg0 : tensor<1x9x32x64xf32>, %arg1 : tensor<5x9x6x7xf32>) -> tensor<*xf32> {
   %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i64, pads = [2, 3, 4, 5]} : (tensor<1x9x32x64xf32>, tensor<5x9x6x7xf32>) -> tensor<*xf32>
   "std.return"(%0) : (tensor<*xf32>) -> ()
-  // CHECK-NEXT: %0 = "onnx.PadConstatValuePad"(%arg0) {constant_value = 0.000000e+00 : f32, mode = "constant", pads = [0, 0, 2, 3, 0, 0, 4, 5]} : (tensor<1x9x32x64xf32>) -> tensor<1x9x38x72xf32>
+  // CHECK-NEXT: %0 = "onnx.PadConstantValuePad"(%arg0) {constant_value = 0.000000e+00 : f32, mode = "constant", pads = [0, 0, 2, 3, 0, 0, 4, 5]} : (tensor<1x9x32x64xf32>) -> tensor<1x9x38x72xf32>
   // CHECK-NEXT: %1 = "onnx.ConvNoBias"(%0, %arg1) {auto_pad = "NOTSET", group = 1 : i64, pads = [0, 0, 0, 0]} : (tensor<1x9x38x72xf32>, tensor<5x9x6x7xf32>) -> tensor<*xf32>
   // CHECK-NEXT: return %1 : tensor<*xf32>
 }
diff --git a/test/mlir/onnx/onnx_shape_inference.mlir b/test/mlir/onnx/onnx_shape_inference.mlir
index fbd718c..f3c82eb 100644
--- a/test/mlir/onnx/onnx_shape_inference.mlir
+++ b/test/mlir/onnx/onnx_shape_inference.mlir
@@ -269,8 +269,27 @@ func @test_conv_no_bias_10(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7
 func @test_conv_no_bias_11(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7xf32>) -> tensor<*xf32> {
   %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "SAME_UPPER", group = 1 : i64, dilations = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<*xf32>
   "std.return"(%0) : (tensor<*xf32>) -> ()
-
+}
 // CHECK-LABEL: test_conv_no_bias_11
 // CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "SAME_UPPER", dilations = [2, 3], group = 1 : i64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<1x5x32x64xf32>
 // CHECK: return [[RES_ATTR]] : tensor<1x5x32x64xf32>
+
+
+/// Test PadConstantValuePad_1
+func @test_PadConstantValuePad_1(%arg0 : tensor<16x13xf32>) -> tensor<*xf32> {
+  %0 = "onnx.PadConstantValuePad"(%arg0) {constant_value = 0.000000e+00 : f32, mode = "constant", pads = [0, 2, 0, 0]} : (tensor<16x13xf32>) -> tensor<*xf32>
+  "std.return"(%0) : (tensor<*xf32>) -> ()
 }
+// CHECK-LABEL: test_PadConstantValuePad_1
+// CHECK: [[RES:%.+]] = "onnx.PadConstantValuePad"(%arg0) {constant_value = 0.000000e+00 : f32, mode = "constant", pads = [0, 2, 0, 0]} : (tensor<16x13xf32>) -> tensor<18x13xf32>
+// CHECK: return [[RES]] : tensor<18x13xf32>
+
+/// Test PadConstantPad_1
+func @test_PadConstantPad_1(%arg0 : tensor<16x13xf32>, %arg1 : tensor<*xf32>) -> tensor<*xf32> {
+  %0 = "onnx.PadConstantPad"(%arg0, %arg1) {mode = "constant", pads = [0, 2, 3, 1]} : (tensor<16x13xf32>, tensor<*xf32>) -> tensor<*xf32>
+  "std.return"(%0) : (tensor<*xf32>) -> ()
+}
+// CHECK-LABEL: test_PadConstantPad_1
+// CHECK: [[RES:%.+]] = "onnx.PadConstantPad"(%arg0, %arg1) {mode = "constant", pads = [0, 2, 3, 1]} : (tensor<16x13xf32>, tensor<*xf32>) -> tensor<18x17xf32>
+// CHECK: return [[RES]] : tensor<18x17xf32>
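
Note: below is a minimal standalone sketch of the shape arithmetic that
padShapeInferenceHelper implements, for reference only. The names padShape and
main are illustrative and not part of the patch; the real helper operates on
MLIR RankedTensorType and ArrayAttr rather than plain vectors.

#include <cstdint>
#include <iostream>
#include <vector>

// pads holds one (before, after) pair per axis: [b0, e0, b1, e1, ...].
// Returns an empty vector when any pad is negative (shape not inferable),
// mirroring the helper's (Type)NULL result.
std::vector<int64_t> padShape(const std::vector<int64_t> &shape,
                              const std::vector<int64_t> &pads) {
  std::vector<int64_t> out(shape);
  for (size_t i = 0; i < shape.size(); ++i) {
    int64_t before = pads[2 * i], after = pads[2 * i + 1];
    if (before < 0 || after < 0)
      return {}; // pads must be non-negative constants
    out[i] += before + after;
  }
  return out;
}

int main() {
  // Mirrors test_PadConstantPad_1: 16x13 with pads = [0, 2, 3, 1] -> 18x17.
  for (int64_t d : padShape({16, 13}, {0, 2, 3, 1}))
    std::cout << d << ' '; // prints: 18 17
  std::cout << '\n';
}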