Using attribute setters for maxpool (#105)

* using attribute setters for maxpool

* fixed typos, added handling of storage order, simplified code
Alexandre Eichenberger 2020-02-25 14:33:48 -05:00 committed by GitHub
parent e02aa87748
commit 3b1c29c078
2 changed files with 104 additions and 98 deletions
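
The gist of the change: instead of materializing defaults for the optional dilations, strides, and pads attributes into local SmallVectors on every shape inference, the op now writes the defaults back onto itself through its generated attribute setters (dilationsAttr, stridesAttr, padsAttr, auto_padAttr) and re-reads them, so downstream passes see fully normalized attributes. A condensed sketch of the pattern, with names taken from the diff below:

    // If the optional attribute is absent, build the default once, store it
    // on the op via the generated setter, then re-read the now-present value.
    auto dilationsOpt = dilations();
    if (!dilationsOpt.hasValue()) {
      SmallVector<int64_t, 4> defaultVals(kernelRank, 1); // default dilation: 1
      auto defaultAttr = builder.getI64ArrayAttr(defaultVals);
      dilationsAttr(defaultAttr); // persist the default on the op
      dilationsOpt = dilations(); // guaranteed to hold a value now
    }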


@@ -24,12 +24,29 @@
 using namespace mlir;
 using namespace mlir::OpTrait::util;
 
+//===----------------------------------------------------------------------===//
+// ONNX Helper functions
+//===----------------------------------------------------------------------===//
+
+static size_t ArrayAttrSize(ArrayAttr a) { return a.size(); }
+
+static size_t ArrayAttrSize(Optional<ArrayAttr> a) {
+  return a.getValue().size();
+}
+
+static int64_t ArrayAttrIntVal(ArrayAttr a, int i) {
+  return (a.getValue()[i]).cast<IntegerAttr>().getInt();
+}
+
+static int64_t ArrayAttrIntVal(Optional<ArrayAttr> a, int i) {
+  return (a.getValue().getValue()[i]).cast<IntegerAttr>().getInt();
+}
+
 //===----------------------------------------------------------------------===//
 // Get reduction type
 //===----------------------------------------------------------------------===//
-RankedTensorType getReductionOutputType(RankedTensorType operandTy,
-                                        Optional<ArrayAttr> axesAttrs,
-                                        APInt keepdims) {
+RankedTensorType getReductionOutputType(
+    RankedTensorType operandTy, Optional<ArrayAttr> axesAttrs, APInt keepdims) {
   int64_t rank = operandTy.getRank();
   SmallVector<int64_t, 4> axes;
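
The two Optional<ArrayAttr> overloads exist so call sites can pass the generated accessor results directly; they collapse the old "opt -> attr -> array" unwrapping chains into a single call. For example, in the MaxPool code below the element read becomes:

    // Before: manual unwrapping of an element of an Optional<ArrayAttr>.
    int64_t d =
        (dilationsOpt.getValue().getValue()[i]).cast<IntegerAttr>().getInt();
    // After: the overload does the unwrapping.
    int64_t d = ArrayAttrIntVal(dilationsOpt, i);

Note that both overloads call getValue() unconditionally, so callers must ensure the Optional is populated first; the MaxPool code below guarantees this by storing defaults on the op before re-reading.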
@@ -87,8 +104,8 @@ ONNXOpsDialect::ONNXOpsDialect(mlir::MLIRContext *ctx)
 }
 
 void ONNXEntryPointOp::build(mlir::Builder *builder,
-                             mlir::OperationState &state, mlir::FuncOp function,
-                             int numInputs, int numOutputs) {
+    mlir::OperationState &state, mlir::FuncOp function, int numInputs,
+    int numOutputs) {
   state.addAttribute(ONNXEntryPointOp::getEntryPointFuncAttrName(),
                      builder->getSymbolRefAttr(function));
   state.addAttribute(ONNXEntryPointOp::getNumInputsAttrName(),
@@ -98,8 +115,7 @@ void ONNXEntryPointOp::build(mlir::Builder *builder,
 }
 
 ONNXEntryPointOp ONNXEntryPointOp::create(mlir::Location location,
-                                          mlir::FuncOp &func, int numInputs,
-                                          int numOutputs) {
+    mlir::FuncOp &func, int numInputs, int numOutputs) {
   mlir::OperationState state(location, "onnx.EntryPoint");
   Builder builder(location->getContext());
   mlir::ONNXEntryPointOp::build(&builder, state, func, numInputs, numOutputs);
@@ -885,111 +901,103 @@ void ONNXConvNoBiasOp::inferShapes() {
 
 //===----------------------------------------------------------------------===//
 // MaxPoolSingleOut
+// Infer shape attributes output:
+//   -  auto_pad set to NOTSET;
+//   -  dilations, strides: set to 1 if not defined by user;
+//   -  pads: set to proper value, 0 if not defined by user.
 
 void ONNXMaxPoolSingleOutOp::inferShapes() {
   // Cannot infer shape if no shape exists.
   if (!X().getType().isa<RankedTensorType>())
     return;
 
-  // 1) get shape of input
+  auto builder = mlir::Builder(this->getContext());
+
+  // 1) Get shape of input.
   auto xTy = X().getType().cast<RankedTensorType>();
   auto xShape = xTy.getShape();
   auto xRank = xShape.size();
 
-  // 2) analyse parameters
-  // get kernel sizes from kernel_shape attribute
+  // 2) Analyse parameters. Get kernel sizes from kernel_shape attribute.
   auto kernelShape = kernel_shape();
   if (!kernelShape)
     emitError(
-        "kernel_shape is a mandatory attribute for which there is no default.");
-  auto kernelShapeArray = kernelShape.getValue();
-  auto kernelRank = kernelShape.size();
+        "kernel_shape is a mandatory attribute for which there is no default");
+  auto kernelRank = ArrayAttrSize(kernelShape);
   if (kernelRank > xRank)
-    emitError("kernel_shape spatial dimension is too large.");
+    emitError("kernel_shape spatial dimension is too large");
   auto kernelOffset = xRank - kernelRank;
 
-  // ceil mode
+  // Ceil mode.
   auto ceilMode = ceil_mode().getSExtValue();
 
-  // dilatation
-  SmallVector<int64_t, 4> actualDilations;
+  // Dilatation.
   auto dilationsOpt = dilations();
   if (dilationsOpt.hasValue()) {
-    auto dilationsArray =
-        dilationsOpt.getValue().getValue(); // opt -> attr -> array
-    if (dilationsArray.size() != kernelRank)
-      emitError("dialation rank is not the same as the spatial rank.");
-    // fill in the actual values
+    if (ArrayAttrSize(dilationsOpt) != kernelRank)
+      emitError("dialation rank is not the same as the spatial rank");
+    // Test values.
     for (int i = 0; i < kernelRank; ++i) {
-      int64_t d = (dilationsArray[i]).cast<IntegerAttr>().getInt();
-      if (d < 1)
-        emitError("dialation value must be nonzero positive.");
-      actualDilations.emplace_back(d);
+      if (ArrayAttrIntVal(dilationsOpt, i) < 1)
+        emitError("dialation value must be nonzero positive");
     }
   } else {
-    for (int i = 0; i < kernelRank; ++i) {
-      actualDilations.emplace_back(1);
-    }
+    // Default dilatation is needed.
+    SmallVector<int64_t, 4> defaultVals(kernelRank, 1);
+    // Convert to ArrayRef, then build attribute, then store attribute.
+    ArrayRef<int64_t> defaultRefs(defaultVals);
+    auto defaultAttr = builder.getI64ArrayAttr(defaultRefs);
+    dilationsAttr(defaultAttr);
+    dilationsOpt = dilations();
   }
 
-  // storage order
+  // Storage order.
+  auto storageOrder = storage_order().getSExtValue();
+  if (storageOrder != 0)
+    emitError("column major storage order not supported at this time");
 
-  // strides
-  SmallVector<int64_t, 4> actualStrides;
+  // Strides.
   auto stridesOpt = strides();
   if (stridesOpt.hasValue()) {
-    auto stridesArray = stridesOpt.getValue().getValue();
-    if (stridesArray.size() != kernelRank)
-      emitError("strides rank is not the same as the spatial rank.");
-    // fill in the actual values
+    if (ArrayAttrSize(stridesOpt) != kernelRank)
+      emitError("strides rank is not the same as the spatial rank");
+    // Check values.
     for (int i = 0; i < kernelRank; ++i) {
-      int64_t s = (stridesArray[i]).cast<IntegerAttr>().getInt();
-      if (s < 1)
-        emitError("strides value must be nonzero positive.");
-      actualStrides.emplace_back(s);
+      if (ArrayAttrIntVal(stridesOpt, i) < 1)
+        emitError("strides value must be nonzero positive");
     }
   } else {
-    for (int i = 0; i < kernelRank; ++i) {
-      actualStrides.emplace_back(1);
-    }
+    SmallVector<int64_t, 4> defaultVals(kernelRank, 1);
+    // Convert to ArrayRef, then build attribute, then store attribute.
+    ArrayRef<int64_t> defaultRefs(defaultVals);
+    auto defaultAttr = builder.getI64ArrayAttr(defaultRefs);
+    stridesAttr(defaultAttr);
+    stridesOpt = strides();
   }
 
-  // now try to find padding, getting auto_pad attribute first
+  // Now try to find padding, getting auto_pad attribute first.
   auto autoPad = auto_pad();
-  // and then investigate the various different cases
-  SmallVector<int64_t, 4> actualPads;
-  auto defaultPads = false;
+  // And then investigate the various different cases.
+  SmallVector<int64_t, 4> actualPads(2 * kernelRank, 0);
   if (autoPad == "NOTSET") {
     auto padsOpt = pads();
     if (padsOpt.hasValue()) {
-      auto padsArray = padsOpt.getValue().getValue();
-      // pads consists of two entries for each spatial axis.
-      if (padsArray.size() != 2 * kernelRank)
-        emitError("pads rank is not twice the spatial rank.");
-      // fill in the actual values
+      // Pads consists of two entries for each spatial axis.
+      if (ArrayAttrSize(padsOpt) != 2 * kernelRank)
+        emitError("pads rank is not twice the spatial rank");
+      // Check values.
       for (int i = 0; i < 2 * kernelRank; ++i) {
-        int64_t p = (padsArray[i]).cast<IntegerAttr>().getInt();
+        int64_t p = ArrayAttrIntVal(padsOpt, i);
         if (p < 0)
-          emitError("pads value must be nonnegative.");
-        actualPads.emplace_back(p);
+          emitError("pads value must be nonnegative");
+        actualPads[i] = p;
       }
-    } else {
-      // pads are not defined, default to value 0
-      defaultPads = true;
     }
-  } else if (autoPad == "VALID") {
-    defaultPads = true;
   } else if (autoPad == "SAME_UPPER" || autoPad == "SAME_LOWER") {
-    // init pad with zero
-    for (int i = 0; i < 2 * kernelRank; ++i) {
-      actualPads.emplace_back(0);
-    }
     for (int i = 0; i < kernelRank; ++i) {
       auto inputSpatialShape = xShape[kernelOffset + i];
-      auto kernelSpatialShape =
-          (kernelShapeArray[i]).cast<IntegerAttr>().getInt();
-      auto dilations = actualDilations[i];
-      auto strideSpatialShape = actualStrides[i];
+      auto kernelSpatialShape = ArrayAttrIntVal(kernelShape, i);
+      auto dilations = ArrayAttrIntVal(dilationsOpt, i);
+      auto strideSpatialShape = ArrayAttrIntVal(stridesOpt, i);
       int64_t outputSpatialShape =
           ceil((1.0 * inputSpatialShape) / (1.0 * strideSpatialShape));
       auto sumOfPad = (outputSpatialShape - 1) * strideSpatialShape +
@@ -1004,29 +1012,27 @@ void ONNXMaxPoolSingleOutOp::inferShapes() {
         }
       }
     }
-  } else {
+  } else if (autoPad != "VALID") {
     emitError("auto_pad of unknown / unsupported value.");
   }
 
-  // handle case where default pad values must be used
-  if (defaultPads) {
-    for (int i = 0; i < 2 * kernelRank; ++i) {
-      actualPads.emplace_back(0);
-    }
-  }
+  // Set pads values in attributes.
+  {
+    ArrayRef<int64_t> defaultRefs(actualPads);
+    auto defaultAttr = builder.getI64ArrayAttr(defaultRefs);
+    padsAttr(defaultAttr);
+    auto defaultAutoPadAttr = builder.getStringAttr("NOTSET");
+    auto_padAttr(defaultAutoPadAttr);
+  }
 
-  // initialize output shape
+  // Initialize output shape.
   SmallVector<int64_t, 4> yShape(xShape.begin(), xShape.end());
-  // for all kernel dimensions
+  // Process for all kernel dimensions.
   for (int i = 0; i < kernelRank; ++i) {
     auto inputSpatialShape = xShape[kernelOffset + i];
     auto padShape = actualPads[i] + actualPads[kernelRank + i];
-    auto kernelSpatialShape =
-        (kernelShapeArray[i]).cast<IntegerAttr>().getInt();
-    auto dilations = actualDilations[i];
-    auto strideSpatialShape = actualStrides[i];
+    auto kernelSpatialShape = ArrayAttrIntVal(kernelShape, i);
+    auto dilations = ArrayAttrIntVal(dilationsOpt, i);
+    auto strideSpatialShape = ArrayAttrIntVal(stridesOpt, i);
     // output_spatial_shape[i] = ceil( (input_spatial_shape[i] + pad_shape[i] -
     //   ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) /
     //   strides_spatial_shape[i] + 1)
     double numerator = inputSpatialShape + padShape -
                        ((kernelSpatialShape - 1) * dilations + 1);
     double denominator = strideSpatialShape;
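
A worked instance of this output-shape formula, using the first test below (5x5x32x32 input, kernel_shape = [3, 3], pads = [0, 0, 0, 0], dilations = [1, 1], strides = [1, 1], ceil_mode = 0):

    numerator   = 32 + 0 - ((3 - 1) * 1 + 1) = 29
    denominator = 1
    output      = floor(29 / 1 + 1) = 30

which matches the expected tensor<5x5x30x30xf32>. With ceil_mode = 1 the quotient is rounded up instead of down, which is why the strides_nonunifpad_ceil test yields a 16 (ceil of 15.5) where the plain strides_nonunifpad test yields 15.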


@@ -6,7 +6,7 @@ func @test_default_maxpoolsingleout(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_default_maxpoolsingleout
-// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "VALID", ceil_mode = 0 : i64, kernel_shape = [3, 3], pads = [1, 1, 1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x30x30xf32>
+// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [1, 1], kernel_shape = [3, 3], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x30x30xf32>
 // CHECK: return [[RES]] : tensor<5x5x30x30xf32>
@@ -16,7 +16,7 @@ func @test_default_maxpoolsingleout_defpad(%arg0 : tensor<5x5x32x32xf32>) -> ten
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_default_maxpoolsingleout_defpad
-// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [3, 3]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x30x30xf32>
+// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [1, 1], kernel_shape = [3, 3], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x30x30xf32>
 // CHECK: return [[RES]] : tensor<5x5x30x30xf32>
@@ -26,7 +26,7 @@ func @test_default_maxpoolsingleout_pad(%arg0 : tensor<5x5x32x32xf32>) -> tensor
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_default_maxpoolsingleout_pad
-// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [3, 3], pads = [1, 1, 1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>
+// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>
 // CHECK: return [[RES]] : tensor<5x5x32x32xf32>
@@ -36,7 +36,7 @@ func @test_default_maxpoolsingleout_pad_nonunif(%arg0 : tensor<5x5x32x32xf32>) -
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_default_maxpoolsingleout_pad_nonunif
-// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [5, 3], pads = [2, 1, 1, 0]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x31x31xf32>
+// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [1, 1], kernel_shape = [5, 3], pads = [2, 1, 1, 0], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x31x31xf32>
 // CHECK: return [[RES]] : tensor<5x5x31x31xf32>
@@ -46,7 +46,7 @@ func @test_default_maxpoolsingleout_strides(%arg0 : tensor<5x5x32x32xf32>) -> te
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_default_maxpoolsingleout_strides
-// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [2, 2]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x16x16xf32>
+// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [2, 2]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x16x16xf32>
 // CHECK: return [[RES]] : tensor<5x5x16x16xf32>
@@ -56,7 +56,7 @@ func @test_default_maxpoolsingleout_strides_nonunifpad(%arg0 : tensor<5x5x30x32x
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_default_maxpoolsingleout_strides_nonunifpad
-// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [2, 2], pads = [1, 0, 0, 0], strides = [2, 2]} : (tensor<5x5x30x32xf32>) -> tensor<5x5x15x16xf32>
+// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [1, 1], kernel_shape = [2, 2], pads = [1, 0, 0, 0], strides = [2, 2]} : (tensor<5x5x30x32xf32>) -> tensor<5x5x15x16xf32>
 // CHECK: return [[RES]] : tensor<5x5x15x16xf32>
@@ -66,7 +66,7 @@ func @test_default_maxpoolsingleout_strides_nonunifpad_ceil(%arg0 : tensor<5x5x3
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_default_maxpoolsingleout_strides_nonunifpad_ceil
-// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 1 : i64, kernel_shape = [2, 2], pads = [1, 0, 0, 0], strides = [2, 2]} : (tensor<5x5x30x32xf32>) -> tensor<5x5x16x16xf32>
+// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 1 : i64, dilations = [1, 1], kernel_shape = [2, 2], pads = [1, 0, 0, 0], strides = [2, 2]} : (tensor<5x5x30x32xf32>) -> tensor<5x5x16x16xf32>
 // CHECK: return [[RES]] : tensor<5x5x16x16xf32>
@@ -76,7 +76,7 @@ func @test_default_maxpoolsingleout_strides_dilatation(%arg0 : tensor<5x5x8x8xf3
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_default_maxpoolsingleout_strides_dilatation
-// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [2, 2], kernel_shape = [2, 2], strides = [3, 3]} : (tensor<5x5x8x8xf32>) -> tensor<5x5x2x2xf32>
+// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [2, 2], kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [3, 3]} : (tensor<5x5x8x8xf32>) -> tensor<5x5x2x2xf32>
 // CHECK: return [[RES]] : tensor<5x5x2x2xf32>
 
 /// Test the default behavior of Max Pool with dilatation
@@ -85,7 +85,7 @@ func @test_default_maxpoolsingleout_upper(%arg0 : tensor<5x5x16x13xf32>) -> tens
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_default_maxpoolsingleout_upper
-// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "SAME_UPPER", ceil_mode = 0 : i64, kernel_shape = [4, 4], strides = [4, 4]} : (tensor<5x5x16x13xf32>) -> tensor<5x5x4x4xf32>
+// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [1, 1], kernel_shape = [4, 4], pads = [0, 1, 0, 2], strides = [4, 4]} : (tensor<5x5x16x13xf32>) -> tensor<5x5x4x4xf32>
 // CHECK: return [[RES]] : tensor<5x5x4x4xf32>
@@ -95,6 +95,6 @@ func @test_default_maxpoolsingleout_lower(%arg0 : tensor<5x5x16x13xf32>) -> tens
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_default_maxpoolsingleout_lower
-// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "SAME_LOWER", ceil_mode = 0 : i64, kernel_shape = [4, 4], strides = [4, 4]} : (tensor<5x5x16x13xf32>) -> tensor<5x5x4x4xf32>
+// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [1, 1], kernel_shape = [4, 4], pads = [0, 2, 0, 1], strides = [4, 4]} : (tensor<5x5x16x13xf32>) -> tensor<5x5x4x4xf32>
 // CHECK: return [[RES]] : tensor<5x5x4x4xf32>
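
The SAME_UPPER / SAME_LOWER expectations above can be checked by hand. For the 16x13 input with kernel_shape = [4, 4] and strides = [4, 4]: output = ceil(13 / 4) = 4, so the total pad needed on the second spatial axis is (4 - 1) * 4 + ((4 - 1) * 1 + 1) - 13 = 3 (the first axis needs 0). The begin/end split of that total falls between the two hunks above, but the CHECK lines imply the usual ONNX convention: SAME_UPPER puts the extra padding at the end (pads = [0, 1, 0, 2]) and SAME_LOWER at the beginning (pads = [0, 2, 0, 1]); in both cases auto_pad is then rewritten to NOTSET with the computed pads stored on the op.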