Shape inference for ONNXAveragePool (#21)

* Shape inference for ONNXAveragePool

* Edit comments and put helper functions at the top of the file

* Fix template
Tung D. Le 2020-03-13 22:59:16 +09:00 committed by GitHub
parent a65820940c
commit 362491553c
5 changed files with 343 additions and 187 deletions


@@ -47,7 +47,7 @@ OpsWithShapeInference = [
 'LeakyRelu', 'Elu', 'Selu', 'HardSigmoid', 'Reshape', 'Reciprocal',
 'Identity', 'Cos', 'Log', 'Transpose', 'Softmax', 'ReduceMax', 'ReduceMin',
 'ReduceProd', 'ReduceSum', 'Softplus', 'Softsign', 'Sqrt', 'Unsqueeze',
-'Sign', 'Constant'
+'Sign', 'Constant', 'AveragePool'
 ]
 # Operations supporting canonicalization.


@@ -89,6 +89,211 @@ RankedTensorType getReductionOutputType(
return RankedTensorType::get(dims, operandTy.getElementType());
}
//===----------------------------------------------------------------------===//
// Support function that computes default values for dilations.
//
template <class T>
static void processConvDilationParam(T *op, Optional<ArrayAttr> kernelShape) {
auto builder = mlir::Builder(op->getContext());
auto kernelRank = ArrayAttrSize(kernelShape);
auto dilationsOpt = op->dilations();
if (dilationsOpt.hasValue()) {
if (ArrayAttrSize(dilationsOpt) != kernelRank)
op->emitError("dialation rank is not the same as the spatial rank");
// Test values to be greater than 0.
for (int i = 0; i < kernelRank; ++i) {
if (ArrayAttrIntVal(dilationsOpt, i) < 1)
op->emitError("dialation value must be nonzero positive");
}
} else {
// Default dilatation is needed, all dimensions init with 1.
SmallVector<int64_t, 4> defaultVals(kernelRank, 1);
// Convert to ArrayRef, then build attribute, then store attribute.
ArrayRef<int64_t> defaultRefs(defaultVals);
op->dilationsAttr(builder.getI64ArrayAttr(defaultRefs));
}
}
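// For example (illustrative): with kernel_shape = [3, 3] and no dilations
// attribute, this stores dilations = [1, 1]; a dilations attribute of the
// wrong rank, or with any value < 1, is reported through emitError.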
//===----------------------------------------------------------------------===//
// Support function that computes default values for strides.
//
template <class T>
static void processConvStrideParam(T *op, Optional<ArrayAttr> kernelShape) {
auto builder = mlir::Builder(op->getContext());
auto kernelRank = ArrayAttrSize(kernelShape);
auto stridesOpt = op->strides();
if (stridesOpt.hasValue()) {
if (ArrayAttrSize(stridesOpt) != kernelRank)
op->emitError("strides rank is not the same as the spatial rank");
// Check values to be greater than 0.
for (int i = 0; i < kernelRank; ++i) {
if (ArrayAttrIntVal(stridesOpt, i) < 1)
op->emitError("strides value must be nonzero positive");
}
} else {
// Default stride is needed, all dimensions init with 1.
SmallVector<int64_t, 4> defaultVals(kernelRank, 1);
// Convert to ArrayRef, then build attribute, then store attribute.
ArrayRef<int64_t> defaultRefs(defaultVals);
op->stridesAttr(builder.getI64ArrayAttr(defaultRefs));
}
}
//===----------------------------------------------------------------------===//
// Support function that computes default values for pads.
//
template <class T>
static void processConvPadParam(T *op,
ArrayRef<int64_t> inputShape, Optional<ArrayAttr> kernelShape,
Optional<ArrayAttr> stridesOpt,
Optional<ArrayAttr> dilationsOpt = llvm::None) {
auto builder = mlir::Builder(op->getContext());
auto inputRank = inputShape.size();
auto kernelRank = ArrayAttrSize(kernelShape);
auto kernelOffset = inputRank - kernelRank;
// Try to find padding, getting auto_pad attribute first.
auto autoPad = op->auto_pad();
// Then investigate the various cases. Prefill pad values with zeros, the
// most common case.
SmallVector<int64_t, 4> actualPads(2 * kernelRank, 0);
bool updatedPad = false;
if (autoPad == "NOTSET") {
auto padsOpt = op->pads();
if (padsOpt.hasValue()) {
// Only case where pads are not updated. The pads attribute consists of two
// entries for each spatial axis.
if (ArrayAttrSize(padsOpt) != 2 * kernelRank)
op->emitError("pads rank is not twice the spatial rank");
// Check values, pads cannot be negative.
for (int i = 0; i < 2 * kernelRank; ++i) {
if (ArrayAttrIntVal(padsOpt, i) < 0)
op->emitError("pads value must be nonnegative");
}
} else {
// We have NOTSET with no pads; they are assumed to be all zero.
updatedPad = true;
}
} else if (autoPad == "SAME_UPPER" || autoPad == "SAME_LOWER") {
// Reload dilations and strides as they may have gotten default values.
updatedPad = true;
int64_t dilationVal = 1;
for (int i = 0; i < kernelRank; ++i) {
auto inputSize = inputShape[kernelOffset + i];
auto kernelSize = ArrayAttrIntVal(kernelShape, i);
if (dilationsOpt.hasValue())
dilationVal = ArrayAttrIntVal(dilationsOpt, i);
auto strideVal = ArrayAttrIntVal(stridesOpt, i);
// Output size is input size divided by stride. When stride is 1, then
// input and output are the same size, which is the usual case. When
// stride is greater than 1, take the ceil to be sure to have each input
// value used, as padding will be used to fill the gaps.
int64_t outputSize = ceil((1.0 * inputSize) / (1.0 * strideVal));
// The formula comes from ONNX MaxPool and can be explained as follows. The
// total pad is the difference between the number of values needed for the
// computation and the number of input values. The number of values needed
// is the effective size of the kernel plus the distance covered while
// jumping to the next kernel position: the number of jumps is
// (outputSize - 1), each of size strideVal. The effective kernel size is
// kernelSize plus the dilation holes: there are (kernelSize - 1) holes,
// each of size (dilationVal - 1), so the effective size is "kernelSize +
// (kernelSize - 1) * (dilationVal - 1)", which simplifies to
// "(kernelSize - 1) * dilationVal + 1".
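// Worked example (illustrative numbers, not from this commit): inputSize =
// 32, kernelSize = 3, strideVal = 2, dilationVal = 1 under SAME_UPPER gives
// outputSize = ceil(32 / 2) = 16 and sumOfPad = (16 - 1) * 2 + 3 - 32 = 1,
// so this axis gets pads [0, 1], the odd leftover going at the end.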
auto sumOfPad = (outputSize - 1) * strideVal +
((kernelSize - 1) * dilationVal + 1) - inputSize;
// Pad values are assumed equal on both sides, at half the total value.
actualPads[i] = actualPads[kernelRank + i] = sumOfPad / 2;
// But if the total pad value is odd, we add 1 to the beginning or the end,
// depending on the autoPad value.
if (sumOfPad % 2 != 0) {
if (autoPad == "SAME_UPPER") {
actualPads[kernelRank + i] += 1;
} else {
actualPads[i] += 1;
}
}
}
} else if (autoPad == "VALID") {
// No pad, default value was set to zero, we are all set.
updatedPad = true;
} else {
op->emitError("auto_pad of unknown / unsupported value");
}
// Set pads values in attributes, if needed.
if (updatedPad) {
ArrayRef<int64_t> defaultRefs(actualPads);
op->padsAttr(builder.getI64ArrayAttr(defaultRefs));
}
// In all cases now, the actual pad values are found in the pads attribute.
op->auto_padAttr(builder.getStringAttr("NOTSET"));
}
//===----------------------------------------------------------------------===//
// Support function that computes default values for dilations, strides, and
// pads.
template <class T>
static void processConvTypeParams(T *op, Value inputOperand) {
auto builder = mlir::Builder(op->getContext());
// 1) Get shape of input.
auto inputShape = inputOperand.getType().cast<RankedTensorType>().getShape();
auto inputRank = inputShape.size();
// 2) Get kernel_shape attribute.
auto kernelShape = op->kernel_shape();
// Dilation.
processConvDilationParam<T>(op, kernelShape);
auto dilationsOpt = op->dilations();
// Strides.
processConvStrideParam<T>(op, kernelShape);
auto stridesOpt = op->strides();
// Pads.
processConvPadParam<T>(op, inputShape, kernelShape, stridesOpt, dilationsOpt);
}
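// Example usage (as in ONNXMaxPoolSingleOutOp::inferShapes below): an op
// class exposing kernel_shape(), auto_pad(), pads(), strides(), and
// dilations() accessors can normalize all three attributes with a single
// call, processConvTypeParams<>(this, X()).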
//===----------------------------------------------------------------------===//
// Compute spatial dimensions given dilations, strides, pads, and ceil mode.
//
static void insertConvSpatialDim(SmallVector<int64_t, 4> *outputDims,
ArrayRef<int64_t> xShape, Optional<ArrayAttr> kernelShape,
Optional<ArrayAttr> padsOpt, Optional<ArrayAttr> stridesOpt,
Optional<ArrayAttr> dilationsOpt = llvm::None, bool ceilMode = false) {
auto xRank = xShape.size();
auto spatialRank = ArrayAttrSize(kernelShape);
auto spatialOffset = xRank - spatialRank;
int64_t dilationVal = 1;
for (int i = 0; i < spatialRank; ++i) {
auto inputSize = xShape[spatialOffset + i];
auto sumOfPads =
ArrayAttrIntVal(padsOpt, i) + ArrayAttrIntVal(padsOpt, spatialRank + i);
auto kernelSize = ArrayAttrIntVal(kernelShape, i);
if (dilationsOpt.hasValue())
dilationVal = ArrayAttrIntVal(dilationsOpt, i);
auto strideVal = ArrayAttrIntVal(stridesOpt, i);
// Number of useful values: input plus pads minus the effective size of the
// kernel (see the processConvPadParam comment for how this is derived).
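// Worked example (illustrative): inputSize = 32, sumOfPads = 1 + 1,
// kernelSize = 3, dilationVal = 1, strideVal = 2 gives numerator = 31;
// floor mode then yields floor(31 / 2) + 1 = 16, matching the strided
// AveragePool test below.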
double numerator =
inputSize + sumOfPads - ((kernelSize - 1) * dilationVal + 1);
// The number of useful values is then divided by the stride.
double denominator = strideVal;
int64_t res;
if (ceilMode) {
res = ceil(numerator / denominator) + 1;
} else {
res = floor(numerator / denominator) + 1;
}
outputDims->emplace_back(res);
}
}
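// Ceil/floor example (illustrative): with numerator = 29 and strideVal = 2,
// floor mode produces floor(29 / 2) + 1 = 15 spatial values while ceil mode
// produces ceil(29 / 2) + 1 = 16, matching the two strides_nonunifpad tests
// below.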
//===----------------------------------------------------------------------===//
// ONNXOpsDialect
//===----------------------------------------------------------------------===//
@@ -765,139 +970,6 @@ void ONNXReduceSumOp::inferShapes() {
getResult().setType(getReductionOutputType(operandTy, axes(), keepdims()));
}
//===----------------------------------------------------------------------===//
// Conv
// Support function that computes default values for dilations, strides, and
// pads.
template <class T>
static void processConvTypeParams(T *op, Value inputOperand) {
auto builder = mlir::Builder(op->getContext());
// 1) Get shape of input.
auto inputShape = inputOperand.getType().cast<RankedTensorType>().getShape();
auto inputRank = inputShape.size();
// 2) Get kernel sizes from kernel_shape attribute.
auto kernelShape = op->kernel_shape();
auto kernelRank = ArrayAttrSize(kernelShape);
auto kernelOffset = inputRank - kernelRank;
// Dilation.
auto dilationsOpt = op->dilations();
if (dilationsOpt.hasValue()) {
if (ArrayAttrSize(dilationsOpt) != kernelRank)
op->emitError("dilation rank is not the same as the spatial rank");
// Test values to be greater than 0.
for (int i = 0; i < kernelRank; ++i) {
if (ArrayAttrIntVal(dilationsOpt, i) < 1)
op->emitError("dilation value must be strictly positive");
}
} else {
// Default dilations are needed; initialize all dimensions to 1.
SmallVector<int64_t, 4> defaultVals(kernelRank, 1);
// Convert to ArrayRef, then build attribute, then store attribute.
ArrayRef<int64_t> defaultRefs(defaultVals);
op->dilationsAttr(builder.getI64ArrayAttr(defaultRefs));
}
// Strides.
auto stridesOpt = op->strides();
if (stridesOpt.hasValue()) {
if (ArrayAttrSize(stridesOpt) != kernelRank)
op->emitError("strides rank is not the same as the spatial rank");
// Check values to be greater than 0.
for (int i = 0; i < kernelRank; ++i) {
if (ArrayAttrIntVal(stridesOpt, i) < 1)
op->emitError("strides value must be nonzero positive");
}
} else {
// Default stride is needed, all dimensions init with 1.
SmallVector<int64_t, 4> defaultVals(kernelRank, 1);
// Convert to ArrayRef, then build attribute, then store attribute.
ArrayRef<int64_t> defaultRefs(defaultVals);
op->stridesAttr(builder.getI64ArrayAttr(defaultRefs));
}
// Now try to find padding, getting auto_pad attribute first.
auto autoPad = op->auto_pad();
// Then investigate the various cases. Prefill pad values with zeros, the
// most common case.
SmallVector<int64_t, 4> actualPads(2 * kernelRank, 0);
bool updatedPad = false;
if (autoPad == "NOTSET") {
auto padsOpt = op->pads();
if (padsOpt.hasValue()) {
// Only case where pads are not updated. The pads attribute consists of two
// entries for each spatial axis.
if (ArrayAttrSize(padsOpt) != 2 * kernelRank)
op->emitError("pads rank is not twice the spatial rank");
// Check values, pads cannot be negative.
for (int i = 0; i < 2 * kernelRank; ++i) {
if (ArrayAttrIntVal(padsOpt, i) < 0)
op->emitError("pads value must be nonnegative");
}
} else {
// We have NOTSET with no pads; they are assumed to be all zero.
updatedPad = true;
}
} else if (autoPad == "SAME_UPPER" || autoPad == "SAME_LOWER") {
// Reload dilations and strides as they may have gotten default values.
updatedPad = true;
dilationsOpt = op->dilations();
stridesOpt = op->strides();
for (int i = 0; i < kernelRank; ++i) {
auto inputSize = inputShape[kernelOffset + i];
auto kernelSize = ArrayAttrIntVal(kernelShape, i);
auto dilationVal = ArrayAttrIntVal(dilationsOpt, i);
auto strideVal = ArrayAttrIntVal(stridesOpt, i);
// Output size is input size divided by stride. When stride is 1, then
// input and output are the same size, which is the usual case. When
// stride is greater than 1, take the ceil to be sure to have each input
// value used, as padding will be used to fill the gaps.
int64_t outputSize = ceil((1.0 * inputSize) / (1.0 * strideVal));
// The formula comes from ONNX MaxPool and can be explained as follows. The
// total pad is the difference between the number of values needed for the
// computation and the number of input values. The number of values needed
// is the effective size of the kernel plus the distance covered while
// jumping to the next kernel position: the number of jumps is
// (outputSize - 1), each of size strideVal. The effective kernel size is
// kernelSize plus the dilation holes: there are (kernelSize - 1) holes,
// each of size (dilationVal - 1), so the effective size is "kernelSize +
// (kernelSize - 1) * (dilationVal - 1)", which simplifies to
// "(kernelSize - 1) * dilationVal + 1".
auto sumOfPad = (outputSize - 1) * strideVal +
((kernelSize - 1) * dilationVal + 1) - inputSize;
// Pad values are assumed equal on both sides, at half the total value.
actualPads[i] = actualPads[kernelRank + i] = sumOfPad / 2;
// But if the total pad value is odd, we add 1 to the beginning or the end,
// depending on the autoPad value.
if (sumOfPad % 2 != 0) {
if (autoPad == "SAME_UPPER") {
actualPads[kernelRank + i] += 1;
} else {
actualPads[i] += 1;
}
}
}
} else if (autoPad == "VALID") {
// No pad, default value was set to zero, we are all set.
updatedPad = true;
} else {
op->emitError("auto_pad of unknown / unsupported value");
}
// Set pads values in attributes, if needed.
if (updatedPad) {
ArrayRef<int64_t> defaultRefs(actualPads);
op->padsAttr(builder.getI64ArrayAttr(defaultRefs));
}
// In all cases now, the actual pad values are found in the pads attribute.
op->auto_padAttr(builder.getStringAttr("NOTSET"));
}
// Conv
// For this operation, we define the attributes once in the original Conv
@@ -981,23 +1053,54 @@ void ONNXConvNoBiasOp::inferShapes() {
 outputDims.emplace_back(xShape[0]);
 // Insert number of filters being applied (number of output channels).
 outputDims.emplace_back(weightShape[0]);
+// Compute and insert spatial dims.
+insertConvSpatialDim(
+&outputDims, xShape, kernelShape, padsOpt, stridesOpt, dilationsOpt);
+getResult().setType(RankedTensorType::get(outputDims, xTy.getElementType()));
+}
+//===----------------------------------------------------------------------===//
+// AveragePool
+// Inferring the output shape also updates the attributes:
+// - auto_pad is set to NOTSET;
+// - strides: set to 1 if not defined by the user;
+// - pads: set to the proper value, 0 if not defined by the user.
+void ONNXAveragePoolOp::inferShapes() {
+// Cannot infer shape if no shape exists.
+if (!X().getType().isa<RankedTensorType>())
+return;
+// Get shape of input.
+auto xTy = X().getType().cast<RankedTensorType>();
+auto xShape = xTy.getShape();
+// Kernel shape.
+auto kernelShape = kernel_shape();
+if (!kernelShape)
+emitError(
+"kernel_shape is a mandatory attribute for which there is no default");
+// Ceil mode.
+auto ceilMode = ceil_mode().getSExtValue();
+// Process strides and pads.
+processConvStrideParam<ONNXAveragePoolOp>(this, kernelShape);
+auto stridesOpt = strides();
+processConvPadParam<ONNXAveragePoolOp>(
+this, xShape, kernelShape, stridesOpt, llvm::None);
+auto padsOpt = pads();
+SmallVector<int64_t, 4> outputDims;
+// Insert batch size.
+outputDims.emplace_back(xShape[0]);
+outputDims.emplace_back(xShape[1]);
+// Compute and insert spatial dims.
+insertConvSpatialDim(&outputDims, xShape, kernelShape, padsOpt, stridesOpt,
+llvm::None, ceilMode);
-// Then the spatial dimensions of the output are computed.
-for (int i = 0; i < spatialRank; ++i) {
-auto inputSize = xShape[spatialOffset + i];
-auto sumOfPads =
-ArrayAttrIntVal(padsOpt, i) + ArrayAttrIntVal(padsOpt, spatialRank + i);
-auto kernelSize = ArrayAttrIntVal(kernelShape, i);
-auto dilationVal = ArrayAttrIntVal(dilationsOpt, i);
-auto strideVal = ArrayAttrIntVal(stridesOpt, i);
-// Number of useful values: input plus pad - effective size of kernel (see
-// processConvTypeParams comments to see how this value is derived).
-double numerator =
-inputSize + sumOfPads - ((kernelSize - 1) * dilationVal + 1);
-// Useful number is divided by the strides.
-double denominator = strideVal;
-outputDims.emplace_back(floor(numerator / denominator) + 1);
-}
 getResult().setType(RankedTensorType::get(outputDims, xTy.getElementType()));
 }
@@ -1013,59 +1116,40 @@ void ONNXMaxPoolSingleOutOp::inferShapes() {
 // Cannot infer shape if no shape exists.
 if (!X().getType().isa<RankedTensorType>())
 return;
-auto builder = mlir::Builder(this->getContext());
-// 1) Get shape of input.
+// Get shape of input.
 auto xTy = X().getType().cast<RankedTensorType>();
 auto xShape = xTy.getShape();
-auto xRank = xShape.size();
-// 2) Analyse parameters. Get kernel sizes from kernel_shape attribute.
+// Kernel shape.
 auto kernelShape = kernel_shape();
 if (!kernelShape)
 emitError(
 "kernel_shape is a mandatory attribute for which there is no default");
-auto kernelRank = ArrayAttrSize(kernelShape);
-if (kernelRank > xRank)
-emitError("kernel_shape spatial dimension is too large");
-auto kernelOffset = xRank - kernelRank;
-// Ceil mode.
-auto ceilMode = ceil_mode().getSExtValue();
 // Storage order.
 auto storageOrder = storage_order().getSExtValue();
 if (storageOrder != 0)
 emitError("column major storage order not supported at this time");
-processConvTypeParams<ONNXMaxPoolSingleOutOp>(this, X());
-// Initialize output shape.
-SmallVector<int64_t, 4> yShape(xShape.begin(), xShape.end());
+// Process strides, dilations, and pads.
+processConvTypeParams<>(this, X());
 auto dilationsOpt = dilations();
 auto stridesOpt = strides();
 auto padsOpt = pads();
-// Process for all kernel dimensions.
-for (int i = 0; i < kernelRank; ++i) {
-auto inputSize = xShape[kernelOffset + i];
-auto sumOfPads =
-ArrayAttrIntVal(padsOpt, i) + ArrayAttrIntVal(padsOpt, kernelRank + i);
-auto kernelSize = ArrayAttrIntVal(kernelShape, i);
-auto dilationVal = ArrayAttrIntVal(dilationsOpt, i);
-auto strideVal = ArrayAttrIntVal(stridesOpt, i);
-double numerator =
-inputSize + sumOfPads - ((kernelSize - 1) * dilationVal + 1);
-double denominator = strideVal;
-int64_t res;
-if (ceilMode) {
-res = ceil(numerator / denominator) + 1;
-} else {
-res = floor(numerator / denominator) + 1;
-}
-yShape[kernelOffset + i] = res;
-}
-auto arrayTy = X().getType().cast<RankedTensorType>();
-getResult().setType(RankedTensorType::get(yShape, arrayTy.getElementType()));
+// Ceil mode.
+auto ceilMode = ceil_mode().getSExtValue();
+SmallVector<int64_t, 4> outputDims;
+// Insert batch size.
+outputDims.emplace_back(xShape[0]);
+outputDims.emplace_back(xShape[1]);
+// Compute and insert spatial dims.
+insertConvSpatialDim(&outputDims, xShape, kernelShape, padsOpt, stridesOpt,
+dilationsOpt, ceilMode);
+getResult().setType(RankedTensorType::get(outputDims, xTy.getElementType()));
 }
//===----------------------------------------------------------------------===//


@@ -148,7 +148,7 @@ def ONNXAtanhOp:ONNX_Op<"Atanh",
 }
 def ONNXAveragePoolOp:ONNX_Op<"AveragePool",
-[NoSideEffect]> {
+[NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
 let summary = "ONNX AveragePool operation";
 let description = [{
 "AveragePool consumes an input tensor X and applies average pooling across"


@@ -103,6 +103,7 @@ public:
 op->getName().getStringRef() != "onnx.Xor" &&
 op->getName().getStringRef() != "onnx.Sum" &&
 op->getName().getStringRef() != "onnx.Max" &&
+op->getName().getStringRef() != "onnx.AveragePool" &&
 op->getName().getStringRef() != "onnx.MaxPoolSingleOut" &&
 op->getName().getStringRef() != "onnx.Min" &&
 op->getName().getStringRef() != "onnx.Identity" &&


@@ -331,3 +331,74 @@ func @test_constant_sparse_2d_value() -> tensor<*xf32> {
// CHECK-LABEL: test_constant_sparse_2d_value
// CHECK: [[RES:%.+]] = "onnx.Constant"() {sparse_value = sparse<{{\[}}[0, 1{{\]}}], 2.000000e+00> : tensor<3x2xf32>} : () -> tensor<3x2xf32>
// CHECK: return [[RES]] : tensor<3x2xf32>
/// Test the default behavior of Average Pool with no padding (pads are set but should be ignored)
func @test_default_averagepool(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "VALID", ceil_mode = 0, kernel_shape = [3,3], pads = [1, 1, 1, 1] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
// CHECK-LABEL: test_default_averagepool
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [3, 3], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x30x30xf32>
// CHECK: return [[RES]] : tensor<5x5x30x30xf32>
}
/// Test the default behavior of Average Pool with no padding (pads are not set, defaulting to zero)
func @test_default_averagepool_defpad(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0, kernel_shape = [3,3]} : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
// CHECK-LABEL: test_default_averagepool_defpad
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [3, 3], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x30x30xf32>
// CHECK: return [[RES]] : tensor<5x5x30x30xf32>
}
/// Test the default behavior of Average Pool with uniform padding
func @test_default_averagepool_pad(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0, kernel_shape = [3,3], pads = [1, 1, 1, 1] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
// CHECK-LABEL: test_default_averagepool_pad
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>
// CHECK: return [[RES]] : tensor<5x5x32x32xf32>
}
/// Test the default behavior of Average Pool with non-uniform padding
func @test_default_averagepool_pad_nonunif(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0, kernel_shape = [5,3], pads = [2, 1, 1, 0] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
// CHECK-LABEL: test_default_averagepool_pad_nonunif
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [5, 3], pads = [2, 1, 1, 0], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x31x31xf32>
// CHECK: return [[RES]] : tensor<5x5x31x31xf32>
}
/// Test the default behavior of Average Pool with strides
func @test_default_averagepool_strides(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0, kernel_shape = [3,3], pads = [1, 1, 1, 1], strides = [2, 2] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
// CHECK-LABEL: test_default_averagepool_strides
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [2, 2]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x16x16xf32>
// CHECK: return [[RES]] : tensor<5x5x16x16xf32>
}
/// Test the default behavior of Average Pool with strides and non-uniform padding
func @test_default_averagepool_strides_nonunifpad(%arg0 : tensor<5x5x30x32xf32>) -> tensor<*xf32> {
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0, kernel_shape = [2,2], pads = [1, 0, 0, 0], strides = [2, 2] } : (tensor<5x5x30x32xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
// CHECK-LABEL: test_default_averagepool_strides_nonunifpad
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [2, 2], pads = [1, 0, 0, 0], strides = [2, 2]} : (tensor<5x5x30x32xf32>) -> tensor<5x5x15x16xf32>
// CHECK: return [[RES]] : tensor<5x5x15x16xf32>
}
/// Test the default behavior of Average Pool with strides, non-uniform padding, and ceil mode
func @test_default_averagepool_strides_nonunifpad_ceil(%arg0 : tensor<5x5x30x32xf32>) -> tensor<*xf32> {
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 1, kernel_shape = [2,2], pads = [1, 0, 0, 0], strides = [2, 2] } : (tensor<5x5x30x32xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
// CHECK-LABEL: test_default_averagepool_strides_nonunifpad_ceil
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 1 : i64, kernel_shape = [2, 2], pads = [1, 0, 0, 0], strides = [2, 2]} : (tensor<5x5x30x32xf32>) -> tensor<5x5x16x16xf32>
// CHECK: return [[RES]] : tensor<5x5x16x16xf32>
}
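/// Hypothetical SAME_UPPER case (a sketch, not part of this commit): per the
/// processConvPadParam logic above, the odd total pad of 1 per axis is placed
/// at the end, and auto_pad is rewritten to NOTSET.
func @test_default_averagepool_upper(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "SAME_UPPER", ceil_mode = 0, kernel_shape = [3,3], strides = [2, 2] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
// CHECK-LABEL: test_default_averagepool_upper
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [3, 3], pads = [0, 0, 1, 1], strides = [2, 2]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x16x16xf32>
// CHECK: return [[RES]] : tensor<5x5x16x16xf32>
}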