Support LLVM as of 7dcd0042 (#309)
* Update to support LLVM as of 7dcd0042. Fixes for upstream changes to MLIR:
  - New pass registration method from https://reviews.llvm.org/D85622
  - Integer attributes are now C types when possible: https://reviews.llvm.org/D86739
  Signed-off-by: Nathaniel McVicar <namcvica@microsoft.com>
* Fix for checkclang
* Windows incremental build fix from @max-ku
* Remove MLIRShapeToSCF lib
* Missed a getSExtValue on a now-C-type attribute
* Rebuild prereq docker.
* Bump CircleCI cache version.
* Update hash for Windows build
  Signed-off-by: Nathaniel McVicar <namcvica@microsoft.com>
* Bump CircleCI cache version again.
* Rebuild prereq docker.
* Update README.md
* Update README.md
* Undo edits to ONNXOps.td.inc.
* Undo changes to ONNXOps.td.inc.
* Fix cast op TableGen.
* Tweak TableGen definition of Cast.
* Use explicitly signed integers as attributes.
* Move all signless attributes to explicitly signed attributes.
* Import ONNX int attribute as SI64 attribute.
* Make Conv.group attr use SI64 attr.
* Fix conv test.
* Fix DocCheck complaint.

Co-authored-by: Tian Jin <tjingrant@gmail.com>
parent: 4cc16aceb7
commit: 3491b90b1e
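Note (illustration only, not part of the diff below): the core change in this commit is that ONNX integer attributes are now declared and built as explicitly signed 64-bit (si64) attributes instead of signless ones. A minimal sketch of how such an attribute is constructed, mirroring the importer and shape-inference hunks further down; the helper name is hypothetical:

    // Sketch: build a signed si64 IntegerAttr instead of the old signless
    // builder.getI64IntegerAttr(value).
    #include "mlir/IR/Builders.h"
    using namespace mlir;

    Attribute makeSignedI64Attr(Builder &builder, int64_t value) {
      return IntegerAttr::get(builder.getIntegerType(64, /*isSigned=*/true),
          APInt(64, value, /*isSigned=*/true));
    }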
@@ -18,7 +18,7 @@ jobs:
       git submodule update --init --recursive
       # Use cached mlir installation if possible.
       - restore_cache:
-          key: V18-LLVM-PROJECT-{{ arch }}
+          key: V20-LLVM-PROJECT-{{ arch }}
       - run:
           name: Install MLIR
           command: |
@@ -29,7 +29,7 @@ jobs:
             source onnx-mlir/utils/install-mlir.sh
           fi
       - save_cache:
-          key: V18-LLVM-PROJECT-{{ arch }}
+          key: V20-LLVM-PROJECT-{{ arch }}
          paths:
            - llvm-project
       - run:
MLIR.cmake
@@ -170,7 +170,6 @@ find_mlir_lib(MLIRTransformUtils)
 find_mlir_lib(MLIRSupport)
 find_mlir_lib(MLIRShape)
 find_mlir_lib(MLIRShapeToStandard)
-find_mlir_lib(MLIRShapeToSCF)
 find_mlir_lib(MLIRSideEffectInterfaces)
 find_mlir_lib(MLIROpenMP)
 find_mlir_lib(MLIROptLib)
@@ -266,7 +265,6 @@ set(MLIRLibs
 ${MLIRPresburger}
 ${MLIRShape}
 ${MLIRShapeToStandard}
-${MLIRShapeToSCF}
 ${MLIRInferTypeOpInterface}
 # strict order verified
 ${LLVMBitWriter}
@@ -316,8 +314,15 @@ endfunction()
 # the LLVM TableGen command to have the TableGen target so that changes to the
 # table gen utility itself can be detected and cause re-compilation of .td file.
 add_executable(mlir-tblgen IMPORTED)
-set_property(TARGET mlir-tblgen
-             PROPERTY IMPORTED_LOCATION ${LLVM_PROJ_BIN}/mlir-tblgen)
+# Specify extension for incremental Windows builds.
+if(MSVC)
+  set_property(TARGET mlir-tblgen
+               PROPERTY IMPORTED_LOCATION ${LLVM_PROJ_BIN}/mlir-tblgen.exe)
+else()
+  set_property(TARGET mlir-tblgen
+               PROPERTY IMPORTED_LOCATION ${LLVM_PROJ_BIN}/mlir-tblgen)
+endif()
 
 set(MLIR_TABLEGEN_EXE mlir-tblgen)
 
 # Add a dialect used by ONNX MLIR and copy the generated operation
@@ -62,7 +62,7 @@ Firstly, install MLIR (as a part of LLVM-Project):
 ``` bash
 git clone https://github.com/llvm/llvm-project.git
 # Check out a specific branch that is known to work with ONNX MLIR.
-cd llvm-project && git checkout 1d01fc100bb5bef5f5eaf92520b2e52f64ee1d6e && cd ..
+cd llvm-project && git checkout 91671e13efbc5dbd17b832d7973401350d0a6ee6 && cd ..
 ```
 
 [same-as-file]: <> (utils/build-mlir.sh)
@@ -152,7 +152,7 @@ Install MLIR (as a part of LLVM-Project):
 ```shell
 git clone https://github.com/llvm/llvm-project.git
 # Check out a specific branch that is known to work with ONNX MLIR.
-cd llvm-project && git checkout 1d01fc100bb5bef5f5eaf92520b2e52f64ee1d6e && cd ..
+cd llvm-project && git checkout 91671e13efbc5dbd17b832d7973401350d0a6ee6 && cd ..
 ```
 
 [same-as-file]: <> (utils/build-mlir.cmd)
@@ -111,8 +111,8 @@ ONNX ArgMax operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -139,8 +139,8 @@ ONNX ArgMin operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -285,8 +285,8 @@ ONNX AveragePool operation
 `auto_pad` | ::mlir::StringAttr | string attribute
-`ceil_mode` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`count_include_pad` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`ceil_mode` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`count_include_pad` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `kernel_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `pads` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `strides` | ::mlir::ArrayAttr | 64-bit integer array attribute
@@ -457,7 +457,7 @@ ONNX CastMap operation
 `cast_to` | ::mlir::StringAttr | string attribute
 `map_form` | ::mlir::StringAttr | string attribute
-`max_map` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`max_map` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -499,7 +499,7 @@ ONNX Cast operation
-`to` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`to` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -532,7 +532,7 @@ ONNX CategoryMapper operation
 `cats_int64s` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `cats_strings` | ::mlir::ArrayAttr | string array attribute
-`default_int64` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`default_int64` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `default_string` | ::mlir::StringAttr | string attribute
@@ -602,7 +602,7 @@ ONNX Compress operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -630,8 +630,8 @@ ONNX ConcatFromSequence operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`new_axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`new_axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -655,7 +655,7 @@ ONNX Concat operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -726,7 +726,7 @@ ONNX ConvInteger operation
 `auto_pad` | ::mlir::StringAttr | string attribute
 `dilations` | ::mlir::ArrayAttr | 64-bit integer array attribute
-`group` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`group` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `kernel_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `pads` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `strides` | ::mlir::ArrayAttr | 64-bit integer array attribute
@@ -759,7 +759,7 @@ ONNX Conv operation
 `auto_pad` | ::mlir::StringAttr | string attribute
 `dilations` | ::mlir::ArrayAttr | 64-bit integer array attribute
-`group` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`group` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `kernel_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `pads` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `strides` | ::mlir::ArrayAttr | 64-bit integer array attribute
@@ -803,7 +803,7 @@ ONNX ConvTranspose operation
 `auto_pad` | ::mlir::StringAttr | string attribute
 `dilations` | ::mlir::ArrayAttr | 64-bit integer array attribute
-`group` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`group` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `kernel_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `output_padding` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `output_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
@@ -889,8 +889,8 @@ ONNX CumSum operation
-`exclusive` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`reverse` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`exclusive` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`reverse` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -941,7 +941,7 @@ ONNX DepthToSpace operation
-`blocksize` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`blocksize` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `mode` | ::mlir::StringAttr | string attribute
@@ -1262,8 +1262,8 @@ ONNX EyeLike operation
-`dtype` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`k` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`dtype` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`k` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -1316,7 +1316,7 @@ ONNX Flatten operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -1437,8 +1437,8 @@ ONNX GRU operation
 `activations` | ::mlir::ArrayAttr | string array attribute
 `clip` | ::mlir::FloatAttr | 32-bit float attribute
 `direction` | ::mlir::StringAttr | string attribute
-`hidden_size` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`linear_before_reset` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`hidden_size` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`linear_before_reset` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -1522,7 +1522,7 @@ ONNX GatherElements operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -1686,7 +1686,7 @@ ONNX Gather operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -1725,8 +1725,8 @@ ONNX Gemm operation
 `alpha` | ::mlir::FloatAttr | 32-bit float attribute
 `beta` | ::mlir::FloatAttr | 32-bit float attribute
-`transA` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`transB` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`transA` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`transB` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -1774,7 +1774,7 @@ ONNX GlobalLpPool operation
-`p` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`p` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -1880,7 +1880,7 @@ ONNX Hardmax operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -1957,7 +1957,7 @@ ONNX Imputer operation
 `imputed_value_floats` | ::mlir::ArrayAttr | 32-bit float array attribute
 `imputed_value_int64s` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `replaced_value_float` | ::mlir::FloatAttr | 32-bit float attribute
-`replaced_value_int64` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`replaced_value_int64` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -2012,8 +2012,8 @@ ONNX IsInf operation
-`detect_negative` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`detect_positive` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`detect_negative` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`detect_positive` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -2067,7 +2067,7 @@ ONNX LRN operation
 `alpha` | ::mlir::FloatAttr | 32-bit float attribute
 `beta` | ::mlir::FloatAttr | 32-bit float attribute
 `bias` | ::mlir::FloatAttr | 32-bit float attribute
-`size` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`size` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -2176,8 +2176,8 @@ ONNX LSTM operation
 `activations` | ::mlir::ArrayAttr | string array attribute
 `clip` | ::mlir::FloatAttr | 32-bit float attribute
 `direction` | ::mlir::StringAttr | string attribute
-`hidden_size` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`input_forget` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`hidden_size` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`input_forget` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -2227,7 +2227,7 @@ ONNX LabelEncoder operation
 `default_float` | ::mlir::FloatAttr | 32-bit float attribute
-`default_int64` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`default_int64` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `default_string` | ::mlir::StringAttr | string attribute
 `keys_floats` | ::mlir::ArrayAttr | 32-bit float array attribute
 `keys_int64s` | ::mlir::ArrayAttr | 64-bit integer array attribute
@@ -2310,7 +2310,7 @@ ONNX LinearClassifier operation
 `classlabels_strings` | ::mlir::ArrayAttr | string array attribute
 `coefficients` | ::mlir::ArrayAttr | 32-bit float array attribute
 `intercepts` | ::mlir::ArrayAttr | 32-bit float array attribute
-`multi_class` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`multi_class` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `post_transform` | ::mlir::StringAttr | string attribute
@@ -2344,7 +2344,7 @@ ONNX LinearRegressor operation
 `coefficients` | ::mlir::ArrayAttr | 32-bit float array attribute
 `intercepts` | ::mlir::ArrayAttr | 32-bit float array attribute
 `post_transform` | ::mlir::StringAttr | string attribute
-`targets` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`targets` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -2399,7 +2399,7 @@ ONNX LogSoftmax operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -2561,8 +2561,8 @@ ONNX LpNormalization operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`p` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`p` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -2592,7 +2592,7 @@ ONNX LpPool operation
 `auto_pad` | ::mlir::StringAttr | string attribute
 `kernel_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
-`p` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`p` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `pads` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `strides` | ::mlir::ArrayAttr | 64-bit integer array attribute
@@ -2708,11 +2708,11 @@ ONNX MaxPool operation
 `auto_pad` | ::mlir::StringAttr | string attribute
-`ceil_mode` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`ceil_mode` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `dilations` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `kernel_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `pads` | ::mlir::ArrayAttr | 64-bit integer array attribute
-`storage_order` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`storage_order` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `strides` | ::mlir::ArrayAttr | 64-bit integer array attribute
@@ -2740,11 +2740,11 @@ ONNX MaxPool operation with a single output.
 `auto_pad` | ::mlir::StringAttr | string attribute
-`ceil_mode` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`ceil_mode` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `dilations` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `kernel_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `pads` | ::mlir::ArrayAttr | 64-bit integer array attribute
-`storage_order` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`storage_order` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `strides` | ::mlir::ArrayAttr | 64-bit integer array attribute
@@ -2919,7 +2919,7 @@ ONNX Mod operation
-`fmod` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`fmod` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -2966,8 +2966,8 @@ ONNX Multinomial operation
-`dtype` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`sample_size` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`dtype` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`sample_size` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `seed` | ::mlir::FloatAttr | 32-bit float attribute
@@ -3018,7 +3018,7 @@ ONNX NonMaxSuppression operation
-`center_point_box` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`center_point_box` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -3127,7 +3127,7 @@ ONNX OneHotEncoder operation
 `cats_int64s` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `cats_strings` | ::mlir::ArrayAttr | string array attribute
-`zeros` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`zeros` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -3169,7 +3169,7 @@ ONNX OneHot operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -3459,7 +3459,7 @@ ONNX QLinearConv operation
 `auto_pad` | ::mlir::StringAttr | string attribute
 `dilations` | ::mlir::ArrayAttr | 64-bit integer array attribute
-`group` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`group` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `kernel_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `pads` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `strides` | ::mlir::ArrayAttr | 64-bit integer array attribute
@@ -3613,7 +3613,7 @@ ONNX RNN operation
 `activations` | ::mlir::ArrayAttr | string array attribute
 `clip` | ::mlir::FloatAttr | 32-bit float attribute
 `direction` | ::mlir::StringAttr | string attribute
-`hidden_size` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`hidden_size` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -3649,7 +3649,7 @@ ONNX RandomNormalLike operation
-`dtype` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`dtype` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `mean` | ::mlir::FloatAttr | 32-bit float attribute
 `scale` | ::mlir::FloatAttr | 32-bit float attribute
 `seed` | ::mlir::FloatAttr | 32-bit float attribute
@@ -3682,7 +3682,7 @@ ONNX RandomNormal operation
-`dtype` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`dtype` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `mean` | ::mlir::FloatAttr | 32-bit float attribute
 `scale` | ::mlir::FloatAttr | 32-bit float attribute
 `seed` | ::mlir::FloatAttr | 32-bit float attribute
@@ -3710,7 +3710,7 @@ ONNX RandomUniformLike operation
-`dtype` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`dtype` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `high` | ::mlir::FloatAttr | 32-bit float attribute
 `low` | ::mlir::FloatAttr | 32-bit float attribute
 `seed` | ::mlir::FloatAttr | 32-bit float attribute
@@ -3742,7 +3742,7 @@ ONNX RandomUniform operation
-`dtype` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`dtype` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `high` | ::mlir::FloatAttr | 32-bit float attribute
 `low` | ::mlir::FloatAttr | 32-bit float attribute
 `seed` | ::mlir::FloatAttr | 32-bit float attribute
@@ -3834,7 +3834,7 @@ ONNX ReduceL1 operation
 `axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
-`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -3864,7 +3864,7 @@ ONNX ReduceL2 operation
 `axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
-`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -3894,7 +3894,7 @@ ONNX ReduceLogSumExp operation
 `axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
-`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -3924,7 +3924,7 @@ ONNX ReduceLogSum operation
 `axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
-`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -3954,7 +3954,7 @@ ONNX ReduceMax operation
 `axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
-`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -3984,7 +3984,7 @@ ONNX ReduceMean operation
 `axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
-`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -4014,7 +4014,7 @@ ONNX ReduceMin operation
 `axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
-`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -4044,7 +4044,7 @@ ONNX ReduceProd operation
 `axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
-`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -4074,7 +4074,7 @@ ONNX ReduceSum operation
 `axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
-`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -4104,7 +4104,7 @@ ONNX ReduceSumSquare operation
 `axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
-`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -4176,7 +4176,7 @@ ONNX Resize operation
 `coordinate_transformation_mode` | ::mlir::StringAttr | string attribute
 `cubic_coeff_a` | ::mlir::FloatAttr | 32-bit float attribute
-`exclude_outside` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`exclude_outside` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `extrapolation_value` | ::mlir::FloatAttr | 32-bit float attribute
 `mode` | ::mlir::StringAttr | string attribute
 `nearest_mode` | ::mlir::StringAttr | string attribute
@@ -4238,8 +4238,8 @@ ONNX ReverseSequence operation
-`batch_axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`time_axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`batch_axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`time_axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -4275,9 +4275,9 @@ ONNX RoiAlign operation
 `mode` | ::mlir::StringAttr | string attribute
-`output_height` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`output_width` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`sampling_ratio` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`output_height` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`output_width` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`sampling_ratio` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `spatial_scale` | ::mlir::FloatAttr | 32-bit float attribute
@@ -4372,8 +4372,8 @@ ONNX SVMRegressor operation
 `coefficients` | ::mlir::ArrayAttr | 32-bit float array attribute
 `kernel_params` | ::mlir::ArrayAttr | 32-bit float array attribute
 `kernel_type` | ::mlir::StringAttr | string attribute
-`n_supports` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`one_class` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`n_supports` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`one_class` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `post_transform` | ::mlir::StringAttr | string attribute
 `rho` | ::mlir::ArrayAttr | 32-bit float array attribute
 `support_vectors` | ::mlir::ArrayAttr | 32-bit float array attribute
@@ -4546,7 +4546,7 @@ ONNX Scan operation
 `body` | ::mlir::Attribute | any attribute
-`num_scan_inputs` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`num_scan_inputs` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `scan_input_axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `scan_input_directions` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `scan_output_axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
@@ -4624,7 +4624,7 @@ ONNX ScatterElements operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -4777,7 +4777,7 @@ ONNX Scatter operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -4871,7 +4871,7 @@ ONNX SequenceEmpty operation
-`dtype` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`dtype` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -5160,7 +5160,7 @@ ONNX Softmax operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -5224,7 +5224,7 @@ ONNX SpaceToDepth operation
-`blocksize` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`blocksize` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -5250,7 +5250,7 @@ ONNX Split operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `split` | ::mlir::ArrayAttr | 64-bit integer array attribute
@@ -5284,8 +5284,8 @@ ONNX SplitToSequence operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -5366,7 +5366,7 @@ ONNX StringNormalizer operation
 `case_change_action` | ::mlir::StringAttr | string attribute
-`is_case_sensitive` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`is_case_sensitive` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `locale` | ::mlir::StringAttr | string attribute
 `stopwords` | ::mlir::ArrayAttr | string array attribute
@@ -5495,9 +5495,9 @@ ONNX TfIdfVectorizer operation
-`max_gram_length` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`max_skip_count` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`min_gram_length` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`max_gram_length` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`max_skip_count` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`min_gram_length` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `mode` | ::mlir::StringAttr | string attribute
 `ngram_counts` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `ngram_indexes` | ::mlir::ArrayAttr | 64-bit integer array attribute
@@ -5587,9 +5587,9 @@ ONNX TopK operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`largest` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`sorted` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`largest` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`sorted` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -5701,7 +5701,7 @@ ONNX TreeEnsembleRegressor operation
 `aggregate_function` | ::mlir::StringAttr | string attribute
 `base_values` | ::mlir::ArrayAttr | 32-bit float array attribute
-`n_targets` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`n_targets` | ::mlir::IntegerAttr | 64-bit signed integer attribute
 `nodes_falsenodeids` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `nodes_featureids` | ::mlir::ArrayAttr | 64-bit integer array attribute
 `nodes_hitrates` | ::mlir::ArrayAttr | 32-bit float array attribute
@@ -5813,8 +5813,8 @@ ONNX Unique operation
-`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
-`sorted` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signed integer attribute
+`sorted` | ::mlir::IntegerAttr | 64-bit signed integer attribute
@@ -20,7 +20,7 @@ Firstly, install MLIR (as a part of LLVM-Project):
 ``` bash
 git clone https://github.com/llvm/llvm-project.git
 # Check out a specific branch that is known to work with ONNX MLIR.
-cd llvm-project && git checkout 1d01fc100bb5bef5f5eaf92520b2e52f64ee1d6e && cd ..
+cd llvm-project && git checkout 91671e13efbc5dbd17b832d7973401350d0a6ee6 && cd ..
 ```
 
 [same-as-file]: <> (utils/build-mlir.sh)
@@ -110,7 +110,7 @@ Install MLIR (as a part of LLVM-Project):
 ```shell
 git clone https://github.com/llvm/llvm-project.git
 # Check out a specific branch that is known to work with ONNX MLIR.
-cd llvm-project && git checkout 1d01fc100bb5bef5f5eaf92520b2e52f64ee1d6e && cd ..
+cd llvm-project && git checkout 91671e13efbc5dbd17b832d7973401350d0a6ee6 && cd ..
 ```
 
 [same-as-file]: <> (utils/build-mlir.cmd)
@@ -118,7 +118,9 @@ private:
       mlirAttr = builder_.getF32FloatAttr(attr.f());
       break;
     case onnx::AttributeProto::INT:
-      mlirAttr = builder_.getI64IntegerAttr(attr.i());
+      mlirAttr =
+          IntegerAttr::get(builder_.getIntegerType(64, /*isSigned=*/true),
+              APInt(64, /*value=*/attr.i(), /*isSigned=*/true));
       break;
     case onnx::AttributeProto::STRING:
       mlirAttr = builder_.getStringAttr(attr.s());
@@ -92,8 +92,8 @@ Value emitScalarOpFor<ONNXCastOp>(ConversionPatternRewriter &rewriter,
     Location loc, Operation *op, Type elementType,
     ArrayRef<Value> scalarOperands) {
   ONNXCastOp castOp = llvm::dyn_cast<ONNXCastOp>(op);
-  auto mlirtype = convertONNXTypeToMLIRType(rewriter,
-      static_cast<onnx::TensorProto_DataType>(castOp.toAttr().getInt()));
+  auto mlirtype = convertONNXTypeToMLIRType(
+      rewriter, static_cast<onnx::TensorProto_DataType>(castOp.to()));
   Value operand = scalarOperands[0];
   auto origtype = operand.getType();
@@ -23,7 +23,7 @@ struct ONNXSoftmaxOpLowering : public ConversionPattern {
     // exp_x / sum
     auto memRefType = convertToMemRefType(*op->result_type_begin());
     int64_t rank = memRefType.getRank();
-    int64_t axis = llvm::dyn_cast<ONNXSoftmaxOp>(op).axis().getSExtValue();
+    int64_t axis = llvm::dyn_cast<ONNXSoftmaxOp>(op).axis();
     axis = axis >= 0 ? axis : rank + axis;
     assert(axis >= -rank && axis <= rank - 1);
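Side note (illustration only, not part of the diff): with the attributes now declared as SI64Attr, the ODS-generated accessors used in the lowerings above and below return a plain int64_t, which is why the `.getSExtValue()` calls disappear. A minimal sketch of reading a signed attribute value directly:

    // Sketch, assuming an si64 IntegerAttr: the signed value is available
    // without going through APInt::getSExtValue().
    #include "mlir/IR/Attributes.h"
    using namespace mlir;

    int64_t readSignedAttr(IntegerAttr attr) {
      return attr.getSInt(); // signed interpretation of the stored value
    }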
@@ -90,7 +90,7 @@ struct ONNXConvOpLowering : public ConversionPattern {
     // Before we start the iteration we need to compute the number of
     // unsplit kernels and fetch the number of groups from the attribute
     // list. Group is always a compilation constant.
-    int64_t group = convOp.group().getSExtValue();
+    int64_t group = convOp.group();
     // Compute the number of unsplit kernels. The number of kernels
     // must be a multiple of the number of groups.
     int64_t kernelsPerGroup = floor(kernelShape[0] / group);
@@ -208,7 +208,7 @@ struct ONNXPoolOpLowering : public ConversionPattern {
       strides.emplace_back(stride.cast<IntegerAttr>().getInt());
 
     // Read ceil_mode attribute
-    auto ceilMode = poolOp.ceil_mode().getSExtValue();
+    auto ceilMode = poolOp.ceil_mode();
 
     // Read pads attribute
     SmallVector<int64_t, 4> pads;
@@ -23,7 +23,7 @@ struct ONNXConcatOpLowering : public ConversionPattern {
     Value alloc;
     bool insertDealloc = checkInsertDealloc(op);
     ONNXConcatOp concatOp = llvm::dyn_cast<ONNXConcatOp>(op);
-    auto axis = concatOp.axis().getSExtValue();
+    auto axis = concatOp.axis();
     int inputNum = operands.size();
     // Alloc and dealloc.
     auto resultOperand = concatOp.concat_result();
@@ -28,7 +28,7 @@ struct ONNXGatherOpLowering : public ConversionPattern {
     Value indices = operandAdaptor.indices();
     auto indicesShape = indices.getType().cast<MemRefType>().getShape();
     int64_t indicesRank = indicesShape.size();
-    int64_t axisIndex = gatherOp.axis().getSExtValue();
+    int64_t axisIndex = gatherOp.axis();
     // get output info
     auto outputMemRefType = convertToMemRefType(*op->result_type_begin());
     auto outputMemRefShape = outputMemRefType.getShape();
@@ -21,7 +21,7 @@ struct ONNXSplitOpLowering : public ConversionPattern {
     // Gather info.
     auto loc = op->getLoc();
     ONNXSplitOp splitOp = llvm::dyn_cast<ONNXSplitOp>(op);
-    auto axis = splitOp.axis().getSExtValue();
+    auto axis = splitOp.axis();
     auto split = splitOp.split().getValue();
     SmallVector<int64_t, 4> splitOffset;
     int64_t offset = 0;
@@ -78,8 +78,8 @@ int64_t AffineMapIntConstant(Builder &builder, AffineMap map,
 //===----------------------------------------------------------------------===//
 // Get reduction type
 //===----------------------------------------------------------------------===//
-RankedTensorType getReductionOutputType(
-    RankedTensorType operandTy, Optional<ArrayAttr> axesAttrs, APInt keepdims) {
+RankedTensorType getReductionOutputType(RankedTensorType operandTy,
+    Optional<ArrayAttr> axesAttrs, uint64_t keepdims) {
   int64_t rank = operandTy.getRank();
 
   SmallVector<int64_t, 4> axes;
@@ -378,7 +378,7 @@ static LogicalResult RNNShapeInference(T *op) {
   // Get hidden size from hidden_size attribute.
   int64_t hiddenSize = -1;
   if (op->hidden_size().hasValue()) {
-    hiddenSize = op->hidden_size().getValue().getSExtValue();
+    hiddenSize = op->hidden_size().getValue();
   } else {
     // Infer hidden_size from wShape and rShape if possible.
     if (rShape[2] != -1)
@@ -390,7 +390,10 @@ static LogicalResult RNNShapeInference(T *op) {
   // Update hidden_size attribute.
   if (hiddenSize != -1) {
     auto builder = mlir::Builder(op->getContext());
-    op->hidden_sizeAttr(builder.getI64IntegerAttr(hiddenSize));
+    auto hiddenSizeAttr =
+        IntegerAttr::get(builder.getIntegerType(64, /*isSigned=*/true),
+            APInt(64, /*value=*/hiddenSize, /*isSigned=*/true));
+    op->hidden_sizeAttr(hiddenSizeAttr);
   }
 }
@@ -1422,11 +1425,12 @@ LogicalResult ONNXConvOp::inferShapes() {
     return emitError("Weight size not compatible with data size");
 
   // Group is a required attribute and should have default value of 1.
-  int64_t group = ONNXConvOp::group().getSExtValue();
+  int64_t group = ONNXConvOp::group();
 
   // Check if the attribute actually exists. If it does not then add it.
   if (!groupAttr())
-    groupAttr(builder.getI64IntegerAttr(group));
+    groupAttr(IntegerAttr::get(builder.getIntegerType(64, /*isSigned=*/true),
+        APInt(64, group, /*isSigned=*/true)));
 
   // Check that the X.shape[1] == (W.shape[1] * group) == C condition holds.
   if (xShape[1] != -1 && weightShape[1] != -1 &&
@@ -1542,11 +1546,12 @@ LogicalResult ONNXConvTransposeOp::inferShapes() {
   }
 
   // Group is a required attribute and should have default value of 1.
-  int64_t group = ONNXConvTransposeOp::group().getSExtValue();
+  int64_t group = ONNXConvTransposeOp::group();
 
   // Check if the attribute actually exists. If it does not then add it.
   if (!groupAttr())
-    groupAttr(builder.getI64IntegerAttr(group));
+    groupAttr(IntegerAttr::get(builder.getIntegerType(64, /*isSigned=*/true),
+        APInt(64, group, /*isSigned=*/true)));
 
   int64_t inChannels = weightShape[0];
   int64_t outChannels = weightShape[1] * group;
@@ -1662,7 +1667,7 @@ LogicalResult ONNXAveragePoolOp::inferShapes() {
         "kernel_shape is a mandatory attribute for which there is no default");
 
   // Ceil mode.
-  auto ceilMode = ceil_mode().getSExtValue();
+  auto ceilMode = ceil_mode();
 
   // Process strides and pads.
   LogicalResult res =
@@ -1715,7 +1720,7 @@ LogicalResult ONNXMaxPoolSingleOutOp::inferShapes() {
         "kernel_shape is a mandatory attribute for which there is no default");
 
   // Storage order.
-  auto storageOrder = storage_order().getSExtValue();
+  auto storageOrder = storage_order();
   if (storageOrder != 0)
     return emitError("column major storage order not supported at this time");
@@ -1726,7 +1731,7 @@ LogicalResult ONNXMaxPoolSingleOutOp::inferShapes() {
   auto padsOpt = pads();
 
   // Ceil mode.
-  auto ceilMode = ceil_mode().getSExtValue();
+  auto ceilMode = ceil_mode();
 
   SmallVector<int64_t, 4> outputDims;
   // Insert batch size.
@@ -1957,7 +1962,7 @@ LogicalResult ONNXCastOp::inferShapes() {
     return UnrankedTensorType::get(elementType);
   };
 
-  int64_t targetType = toAttr().getInt();
+  int64_t targetType = to();
   OpBuilder builder(getContext());
   if (auto elementType = convertONNXTypeToMLIRType(
           builder, static_cast<onnx::TensorProto_DataType>(targetType))) {
@@ -2012,12 +2017,13 @@ LogicalResult ONNXConcatOp::inferShapes() {
   auto commonType = getOperand(0).getType().cast<RankedTensorType>();
   auto commonShape = commonType.getShape();
   auto commonRank = commonShape.size();
-  auto axisIndex = axis().getSExtValue();
+  int64_t axisIndex = axis();
   // Negative axis means values are counted from the opposite side.
   if (axisIndex < 0) {
     axisIndex = commonRank + axisIndex;
     auto builder = mlir::Builder(getContext());
-    axisAttr(builder.getI64IntegerAttr(axisIndex));
+    axisAttr(IntegerAttr::get(builder.getIntegerType(64, /*isSigned=*/true),
+        APInt(64, /*value=*/axisIndex, /*isSigned=*/true)));
   }
   if (axisIndex >= commonRank)
     return emitError("Concat axis value out of bound");
@@ -2090,14 +2096,15 @@ LogicalResult ONNXSplitOp::inferShapes() {
   int64_t inputRank = inputShape.size();
 
   // Checking value of axis parameter.
-  auto axisIndex = axis().getSExtValue();
+  int64_t axisIndex = axis();
   if (axisIndex < -inputRank || axisIndex >= inputRank)
     return emitError("Split axis value out of bound");
   // Negative axis means values are counted from the opposite side.
   if (axisIndex < 0) {
     axisIndex = inputRank + axisIndex;
     auto builder = mlir::Builder(getContext());
-    axisAttr(builder.getI64IntegerAttr(axisIndex));
+    axisAttr(IntegerAttr::get(builder.getIntegerType(64, /*isSigned=*/true),
+        APInt(64, /*value=*/axisIndex, /*isSigned=*/true)));
   }
 
   // Checking value of split parameter.
@@ -2296,11 +2303,12 @@ LogicalResult ONNXConvIntegerOp::inferShapes() {
   }
 
   // Group is a required attribute and should have default value of 1.
-  int64_t group = ONNXConvIntegerOp::group().getSExtValue();
+  int64_t group = ONNXConvIntegerOp::group();
 
   // Check if the attribute actually exists. If it does not then add it.
   if (!groupAttr())
-    groupAttr(builder.getI64IntegerAttr(group));
+    groupAttr(IntegerAttr::get(builder.getIntegerType(64, /*isSigned=*/true),
+        APInt(64, 1, /*isSigned=*/true)));
 
   // Check that the X.shape[1] == (W.shape[1] * group) == C condition holds.
   if (xShape[1] != -1 && weightShape[1] != -1 &&
@@ -2477,7 +2485,7 @@ LogicalResult ONNXGatherOp::inferShapes() {
     return emitError("Input tensor must have rank >= 1");
 
   // Read 'axis' attribute.
-  auto axisIndex = axis().getSExtValue();
+  int64_t axisIndex = axis();
   // 'axis' must be in [-rank, rank-1]
   if (axisIndex < -inputRank || axisIndex >= inputRank)
     return emitError("Gather axis value out of bound");
@@ -2485,7 +2493,8 @@ LogicalResult ONNXGatherOp::inferShapes() {
   if (axisIndex < 0) {
     axisIndex += inputRank;
     auto builder = mlir::Builder(getContext());
-    axisAttr(builder.getI64IntegerAttr(axisIndex));
+    axisAttr(IntegerAttr::get(builder.getIntegerType(64, /*isSigned=*/true),
+        APInt(64, /*value=*/axisIndex, /*isSigned=*/true)));
   }
 
   // If 'indices' is a constant, check whether its values are valid or not.
@@ -118,11 +118,11 @@ def ONNXMaxPoolSingleOutOp: ONNX_Op<"MaxPoolSingleOut",
   }];
   let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X,
     DefaultValuedAttr<StrAttr, "NOTSET">:$auto_pad,
-    DefaultValuedAttr<I64Attr, "0">:$ceil_mode,
+    DefaultValuedAttr<SI64Attr, "0">:$ceil_mode,
     OptionalAttr<I64ArrayAttr>:$dilations,
     DefaultValuedAttr<I64ArrayAttr, "{}">:$kernel_shape,
     OptionalAttr<I64ArrayAttr>:$pads,
-    DefaultValuedAttr<I64Attr, "0">:$storage_order,
+    DefaultValuedAttr<SI64Attr, "0">:$storage_order,
     OptionalAttr<I64ArrayAttr>:$strides);
   let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y);
   let extraClassDeclaration = [{
@@ -193,8 +193,8 @@ def ONNXArgMaxOp:ONNX_Op<"ArgMax",
   "The type of the output tensor is integer."
   }];
   let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$data,
-    DefaultValuedAttr<I64Attr, "0">:$axis,
-    DefaultValuedAttr<I64Attr, "1">:$keepdims);
+    DefaultValuedAttr<SI64Attr, "0">:$axis,
+    DefaultValuedAttr<SI64Attr, "1">:$keepdims);
   let results = (outs TensorOf<[I64]>:$reduced);
   let extraClassDeclaration = [{
     static int getNumberOfOperands() {
@@ -219,8 +219,8 @@ def ONNXArgMinOp:ONNX_Op<"ArgMin",
   "The type of the output tensor is integer."
   }];
   let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$data,
-    DefaultValuedAttr<I64Attr, "0">:$axis,
-    DefaultValuedAttr<I64Attr, "1">:$keepdims);
+    DefaultValuedAttr<SI64Attr, "0">:$axis,
+    DefaultValuedAttr<SI64Attr, "1">:$keepdims);
  let results = (outs TensorOf<[I64]>:$reduced);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
@@ -355,8 +355,8 @@ def ONNXAveragePoolOp:ONNX_Op<"AveragePool",
   }];
   let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X,
     DefaultValuedAttr<StrAttr, "NOTSET">:$auto_pad,
-    DefaultValuedAttr<I64Attr, "0">:$ceil_mode,
-    DefaultValuedAttr<I64Attr, "0">:$count_include_pad,
+    DefaultValuedAttr<SI64Attr, "0">:$ceil_mode,
+    DefaultValuedAttr<SI64Attr, "0">:$count_include_pad,
     I64ArrayAttr:$kernel_shape,
     OptionalAttr<I64ArrayAttr>:$pads,
     OptionalAttr<I64ArrayAttr>:$strides);
@@ -474,7 +474,7 @@ def ONNXCastOp:ONNX_Op<"Cast",
   "an integer 36 to Boolean may produce 1 because we truncate bits which can't be stored in the targeted type."
   }];
   let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I1]>, TensorOf<[StringType]>, AnyMemRef]>:$input,
-    I64Attr:$to);
+    SI64Attr:$to);
   let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I1]>, TensorOf<[StringType]>, AnyMemRef]>:$output);
   let extraClassDeclaration = [{
     static int getNumberOfOperands() {
@@ -488,7 +488,7 @@ def ONNXCastOp:ONNX_Op<"Cast",
     }
     std::vector<mlir::Type> resultTypeInference() {
       std::vector<mlir::Type> resultTypes;
-      auto toAttr = to().getSExtValue();
+      auto toAttr = to();
       auto builder = mlir::OpBuilder(getContext());
       resultTypes.push_back(mlir::UnrankedTensorType::get(
         convertONNXTypeToMLIRType(builder, static_cast<onnx::TensorProto_DataType>(toAttr))));
@@ -564,7 +564,7 @@ def ONNXCompressOp:ONNX_Op<"Compress",
   }];
   let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$input,
     AnyTypeOf<[TensorOf<[I1]>, AnyMemRef]>:$condition,
-    OptionalAttr<I64Attr>:$axis);
+    OptionalAttr<SI64Attr>:$axis);
   let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$output);
   let extraClassDeclaration = [{
     static int getNumberOfOperands() {
@@ -586,7 +586,7 @@ def ONNXConcatOp:ONNX_Op<"Concat",
   "Concatenate a list of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on."
   }];
   let arguments = (ins Variadic<AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>>:$inputs,
-    I64Attr:$axis);
+    SI64Attr:$axis);
   let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$concat_result);
   let extraClassDeclaration = [{
     static int getNumberOfOperands() {
@ -611,8 +611,8 @@ def ONNXConcatFromSequenceOp:ONNX_Op<"ConcatFromSequence",
|
|||
"When 'new_axis' is 1, the behavior is similar to numpy.stack."
|
||||
}];
|
||||
let arguments = (ins AnyTypeOf<[SeqOf<[TensorOf<[UI8]>]>, SeqOf<[TensorOf<[UI16]>]>, SeqOf<[TensorOf<[UI32]>]>, SeqOf<[TensorOf<[UI64]>]>, SeqOf<[TensorOf<[I8]>]>, SeqOf<[TensorOf<[I16]>]>, SeqOf<[TensorOf<[I32]>]>, SeqOf<[TensorOf<[I64]>]>, SeqOf<[TensorOf<[F16]>]>, SeqOf<[TensorOf<[F32]>]>, SeqOf<[TensorOf<[F64]>]>, SeqOf<[TensorOf<[StringType]>]>, SeqOf<[TensorOf<[I1]>]>, SeqOf<[TensorOf<[Complex<F32>]>]>, SeqOf<[TensorOf<[Complex<F64>]>]>, AnyMemRef]>:$input_sequence,
|
||||
I64Attr:$axis,
|
||||
DefaultValuedAttr<I64Attr, "0">:$new_axis);
|
||||
SI64Attr:$axis,
|
||||
DefaultValuedAttr<SI64Attr, "0">:$new_axis);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$concat_result);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -716,7 +716,7 @@ def ONNXConvOp:ONNX_Op<"Conv",
AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$B,
DefaultValuedAttr<StrAttr, "NOTSET">:$auto_pad,
OptionalAttr<I64ArrayAttr>:$dilations,
DefaultValuedAttr<I64Attr, "1">:$group,
DefaultValuedAttr<SI64Attr, "1">:$group,
OptionalAttr<I64ArrayAttr>:$kernel_shape,
OptionalAttr<I64ArrayAttr>:$pads,
OptionalAttr<I64ArrayAttr>:$strides);
@ -747,7 +747,7 @@ def ONNXConvIntegerOp:ONNX_Op<"ConvInteger",
|
|||
AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, AnyMemRef, NoneType]>:$w_zero_point,
|
||||
DefaultValuedAttr<StrAttr, "NOTSET">:$auto_pad,
|
||||
OptionalAttr<I64ArrayAttr>:$dilations,
|
||||
DefaultValuedAttr<I64Attr, "1">:$group,
|
||||
DefaultValuedAttr<SI64Attr, "1">:$group,
|
||||
OptionalAttr<I64ArrayAttr>:$kernel_shape,
|
||||
OptionalAttr<I64ArrayAttr>:$pads,
|
||||
OptionalAttr<I64ArrayAttr>:$strides);
|
||||
|
@ -789,7 +789,7 @@ def ONNXConvTransposeOp:ONNX_Op<"ConvTranspose",
|
|||
AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$B,
|
||||
DefaultValuedAttr<StrAttr, "NOTSET">:$auto_pad,
|
||||
OptionalAttr<I64ArrayAttr>:$dilations,
|
||||
DefaultValuedAttr<I64Attr, "1">:$group,
|
||||
DefaultValuedAttr<SI64Attr, "1">:$group,
|
||||
OptionalAttr<I64ArrayAttr>:$kernel_shape,
|
||||
OptionalAttr<I64ArrayAttr>:$output_padding,
|
||||
OptionalAttr<I64ArrayAttr>:$output_shape,
|
||||
|
@ -878,8 +878,8 @@ def ONNXCumSumOp:ONNX_Op<"CumSum",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$x,
|
||||
AnyTypeOf<[TensorOf<[I32]>, TensorOf<[I64]>, AnyMemRef]>:$axis,
|
||||
DefaultValuedAttr<I64Attr, "0">:$exclusive,
|
||||
DefaultValuedAttr<I64Attr, "0">:$reverse);
|
||||
DefaultValuedAttr<SI64Attr, "0">:$exclusive,
|
||||
DefaultValuedAttr<SI64Attr, "0">:$reverse);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$y);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -927,7 +927,7 @@ def ONNXDepthToSpaceOp:ONNX_Op<"DepthToSpace",
|
|||
""
|
||||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$input,
|
||||
I64Attr:$blocksize,
|
||||
SI64Attr:$blocksize,
|
||||
DefaultValuedAttr<StrAttr, "DCR">:$mode);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$output);
|
||||
let extraClassDeclaration = [{
|
||||
|
@ -1289,8 +1289,8 @@ def ONNXEyeLikeOp:ONNX_Op<"EyeLike",
|
|||
"TensorProto message and be valid as an output type."
|
||||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I1]>, AnyMemRef]>:$input,
|
||||
OptionalAttr<I64Attr>:$dtype,
|
||||
DefaultValuedAttr<I64Attr, "0">:$k);
|
||||
OptionalAttr<SI64Attr>:$dtype,
|
||||
DefaultValuedAttr<SI64Attr, "0">:$k);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I1]>, AnyMemRef]>:$output);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -1314,7 +1314,7 @@ def ONNXFlattenOp:ONNX_Op<"Flatten",
|
|||
"(d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn)."
|
||||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$input,
|
||||
DefaultValuedAttr<I64Attr, "1">:$axis);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$axis);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$output);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -1441,8 +1441,8 @@ def ONNXGRUOp:ONNX_Op<"GRU",
|
|||
OptionalAttr<StrArrayAttr>:$activations,
|
||||
OptionalAttr<F32Attr>:$clip,
|
||||
DefaultValuedAttr<StrAttr, "forward">:$direction,
|
||||
OptionalAttr<I64Attr>:$hidden_size,
|
||||
DefaultValuedAttr<I64Attr, "0">:$linear_before_reset);
|
||||
OptionalAttr<SI64Attr>:$hidden_size,
|
||||
DefaultValuedAttr<SI64Attr, "0">:$linear_before_reset);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType, NoneType, NoneType]>:$Y,
|
||||
AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType, NoneType, NoneType, NoneType]>:$Y_h);
|
||||
let extraClassDeclaration = [{
|
||||
|
@ -1522,7 +1522,7 @@ def ONNXGatherOp:ONNX_Op<"Gather",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$data,
|
||||
AnyTypeOf<[TensorOf<[I32]>, TensorOf<[I64]>, AnyMemRef]>:$indices,
|
||||
DefaultValuedAttr<I64Attr, "0">:$axis);
|
||||
DefaultValuedAttr<SI64Attr, "0">:$axis);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$output);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -1599,7 +1599,7 @@ def ONNXGatherElementsOp:ONNX_Op<"GatherElements",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$data,
|
||||
AnyTypeOf<[TensorOf<[I32]>, TensorOf<[I64]>, AnyMemRef]>:$indices,
|
||||
DefaultValuedAttr<I64Attr, "0">:$axis);
|
||||
DefaultValuedAttr<SI64Attr, "0">:$axis);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$output);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -1724,8 +1724,8 @@ def ONNXGemmOp:ONNX_Op<"Gemm",
|
|||
AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, AnyMemRef, NoneType]>:$C,
|
||||
DefaultValuedAttr<F32Attr, "1.0">:$alpha,
|
||||
DefaultValuedAttr<F32Attr, "1.0">:$beta,
|
||||
DefaultValuedAttr<I64Attr, "0">:$transA,
|
||||
DefaultValuedAttr<I64Attr, "0">:$transB);
|
||||
DefaultValuedAttr<SI64Attr, "0">:$transA,
|
||||
DefaultValuedAttr<SI64Attr, "0">:$transB);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, AnyMemRef, NoneType]>:$Y);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -1772,7 +1772,7 @@ def ONNXGlobalLpPoolOp:ONNX_Op<"GlobalLpPool",
|
|||
" equal to the spatial dimension of input tensor."
|
||||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X,
|
||||
DefaultValuedAttr<I64Attr, "2">:$p);
|
||||
DefaultValuedAttr<SI64Attr, "2">:$p);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$Y);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -1906,7 +1906,7 @@ def ONNXHardmaxOp:ONNX_Op<"Hardmax",
|
|||
"and contains the hardmax values of the corresponding input."
|
||||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input,
|
||||
DefaultValuedAttr<I64Attr, "1">:$axis);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$axis);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -2002,8 +2002,8 @@ def ONNXIsInfOp:ONNX_Op<"IsInf",
|
|||
"Map infinity to true and other values to false."
|
||||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X,
|
||||
DefaultValuedAttr<I64Attr, "1">:$detect_negative,
|
||||
DefaultValuedAttr<I64Attr, "1">:$detect_positive);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$detect_negative,
|
||||
DefaultValuedAttr<SI64Attr, "1">:$detect_positive);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[I1]>, AnyMemRef]>:$Y);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -2058,7 +2058,7 @@ def ONNXLRNOp:ONNX_Op<"LRN",
|
|||
DefaultValuedAttr<F32Attr, "0.0001">:$alpha,
|
||||
DefaultValuedAttr<F32Attr, "0.75">:$beta,
|
||||
DefaultValuedAttr<F32Attr, "1.0">:$bias,
|
||||
I64Attr:$size);
|
||||
SI64Attr:$size);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$Y);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -2172,8 +2172,8 @@ def ONNXLSTMOp:ONNX_Op<"LSTM",
|
|||
OptionalAttr<StrArrayAttr>:$activations,
|
||||
OptionalAttr<F32Attr>:$clip,
|
||||
DefaultValuedAttr<StrAttr, "forward">:$direction,
|
||||
OptionalAttr<I64Attr>:$hidden_size,
|
||||
DefaultValuedAttr<I64Attr, "0">:$input_forget);
|
||||
OptionalAttr<SI64Attr>:$hidden_size,
|
||||
DefaultValuedAttr<SI64Attr, "0">:$input_forget);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType, NoneType, NoneType, NoneType, NoneType]>:$Y,
|
||||
AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType, NoneType, NoneType, NoneType, NoneType, NoneType]>:$Y_h,
|
||||
AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType, NoneType, NoneType, NoneType, NoneType, NoneType, NoneType]>:$Y_c);
|
||||
|
@ -2306,7 +2306,7 @@ def ONNXLogSoftmaxOp:ONNX_Op<"LogSoftmax",
|
|||
"and contains the logsoftmax values of the corresponding input."
|
||||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input,
|
||||
DefaultValuedAttr<I64Attr, "1">:$axis);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$axis);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -2464,8 +2464,8 @@ def ONNXLpNormalizationOp:ONNX_Op<"LpNormalization",
|
|||
"Given a matrix, apply Lp-normalization along the provided axis."
|
||||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input,
|
||||
DefaultValuedAttr<I64Attr, "-1">:$axis,
|
||||
DefaultValuedAttr<I64Attr, "2">:$p);
|
||||
DefaultValuedAttr<SI64Attr, "-1">:$axis,
|
||||
DefaultValuedAttr<SI64Attr, "2">:$p);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -2493,7 +2493,7 @@ def ONNXLpPoolOp:ONNX_Op<"LpPool",
|
|||
let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X,
|
||||
DefaultValuedAttr<StrAttr, "NOTSET">:$auto_pad,
|
||||
I64ArrayAttr:$kernel_shape,
|
||||
DefaultValuedAttr<I64Attr, "2">:$p,
|
||||
DefaultValuedAttr<SI64Attr, "2">:$p,
|
||||
OptionalAttr<I64ArrayAttr>:$pads,
|
||||
OptionalAttr<I64ArrayAttr>:$strides);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$Y);
|
||||
|
@ -2616,11 +2616,11 @@ def ONNXMaxPoolOp:ONNX_Op<"MaxPool",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X,
|
||||
DefaultValuedAttr<StrAttr, "NOTSET">:$auto_pad,
|
||||
DefaultValuedAttr<I64Attr, "0">:$ceil_mode,
|
||||
DefaultValuedAttr<SI64Attr, "0">:$ceil_mode,
|
||||
OptionalAttr<I64ArrayAttr>:$dilations,
|
||||
I64ArrayAttr:$kernel_shape,
|
||||
OptionalAttr<I64ArrayAttr>:$pads,
|
||||
DefaultValuedAttr<I64Attr, "0">:$storage_order,
|
||||
DefaultValuedAttr<SI64Attr, "0">:$storage_order,
|
||||
OptionalAttr<I64ArrayAttr>:$strides);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$Y,
|
||||
AnyTypeOf<[TensorOf<[I64]>, AnyMemRef, NoneType]>:$Indices);
|
||||
|
@ -2795,7 +2795,7 @@ def ONNXModOp:ONNX_Op<"Mod",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$A,
|
||||
AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$B,
|
||||
DefaultValuedAttr<I64Attr, "0">:$fmod);
|
||||
DefaultValuedAttr<SI64Attr, "0">:$fmod);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$C);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -2868,8 +2868,8 @@ def ONNXMultinomialOp:ONNX_Op<"Multinomial",
|
|||
"of each of the possible outcomes."
|
||||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input,
|
||||
DefaultValuedAttr<I64Attr, "6">:$dtype,
|
||||
DefaultValuedAttr<I64Attr, "1">:$sample_size,
|
||||
DefaultValuedAttr<SI64Attr, "6">:$dtype,
|
||||
DefaultValuedAttr<SI64Attr, "1">:$sample_size,
|
||||
OptionalAttr<F32Attr>:$seed);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[I32]>, TensorOf<[I64]>, AnyMemRef]>:$output);
|
||||
let extraClassDeclaration = [{
|
||||
|
@ -2937,7 +2937,7 @@ def ONNXNonMaxSuppressionOp:ONNX_Op<"NonMaxSuppression",
|
|||
AnyTypeOf<[TensorOf<[I64]>, NoneType]>:$max_output_boxes_per_class,
|
||||
AnyTypeOf<[TensorOf<[F32]>, NoneType]>:$iou_threshold,
|
||||
AnyTypeOf<[TensorOf<[F32]>, NoneType]>:$score_threshold,
|
||||
DefaultValuedAttr<I64Attr, "0">:$center_point_box);
|
||||
DefaultValuedAttr<SI64Attr, "0">:$center_point_box);
|
||||
let results = (outs TensorOf<[I64]>:$selected_indices);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -3024,7 +3024,7 @@ def ONNXOneHotOp:ONNX_Op<"OneHot",
|
|||
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$indices,
|
||||
AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$depth,
|
||||
AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$values,
|
||||
DefaultValuedAttr<I64Attr, "-1">:$axis);
|
||||
DefaultValuedAttr<SI64Attr, "-1">:$axis);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$output);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -3306,7 +3306,7 @@ def ONNXQLinearConvOp:ONNX_Op<"QLinearConv",
|
|||
AnyTypeOf<[TensorOf<[I32]>, AnyMemRef, NoneType]>:$B,
|
||||
DefaultValuedAttr<StrAttr, "NOTSET">:$auto_pad,
|
||||
OptionalAttr<I64ArrayAttr>:$dilations,
|
||||
DefaultValuedAttr<I64Attr, "1">:$group,
|
||||
DefaultValuedAttr<SI64Attr, "1">:$group,
|
||||
OptionalAttr<I64ArrayAttr>:$kernel_shape,
|
||||
OptionalAttr<I64ArrayAttr>:$pads,
|
||||
OptionalAttr<I64ArrayAttr>:$strides);
|
||||
|
@ -3461,7 +3461,7 @@ def ONNXRNNOp:ONNX_Op<"RNN",
|
|||
DefaultValuedAttr<StrArrayAttr, "{\"Tanh\", \"Tanh\"}">:$activations,
|
||||
OptionalAttr<F32Attr>:$clip,
|
||||
DefaultValuedAttr<StrAttr, "forward">:$direction,
|
||||
OptionalAttr<I64Attr>:$hidden_size);
|
||||
OptionalAttr<SI64Attr>:$hidden_size);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType, NoneType, NoneType]>:$Y,
|
||||
AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType, NoneType, NoneType, NoneType]>:$Y_h);
|
||||
let extraClassDeclaration = [{
|
||||
|
@ -3489,7 +3489,7 @@ def ONNXRandomNormalOp:ONNX_Op<"RandomNormal",
|
|||
"be one of the data types specified in the 'DataType' enum field in the"
|
||||
"TensorProto message."
|
||||
}];
|
||||
let arguments = (ins DefaultValuedAttr<I64Attr, "1">:$dtype,
|
||||
let arguments = (ins DefaultValuedAttr<SI64Attr, "1">:$dtype,
|
||||
DefaultValuedAttr<F32Attr, "0.0">:$mean,
|
||||
DefaultValuedAttr<F32Attr, "1.0">:$scale,
|
||||
OptionalAttr<F32Attr>:$seed,
|
||||
|
@ -3521,7 +3521,7 @@ def ONNXRandomNormalLikeOp:ONNX_Op<"RandomNormalLike",
|
|||
"TensorProto message, and be valid as an output type."
|
||||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$input,
|
||||
OptionalAttr<I64Attr>:$dtype,
|
||||
OptionalAttr<SI64Attr>:$dtype,
|
||||
DefaultValuedAttr<F32Attr, "0.0">:$mean,
|
||||
DefaultValuedAttr<F32Attr, "1.0">:$scale,
|
||||
OptionalAttr<F32Attr>:$seed);
|
||||
|
@ -3550,7 +3550,7 @@ def ONNXRandomUniformOp:ONNX_Op<"RandomUniform",
|
|||
"be one of the data types specified in the 'DataType' enum field in the"
|
||||
"TensorProto message."
|
||||
}];
|
||||
let arguments = (ins DefaultValuedAttr<I64Attr, "1">:$dtype,
|
||||
let arguments = (ins DefaultValuedAttr<SI64Attr, "1">:$dtype,
|
||||
DefaultValuedAttr<F32Attr, "1.0">:$high,
|
||||
DefaultValuedAttr<F32Attr, "0.0">:$low,
|
||||
OptionalAttr<F32Attr>:$seed,
|
||||
|
@ -3582,7 +3582,7 @@ def ONNXRandomUniformLikeOp:ONNX_Op<"RandomUniformLike",
|
|||
"TensorProto message and be valid as an output type."
|
||||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$input,
|
||||
OptionalAttr<I64Attr>:$dtype,
|
||||
OptionalAttr<SI64Attr>:$dtype,
|
||||
DefaultValuedAttr<F32Attr, "1.0">:$high,
|
||||
DefaultValuedAttr<F32Attr, "0.0">:$low,
|
||||
OptionalAttr<F32Attr>:$seed);
|
||||
|
@ -3683,7 +3683,7 @@ def ONNXReduceL1Op:ONNX_Op<"ReduceL1",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$data,
|
||||
OptionalAttr<I64ArrayAttr>:$axes,
|
||||
DefaultValuedAttr<I64Attr, "1">:$keepdims);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$keepdims);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$reduced);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -3711,7 +3711,7 @@ def ONNXReduceL2Op:ONNX_Op<"ReduceL2",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$data,
|
||||
OptionalAttr<I64ArrayAttr>:$axes,
|
||||
DefaultValuedAttr<I64Attr, "1">:$keepdims);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$keepdims);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$reduced);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -3739,7 +3739,7 @@ def ONNXReduceLogSumOp:ONNX_Op<"ReduceLogSum",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$data,
|
||||
OptionalAttr<I64ArrayAttr>:$axes,
|
||||
DefaultValuedAttr<I64Attr, "1">:$keepdims);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$keepdims);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$reduced);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -3767,7 +3767,7 @@ def ONNXReduceLogSumExpOp:ONNX_Op<"ReduceLogSumExp",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$data,
|
||||
OptionalAttr<I64ArrayAttr>:$axes,
|
||||
DefaultValuedAttr<I64Attr, "1">:$keepdims);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$keepdims);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$reduced);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -3795,7 +3795,7 @@ def ONNXReduceMaxOp:ONNX_Op<"ReduceMax",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$data,
|
||||
OptionalAttr<I64ArrayAttr>:$axes,
|
||||
DefaultValuedAttr<I64Attr, "1">:$keepdims);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$keepdims);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$reduced);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -3823,7 +3823,7 @@ def ONNXReduceMeanOp:ONNX_Op<"ReduceMean",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$data,
|
||||
OptionalAttr<I64ArrayAttr>:$axes,
|
||||
DefaultValuedAttr<I64Attr, "1">:$keepdims);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$keepdims);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$reduced);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -3851,7 +3851,7 @@ def ONNXReduceMinOp:ONNX_Op<"ReduceMin",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$data,
|
||||
OptionalAttr<I64ArrayAttr>:$axes,
|
||||
DefaultValuedAttr<I64Attr, "1">:$keepdims);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$keepdims);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$reduced);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -3879,7 +3879,7 @@ def ONNXReduceProdOp:ONNX_Op<"ReduceProd",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$data,
|
||||
OptionalAttr<I64ArrayAttr>:$axes,
|
||||
DefaultValuedAttr<I64Attr, "1">:$keepdims);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$keepdims);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$reduced);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -3907,7 +3907,7 @@ def ONNXReduceSumOp:ONNX_Op<"ReduceSum",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$data,
|
||||
OptionalAttr<I64ArrayAttr>:$axes,
|
||||
DefaultValuedAttr<I64Attr, "1">:$keepdims);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$keepdims);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$reduced);
|
||||
let builders = [
|
||||
OpBuilder<"OpBuilder &builder, OperationState &state, Value data, ArrayAttr axes, IntegerAttr keepdims", [{
|
||||
|
@ -3947,7 +3947,7 @@ def ONNXReduceSumSquareOp:ONNX_Op<"ReduceSumSquare",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$data,
|
||||
OptionalAttr<I64ArrayAttr>:$axes,
|
||||
DefaultValuedAttr<I64Attr, "1">:$keepdims);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$keepdims);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$reduced);
|
||||
let builders = [
|
||||
OpBuilder<"OpBuilder &builder, OperationState &state, Value data, ArrayAttr axes, IntegerAttr keepdims", [{
|
||||
|
@ -4041,7 +4041,7 @@ def ONNXResizeOp:ONNX_Op<"Resize",
|
|||
AnyTypeOf<[TensorOf<[I64]>, NoneType]>:$sizes,
|
||||
DefaultValuedAttr<StrAttr, "half_pixel">:$coordinate_transformation_mode,
|
||||
DefaultValuedAttr<F32Attr, "-0.75">:$cubic_coeff_a,
|
||||
DefaultValuedAttr<I64Attr, "0">:$exclude_outside,
|
||||
DefaultValuedAttr<SI64Attr, "0">:$exclude_outside,
|
||||
DefaultValuedAttr<F32Attr, "0.0">:$extrapolation_value,
|
||||
DefaultValuedAttr<StrAttr, "nearest">:$mode,
|
||||
DefaultValuedAttr<StrAttr, "round_prefer_floor">:$nearest_mode);
|
||||
|
@ -4099,8 +4099,8 @@ def ONNXReverseSequenceOp:ONNX_Op<"ReverseSequence",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$input,
|
||||
TensorOf<[I64]>:$sequence_lens,
|
||||
DefaultValuedAttr<I64Attr, "1">:$batch_axis,
|
||||
DefaultValuedAttr<I64Attr, "0">:$time_axis);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$batch_axis,
|
||||
DefaultValuedAttr<SI64Attr, "0">:$time_axis);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$Y);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -4135,9 +4135,9 @@ def ONNXRoiAlignOp:ONNX_Op<"RoiAlign",
|
|||
AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$rois,
|
||||
AnyTypeOf<[TensorOf<[I64]>, AnyMemRef]>:$batch_indices,
|
||||
DefaultValuedAttr<StrAttr, "avg">:$mode,
|
||||
DefaultValuedAttr<I64Attr, "1">:$output_height,
|
||||
DefaultValuedAttr<I64Attr, "1">:$output_width,
|
||||
DefaultValuedAttr<I64Attr, "0">:$sampling_ratio,
|
||||
DefaultValuedAttr<SI64Attr, "1">:$output_height,
|
||||
DefaultValuedAttr<SI64Attr, "1">:$output_width,
|
||||
DefaultValuedAttr<SI64Attr, "0">:$sampling_ratio,
|
||||
DefaultValuedAttr<F32Attr, "1.0">:$spatial_scale);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$Y);
|
||||
let extraClassDeclaration = [{
|
||||
|
@ -4314,7 +4314,7 @@ def ONNXScanOp:ONNX_Op<"Scan",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$initial_state_and_scan_inputs,
|
||||
AnyAttr:$body,
|
||||
I64Attr:$num_scan_inputs,
|
||||
SI64Attr:$num_scan_inputs,
|
||||
OptionalAttr<I64ArrayAttr>:$scan_input_axes,
|
||||
OptionalAttr<I64ArrayAttr>:$scan_input_directions,
|
||||
OptionalAttr<I64ArrayAttr>:$scan_output_axes,
|
||||
|
@ -4394,7 +4394,7 @@ def ONNXScatterOp:ONNX_Op<"Scatter",
|
|||
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$data,
|
||||
AnyTypeOf<[TensorOf<[I32]>, TensorOf<[I64]>, AnyMemRef]>:$indices,
|
||||
AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$updates,
|
||||
DefaultValuedAttr<I64Attr, "0">:$axis);
|
||||
DefaultValuedAttr<SI64Attr, "0">:$axis);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$output);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -4468,7 +4468,7 @@ def ONNXScatterElementsOp:ONNX_Op<"ScatterElements",
|
|||
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$data,
|
||||
AnyTypeOf<[TensorOf<[I32]>, TensorOf<[I64]>, AnyMemRef]>:$indices,
|
||||
AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$updates,
|
||||
DefaultValuedAttr<I64Attr, "0">:$axis);
|
||||
DefaultValuedAttr<SI64Attr, "0">:$axis);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$output);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -4639,7 +4639,7 @@ def ONNXSequenceEmptyOp:ONNX_Op<"SequenceEmpty",
|
|||
let description = [{
|
||||
"Construct an empty tensor sequence, with given data type."
|
||||
}];
|
||||
let arguments = (ins OptionalAttr<I64Attr>:$dtype);
|
||||
let arguments = (ins OptionalAttr<SI64Attr>:$dtype);
|
||||
let results = (outs AnyTypeOf<[SeqOf<[TensorOf<[UI8]>]>, SeqOf<[TensorOf<[UI16]>]>, SeqOf<[TensorOf<[UI32]>]>, SeqOf<[TensorOf<[UI64]>]>, SeqOf<[TensorOf<[I8]>]>, SeqOf<[TensorOf<[I16]>]>, SeqOf<[TensorOf<[I32]>]>, SeqOf<[TensorOf<[I64]>]>, SeqOf<[TensorOf<[F16]>]>, SeqOf<[TensorOf<[F32]>]>, SeqOf<[TensorOf<[F64]>]>, SeqOf<[TensorOf<[StringType]>]>, SeqOf<[TensorOf<[I1]>]>, SeqOf<[TensorOf<[Complex<F32>]>]>, SeqOf<[TensorOf<[Complex<F64>]>]>, AnyMemRef]>:$output);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -4962,7 +4962,7 @@ def ONNXSoftmaxOp:ONNX_Op<"Softmax",
|
|||
"and contains the softmax values of the corresponding input."
|
||||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input,
|
||||
DefaultValuedAttr<I64Attr, "1">:$axis);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$axis);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -5030,7 +5030,7 @@ def ONNXSpaceToDepthOp:ONNX_Op<"SpaceToDepth",
|
|||
"are moved to the depth dimension."
|
||||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$input,
|
||||
I64Attr:$blocksize);
|
||||
SI64Attr:$blocksize);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$output);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -5054,7 +5054,7 @@ def ONNXSplitOp:ONNX_Op<"Split",
|
|||
"Otherwise, the tensor is split to equal sized parts."
|
||||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$input,
|
||||
DefaultValuedAttr<I64Attr, "0">:$axis,
|
||||
DefaultValuedAttr<SI64Attr, "0">:$axis,
|
||||
OptionalAttr<I64ArrayAttr>:$split);
|
||||
let results = (outs Variadic<AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>>:$outputs);
|
||||
let extraClassDeclaration = [{
|
||||
|
@ -5087,8 +5087,8 @@ def ONNXSplitToSequenceOp:ONNX_Op<"SplitToSequence",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$input,
|
||||
AnyTypeOf<[TensorOf<[I32]>, TensorOf<[I64]>, AnyMemRef, NoneType]>:$split,
|
||||
DefaultValuedAttr<I64Attr, "0">:$axis,
|
||||
DefaultValuedAttr<I64Attr, "1">:$keepdims);
|
||||
DefaultValuedAttr<SI64Attr, "0">:$axis,
|
||||
DefaultValuedAttr<SI64Attr, "1">:$keepdims);
|
||||
let results = (outs AnyTypeOf<[SeqOf<[TensorOf<[UI8]>]>, SeqOf<[TensorOf<[UI16]>]>, SeqOf<[TensorOf<[UI32]>]>, SeqOf<[TensorOf<[UI64]>]>, SeqOf<[TensorOf<[I8]>]>, SeqOf<[TensorOf<[I16]>]>, SeqOf<[TensorOf<[I32]>]>, SeqOf<[TensorOf<[I64]>]>, SeqOf<[TensorOf<[F16]>]>, SeqOf<[TensorOf<[F32]>]>, SeqOf<[TensorOf<[F64]>]>, SeqOf<[TensorOf<[StringType]>]>, SeqOf<[TensorOf<[I1]>]>, SeqOf<[TensorOf<[Complex<F32>]>]>, SeqOf<[TensorOf<[Complex<F64>]>]>, AnyMemRef]>:$output_sequence);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -5179,7 +5179,7 @@ def ONNXStringNormalizerOp:ONNX_Op<"StringNormalizer",
|
|||
}];
|
||||
let arguments = (ins TensorOf<[StringType]>:$X,
|
||||
DefaultValuedAttr<StrAttr, "NONE">:$case_change_action,
|
||||
DefaultValuedAttr<I64Attr, "0">:$is_case_sensitive,
|
||||
DefaultValuedAttr<SI64Attr, "0">:$is_case_sensitive,
|
||||
OptionalAttr<StrAttr>:$locale,
|
||||
OptionalAttr<StrArrayAttr>:$stopwords);
|
||||
let results = (outs TensorOf<[StringType]>:$Y);
|
||||
|
@ -5344,9 +5344,9 @@ def ONNXTfIdfVectorizerOp:ONNX_Op<"TfIdfVectorizer",
|
|||
"If pool_strings is set, the input must be a string tensor."
|
||||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[StringType]>, TensorOf<[I32]>, TensorOf<[I64]>, AnyMemRef]>:$X,
|
||||
I64Attr:$max_gram_length,
|
||||
I64Attr:$max_skip_count,
|
||||
I64Attr:$min_gram_length,
|
||||
SI64Attr:$max_gram_length,
|
||||
SI64Attr:$max_skip_count,
|
||||
SI64Attr:$min_gram_length,
|
||||
StrAttr:$mode,
|
||||
I64ArrayAttr:$ngram_counts,
|
||||
I64ArrayAttr:$ngram_indexes,
|
||||
|
@ -5439,9 +5439,9 @@ def ONNXTopKOp:ONNX_Op<"TopK",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X,
|
||||
TensorOf<[I64]>:$K,
|
||||
DefaultValuedAttr<I64Attr, "-1">:$axis,
|
||||
DefaultValuedAttr<I64Attr, "1">:$largest,
|
||||
DefaultValuedAttr<I64Attr, "1">:$sorted);
|
||||
DefaultValuedAttr<SI64Attr, "-1">:$axis,
|
||||
DefaultValuedAttr<SI64Attr, "1">:$largest,
|
||||
DefaultValuedAttr<SI64Attr, "1">:$sorted);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$Values,
|
||||
AnyTypeOf<[TensorOf<[I64]>, AnyMemRef]>:$Indices);
|
||||
let extraClassDeclaration = [{
|
||||
|
@ -5563,8 +5563,8 @@ def ONNXUniqueOp:ONNX_Op<"Unique",
|
|||
" output_counts = [2 1 1]"
|
||||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$X,
|
||||
OptionalAttr<I64Attr>:$axis,
|
||||
DefaultValuedAttr<I64Attr, "1">:$sorted);
|
||||
OptionalAttr<SI64Attr>:$axis,
|
||||
DefaultValuedAttr<SI64Attr, "1">:$sorted);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$Y,
|
||||
AnyTypeOf<[TensorOf<[I64]>, NoneType]>:$indices,
|
||||
AnyTypeOf<[TensorOf<[I64]>, NoneType]>:$inverse_indices,
|
||||
|
@ -5785,7 +5785,7 @@ def ONNXCastMapOp:ONNX_Op<"CastMap",
|
|||
let arguments = (ins AnyTypeOf<[TupleOf<[I64, StringType]>, TupleOf<[I64, F32]>, AnyMemRef]>:$X,
|
||||
DefaultValuedAttr<StrAttr, "TO_FLOAT">:$cast_to,
|
||||
DefaultValuedAttr<StrAttr, "DENSE">:$map_form,
|
||||
DefaultValuedAttr<I64Attr, "1">:$max_map);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$max_map);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[StringType]>, TensorOf<[F32]>, TensorOf<[I64]>, AnyMemRef]>:$Y);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -5816,7 +5816,7 @@ def ONNXCategoryMapperOp:ONNX_Op<"CategoryMapper",
|
|||
let arguments = (ins AnyTypeOf<[TensorOf<[StringType]>, TensorOf<[I64]>, AnyMemRef]>:$X,
|
||||
OptionalAttr<I64ArrayAttr>:$cats_int64s,
|
||||
OptionalAttr<StrArrayAttr>:$cats_strings,
|
||||
DefaultValuedAttr<I64Attr, "-1">:$default_int64,
|
||||
DefaultValuedAttr<SI64Attr, "-1">:$default_int64,
|
||||
DefaultValuedAttr<StrAttr, "_Unused">:$default_string);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[StringType]>, TensorOf<[I64]>, AnyMemRef]>:$Y);
|
||||
let extraClassDeclaration = [{
|
||||
|
@ -5908,7 +5908,7 @@ def ONNXImputerOp:ONNX_Op<"Imputer",
|
|||
OptionalAttr<F32ArrayAttr>:$imputed_value_floats,
|
||||
OptionalAttr<I64ArrayAttr>:$imputed_value_int64s,
|
||||
DefaultValuedAttr<F32Attr, "0.0">:$replaced_value_float,
|
||||
DefaultValuedAttr<I64Attr, "0">:$replaced_value_int64);
|
||||
DefaultValuedAttr<SI64Attr, "0">:$replaced_value_int64);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I64]>, TensorOf<[I32]>, AnyMemRef]>:$Y);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -5947,7 +5947,7 @@ def ONNXLabelEncoderOp:ONNX_Op<"LabelEncoder",
|
|||
}];
|
||||
let arguments = (ins AnyTypeOf<[TensorOf<[StringType]>, TensorOf<[I64]>, TensorOf<[F32]>, AnyMemRef]>:$X,
|
||||
DefaultValuedAttr<F32Attr, "-0.0">:$default_float,
|
||||
DefaultValuedAttr<I64Attr, "-1">:$default_int64,
|
||||
DefaultValuedAttr<SI64Attr, "-1">:$default_int64,
|
||||
DefaultValuedAttr<StrAttr, "_Unused">:$default_string,
|
||||
OptionalAttr<F32ArrayAttr>:$keys_floats,
|
||||
OptionalAttr<I64ArrayAttr>:$keys_int64s,
|
||||
|
@ -5980,7 +5980,7 @@ def ONNXLinearClassifierOp:ONNX_Op<"LinearClassifier",
|
|||
OptionalAttr<StrArrayAttr>:$classlabels_strings,
|
||||
F32ArrayAttr:$coefficients,
|
||||
OptionalAttr<F32ArrayAttr>:$intercepts,
|
||||
DefaultValuedAttr<I64Attr, "0">:$multi_class,
|
||||
DefaultValuedAttr<SI64Attr, "0">:$multi_class,
|
||||
DefaultValuedAttr<StrAttr, "NONE">:$post_transform);
|
||||
let results = (outs AnyTypeOf<[TensorOf<[StringType]>, TensorOf<[I64]>, AnyMemRef]>:$Y,
|
||||
TensorOf<[F32]>:$Z);
|
||||
|
@ -6012,7 +6012,7 @@ def ONNXLinearRegressorOp:ONNX_Op<"LinearRegressor",
|
|||
OptionalAttr<F32ArrayAttr>:$coefficients,
|
||||
OptionalAttr<F32ArrayAttr>:$intercepts,
|
||||
DefaultValuedAttr<StrAttr, "NONE">:$post_transform,
|
||||
DefaultValuedAttr<I64Attr, "1">:$targets);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$targets);
|
||||
let results = (outs TensorOf<[F32]>:$Y);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -6074,7 +6074,7 @@ def ONNXOneHotEncoderOp:ONNX_Op<"OneHotEncoder",
|
|||
let arguments = (ins AnyTypeOf<[TensorOf<[StringType]>, TensorOf<[I64]>, TensorOf<[I32]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X,
|
||||
OptionalAttr<I64ArrayAttr>:$cats_int64s,
|
||||
OptionalAttr<StrArrayAttr>:$cats_strings,
|
||||
DefaultValuedAttr<I64Attr, "1">:$zeros);
|
||||
DefaultValuedAttr<SI64Attr, "1">:$zeros);
|
||||
let results = (outs TensorOf<[F32]>:$Y);
|
||||
let extraClassDeclaration = [{
|
||||
static int getNumberOfOperands() {
|
||||
|
@ -6132,8 +6132,8 @@ def ONNXSVMRegressorOp:ONNX_Op<"SVMRegressor",
|
|||
OptionalAttr<F32ArrayAttr>:$coefficients,
|
||||
OptionalAttr<F32ArrayAttr>:$kernel_params,
|
||||
DefaultValuedAttr<StrAttr, "LINEAR">:$kernel_type,
|
||||
DefaultValuedAttr<I64Attr, "0">:$n_supports,
|
||||
DefaultValuedAttr<I64Attr, "0">:$one_class,
|
||||
DefaultValuedAttr<SI64Attr, "0">:$n_supports,
|
||||
DefaultValuedAttr<SI64Attr, "0">:$one_class,
|
||||
DefaultValuedAttr<StrAttr, "NONE">:$post_transform,
|
||||
OptionalAttr<F32ArrayAttr>:$rho,
|
||||
OptionalAttr<F32ArrayAttr>:$support_vectors);
|
||||
|
@ -6239,7 +6239,7 @@ def ONNXTreeEnsembleRegressorOp:ONNX_Op<"TreeEnsembleRegressor",
|
|||
let arguments = (ins AnyTypeOf<[TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I64]>, TensorOf<[I32]>, AnyMemRef]>:$X,
|
||||
DefaultValuedAttr<StrAttr, "SUM">:$aggregate_function,
|
||||
OptionalAttr<F32ArrayAttr>:$base_values,
|
||||
OptionalAttr<I64Attr>:$n_targets,
|
||||
OptionalAttr<SI64Attr>:$n_targets,
|
||||
OptionalAttr<I64ArrayAttr>:$nodes_falsenodeids,
|
||||
OptionalAttr<I64ArrayAttr>:$nodes_featureids,
|
||||
OptionalAttr<F32ArrayAttr>:$nodes_hitrates,
@ -68,6 +68,8 @@ int main(int argc, char **argv) {
registry.insert<mlir::StandardOpsDialect>();
registry.insert<mlir::vector::VectorDialect>();
registry.insert<mlir::shape::ShapeDialect>();
registry.insert<mlir::ONNXOpsDialect>();
registry.insert<mlir::KrnlOpsDialect>();

registerTransformsPasses();
registerAffinePasses();
@ -77,8 +79,6 @@ int main(int argc, char **argv) {

llvm::InitLLVM y(argc, argv);

mlir::registerDialect<mlir::ONNXOpsDialect>();
mlir::registerDialect<mlir::KrnlOpsDialect>();
initOMPasses();
initMLIRPasses();
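The driver hunks above drop the global `mlir::registerDialect<...>()` calls in favor of inserting every dialect the tool needs into an explicit registry object. A minimal sketch of that pattern, assuming a `mlir::DialectRegistry` that the tool later hands to MLIR (the helper name and the header location are assumptions, not part of the patch):

```cpp
#include "mlir/IR/Dialect.h"

// Sketch of the registration style used above. The dialect headers and the
// surrounding main() are omitted; the helper name is illustrative only.
void registerNeededDialects(mlir::DialectRegistry &registry) {
  registry.insert<mlir::StandardOpsDialect>();
  registry.insert<mlir::shape::ShapeDialect>();
  registry.insert<mlir::ONNXOpsDialect>();
  registry.insert<mlir::KrnlOpsDialect>();
}
```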
@ -34,7 +34,7 @@ class HasRankOf<int rank> : Constraint<CPred<"$0.getType().isa<ShapedType>() &&
def HasNoneType : Constraint<CPred<"$0.getType().isa<NoneType>()">>;
def HasSameElementType : Constraint<
CPred<"($0.getType().dyn_cast<ShapedType>().getElementType() == "
"convertONNXTypeToMLIRType($_builder, static_cast<onnx::TensorProto_DataType>($1.cast<::mlir::IntegerAttr>().getInt())))">,
"convertONNXTypeToMLIRType($_builder, static_cast<onnx::TensorProto_DataType>($1.cast<::mlir::IntegerAttr>().getSInt())))">,
"has same element type">;

//===----------------------------------------------------------------------===//
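The constraint now reads the attribute value with `getSInt()` instead of `getInt()`, since the attribute carries an explicitly signed type and `IntegerAttr` provides per-signedness accessors. A small sketch of the same read in plain C++ (the helper is illustrative only):

```cpp
#include "mlir/IR/Attributes.h"

// Illustrative only: read the value of an explicitly signed integer attribute.
// Signless attributes are read with getInt(); signed ones with getSInt().
int64_t readSignedIntegerAttr(mlir::Attribute attr) {
  return attr.cast<mlir::IntegerAttr>().getSInt();
}
```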
@ -43,8 +43,8 @@ def HasSameElementType : Constraint<

def GemmAlpha : NativeCodeCall<"$_builder.getF32FloatAttr(1.0)">;
def GemmBeta : NativeCodeCall<"$_builder.getF32FloatAttr(1.0)">;
def GemmTransA : NativeCodeCall<"$_builder.getI64IntegerAttr(0)">;
def GemmTransB : NativeCodeCall<"$_builder.getI64IntegerAttr(0)">;
def GemmTransA : NativeCodeCall<"IntegerAttr::get($_builder.getIntegerType(64, /*isSigned=*/true), APInt(64, 0, /*isSigned=*/true))">;
def GemmTransB : NativeCodeCall<"IntegerAttr::get($_builder.getIntegerType(64, /*isSigned=*/true), APInt(64, 0, /*isSigned=*/true))">;

// onnx.add(onnx.matmul(%X, %Y), %Z) = onnx.Gemm(%X, %Y, %Z)
def MulAddToGemmOptPattern : Pat<(ONNXAddOp (ONNXMatMulOp:$res $m1, $m2), $m3),
@ -72,7 +72,7 @@ def GetNullAttr :
def createDenseArrayAttr:
NativeCodeCall<"createDenseArrayAttr($_builder, $0)">;

def ScalerT : NativeCodeCall<"$_builder.getI64IntegerAttr(1)">;
def ScalerT : NativeCodeCall<"IntegerAttr::get($_builder.getIntegerType(64, /*isSigned=*/true), APInt(64, 1, /*isSigned=*/true))">;

// No attribute
def ScalerNullPattern : Pat<
@ -4,7 +4,7 @@

// CHECK-LABEL: func @test_matmul_add_fused(%{{.*}}: tensor<10x10xf32>, %{{.*}}: tensor<10x10xf32>, %{{.*}}: tensor<10x10xf32>) -> tensor<10x10xf32> {
func @test_matmul_add_fused(%a0: tensor<10x10xf32>, %a1: tensor<10x10xf32>, %a2: tensor<10x10xf32>) -> tensor<10x10xf32> {
// CHECK-NEXT: %{{[0-9]+}} = "onnx.Gemm"(%{{.*}}, %{{.*}}, %{{.*}}) {alpha = 1.000000e+00 : f32, beta = 1.000000e+00 : f32, transA = 0 : i64, transB = 0 : i64} : (tensor<10x10xf32>, tensor<10x10xf32>, tensor<10x10xf32>) -> tensor<10x10xf32>
// CHECK-NEXT: %{{[0-9]+}} = "onnx.Gemm"(%{{.*}}, %{{.*}}, %{{.*}}) {alpha = 1.000000e+00 : f32, beta = 1.000000e+00 : f32, transA = 0 : si64, transB = 0 : si64} : (tensor<10x10xf32>, tensor<10x10xf32>, tensor<10x10xf32>) -> tensor<10x10xf32>
%0 = "onnx.MatMul"(%a0, %a1) : (tensor<10x10xf32>, tensor<10x10xf32>) -> tensor<10x10xf32>
%1 = "onnx.Add"(%0, %a2) : (tensor<10x10xf32>, tensor<10x10xf32>) -> tensor<10x10xf32>
"std.return"(%1) : (tensor<10x10xf32>) -> ()
@ -60,14 +60,14 @@ func @test_constant_pad(%arg0 : tensor<?x?xf32>) -> tensor<*xf32> {
// CHECK-LABEL: @test_conv_split(%{{.*}}: tensor<1x9x32x64xf32>, %{{.*}}: tensor<5x9x6x7xf32>) -> tensor<*xf32> {
func @test_conv_split(%arg0 : tensor<1x9x32x64xf32>, %arg1 : tensor<5x9x6x7xf32>) -> tensor<*xf32> {
%cst = constant unit
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : i64, pads = [2, 3, 4, 5]} : (tensor<1x9x32x64xf32>, tensor<5x9x6x7xf32>, none) -> tensor<*xf32>
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : si64, pads = [2, 3, 4, 5]} : (tensor<1x9x32x64xf32>, tensor<5x9x6x7xf32>, none) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-NEXT: %cst = constant unit
// CHECK-NEXT: %0 = "onnx.Constant"() {value = dense<[0, 0, 2, 3, 0, 0, 4, 5]> : tensor<8xi64>} : () -> tensor<8xi64>
// CHECK-NEXT: %1 = "onnx.Constant"() {value = dense<0.000000e+00> : tensor<1xf32>} : () -> tensor<1xf32>
// CHECK-NEXT: %2 = "onnx.Pad"(%arg0, %0, %1) {mode = "constant"} : (tensor<1x9x32x64xf32>, tensor<8xi64>, tensor<1xf32>) -> tensor<*xf32>
// CHECK-NEXT: %3 = "onnx.Conv"(%2, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : i64, pads = [0, 0, 0, 0]} : (tensor<*xf32>, tensor<5x9x6x7xf32>, none) -> tensor<*xf32>
// CHECK-NEXT: %3 = "onnx.Conv"(%2, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : si64, pads = [0, 0, 0, 0]} : (tensor<*xf32>, tensor<5x9x6x7xf32>, none) -> tensor<*xf32>
// CHECK-NEXT: return %3 : tensor<*xf32>
}
@ -80,7 +80,7 @@ func @test_gemm_add_fusion(%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32
%1 = "onnx.Add"(%0, %arg2) : (tensor<*xf32>, tensor<128xf32>) -> tensor<*xf32>
return %1 : tensor<*xf32>

// CHECK-NEXT: [[GEMM:%.+]] = "onnx.Gemm"(%{{.*}}, %{{.*}}, %{{.*}}) {alpha = 1.000000e+00 : f32, beta = 1.000000e+00 : f32, transA = 0 : i64, transB = 0 : i64} : (tensor<128x128xf32>, tensor<128x128xf32>, tensor<128xf32>) -> tensor<*xf32>
// CHECK-NEXT: [[GEMM:%.+]] = "onnx.Gemm"(%{{.*}}, %{{.*}}, %{{.*}}) {alpha = 1.000000e+00 : f32, beta = 1.000000e+00 : f32, transA = 0 : si64, transB = 0 : si64} : (tensor<128x128xf32>, tensor<128x128xf32>, tensor<128xf32>) -> tensor<*xf32>
// return [[GEMM]] : tensor<*xf32>
}
@ -93,7 +93,7 @@ func @test_gemm_add_fusion_rank3(%arg0: tensor<128x128x256xf32>, %arg1: tensor<1
%1 = "onnx.Add"(%0, %arg2) : (tensor<*xf32>, tensor<256xf32>) -> tensor<*xf32>
return %1 : tensor<*xf32>

// CHECK-NEXT: [[GEMM:%.+]] = "onnx.Gemm"(%{{.*}}, %{{.*}}, %{{.*}}) {alpha = 1.000000e+00 : f32, beta = 1.000000e+00 : f32, transA = 0 : i64, transB = 0 : i64} : (tensor<128x128x256xf32>, tensor<128x128x256xf32>, tensor<256xf32>) -> tensor<*xf32>
// CHECK-NEXT: [[GEMM:%.+]] = "onnx.Gemm"(%{{.*}}, %{{.*}}, %{{.*}}) {alpha = 1.000000e+00 : f32, beta = 1.000000e+00 : f32, transA = 0 : si64, transB = 0 : si64} : (tensor<128x128x256xf32>, tensor<128x128x256xf32>, tensor<256xf32>) -> tensor<*xf32>
// return [[GEMM]] : tensor<*xf32>
}
@ -101,7 +101,7 @@ func @test_gemm_add_fusion_rank3(%arg0: tensor<128x128x256xf32>, %arg1: tensor<1

//CHECK-LABEL: @cast_elimination(%{{.*}}: tensor<2xf32>) -> tensor<2xf32> {
func @cast_elimination(%arg0: tensor<2xf32>) -> tensor<2xf32> {
%0 = "onnx.Cast"(%arg0) {to = 1 : i64} : (tensor<2xf32>) -> tensor<2xf32>
%0 = "onnx.Cast"(%arg0) {to = 1 : si64} : (tensor<2xf32>) -> tensor<2xf32>
return %0 : tensor<2xf32>

// CHECK-NEXT: return %arg0 : tensor<2xf32>
@ -112,7 +112,7 @@ func @cast_elimination(%arg0: tensor<2xf32>) -> tensor<2xf32> {
|
|||
func @test_conv_batchnormtestmode_fusion_nobias(%arg0 : tensor<1x3x224x224xf32>) -> tensor<1x64x112x112xf32> {
|
||||
%cst = constant unit
|
||||
%0 = "onnx.Constant"() : () -> tensor<64x3x7x7xf32>
|
||||
%1 = "onnx.Conv"(%arg0, %0, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [7, 7], pads = [3, 3, 3, 3], strides = [2, 2]} : (tensor<1x3x224x224xf32>, tensor<64x3x7x7xf32>, none) -> tensor<1x64x112x112xf32>
|
||||
%1 = "onnx.Conv"(%arg0, %0, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [7, 7], pads = [3, 3, 3, 3], strides = [2, 2]} : (tensor<1x3x224x224xf32>, tensor<64x3x7x7xf32>, none) -> tensor<1x64x112x112xf32>
|
||||
%2 = "onnx.Constant"() : () -> tensor<64xf32>
|
||||
%3 = "onnx.Constant"() : () -> tensor<64xf32>
|
||||
%4 = "onnx.Constant"() : () -> tensor<64xf32>
|
||||
|
@ -142,7 +142,7 @@ func @test_conv_batchnormtestmode_fusion_nobias(%arg0 : tensor<1x3x224x224xf32>)
|
|||
// CHECK: [[PAD_ARG2:%.+]] = "onnx.Constant"() {value = dense<0.000000e+00> : tensor<1xf32>} : () -> tensor<1xf32>
|
||||
// CHECK: [[PADDED_INPUT:%.+]] = "onnx.Pad"(%arg0, [[PAD_ARG1]], [[PAD_ARG2]]) {mode = "constant"} : (tensor<1x3x224x224xf32>, tensor<8xi64>, tensor<1xf32>) -> tensor<*xf32>
|
||||
|
||||
// CHECK: [[RES:%.+]] = "onnx.Conv"([[PADDED_INPUT]], [[NEW_WEIGHT]], [[NEW_BIAS]]) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [7, 7], pads = [0, 0, 0, 0], strides = [2, 2]} : (tensor<*xf32>, tensor<*xf32>, tensor<*xf32>) -> tensor<1x64x112x112xf32>
|
||||
// CHECK: [[RES:%.+]] = "onnx.Conv"([[PADDED_INPUT]], [[NEW_WEIGHT]], [[NEW_BIAS]]) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [7, 7], pads = [0, 0, 0, 0], strides = [2, 2]} : (tensor<*xf32>, tensor<*xf32>, tensor<*xf32>) -> tensor<1x64x112x112xf32>
|
||||
|
||||
// CHECK-NOT: {{.*}} = "onnx.BatchNormalizationTestMode"{{.*}}
|
||||
|
||||
|
@ -154,7 +154,7 @@ func @test_conv_batchnormtestmode_fusion_nobias(%arg0 : tensor<1x3x224x224xf32>)
|
|||
func @test_conv_batchnormtestmode_fusion(%arg0 : tensor<1x3x224x224xf32>, %arg1 : tensor<64xf32>) -> tensor<1x64x112x112xf32> {
|
||||
%cst = constant unit
|
||||
%0 = "onnx.Constant"() : () -> tensor<64x3x7x7xf32>
|
||||
%1 = "onnx.Conv"(%arg0, %0, %arg1) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [7, 7], pads = [3, 3, 3, 3], strides = [2, 2]} : (tensor<1x3x224x224xf32>, tensor<64x3x7x7xf32>, tensor<64xf32>) -> tensor<1x64x112x112xf32>
|
||||
%1 = "onnx.Conv"(%arg0, %0, %arg1) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [7, 7], pads = [3, 3, 3, 3], strides = [2, 2]} : (tensor<1x3x224x224xf32>, tensor<64x3x7x7xf32>, tensor<64xf32>) -> tensor<1x64x112x112xf32>
|
||||
%2 = "onnx.Constant"() : () -> tensor<64xf32>
|
||||
%3 = "onnx.Constant"() : () -> tensor<64xf32>
|
||||
%4 = "onnx.Constant"() : () -> tensor<64xf32>
|
||||
|
@ -184,7 +184,7 @@ func @test_conv_batchnormtestmode_fusion(%arg0 : tensor<1x3x224x224xf32>, %arg1
|
|||
// CHECK: [[PAD_ARG2:%.+]] = "onnx.Constant"() {value = dense<0.000000e+00> : tensor<1xf32>} : () -> tensor<1xf32>
|
||||
// CHECK: [[PADDED_INPUT:%.+]] = "onnx.Pad"(%arg0, [[PAD_ARG1]], [[PAD_ARG2]]) {mode = "constant"} : (tensor<1x3x224x224xf32>, tensor<8xi64>, tensor<1xf32>) -> tensor<*xf32>
|
||||
|
||||
// CHECK: [[RES:%.+]] = "onnx.Conv"([[PADDED_INPUT]], [[NEW_WEIGHT]], [[NEW_BIAS]]) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [7, 7], pads = [0, 0, 0, 0], strides = [2, 2]} : (tensor<*xf32>, tensor<*xf32>, tensor<*xf32>) -> tensor<1x64x112x112xf32>
|
||||
// CHECK: [[RES:%.+]] = "onnx.Conv"([[PADDED_INPUT]], [[NEW_WEIGHT]], [[NEW_BIAS]]) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [7, 7], pads = [0, 0, 0, 0], strides = [2, 2]} : (tensor<*xf32>, tensor<*xf32>, tensor<*xf32>) -> tensor<1x64x112x112xf32>
|
||||
|
||||
// CHECK-NOT: {{.*}} = "onnx.BatchNormalizationTestMode"{{.*}}
|
||||
|
||||
|
|
|
@ -4,22 +4,22 @@
|
|||
|
||||
// CHECK-LABEL: @test_reducel1(%{{.*}}: tensor<?x?x?xf32>) -> tensor<*xf32>
|
||||
func @test_reducel1(%arg0 : tensor<?x?x?xf32>) -> tensor<*xf32> {
|
||||
%0 ="onnx.ReduceL1"(%arg0) {axes=[1], keepdims = 0 : i64} : (tensor<?x?x?xf32>)-> tensor<*xf32>
|
||||
%0 ="onnx.ReduceL1"(%arg0) {axes=[1], keepdims = 0 : si64} : (tensor<?x?x?xf32>)-> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-NEXT: [[ABS:%.+]] = "onnx.Abs"(%arg0) : (tensor<?x?x?xf32>) -> tensor<*xf32>
|
||||
// CHECK-NEXT: %{{[0-9]+}} = "onnx.ReduceSum"([[ABS]]) {axes = [1], keepdims = 0 : i64} : (tensor<*xf32>) -> tensor<*xf32>
|
||||
// CHECK-NEXT: %{{[0-9]+}} = "onnx.ReduceSum"([[ABS]]) {axes = [1], keepdims = 0 : si64} : (tensor<*xf32>) -> tensor<*xf32>
}
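// Worked check (illustrative note, assuming the ONNX-to-ONNX decomposition pass these
// tests exercise): ReduceL1 has no direct lowering; it is rewritten as
//   ReduceL1(x, axes) = ReduceSum(Abs(x), axes)
// which is what the CHECK-NEXT lines above verify. The ReduceL2, ReduceLogSum,
// ReduceLogSumExp and ReduceSumSquare tests below check the analogous decompositions.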
// -----
|
||||
|
||||
// CHECK-LABEL: @test_reducel2(%{{.*}}: tensor<?x?x?xf32>) -> tensor<*xf32>
|
||||
func @test_reducel2(%arg0 : tensor<?x?x?xf32>) -> tensor<*xf32> {
|
||||
%0 ="onnx.ReduceL2"(%arg0) {axes=[1], keepdims = 0 : i64} : (tensor<?x?x?xf32>)-> tensor<*xf32>
|
||||
%0 ="onnx.ReduceL2"(%arg0) {axes=[1], keepdims = 0 : si64} : (tensor<?x?x?xf32>)-> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-NEXT: [[MUL:%.+]] = "onnx.Mul"(%arg0, %arg0) : (tensor<?x?x?xf32>, tensor<?x?x?xf32>) -> tensor<*xf32>
|
||||
// CHECK-NEXT: [[REDUCE_SUM:%.+]] = "onnx.ReduceSum"([[MUL]]) {axes = [1], keepdims = 0 : i64} : (tensor<*xf32>) -> tensor<*xf32>
|
||||
// CHECK-NEXT: [[REDUCE_SUM:%.+]] = "onnx.ReduceSum"([[MUL]]) {axes = [1], keepdims = 0 : si64} : (tensor<*xf32>) -> tensor<*xf32>
|
||||
// CHECK-NEXT: [[SQRT:%.+]] = "onnx.Sqrt"([[REDUCE_SUM]]) : (tensor<*xf32>) -> tensor<*xf32>
|
||||
}
|
||||
|
||||
|
@ -27,10 +27,10 @@ func @test_reducel2(%arg0 : tensor<?x?x?xf32>) -> tensor<*xf32> {
|
|||
|
||||
// CHECK-LABEL: @test_reducelogsum(%{{.*}}: tensor<?x?x?xf32>) -> tensor<*xf32>
|
||||
func @test_reducelogsum(%arg0 : tensor<?x?x?xf32>) -> tensor<*xf32> {
|
||||
%0 ="onnx.ReduceLogSum"(%arg0) {axes=[1], keepdims = 0 : i64} : (tensor<?x?x?xf32>)-> tensor<*xf32>
|
||||
%0 ="onnx.ReduceLogSum"(%arg0) {axes=[1], keepdims = 0 : si64} : (tensor<?x?x?xf32>)-> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-NEXT: [[REDUCE_SUM:%.+]] = "onnx.ReduceSum"(%arg0) {axes = [1], keepdims = 0 : i64} : (tensor<?x?x?xf32>) -> tensor<*xf32>
|
||||
// CHECK-NEXT: [[REDUCE_SUM:%.+]] = "onnx.ReduceSum"(%arg0) {axes = [1], keepdims = 0 : si64} : (tensor<?x?x?xf32>) -> tensor<*xf32>
|
||||
// CHECK-NEXT: [[LOG:%.+]] = "onnx.Log"([[REDUCE_SUM]]) : (tensor<*xf32>) -> tensor<*xf32>
|
||||
}
|
||||
|
||||
|
@ -38,11 +38,11 @@ func @test_reducelogsum(%arg0 : tensor<?x?x?xf32>) -> tensor<*xf32> {
|
|||
|
||||
// CHECK-LABEL: @test_reducelogsumexp(%{{.*}}: tensor<?x?x?xf32>) -> tensor<*xf32>
|
||||
func @test_reducelogsumexp(%arg0 : tensor<?x?x?xf32>) -> tensor<*xf32> {
|
||||
%0 ="onnx.ReduceLogSumExp"(%arg0) {axes=[1], keepdims = 0 : i64} : (tensor<?x?x?xf32>)-> tensor<*xf32>
|
||||
%0 ="onnx.ReduceLogSumExp"(%arg0) {axes=[1], keepdims = 0 : si64} : (tensor<?x?x?xf32>)-> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-NEXT: [[EXP:%.+]] = "onnx.Exp"(%arg0) : (tensor<?x?x?xf32>) -> tensor<*xf32>
|
||||
// CHECK-NEXT: [[REDUCE_SUM:%.+]] = "onnx.ReduceSum"([[EXP]]) {axes = [1], keepdims = 0 : i64} : (tensor<*xf32>) -> tensor<*xf32>
|
||||
// CHECK-NEXT: [[REDUCE_SUM:%.+]] = "onnx.ReduceSum"([[EXP]]) {axes = [1], keepdims = 0 : si64} : (tensor<*xf32>) -> tensor<*xf32>
|
||||
// CHECK-NEXT: [[LOG:%.+]] = "onnx.Log"([[REDUCE_SUM]]) : (tensor<*xf32>) -> tensor<*xf32>
|
||||
}
|
||||
|
||||
|
@ -50,11 +50,11 @@ func @test_reducelogsumexp(%arg0 : tensor<?x?x?xf32>) -> tensor<*xf32> {
|
|||
|
||||
// CHECK-LABEL: @test_reducesumsquare(%{{.*}}: tensor<?x?x?xf32>) -> tensor<*xf32>
|
||||
func @test_reducesumsquare(%arg0 : tensor<?x?x?xf32>) -> tensor<*xf32> {
|
||||
%0 ="onnx.ReduceSumSquare"(%arg0) {axes=[1], keepdims = 0 : i64} : (tensor<?x?x?xf32>)-> tensor<*xf32>
|
||||
%0 ="onnx.ReduceSumSquare"(%arg0) {axes=[1], keepdims = 0 : si64} : (tensor<?x?x?xf32>)-> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-NEXT: [[SQUARE:%.+]] = "onnx.Mul"(%arg0, %arg0) : (tensor<?x?x?xf32>, tensor<?x?x?xf32>) -> tensor<*xf32>
|
||||
// CHECK-NEXT: %{{[0-9]+}} = "onnx.ReduceSum"([[SQUARE]]) {axes = [1], keepdims = 0 : i64} : (tensor<*xf32>) -> tensor<*xf32>
|
||||
// CHECK-NEXT: %{{[0-9]+}} = "onnx.ReduceSum"([[SQUARE]]) {axes = [1], keepdims = 0 : si64} : (tensor<*xf32>) -> tensor<*xf32>
|
||||
}
|
||||
|
||||
// -----
|
||||
|
@ -78,7 +78,7 @@ func @test_scaler_null(%arg0: tensor<3xi32>) -> tensor<3xf32> {
|
|||
%0 = "onnx.Scaler"(%arg0) : (tensor<3xi32>) -> tensor<3xf32>
|
||||
return %0 : tensor<3xf32>
|
||||
|
||||
// CHECK-NEXT: %0 = "onnx.Cast"(%arg0) {to = 1 : i64} : (tensor<3xi32>) -> tensor<3xf32>
|
||||
// CHECK-NEXT: %0 = "onnx.Cast"(%arg0) {to = 1 : si64} : (tensor<3xi32>) -> tensor<3xf32>
|
||||
// CHECK-NEXT: return %0 : tensor<3xf32>
}
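// Illustrative note (the numeric codes come from the ONNX TensorProto.DataType enum,
// an assumption not stated in this file): the "to" attribute of onnx.Cast names the
// target element type, e.g. to = 1 is FLOAT and to = 7 is INT64, so the Scaler
// rewrite above first casts the i32 input to f32 before applying scale and offset.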
@ -103,7 +103,7 @@ func @test_scaler_no_offset2(%arg0: tensor<3xi32>) -> tensor<3xf32> {
|
|||
%0 = "onnx.Scaler"(%arg0) {scale = [3.125000e-02 : f32, 0.0909090936 : f32, 0.0333333351 : f32]} : (tensor<3xi32>) -> tensor<3xf32>
|
||||
return %0 : tensor<3xf32>
|
||||
|
||||
// CHECK-NEXT: %0 = "onnx.Cast"(%arg0) {to = 1 : i64} : (tensor<3xi32>) -> tensor<*xf32>
|
||||
// CHECK-NEXT: %0 = "onnx.Cast"(%arg0) {to = 1 : si64} : (tensor<3xi32>) -> tensor<*xf32>
|
||||
// CHECK-NEXT: %1 = "onnx.Constant"() {value = dense<[3.125000e-02, 0.0909090936, 0.0333333351]> : tensor<3xf32>} : () -> tensor<3xf32>
|
||||
// CHECK-NEXT: %2 = "onnx.Mul"(%0, %1) : (tensor<*xf32>, tensor<3xf32>) -> tensor<3xf32>
|
||||
// CHECK-NEXT: return %2 : tensor<3xf32>
|
||||
|
@ -130,7 +130,7 @@ func @test_scaler_no_scale2(%arg0: tensor<3xi32>) -> tensor<3xf32> {
|
|||
%0 = "onnx.Scaler"(%arg0) {offset = [1986.99939 : f32, 0.99999988 : f32, 0.999999701 : f32]} : (tensor<3xi32>) -> tensor<3xf32>
|
||||
return %0 : tensor<3xf32>
|
||||
|
||||
//CHECK-NEXT: %0 = "onnx.Cast"(%arg0) {to = 1 : i64} : (tensor<3xi32>) -> tensor<*xf32>
|
||||
//CHECK-NEXT: %0 = "onnx.Cast"(%arg0) {to = 1 : si64} : (tensor<3xi32>) -> tensor<*xf32>
|
||||
//CHECK-NEXT: %1 = "onnx.Constant"() {value = dense<[1986.99939, 0.99999988, 0.999999701]> : tensor<3xf32>} : () -> tensor<3xf32>
|
||||
//CHECK-NEXT: %2 = "onnx.Sub"(%0, %1) : (tensor<*xf32>, tensor<3xf32>) -> tensor<3xf32>
|
||||
//CHECK-NEXT: return %2 : tensor<3xf32>
|
||||
|
@ -159,7 +159,7 @@ func @test_scaler_normal2(%arg0: tensor<3xi32>) -> tensor<3xf32> {
|
|||
%0 = "onnx.Scaler"(%arg0) {offset = [1986.99939 : f32, 0.99999988 : f32, 0.999999701 : f32], scale = [3.125000e-02 : f32, 0.0909090936 : f32, 0.0333333351 : f32]} : (tensor<3xi32>) -> tensor<3xf32>
|
||||
return %0 : tensor<3xf32>
|
||||
|
||||
// CHECK-NEXT: %0 = "onnx.Cast"(%arg0) {to = 1 : i64} : (tensor<3xi32>) -> tensor<*xf32>
|
||||
// CHECK-NEXT: %0 = "onnx.Cast"(%arg0) {to = 1 : si64} : (tensor<3xi32>) -> tensor<*xf32>
|
||||
// CHECK-NEXT: %1 = "onnx.Constant"() {value = dense<[1986.99939, 0.99999988, 0.999999701]> : tensor<3xf32>} : () -> tensor<3xf32>
|
||||
// CHECK-NEXT: %2 = "onnx.Sub"(%0, %1) : (tensor<*xf32>, tensor<3xf32>) -> tensor<*xf32>
|
||||
// CHECK-NEXT: %3 = "onnx.Constant"() {value = dense<[3.125000e-02, 0.0909090936, 0.0333333351]> : tensor<3xf32>} : () -> tensor<3xf32>
|
||||
|
|
|
@ -696,7 +696,7 @@ func @test_add_with_broadcasting(%arg0 : tensor<?xf32>, %arg1 : tensor<?x10xf32>
|
|||
// -----
|
||||
|
||||
func @test_reducemax(%arg0 : tensor<3x2x2xf32>) -> tensor<*xf32> {
|
||||
%0 ="onnx.ReduceMax"(%arg0) {axes=[1], keepdims = 0 : i64} : (tensor<3x2x2xf32>)-> tensor<*xf32>
|
||||
%0 ="onnx.ReduceMax"(%arg0) {axes=[1], keepdims = 0 : si64} : (tensor<3x2x2xf32>)-> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_reducemax
|
||||
|
@ -720,7 +720,7 @@ func @test_reducemax(%arg0 : tensor<3x2x2xf32>) -> tensor<*xf32> {
|
|||
// -----
|
||||
|
||||
func @test_reducemin(%arg0 : tensor<3x2x2xf32>) -> tensor<*xf32> {
|
||||
%0 ="onnx.ReduceMin"(%arg0) {axes=[1], keepdims = 0 : i64} : (tensor<3x2x2xf32>)-> tensor<*xf32>
|
||||
%0 ="onnx.ReduceMin"(%arg0) {axes=[1], keepdims = 0 : si64} : (tensor<3x2x2xf32>)-> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_reducemin
|
||||
|
@ -744,7 +744,7 @@ func @test_reducemin(%arg0 : tensor<3x2x2xf32>) -> tensor<*xf32> {
|
|||
// -----
|
||||
|
||||
func @test_reduceprod(%arg0 : tensor<3x2x2xf32>) -> tensor<*xf32> {
|
||||
%0 ="onnx.ReduceProd"(%arg0) {axes=[1], keepdims = 0 : i64} : (tensor<3x2x2xf32>)-> tensor<*xf32>
|
||||
%0 ="onnx.ReduceProd"(%arg0) {axes=[1], keepdims = 0 : si64} : (tensor<3x2x2xf32>)-> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_reduceprod
|
||||
|
@ -767,7 +767,7 @@ func @test_reduceprod(%arg0 : tensor<3x2x2xf32>) -> tensor<*xf32> {
|
|||
// -----
|
||||
|
||||
func @test_reducesum(%arg0 : tensor<3x2x2xf32>) -> tensor<*xf32> {
|
||||
%0 ="onnx.ReduceSum"(%arg0) {axes=[1], keepdims = 0 : i64} : (tensor<3x2x2xf32>)-> tensor<*xf32>
|
||||
%0 ="onnx.ReduceSum"(%arg0) {axes=[1], keepdims = 0 : si64} : (tensor<3x2x2xf32>)-> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_reducesum
|
||||
|
@ -790,7 +790,7 @@ func @test_reducesum(%arg0 : tensor<3x2x2xf32>) -> tensor<*xf32> {
|
|||
// -----
|
||||
|
||||
func @test_softmax(%arg0 : tensor<10x10xf32>) -> tensor<*xf32> {
|
||||
%0 = "onnx.Softmax"(%arg0) {axis=1:i64} : (tensor<10x10xf32>) -> tensor<*xf32>
|
||||
%0 = "onnx.Softmax"(%arg0) {axis=1: si64} : (tensor<10x10xf32>) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_softmax
|
||||
|
@ -835,7 +835,7 @@ func @test_softmax(%arg0 : tensor<10x10xf32>) -> tensor<*xf32> {
|
|||
// -----
|
||||
|
||||
func @test_gemm(%arg0 : tensor<5x10xf32>, %arg1 : tensor<5x10xf32>, %arg2: tensor<10xf32>) -> tensor<*xf32> {
|
||||
%0 ="onnx.Gemm"(%arg0, %arg1, %arg2) {alpha = 1.0 : f32, beta = 5.0 : f32, transA = 1, transB = 0} : (tensor<5x10xf32>, tensor<5x10xf32>, tensor<10xf32>) -> tensor<*xf32>
|
||||
%0 ="onnx.Gemm"(%arg0, %arg1, %arg2) {alpha = 1.0 : f32, beta = 5.0 : f32, transA = 1 : si64, transB = 0 : si64} : (tensor<5x10xf32>, tensor<5x10xf32>, tensor<10xf32>) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_gemm
|
||||
|
@ -1191,7 +1191,7 @@ func @test_matmul7(%arg0 : tensor<5xf32>, %arg1 : tensor<5xf32>) -> tensor<*xf32
|
|||
|
||||
func @test_conv_no_bias_no_pad(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : i64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<*xf32>
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : si64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_no_bias_no_pad
|
||||
|
@ -1227,7 +1227,7 @@ func @test_conv_no_bias_no_pad(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2
|
|||
// -----
|
||||
|
||||
func @test_conv_bias_no_pad(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7xf32>, %arg2 : tensor<5xf32>) -> tensor<*xf32> {
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %arg2) {auto_pad = "NOTSET", group = 1 : i64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, tensor<5xf32>) -> tensor<*xf32>
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %arg2) {auto_pad = "NOTSET", group = 1 : si64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, tensor<5xf32>) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_bias_no_pad
|
||||
|
@ -1267,7 +1267,7 @@ func @test_conv_bias_no_pad(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x
|
|||
|
||||
func @test_conv_no_bias_no_pad_w_group(%arg0 : tensor<1x9x32x64xf32>, %arg1 : tensor<5x3x6x7xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 3 : i64} : (tensor<1x9x32x64xf32>, tensor<5x3x6x7xf32>, none) -> tensor<*xf32>
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 3 : si64} : (tensor<1x9x32x64xf32>, tensor<5x3x6x7xf32>, none) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_no_bias_no_pad_w_group
|
||||
|
@ -1306,7 +1306,7 @@ func @test_conv_no_bias_no_pad_w_group(%arg0 : tensor<1x9x32x64xf32>, %arg1 : te
|
|||
|
||||
func @test_conv_no_bias_no_pad_w_strides(%arg0 : tensor<1x9x32x64xf32>, %arg1 : tensor<5x9x6x7xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : i64, strides = [2, 2]} : (tensor<1x9x32x64xf32>, tensor<5x9x6x7xf32>, none) -> tensor<*xf32>
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : si64, strides = [2, 2]} : (tensor<1x9x32x64xf32>, tensor<5x9x6x7xf32>, none) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_no_bias_no_pad_w_strides
|
||||
|
@ -1520,7 +1520,7 @@ func @test_constant_dense_2d_value(%arg0: tensor<1xf32>) -> tensor<*xf32> {
|
|||
// -----
|
||||
|
||||
func @test_concat_1(%arg0 : tensor<5x5x1x32xf32>, %arg1 : tensor<5x5x3x32xf32>, %arg2 : tensor<5x5x5x32xf32>) -> tensor<5x5x9x32xf32> {
|
||||
%1 = "onnx.Concat"(%arg0, %arg1, %arg2) { axis = 2 } : (tensor<5x5x1x32xf32>, tensor<5x5x3x32xf32>, tensor<5x5x5x32xf32>) -> tensor<5x5x9x32xf32>
|
||||
%1 = "onnx.Concat"(%arg0, %arg1, %arg2) { axis = 2 : si64} : (tensor<5x5x1x32xf32>, tensor<5x5x3x32xf32>, tensor<5x5x5x32xf32>) -> tensor<5x5x9x32xf32>
|
||||
"std.return"(%1) : (tensor<5x5x9x32xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_concat_1
|
||||
|
@ -1678,7 +1678,7 @@ func @test_maxpool_pooling_operation(%arg0 : tensor<1x3x32x32xf32>) -> tensor<*x
|
|||
|
||||
func @test_lstm_general_computation(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>, %arg2: tensor<1x12x3xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%Y, %Y_h, %Y_c = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (none, tensor<*xf32>, none)
|
||||
%Y, %Y_h, %Y_c = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (none, tensor<*xf32>, none)
|
||||
return %Y_h : tensor<*xf32>
|
||||
|
||||
// CHECK-DAG: [[ACCESS_BY_OFFSET_MAP:#.+]] = affine_map<(d0)[s0, s1] -> (d0 + s0 * s1)>
|
||||
|
@ -1901,7 +1901,7 @@ func @test_lstm_general_computation(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12
|
|||
|
||||
func @test_lstm_reverse_mode(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>, %arg2: tensor<1x12x3xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%Y, %Y_h, %Y_c = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : i64, direction = "reverse"} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (none, tensor<*xf32>, none)
|
||||
%Y, %Y_h, %Y_c = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : si64, direction = "reverse"} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (none, tensor<*xf32>, none)
|
||||
return %Y_h : tensor<*xf32>
|
||||
|
||||
// CHECK: [[REVERSE_IV_MAP:#.+]] = affine_map<(d0)[s0] -> (-d0 + s0 - 1)>
|
||||
|
@ -1918,7 +1918,7 @@ func @test_lstm_reverse_mode(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>
|
|||
|
||||
func @test_lstm_bidirectional_mode(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>, %arg2: tensor<1x12x3xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%Y, %Y_h, %Y_c = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : i64, direction = "bidirectional"} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (none, tensor<*xf32>, none)
|
||||
%Y, %Y_h, %Y_c = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : si64, direction = "bidirectional"} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (none, tensor<*xf32>, none)
|
||||
return %Y_h : tensor<*xf32>
|
||||
|
||||
// CHECK: [[REVERSE_IV_MAP:#.+]] = affine_map<(d0)[s0] -> (-d0 + s0 - 1)>
|
||||
|
@ -1968,7 +1968,7 @@ func @test_squeeze_unknown_dimensions(%arg0 : tensor<?x1x32x?x64xf32>) -> tensor
|
|||
// -----
|
||||
|
||||
func @test_split_equal(%arg0 : tensor<16x32x64xf32>) -> (tensor<*xf32>, tensor<*xf32>) {
|
||||
%0, %1 = "onnx.Split"(%arg0) { axis = 0} : (tensor<16x32x64xf32>) -> (tensor<*xf32>, tensor<*xf32>)
|
||||
%0, %1 = "onnx.Split"(%arg0) { axis = 0 : si64} : (tensor<16x32x64xf32>) -> (tensor<*xf32>, tensor<*xf32>)
|
||||
"std.return"(%0, %1) : (tensor<*xf32>, tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK: [[INDEX_MAP:#.+]] = affine_map<(d0) -> (d0 + 8)>
|
||||
|
@ -1993,7 +1993,7 @@ func @test_split_equal(%arg0 : tensor<16x32x64xf32>) -> (tensor<*xf32>, tensor<*
|
|||
// -----
|
||||
|
||||
func @test_split_variable(%arg0 : tensor<16x32x64xf32>) -> (tensor<*xf32>, tensor<*xf32>) {
|
||||
%0, %1 = "onnx.Split"(%arg0) { axis = 1, split = [2, 30]} : (tensor<16x32x64xf32>) -> (tensor<*xf32>, tensor<*xf32>)
|
||||
%0, %1 = "onnx.Split"(%arg0) { axis = 1 : si64, split = [2, 30]} : (tensor<16x32x64xf32>) -> (tensor<*xf32>, tensor<*xf32>)
|
||||
"std.return"(%0, %1) : (tensor<*xf32>, tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK: [[INDEX_MAP:#.+]] = affine_map<(d0) -> (d0 + 2)>
|
||||
|
@ -2018,7 +2018,7 @@ func @test_split_variable(%arg0 : tensor<16x32x64xf32>) -> (tensor<*xf32>, tenso
|
|||
// -----
|
||||
|
||||
func @test_split_unknown_dimension(%arg0 : tensor<?x?x64xf32>) -> (tensor<*xf32>, tensor<*xf32>) {
|
||||
%0, %1 = "onnx.Split"(%arg0) { axis = 1, split = [2, 30]} : (tensor<?x?x64xf32>) -> (tensor<*xf32>, tensor<*xf32>)
|
||||
%0, %1 = "onnx.Split"(%arg0) { axis = 1 : si64, split = [2, 30]} : (tensor<?x?x64xf32>) -> (tensor<*xf32>, tensor<*xf32>)
|
||||
"std.return"(%0, %1) : (tensor<*xf32>, tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK: [[INDEX_MAP:#.+]] = affine_map<(d0) -> (d0 + 2)>
|
||||
|
@ -2051,7 +2051,7 @@ func @test_split_unknown_dimension(%arg0 : tensor<?x?x64xf32>) -> (tensor<*xf32>
|
|||
// -----
|
||||
|
||||
func @cast_lowering_sametype(%arg0: tensor<f32>) -> tensor<f32> {
|
||||
%0 = "onnx.Cast"(%arg0) {to = 1 : i64} : (tensor<f32>) -> tensor<f32>
|
||||
%0 = "onnx.Cast"(%arg0) {to = 1 : si64} : (tensor<f32>) -> tensor<f32>
|
||||
"std.return"(%0) : (tensor<f32>) -> ()
|
||||
|
||||
// CHECK-LABEL: cast_lowering_sametype
|
||||
|
@ -2064,7 +2064,7 @@ func @cast_lowering_sametype(%arg0: tensor<f32>) -> tensor<f32> {
|
|||
// -----
|
||||
|
||||
func @cast_lowering_intfloat(%arg0: tensor<i64>) -> tensor<f32> {
|
||||
%0 = "onnx.Cast"(%arg0) {to = 1 : i64} : (tensor<i64>) -> tensor<f32>
|
||||
%0 = "onnx.Cast"(%arg0) {to = 1 : si64} : (tensor<i64>) -> tensor<f32>
|
||||
"std.return"(%0) : (tensor<f32>) -> ()
|
||||
|
||||
// CHECK-LABEL: cast_lowering_intfloat
|
||||
|
@ -2078,7 +2078,7 @@ func @cast_lowering_intfloat(%arg0: tensor<i64>) -> tensor<f32> {
|
|||
// -----
|
||||
|
||||
func @cast_lowering_floatint(%arg0: tensor<f32>) -> tensor<i64> {
|
||||
%0 = "onnx.Cast"(%arg0) {to = 7 : i64} : (tensor<f32>) -> tensor<i64>
|
||||
%0 = "onnx.Cast"(%arg0) {to = 7 : si64} : (tensor<f32>) -> tensor<i64>
|
||||
"std.return"(%0) : (tensor<i64>) -> ()
|
||||
|
||||
// CHECK-LABEL: cast_lowering_floatint
|
||||
|
@ -2092,7 +2092,7 @@ func @cast_lowering_floatint(%arg0: tensor<f32>) -> tensor<i64> {
|
|||
// -----
|
||||
|
||||
func @cast_lowering_f16f32(%arg0: tensor<f16>) -> tensor<f32> {
|
||||
%0 = "onnx.Cast"(%arg0) {to = 1 : i64} : (tensor<f16>) -> tensor<f32>
|
||||
%0 = "onnx.Cast"(%arg0) {to = 1 : si64} : (tensor<f16>) -> tensor<f32>
|
||||
"std.return"(%0) : (tensor<f32>) -> ()
|
||||
|
||||
// CHECK-LABEL: cast_lowering_f16f32
|
||||
|
@ -2106,7 +2106,7 @@ func @cast_lowering_f16f32(%arg0: tensor<f16>) -> tensor<f32> {
|
|||
// -----
|
||||
|
||||
func @cast_lowering_f64f32(%arg0: tensor<f64>) -> tensor<f32> {
|
||||
%0 = "onnx.Cast"(%arg0) {to = 1 : i64} : (tensor<f64>) -> tensor<f32>
|
||||
%0 = "onnx.Cast"(%arg0) {to = 1 : si64} : (tensor<f64>) -> tensor<f32>
|
||||
"std.return"(%0) : (tensor<f32>) -> ()
|
||||
|
||||
// CHECK-LABEL: cast_lowering_f64f32
|
||||
|
@ -2120,7 +2120,7 @@ func @cast_lowering_f64f32(%arg0: tensor<f64>) -> tensor<f32> {
|
|||
// -----
|
||||
|
||||
func @cast_lowering_f64f32_10(%arg0: tensor<10xf64>) -> tensor<*xf32> {
|
||||
%0 = "onnx.Cast"(%arg0) {to = 1 : i64} : (tensor<10xf64>) -> tensor<*xf32>
|
||||
%0 = "onnx.Cast"(%arg0) {to = 1 : si64} : (tensor<10xf64>) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: cast_lowering_f64f32_10
|
||||
|
@ -2174,7 +2174,7 @@ func @test_size_unknown(%arg0 : tensor<?x2x?xf32>) -> tensor<i64> {
|
|||
// Test gather along axis 0, first example in ONNX for Gather.
|
||||
func @test_gather_axis0(%arg0 : tensor<3x2xf32>) -> tensor<2x2x2xf32> {
|
||||
%indices = "onnx.Constant"() {value = dense<[[0, 1], [1, 2]]> : tensor<2x2xi64>} : () -> tensor<2x2xi64>
|
||||
%0 = "onnx.Gather"(%arg0, %indices) {axis = 0} : (tensor<3x2xf32>, tensor<2x2xi64>) -> tensor<2x2x2xf32>
|
||||
%0 = "onnx.Gather"(%arg0, %indices) {axis = 0 : si64} : (tensor<3x2xf32>, tensor<2x2xi64>) -> tensor<2x2x2xf32>
|
||||
"std.return"(%0) : (tensor<2x2x2xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_gather_axis0
|
||||
|
@ -2193,7 +2193,7 @@ func @test_gather_axis0(%arg0 : tensor<3x2xf32>) -> tensor<2x2x2xf32> {
|
|||
// Test gather along axis 1, second example in ONNX for Gather.
|
||||
func @test_gather_axis1(%arg0 : tensor<3x3xf32>) -> tensor<1x3x2xf32> {
|
||||
%indices = "onnx.Constant"() {value = dense<[[0, 2]]> : tensor<1x2xi64>} : () -> tensor<1x2xi64>
|
||||
%0 = "onnx.Gather"(%arg0, %indices) {axis = 1} : (tensor<3x3xf32>, tensor<1x2xi64>) -> tensor<1x3x2xf32>
|
||||
%0 = "onnx.Gather"(%arg0, %indices) {axis = 1 : si64} : (tensor<3x3xf32>, tensor<1x2xi64>) -> tensor<1x3x2xf32>
|
||||
"std.return"(%0) : (tensor<1x3x2xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_gather_axis1
|
||||
|
|
|
@ -173,11 +173,11 @@ func @test_matmul_10(%arg0 : tensor<?x42x32xf32>, %arg1 : tensor<32xf32>) -> ten
|
|||
|
||||
func @test_conv_no_bias_0(%arg0 : tensor<1x2x32xf32>, %arg1 : tensor<5x2x6xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : i64} : (tensor<1x2x32xf32>, tensor<5x2x6xf32>, none) -> tensor<*xf32>
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : si64} : (tensor<1x2x32xf32>, tensor<5x2x6xf32>, none) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_no_bias_0
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1], group = 1 : i64, kernel_shape = [6], pads = [0, 0], strides = [1]} : (tensor<1x2x32xf32>, tensor<5x2x6xf32>, none) -> tensor<1x5x27xf32>
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1], group = 1 : si64, kernel_shape = [6], pads = [0, 0], strides = [1]} : (tensor<1x2x32xf32>, tensor<5x2x6xf32>, none) -> tensor<1x5x27xf32>
|
||||
// CHECK: return [[RES_ATTR]] : tensor<1x5x27xf32>
|
||||
}
|
||||
|
||||
|
@ -187,11 +187,11 @@ func @test_conv_no_bias_0(%arg0 : tensor<1x2x32xf32>, %arg1 : tensor<5x2x6xf32>)
|
|||
|
||||
func @test_conv_no_bias_1(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : i64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<*xf32>
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : si64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_no_bias_1
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [6, 7], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<1x5x27x58xf32>
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [6, 7], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<1x5x27x58xf32>
|
||||
// CHECK: return [[RES_ATTR]] : tensor<1x5x27x58xf32>
}
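// Worked shape check (illustrative, assuming the usual convolution output formula
// out = floor((in + pads - kernel)/stride) + 1 with unit dilation):
//   H: (32 - 6)/1 + 1 = 27,  W: (64 - 7)/1 + 1 = 58
// matching the inferred tensor<1x5x27x58xf32> in the CHECK line above.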
@ -201,11 +201,11 @@ func @test_conv_no_bias_1(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7x
|
|||
|
||||
func @test_conv_no_bias_2(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : i64, kernel_shape = [8, 9]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<*xf32>
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : si64, kernel_shape = [8, 9]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_no_bias_2
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [8, 9], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<1x5x25x56xf32>
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [8, 9], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<1x5x25x56xf32>
|
||||
// CHECK: return [[RES_ATTR]] : tensor<1x5x25x56xf32>
|
||||
}
|
||||
|
||||
|
@ -216,11 +216,11 @@ func @test_conv_no_bias_2(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7x
|
|||
|
||||
func @test_conv_no_bias_3(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x10xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : i64, pads = [2, 4, 3, 5]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>, none) -> tensor<*xf32>
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : si64, pads = [2, 4, 3, 5]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>, none) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_no_bias_3
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [6, 10], pads = [2, 4, 3, 5], strides = [1, 1]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>, none) -> tensor<1x5x32x64xf32>
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [6, 10], pads = [2, 4, 3, 5], strides = [1, 1]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>, none) -> tensor<1x5x32x64xf32>
|
||||
// CHECK: return [[RES_ATTR]] : tensor<1x5x32x64xf32>
|
||||
}
|
||||
|
||||
|
@ -230,11 +230,11 @@ func @test_conv_no_bias_3(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x10
|
|||
|
||||
func @test_conv_no_bias_4(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x10xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "SAME_UPPER", group = 1 : i64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>, none) -> tensor<*xf32>
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "SAME_UPPER", group = 1 : si64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>, none) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_no_bias_4
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [6, 10], pads = [2, 4, 3, 5], strides = [1, 1]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>, none) -> tensor<1x5x32x64xf32>
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [6, 10], pads = [2, 4, 3, 5], strides = [1, 1]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>, none) -> tensor<1x5x32x64xf32>
|
||||
// CHECK: return [[RES_ATTR]] : tensor<1x5x32x64xf32>
|
||||
}
|
||||
|
||||
|
@ -242,11 +242,11 @@ func @test_conv_no_bias_4(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x10
|
|||
|
||||
func @test_conv_no_bias_5(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x10xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "SAME_LOWER", group = 1 : i64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>, none) -> tensor<*xf32>
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "SAME_LOWER", group = 1 : si64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>, none) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_no_bias_5
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [6, 10], pads = [3, 5, 2, 4], strides = [1, 1]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>, none) -> tensor<1x5x32x64xf32>
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [6, 10], pads = [3, 5, 2, 4], strides = [1, 1]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>, none) -> tensor<1x5x32x64xf32>
|
||||
// CHECK: return [[RES_ATTR]] : tensor<1x5x32x64xf32>
|
||||
}
|
||||
|
||||
|
@ -256,11 +256,11 @@ func @test_conv_no_bias_5(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x10
|
|||
|
||||
func @test_conv_no_bias_6(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x10xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "VALID", group = 1 : i64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>, none) -> tensor<*xf32>
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "VALID", group = 1 : si64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>, none) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_no_bias_6
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [6, 10], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>, none) -> tensor<1x5x27x55xf32>
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [6, 10], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>, none) -> tensor<1x5x27x55xf32>
|
||||
// CHECK: return [[RES_ATTR]] : tensor<1x5x27x55xf32>
|
||||
}
|
||||
|
||||
|
@ -270,11 +270,11 @@ func @test_conv_no_bias_6(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x10
|
|||
|
||||
func @test_conv_no_bias_7(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : i64, strides = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<*xf32>
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : si64, strides = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_no_bias_7
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [6, 7], pads = [0, 0, 0, 0], strides = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<1x5x14x20xf32>
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [6, 7], pads = [0, 0, 0, 0], strides = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<1x5x14x20xf32>
|
||||
// CHECK: return [[RES_ATTR]] : tensor<1x5x14x20xf32>
}
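// Worked pad check (illustrative, assuming the ONNX SAME_UPPER rule:
// out = ceil(in/stride), total_pad = (out - 1)*stride + kernel - in):
//   H: out = ceil(32/2) = 16, total_pad = 15*2 + 6 - 32 = 4, split as 2/2
//   W: out = ceil(64/3) = 22, total_pad = 21*3 + 7 - 64 = 6, split as 3/3
// matching pads = [2, 3, 2, 3] and tensor<1x5x16x22xf32> above.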
@ -285,11 +285,11 @@ func @test_conv_no_bias_7(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7x
|
|||
|
||||
func @test_conv_no_bias_8(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "SAME_UPPER", group = 1 : i64, strides = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<*xf32>
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "SAME_UPPER", group = 1 : si64, strides = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_no_bias_8
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [6, 7], pads = [2, 3, 2, 3], strides = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<1x5x16x22xf32>
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [6, 7], pads = [2, 3, 2, 3], strides = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<1x5x16x22xf32>
|
||||
// CHECK: return [[RES_ATTR]] : tensor<1x5x16x22xf32>
|
||||
}
|
||||
|
||||
|
@ -299,11 +299,11 @@ func @test_conv_no_bias_8(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7x
|
|||
|
||||
func @test_conv_no_bias_9(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : i64, dilations = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<*xf32>
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : si64, dilations = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_no_bias_9
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [2, 3], group = 1 : i64, kernel_shape = [6, 7], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<1x5x22x46xf32>
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [2, 3], group = 1 : si64, kernel_shape = [6, 7], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<1x5x22x46xf32>
|
||||
// CHECK: return [[RES_ATTR]] : tensor<1x5x22x46xf32>
}
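// Worked shape check (illustrative): with dilations = [2, 3] the effective kernel is
// (6 - 1)*2 + 1 = 11 by (7 - 1)*3 + 1 = 19, so H: 32 - 11 + 1 = 22 and
// W: 64 - 19 + 1 = 46, matching tensor<1x5x22x46xf32> above.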
@ -313,11 +313,11 @@ func @test_conv_no_bias_9(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7x
|
|||
|
||||
func @test_conv_no_bias_10(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : i64, dilations = [2, 3], strides = [2, 2]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<*xf32>
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", group = 1 : si64, dilations = [2, 3], strides = [2, 2]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_no_bias_10
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [2, 3], group = 1 : i64, kernel_shape = [6, 7], pads = [0, 0, 0, 0], strides = [2, 2]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<1x5x11x23xf32>
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [2, 3], group = 1 : si64, kernel_shape = [6, 7], pads = [0, 0, 0, 0], strides = [2, 2]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<1x5x11x23xf32>
|
||||
// CHECK: return [[RES_ATTR]] : tensor<1x5x11x23xf32>
|
||||
}
|
||||
|
||||
|
@ -327,11 +327,11 @@ func @test_conv_no_bias_10(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7
|
|||
|
||||
func @test_conv_no_bias_11(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "SAME_UPPER", group = 1 : i64, dilations = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<*xf32>
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "SAME_UPPER", group = 1 : si64, dilations = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_no_bias_11
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [2, 3], group = 1 : i64, kernel_shape = [6, 7], pads = [5, 9, 5, 9], strides = [1, 1]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<1x5x32x64xf32>
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [2, 3], group = 1 : si64, kernel_shape = [6, 7], pads = [5, 9, 5, 9], strides = [1, 1]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>, none) -> tensor<1x5x32x64xf32>
|
||||
// CHECK: return [[RES_ATTR]] : tensor<1x5x32x64xf32>
|
||||
}
|
||||
|
||||
|
@ -340,11 +340,11 @@ func @test_conv_no_bias_11(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7
|
|||
// Test convolution with bias input.
|
||||
|
||||
func @test_conv_12(%arg0 : tensor<1x2x32xf32>, %arg1 : tensor<5x2x6xf32>, %arg2 : tensor<5xf32>) -> tensor<*xf32> {
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %arg2) {auto_pad = "NOTSET", group = 1 : i64} : (tensor<1x2x32xf32>, tensor<5x2x6xf32>, tensor<5xf32>) -> tensor<*xf32>
|
||||
%0 = "onnx.Conv"(%arg0, %arg1, %arg2) {auto_pad = "NOTSET", group = 1 : si64} : (tensor<1x2x32xf32>, tensor<5x2x6xf32>, tensor<5xf32>) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_12
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %arg2) {auto_pad = "NOTSET", dilations = [1], group = 1 : i64, kernel_shape = [6], pads = [0, 0], strides = [1]} : (tensor<1x2x32xf32>, tensor<5x2x6xf32>, tensor<5xf32>) -> tensor<1x5x27xf32>
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.Conv"(%arg0, %arg1, %arg2) {auto_pad = "NOTSET", dilations = [1], group = 1 : si64, kernel_shape = [6], pads = [0, 0], strides = [1]} : (tensor<1x2x32xf32>, tensor<5x2x6xf32>, tensor<5xf32>) -> tensor<1x5x27xf32>
|
||||
// CHECK: return [[RES_ATTR]] : tensor<1x5x27xf32>
|
||||
}
|
||||
|
||||
|
@ -360,17 +360,17 @@ func @test_conv_transpose_1(%arg0 : tensor<1x64x36x48xf32>, %arg1 : tensor<64x1x
|
|||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_transpose_1
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvTranspose"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [2, 2], output_shape = [1, 1, 72, 96], pads = [0, 0, 0, 0], strides = [2, 2]} : (tensor<1x64x36x48xf32>, tensor<64x1x2x2xf32>, none) -> tensor<1x1x72x96xf32>
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvTranspose"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [2, 2], output_shape = [1, 1, 72, 96], pads = [0, 0, 0, 0], strides = [2, 2]} : (tensor<1x64x36x48xf32>, tensor<64x1x2x2xf32>, none) -> tensor<1x1x72x96xf32>
|
||||
// CHECK: return [[RES_ATTR]] : tensor<1x1x72x96xf32>
|
||||
}
|
||||
|
||||
func @test_conv_transpose_2(%arg0 : tensor<1x64x36x48xf32>, %arg1 : tensor<64x1x2x2xf32>) -> tensor<*xf32> {
|
||||
%cst = constant unit
|
||||
%0 = "onnx.ConvTranspose"(%arg0, %arg1, %cst) {dilations = [1, 1], group = 64 : i64, kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]} : (tensor<1x64x36x48xf32>, tensor<64x1x2x2xf32>, none) -> tensor<*xf32>
|
||||
%0 = "onnx.ConvTranspose"(%arg0, %arg1, %cst) {dilations = [1, 1], group = 64 : si64, kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]} : (tensor<1x64x36x48xf32>, tensor<64x1x2x2xf32>, none) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_conv_transpose_2
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvTranspose"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 64 : i64, kernel_shape = [2, 2], output_shape = [1, 64, 72, 96], pads = [0, 0, 0, 0], strides = [2, 2]} : (tensor<1x64x36x48xf32>, tensor<64x1x2x2xf32>, none) -> tensor<1x64x72x96xf32>
|
||||
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvTranspose"(%arg0, %arg1, %cst) {auto_pad = "NOTSET", dilations = [1, 1], group = 64 : si64, kernel_shape = [2, 2], output_shape = [1, 64, 72, 96], pads = [0, 0, 0, 0], strides = [2, 2]} : (tensor<1x64x36x48xf32>, tensor<64x1x2x2xf32>, none) -> tensor<1x64x72x96xf32>
|
||||
// CHECK: return [[RES_ATTR]] : tensor<1x64x72x96xf32>
}
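// Worked shape check (illustrative, assuming the ONNX ConvTranspose formula
// out = stride*(in - 1) + dilation*(kernel - 1) + 1 - pads):
//   H: 2*(36 - 1) + 1 + 1 = 72,  W: 2*(48 - 1) + 1 + 1 = 96
// so both transpose tests above infer 72x96 spatial dims; only the channel count
// (1 versus 64) changes with the group attribute.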
@ -493,11 +493,11 @@ func @test_constant_sparse_2d_value() -> tensor<*xf32> {
/// Test the default behavior of Average Pool with no padding (pads are set but should be ignored)
func @test_default_averagepool(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
|
||||
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "VALID", ceil_mode = 0, kernel_shape = [3,3], pads = [1, 1, 1, 1] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
|
||||
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "VALID", ceil_mode = 0 : si64, kernel_shape = [3,3], pads = [1, 1, 1, 1] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_default_averagepool
|
||||
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [3, 3], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x30x30xf32>
|
||||
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [3, 3], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x30x30xf32>
|
||||
// CHECK: return [[RES]] : tensor<5x5x30x30xf32>
}
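// Worked shape check (illustrative): with auto_pad = "VALID" the explicit pads are
// dropped, so each spatial dim is (32 - 3)/1 + 1 = 30, giving the
// tensor<5x5x30x30xf32> result above.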
@ -505,11 +505,11 @@ func @test_default_averagepool(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
|
|||
|
||||
/// Test the default behavior of Average Pool with no padding (pads are not set, defaulting to zero)
|
||||
func @test_default_averagepool_defpad(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
|
||||
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0, kernel_shape = [3,3]} : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
|
||||
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [3,3]} : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_default_averagepool_defpad
|
||||
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [3, 3], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x30x30xf32>
|
||||
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [3, 3], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x30x30xf32>
|
||||
// CHECK: return [[RES]] : tensor<5x5x30x30xf32>
|
||||
}
|
||||
|
||||
|
@ -517,11 +517,11 @@ func @test_default_averagepool_defpad(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*
|
|||
|
||||
/// Test the default behavior of Average Pool with uniform padding
|
||||
func @test_default_averagepool_pad(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
|
||||
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0, kernel_shape = [3,3], pads = [1, 1, 1, 1] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
|
||||
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [3,3], pads = [1, 1, 1, 1] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_default_averagepool_pad
|
||||
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>
|
||||
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>
|
||||
// CHECK: return [[RES]] : tensor<5x5x32x32xf32>
|
||||
}
|
||||
|
||||
|
@ -529,11 +529,11 @@ func @test_default_averagepool_pad(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf3
|
|||
|
||||
/// Test the default behavior of Average Pool with non-uniform padding
|
||||
func @test_default_averagepool_pad_nonunif(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
|
||||
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0, kernel_shape = [5,3], pads = [2, 1, 1, 0] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
|
||||
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [5,3], pads = [2, 1, 1, 0] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_default_averagepool_pad_nonunif
|
||||
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [5, 3], pads = [2, 1, 1, 0], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x31x31xf32>
|
||||
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [5, 3], pads = [2, 1, 1, 0], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x31x31xf32>
|
||||
// CHECK: return [[RES]] : tensor<5x5x31x31xf32>
|
||||
}
|
||||
|
||||
|
@ -541,11 +541,11 @@ func @test_default_averagepool_pad_nonunif(%arg0 : tensor<5x5x32x32xf32>) -> ten
|
|||
|
||||
/// Test the default behavior of Average Pool with uniform padding and strides
|
||||
func @test_default_averagepool_strides(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
|
||||
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0, kernel_shape = [3,3], pads = [1, 1, 1, 1], strides = [2, 2] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
|
||||
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [3,3], pads = [1, 1, 1, 1], strides = [2, 2] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_default_averagepool_strides
|
||||
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [2, 2]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x16x16xf32>
|
||||
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [2, 2]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x16x16xf32>
|
||||
// CHECK: return [[RES]] : tensor<5x5x16x16xf32>
|
||||
}
|
||||
|
||||
|
@ -553,11 +553,11 @@ func @test_default_averagepool_strides(%arg0 : tensor<5x5x32x32xf32>) -> tensor<
|
|||
|
||||
/// Test the default behavior of Average Pool with non-uniform padding and strides
|
||||
func @test_default_averagepool_strides_nonunifpad(%arg0 : tensor<5x5x30x32xf32>) -> tensor<*xf32> {
|
||||
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0, kernel_shape = [2,2], pads = [1, 0, 0, 0], strides = [2, 2] } : (tensor<5x5x30x32xf32>) -> tensor<*xf32>
|
||||
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [2,2], pads = [1, 0, 0, 0], strides = [2, 2] } : (tensor<5x5x30x32xf32>) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_default_averagepool_strides_nonunifpad
|
||||
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, kernel_shape = [2, 2], pads = [1, 0, 0, 0], strides = [2, 2]} : (tensor<5x5x30x32xf32>) -> tensor<5x5x15x16xf32>
|
||||
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [2, 2], pads = [1, 0, 0, 0], strides = [2, 2]} : (tensor<5x5x30x32xf32>) -> tensor<5x5x15x16xf32>
|
||||
// CHECK: return [[RES]] : tensor<5x5x15x16xf32>
|
||||
}
|
||||
|
||||
|
@ -565,11 +565,11 @@ func @test_default_averagepool_strides_nonunifpad(%arg0 : tensor<5x5x30x32xf32>)
|
|||
|
||||
/// Test the default behavior of Average Pool with non-uniform padding, strides and ceil mode
|
||||
func @test_default_averagepool_strides_nonunifpad_ceil(%arg0 : tensor<5x5x30x32xf32>) -> tensor<*xf32> {
|
||||
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 1, kernel_shape = [2,2], pads = [1, 0, 0, 0], strides = [2, 2] } : (tensor<5x5x30x32xf32>) -> tensor<*xf32>
|
||||
%0 = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 1 : si64, kernel_shape = [2,2], pads = [1, 0, 0, 0], strides = [2, 2] } : (tensor<5x5x30x32xf32>) -> tensor<*xf32>
|
||||
"std.return"(%0) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_default_averagepool_strides_nonunifpad_ceil
|
||||
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 1 : i64, kernel_shape = [2, 2], pads = [1, 0, 0, 0], strides = [2, 2]} : (tensor<5x5x30x32xf32>) -> tensor<5x5x16x16xf32>
|
||||
// CHECK: [[RES:%.+]] = "onnx.AveragePool"(%arg0) {auto_pad = "NOTSET", ceil_mode = 1 : si64, kernel_shape = [2, 2], pads = [1, 0, 0, 0], strides = [2, 2]} : (tensor<5x5x30x32xf32>) -> tensor<5x5x16x16xf32>
|
||||
// CHECK: return [[RES]] : tensor<5x5x16x16xf32>
}
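// Worked shape check (illustrative): ceil_mode only changes the rounding of the
// output size. Height: round((30 + 1 - 2)/2) + 1 is floor(14.5) + 1 = 15 in the
// previous test but ceil(14.5) + 1 = 16 here; width stays (32 - 2)/2 + 1 = 16.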
@ -631,11 +631,11 @@ func @test_reshape_3(%arg0 : tensor<5x5x1x32xf32>) -> tensor<*xf32> {
|
|||
//===----------------------------------------------------------------------===//
|
||||
|
||||
func @test_flatten_1(%arg0 : tensor<5x2x3x4xf32>) -> tensor<*xf32> {
|
||||
%1 = "onnx.Flatten"(%arg0) {axis = 1 : i64} : (tensor<5x2x3x4xf32>) -> tensor<*xf32>
|
||||
%1 = "onnx.Flatten"(%arg0) {axis = 1 : si64} : (tensor<5x2x3x4xf32>) -> tensor<*xf32>
|
||||
"std.return"(%1) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_flatten_1
|
||||
// CHECK: [[RES:%.+]] = "onnx.Flatten"(%arg0) {axis = 1 : i64} : (tensor<5x2x3x4xf32>) -> tensor<5x24xf32>
|
||||
// CHECK: [[RES:%.+]] = "onnx.Flatten"(%arg0) {axis = 1 : si64} : (tensor<5x2x3x4xf32>) -> tensor<5x24xf32>
|
||||
// CHECK: return [[RES]] : tensor<5x24xf32>
}
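// Worked shape check (illustrative): Flatten with axis = 1 collapses dims [0, axis)
// into the first output dim and [axis, rank) into the second, so 5x2x3x4 becomes
// 5 x (2*3*4) = 5x24, matching tensor<5x24xf32> above.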
@ -644,33 +644,33 @@ func @test_flatten_1(%arg0 : tensor<5x2x3x4xf32>) -> tensor<*xf32> {
|
|||
//===----------------------------------------------------------------------===//
|
||||
|
||||
func @test_concat_1(%arg0 : tensor<5x5x1x32xf32>, %arg1 : tensor<5x5x3x32xf32>, %arg2 : tensor<5x5x5x32xf32>) -> tensor<*xf32> {
|
||||
%1 = "onnx.Concat"(%arg0, %arg1, %arg2) { axis = 2 } : (tensor<5x5x1x32xf32>, tensor<5x5x3x32xf32>, tensor<5x5x5x32xf32>) -> tensor<*xf32>
|
||||
%1 = "onnx.Concat"(%arg0, %arg1, %arg2) { axis = 2 : si64} : (tensor<5x5x1x32xf32>, tensor<5x5x3x32xf32>, tensor<5x5x5x32xf32>) -> tensor<*xf32>
|
||||
"std.return"(%1) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_concat_1
|
||||
// CHECK: [[RES:%.+]] = "onnx.Concat"(%arg0, %arg1, %arg2) {axis = 2 : i64} : (tensor<5x5x1x32xf32>, tensor<5x5x3x32xf32>, tensor<5x5x5x32xf32>) -> tensor<5x5x9x32xf32>
|
||||
// CHECK: [[RES:%.+]] = "onnx.Concat"(%arg0, %arg1, %arg2) {axis = 2 : si64} : (tensor<5x5x1x32xf32>, tensor<5x5x3x32xf32>, tensor<5x5x5x32xf32>) -> tensor<5x5x9x32xf32>
|
||||
// CHECK: return [[RES]] : tensor<5x5x9x32xf32>
|
||||
}
|
||||
|
||||
// -----
|
||||
|
||||
func @test_concat_2(%arg0 : tensor<5x1x32xf32>, %arg1 : tensor<5x3x32xf32>, %arg2 : tensor<5x5x32xf32>) -> tensor<*xf32> {
|
||||
%1 = "onnx.Concat"(%arg0, %arg1, %arg2) { axis = 1 } : (tensor<5x1x32xf32>, tensor<5x3x32xf32>, tensor<5x5x32xf32>) -> tensor<*xf32>
|
||||
%1 = "onnx.Concat"(%arg0, %arg1, %arg2) { axis = 1 : si64} : (tensor<5x1x32xf32>, tensor<5x3x32xf32>, tensor<5x5x32xf32>) -> tensor<*xf32>
|
||||
"std.return"(%1) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_concat_2
|
||||
// CHECK: [[RES:%.+]] = "onnx.Concat"(%arg0, %arg1, %arg2) {axis = 1 : i64} : (tensor<5x1x32xf32>, tensor<5x3x32xf32>, tensor<5x5x32xf32>) -> tensor<5x9x32xf32>
|
||||
// CHECK: [[RES:%.+]] = "onnx.Concat"(%arg0, %arg1, %arg2) {axis = 1 : si64} : (tensor<5x1x32xf32>, tensor<5x3x32xf32>, tensor<5x5x32xf32>) -> tensor<5x9x32xf32>
|
||||
// CHECK: return [[RES]] : tensor<5x9x32xf32>
|
||||
}
|
||||
|
||||
// -----
|
||||
|
||||
func @test_concat_3(%arg0 : tensor<5x1x32xf32>, %arg1 : tensor<5x3x32xf32>, %arg2 : tensor<5x5x32xf32>) -> tensor<*xf32> {
|
||||
%1 = "onnx.Concat"(%arg0, %arg1, %arg2) { axis = -2 } : (tensor<5x1x32xf32>, tensor<5x3x32xf32>, tensor<5x5x32xf32>) -> tensor<*xf32>
|
||||
%1 = "onnx.Concat"(%arg0, %arg1, %arg2) { axis = -2 : si64} : (tensor<5x1x32xf32>, tensor<5x3x32xf32>, tensor<5x5x32xf32>) -> tensor<*xf32>
|
||||
"std.return"(%1) : (tensor<*xf32>) -> ()
|
||||
|
||||
// CHECK-LABEL: test_concat_3
|
||||
// CHECK: [[RES:%.+]] = "onnx.Concat"(%arg0, %arg1, %arg2) {axis = 1 : i64} : (tensor<5x1x32xf32>, tensor<5x3x32xf32>, tensor<5x5x32xf32>) -> tensor<5x9x32xf32>
|
||||
// CHECK: [[RES:%.+]] = "onnx.Concat"(%arg0, %arg1, %arg2) {axis = 1 : si64} : (tensor<5x1x32xf32>, tensor<5x3x32xf32>, tensor<5x5x32xf32>) -> tensor<5x9x32xf32>
|
||||
// CHECK: return [[RES]] : tensor<5x9x32xf32>
}
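// Worked axis check (illustrative): a negative Concat axis is normalized by adding
// the input rank, so axis = -2 on rank-3 inputs becomes axis = 1, which is why the
// CHECK line above expects the same result as test_concat_2.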
@ -678,11 +678,11 @@ func @test_concat_3(%arg0 : tensor<5x1x32xf32>, %arg1 : tensor<5x3x32xf32>, %arg

func @test_rnn_all_results(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>, %arg2: tensor<1x12x3xf32>) -> tensor<*xf32> {
%cst = constant unit
%Y, %Y_h = "onnx.RNN"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<*xf32>, tensor<*xf32>)
%Y, %Y_h = "onnx.RNN"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<*xf32>, tensor<*xf32>)
return %Y_h : tensor<*xf32>

// CHECK-LABEL: test_rnn_all_results
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.RNN"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<4x1x3x3xf32>, tensor<1x3x3xf32>)
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.RNN"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<4x1x3x3xf32>, tensor<1x3x3xf32>)
// CHECK: return [[RES]] : tensor<1x3x3xf32>
}

@ -690,11 +690,11 @@ func @test_rnn_all_results(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>,

func @test_rnn_no_results(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>, %arg2: tensor<1x12x3xf32>) -> () {
%cst = constant unit
%Y, %Y_h = "onnx.RNN"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (none, none)
%Y, %Y_h = "onnx.RNN"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (none, none)
return

// CHECK-LABEL: test_rnn_no_results
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.RNN"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (none, none)
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.RNN"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (none, none)
// CHECK: return
}

@ -702,11 +702,11 @@ func @test_rnn_no_results(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>, %

func @test_rnn_missing_first_result(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>, %arg2: tensor<1x12x3xf32>) -> tensor<*xf32> {
%cst = constant unit
%Y, %Y_h = "onnx.RNN"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (none, tensor<*xf32>)
%Y, %Y_h = "onnx.RNN"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (none, tensor<*xf32>)
return %Y_h : tensor<*xf32>

// CHECK-LABEL: test_rnn_missing_first_result
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.RNN"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (none, tensor<1x3x3xf32>)
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.RNN"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (none, tensor<1x3x3xf32>)
// CHECK: return [[RES]] : tensor<1x3x3xf32>
}

@ -714,11 +714,11 @@ func @test_rnn_missing_first_result(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12

func @test_rnn_missing_trailing_result(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>, %arg2: tensor<1x12x3xf32>) -> () {
%cst = constant unit
%Y, %Y_h = "onnx.RNN"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<*xf32>, none)
%Y, %Y_h = "onnx.RNN"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<*xf32>, none)
return

// CHECK-LABEL: test_rnn_missing_trailing_result
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.RNN"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<4x1x3x3xf32>, none)
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.RNN"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<4x1x3x3xf32>, none)
// CHECK: return
}

@ -730,7 +730,7 @@ func @test_rnn_all_results_no_hidden_size(%arg0: tensor<4x3x2xf32>, %arg1: tenso
return %Y_h : tensor<*xf32>

// CHECK-LABEL: test_rnn_all_results_no_hidden_size
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.RNN"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<4x1x3x3xf32>, tensor<1x3x3xf32>)
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.RNN"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<4x1x3x3xf32>, tensor<1x3x3xf32>)
// CHECK: return [[RES]] : tensor<1x3x3xf32>
}
@ -750,11 +750,11 @@ func @test_rnn_all_results_unknown_dims(%arg0: tensor<?x?x?xf32>, %arg1: tensor<

func @test_gru_all_results(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>, %arg2: tensor<1x12x3xf32>) -> tensor<*xf32> {
%cst = constant unit
%Y, %Y_h = "onnx.GRU"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<*xf32>, tensor<*xf32>)
%Y, %Y_h = "onnx.GRU"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<*xf32>, tensor<*xf32>)
return %Y_h : tensor<*xf32>

// CHECK-LABEL: test_gru_all_results
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.GRU"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<4x1x3x3xf32>, tensor<1x3x3xf32>)
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.GRU"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<4x1x3x3xf32>, tensor<1x3x3xf32>)
// CHECK: return [[RES]] : tensor<1x3x3xf32>
}

@ -762,11 +762,11 @@ func @test_gru_all_results(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>,

func @test_gru_no_results(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>, %arg2: tensor<1x12x3xf32>) -> () {
%cst = constant unit
%Y, %Y_h = "onnx.GRU"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (none, none)
%Y, %Y_h = "onnx.GRU"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (none, none)
return

// CHECK-LABEL: test_gru_no_results
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.GRU"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (none, none)
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.GRU"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (none, none)
// CHECK: return
}

@ -774,11 +774,11 @@ func @test_gru_no_results(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>, %

func @test_gru_missing_first_result(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>, %arg2: tensor<1x12x3xf32>) -> tensor<*xf32> {
%cst = constant unit
%Y, %Y_h = "onnx.GRU"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (none, tensor<*xf32>)
%Y, %Y_h = "onnx.GRU"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (none, tensor<*xf32>)
return %Y_h : tensor<*xf32>

// CHECK-LABEL: test_gru_missing_first_result
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.GRU"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (none, tensor<1x3x3xf32>)
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.GRU"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (none, tensor<1x3x3xf32>)
// CHECK: return [[RES]] : tensor<1x3x3xf32>
}

@ -786,11 +786,11 @@ func @test_gru_missing_first_result(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12

func @test_gru_missing_trailing_result(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>, %arg2: tensor<1x12x3xf32>) -> () {
%cst = constant unit
%Y, %Y_h = "onnx.GRU"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<*xf32>, none)
%Y, %Y_h = "onnx.GRU"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<*xf32>, none)
return

// CHECK-LABEL: test_gru_missing_trailing_result
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.GRU"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<4x1x3x3xf32>, none)
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.GRU"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<4x1x3x3xf32>, none)
// CHECK: return
}

@ -802,7 +802,7 @@ func @test_gru_all_results_no_hidden_size(%arg0: tensor<4x3x2xf32>, %arg1: tenso
return %Y_h : tensor<*xf32>

// CHECK-LABEL: test_gru_all_results_no_hidden_size
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.GRU"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<4x1x3x3xf32>, tensor<1x3x3xf32>)
// CHECK: %{{.*}}, [[RES:%.+]] = "onnx.GRU"(%arg0, %arg1, %arg2, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none) -> (tensor<4x1x3x3xf32>, tensor<1x3x3xf32>)
// CHECK: return [[RES]] : tensor<1x3x3xf32>
}
@ -822,11 +822,11 @@ func @test_gru_all_results_unknown_dims(%arg0: tensor<?x?x?xf32>, %arg1: tensor<

func @test_lstm_all_results(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>, %arg2: tensor<1x12x3xf32>) -> tensor<*xf32> {
%cst = constant unit
%Y, %Y_h, %Y_c = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (tensor<*xf32>, tensor<*xf32>, tensor<*xf32>)
%Y, %Y_h, %Y_c = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (tensor<*xf32>, tensor<*xf32>, tensor<*xf32>)
return %Y_h : tensor<*xf32>

// CHECK-LABEL: test_lstm_all_results
// CHECK: %{{.*}}, [[RES:%.+]], %{{.*}} = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (tensor<4x1x3x3xf32>, tensor<1x3x3xf32>, tensor<1x3x3xf32>)
// CHECK: %{{.*}}, [[RES:%.+]], %{{.*}} = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (tensor<4x1x3x3xf32>, tensor<1x3x3xf32>, tensor<1x3x3xf32>)
// CHECK: return [[RES]] : tensor<1x3x3xf32>
}

@ -834,11 +834,11 @@ func @test_lstm_all_results(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>,

func @test_lstm_no_results(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>, %arg2: tensor<1x12x3xf32>) -> () {
%cst = constant unit
%Y, %Y_h, %Y_c = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (none, none, none)
%Y, %Y_h, %Y_c = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (none, none, none)
return

// CHECK-LABEL: test_lstm_no_results
// CHECK: %{{.*}}, [[RES:%.+]], %{{.*}} = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (none, none, none)
// CHECK: %{{.*}}, [[RES:%.+]], %{{.*}} = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (none, none, none)
// CHECK: return
}

@ -846,11 +846,11 @@ func @test_lstm_no_results(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>,

func @test_lstm_missing_first_result(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>, %arg2: tensor<1x12x3xf32>) -> tensor<*xf32> {
%cst = constant unit
%Y, %Y_h, %Y_c = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (none, tensor<*xf32>, tensor<*xf32>)
%Y, %Y_h, %Y_c = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (none, tensor<*xf32>, tensor<*xf32>)
return %Y_h : tensor<*xf32>

// CHECK-LABEL: test_lstm_missing_first_result
// CHECK: %{{.*}}, [[RES:%.+]], %{{.*}} = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (none, tensor<1x3x3xf32>, tensor<1x3x3xf32>)
// CHECK: %{{.*}}, [[RES:%.+]], %{{.*}} = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (none, tensor<1x3x3xf32>, tensor<1x3x3xf32>)
// CHECK: return [[RES]] : tensor<1x3x3xf32>
}

@ -858,11 +858,11 @@ func @test_lstm_missing_first_result(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x1

func @test_lstm_missing_trailing_result(%arg0: tensor<4x3x2xf32>, %arg1: tensor<1x12x2xf32>, %arg2: tensor<1x12x3xf32>) -> tensor<*xf32> {
%cst = constant unit
%Y, %Y_h, %Y_c = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (tensor<*xf32>, tensor<*xf32>, none)
%Y, %Y_h, %Y_c = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (tensor<*xf32>, tensor<*xf32>, none)
return %Y_h : tensor<*xf32>

// CHECK-LABEL: test_lstm_missing_trailing_result
// CHECK: %{{.*}}, [[RES:%.+]], %{{.*}} = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (tensor<4x1x3x3xf32>, tensor<1x3x3xf32>, none)
// CHECK: %{{.*}}, [[RES:%.+]], %{{.*}} = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (tensor<4x1x3x3xf32>, tensor<1x3x3xf32>, none)
// CHECK: return [[RES]] : tensor<1x3x3xf32>
}

@ -874,7 +874,7 @@ func @test_lstm_all_results_no_hidden_size(%arg0: tensor<4x3x2xf32>, %arg1: tens
return %Y_h : tensor<*xf32>

// CHECK-LABEL: test_lstm_all_results_no_hidden_size
// CHECK: %{{.*}}, [[RES:%.+]], %{{.*}} = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : i64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (tensor<4x1x3x3xf32>, tensor<1x3x3xf32>, tensor<1x3x3xf32>)
// CHECK: %{{.*}}, [[RES:%.+]], %{{.*}} = "onnx.LSTM"(%arg0, %arg1, %arg2, %cst, %cst, %cst, %cst, %cst) {hidden_size = 3 : si64} : (tensor<4x3x2xf32>, tensor<1x12x2xf32>, tensor<1x12x3xf32>, none, none, none, none, none) -> (tensor<4x1x3x3xf32>, tensor<1x3x3xf32>, tensor<1x3x3xf32>)
// CHECK: return [[RES]] : tensor<1x3x3xf32>
}
@ -893,33 +893,33 @@ func @test_lstm_all_results_unknown_dims(%arg0: tensor<?x?x?xf32>, %arg1: tensor
// -----

func @test_split_1(%arg0 : tensor<16x32x64xf32>) -> tensor<*xf32> {
%0, %1 = "onnx.Split"(%arg0) { axis = 1 } : (tensor<16x32x64xf32>) -> (tensor<*xf32>, tensor<*xf32>)
%0, %1 = "onnx.Split"(%arg0) { axis = 1 : si64} : (tensor<16x32x64xf32>) -> (tensor<*xf32>, tensor<*xf32>)
"std.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-LABEL: test_split_1
// CHECK: [[RES:%.+]]:2 = "onnx.Split"(%arg0) {axis = 1 : i64, split = [16, 16]} : (tensor<16x32x64xf32>) -> (tensor<16x16x64xf32>, tensor<16x16x64xf32>)
// CHECK: [[RES:%.+]]:2 = "onnx.Split"(%arg0) {axis = 1 : si64, split = [16, 16]} : (tensor<16x32x64xf32>) -> (tensor<16x16x64xf32>, tensor<16x16x64xf32>)
// CHECK: return [[RES]]#0 : tensor<16x16x64xf32>
}

// -----

func @test_split_2(%arg0 : tensor<16x32x64xf32>) -> tensor<*xf32> {
%0, %1 = "onnx.Split"(%arg0) { axis = -2 } : (tensor<16x32x64xf32>) -> (tensor<*xf32>, tensor<*xf32>)
%0, %1 = "onnx.Split"(%arg0) { axis = -2 : si64} : (tensor<16x32x64xf32>) -> (tensor<*xf32>, tensor<*xf32>)
"std.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-LABEL: test_split_2
// CHECK: [[RES:%.+]]:2 = "onnx.Split"(%arg0) {axis = 1 : i64, split = [16, 16]} : (tensor<16x32x64xf32>) -> (tensor<16x16x64xf32>, tensor<16x16x64xf32>)
// CHECK: [[RES:%.+]]:2 = "onnx.Split"(%arg0) {axis = 1 : si64, split = [16, 16]} : (tensor<16x32x64xf32>) -> (tensor<16x16x64xf32>, tensor<16x16x64xf32>)
// CHECK: return [[RES]]#0 : tensor<16x16x64xf32>
}

// -----

func @test_split_3(%arg0 : tensor<16x32x64xf32>) -> tensor<*xf32> {
%0, %1 = "onnx.Split"(%arg0) { axis = 1, split = [2, 30]} : (tensor<16x32x64xf32>) -> (tensor<*xf32>, tensor<*xf32>)
%0, %1 = "onnx.Split"(%arg0) {axis = 1 : si64, split = [2, 30]} : (tensor<16x32x64xf32>) -> (tensor<*xf32>, tensor<*xf32>)
"std.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-LABEL: test_split_3
// CHECK: [[RES:%.+]]:2 = "onnx.Split"(%arg0) {axis = 1 : i64, split = [2, 30]} : (tensor<16x32x64xf32>) -> (tensor<16x2x64xf32>, tensor<16x30x64xf32>)
// CHECK: [[RES:%.+]]:2 = "onnx.Split"(%arg0) {axis = 1 : si64, split = [2, 30]} : (tensor<16x32x64xf32>) -> (tensor<16x2x64xf32>, tensor<16x30x64xf32>)
// CHECK: return [[RES]]#0 : tensor<16x2x64xf32>
}
@ -961,38 +961,38 @@ func @test_squeeze_mix(%arg0 : tensor<16x1x32x1x64xf32>) -> tensor<*xf32> {
//===----------------------------------------------------------------------===//

func @test_cast_1(%arg0 : tensor<2x3x4xf32>) -> tensor<*xf32> {
%1 = "onnx.Cast"(%arg0) {to = 1} : (tensor<2x3x4xf32>) -> tensor<*xf32>
%1 = "onnx.Cast"(%arg0) {to = 1 : si64} : (tensor<2x3x4xf32>) -> tensor<*xf32>
"std.return"(%1) : (tensor<*xf32>) -> ()

// CHECK-LABEL: test_cast_1
// CHECK: [[RES:%.+]] = "onnx.Cast"(%arg0) {to = 1 : i64} : (tensor<2x3x4xf32>) -> tensor<2x3x4xf32>
// CHECK: [[RES:%.+]] = "onnx.Cast"(%arg0) {to = 1 : si64} : (tensor<2x3x4xf32>) -> tensor<2x3x4xf32>
// CHECK: return [[RES]] : tensor<2x3x4xf32>
}

func @test_cast_2(%arg0 : tensor<2x3x4xf32>) -> tensor<*xui8> {
%1 = "onnx.Cast"(%arg0) {to = 2} : (tensor<2x3x4xf32>) -> tensor<*xui8>
%1 = "onnx.Cast"(%arg0) {to = 2 : si64} : (tensor<2x3x4xf32>) -> tensor<*xui8>
"std.return"(%1) : (tensor<*xui8>) -> ()

// CHECK-LABEL: test_cast_2
// CHECK: [[RES:%.+]] = "onnx.Cast"(%arg0) {to = 2 : i64} : (tensor<2x3x4xf32>) -> tensor<2x3x4xui8>
// CHECK: [[RES:%.+]] = "onnx.Cast"(%arg0) {to = 2 : si64} : (tensor<2x3x4xf32>) -> tensor<2x3x4xui8>
// CHECK: return [[RES]] : tensor<2x3x4xui8>
}

func @test_cast_3(%arg0 : tensor<2x3x4xf32>) -> tensor<*xi8> {
%1 = "onnx.Cast"(%arg0) {to = 3} : (tensor<2x3x4xf32>) -> tensor<*xi8>
%1 = "onnx.Cast"(%arg0) {to = 3 : si64} : (tensor<2x3x4xf32>) -> tensor<*xi8>
"std.return"(%1) : (tensor<*xi8>) -> ()

// CHECK-LABEL: test_cast_3
// CHECK: [[RES:%.+]] = "onnx.Cast"(%arg0) {to = 3 : i64} : (tensor<2x3x4xf32>) -> tensor<2x3x4xi8>
// CHECK: [[RES:%.+]] = "onnx.Cast"(%arg0) {to = 3 : si64} : (tensor<2x3x4xf32>) -> tensor<2x3x4xi8>
// CHECK: return [[RES]] : tensor<2x3x4xi8>
}

func @test_cast_10(%arg0 : tensor<2x3x4xf32>) -> tensor<*xf16> {
%1 = "onnx.Cast"(%arg0) {to = 10} : (tensor<2x3x4xf32>) -> tensor<*xf16>
%1 = "onnx.Cast"(%arg0) {to = 10 : si64} : (tensor<2x3x4xf32>) -> tensor<*xf16>
"std.return"(%1) : (tensor<*xf16>) -> ()

// CHECK-LABEL: test_cast_10
// CHECK: [[RES:%.+]] = "onnx.Cast"(%arg0) {to = 10 : i64} : (tensor<2x3x4xf32>) -> tensor<2x3x4xf16>
// CHECK: [[RES:%.+]] = "onnx.Cast"(%arg0) {to = 10 : si64} : (tensor<2x3x4xf32>) -> tensor<2x3x4xf16>
// CHECK: return [[RES]] : tensor<2x3x4xf16>
}
@ -1037,33 +1037,33 @@ func @test_dequantize_linear_1(%arg0 : tensor<5x2x3x4xi8>, %arg1 : tensor<f32>,
/// Default and required attributes for 1-D convolution.

func @test_convinteger_0(%arg0 : tensor<1x2x32xi8>, %arg1 : tensor<5x2x6xi8>, %arg2 : tensor<i8>, %arg3 : tensor<i8>) -> tensor<*xi32> {
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", group = 1 : i64} : (tensor<1x2x32xi8>, tensor<5x2x6xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", group = 1 : si64} : (tensor<1x2x32xi8>, tensor<5x2x6xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
"std.return"(%0) : (tensor<*xi32>) -> ()

// CHECK-LABEL: test_convinteger_0
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [1], group = 1 : i64, kernel_shape = [6], pads = [0, 0], strides = [1]} : (tensor<1x2x32xi8>, tensor<5x2x6xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x27xi32>
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [1], group = 1 : si64, kernel_shape = [6], pads = [0, 0], strides = [1]} : (tensor<1x2x32xi8>, tensor<5x2x6xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x27xi32>
// CHECK: return [[RES_ATTR]] : tensor<1x5x27xi32>
}

/// Default and required attributes.

func @test_convinteger_1(%arg0 : tensor<1x2x32x64xi8>, %arg1 : tensor<5x2x6x7xi8>, %arg2 : tensor<i8>, %arg3 : tensor<i8>) -> tensor<*xi32> {
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", group = 1 : i64} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", group = 1 : si64} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
"std.return"(%0) : (tensor<*xi32>) -> ()

// CHECK-LABEL: test_convinteger_1
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [6, 7], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x27x58xi32>
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [6, 7], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x27x58xi32>
// CHECK: return [[RES_ATTR]] : tensor<1x5x27x58xi32>
}

/// kernel_shape attribute.

func @test_convinteger_2(%arg0 : tensor<1x2x32x64xi8>, %arg1 : tensor<5x2x6x7xi8>, %arg2 : tensor<i8>, %arg3 : tensor<i8>) -> tensor<*xi32> {
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", group = 1 : i64, kernel_shape = [8, 9]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", group = 1 : si64, kernel_shape = [8, 9]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
"std.return"(%0) : (tensor<*xi32>) -> ()

// CHECK-LABEL: test_convinteger_2
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [8, 9], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x25x56xi32>
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [8, 9], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x25x56xi32>
// CHECK: return [[RES_ATTR]] : tensor<1x5x25x56xi32>
}

@ -1071,53 +1071,53 @@ func @test_convinteger_2(%arg0 : tensor<1x2x32x64xi8>, %arg1 : tensor<5x2x6x7xi8
/// Use pads to make output size equal to input size by adding K - 1 to the result.

func @test_convinteger_3(%arg0 : tensor<1x2x32x64xi8>, %arg1 : tensor<5x2x6x10xi8>, %arg2 : tensor<i8>, %arg3 : tensor<i8>) -> tensor<*xi32> {
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", group = 1 : i64, pads = [2, 4, 3, 5]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x10xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", group = 1 : si64, pads = [2, 4, 3, 5]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x10xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
"std.return"(%0) : (tensor<*xi32>) -> ()

// CHECK-LABEL: test_convinteger_3
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [6, 10], pads = [2, 4, 3, 5], strides = [1, 1]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x10xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x32x64xi32>
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [6, 10], pads = [2, 4, 3, 5], strides = [1, 1]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x10xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x32x64xi32>
// CHECK: return [[RES_ATTR]] : tensor<1x5x32x64xi32>
}

/// auto_pad set to SAME_UPPER and SAME_LOWER.

func @test_convinteger_4(%arg0 : tensor<1x2x32x64xi8>, %arg1 : tensor<5x2x6x10xi8>, %arg2 : tensor<i8>, %arg3 : tensor<i8>) -> tensor<*xi32> {
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "SAME_UPPER", group = 1 : i64} : (tensor<1x2x32x64xi8>, tensor<5x2x6x10xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "SAME_UPPER", group = 1 : si64} : (tensor<1x2x32x64xi8>, tensor<5x2x6x10xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
"std.return"(%0) : (tensor<*xi32>) -> ()

// CHECK-LABEL: test_convinteger_4
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [6, 10], pads = [2, 4, 3, 5], strides = [1, 1]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x10xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x32x64xi32>
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [6, 10], pads = [2, 4, 3, 5], strides = [1, 1]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x10xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x32x64xi32>
// CHECK: return [[RES_ATTR]] : tensor<1x5x32x64xi32>
}

func @test_convinteger_5(%arg0 : tensor<1x2x32x64xi8>, %arg1 : tensor<5x2x6x10xi8>, %arg2 : tensor<i8>, %arg3 : tensor<i8>) -> tensor<*xi32> {
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "SAME_LOWER", group = 1 : i64} : (tensor<1x2x32x64xi8>, tensor<5x2x6x10xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "SAME_LOWER", group = 1 : si64} : (tensor<1x2x32x64xi8>, tensor<5x2x6x10xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
"std.return"(%0) : (tensor<*xi32>) -> ()

// CHECK-LABEL: test_convinteger_5
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [6, 10], pads = [3, 5, 2, 4], strides = [1, 1]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x10xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x32x64xi32>
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [6, 10], pads = [3, 5, 2, 4], strides = [1, 1]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x10xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x32x64xi32>
// CHECK: return [[RES_ATTR]] : tensor<1x5x32x64xi32>
}

/// auto_pad set to VALID.

func @test_convinteger_6(%arg0 : tensor<1x2x32x64xi8>, %arg1 : tensor<5x2x6x10xi8>, %arg2 : tensor<i8>, %arg3 : tensor<i8>) -> tensor<*xi32> {
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "VALID", group = 1 : i64} : (tensor<1x2x32x64xi8>, tensor<5x2x6x10xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "VALID", group = 1 : si64} : (tensor<1x2x32x64xi8>, tensor<5x2x6x10xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
"std.return"(%0) : (tensor<*xi32>) -> ()

// CHECK-LABEL: test_convinteger_6
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [6, 10], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x10xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x27x55xi32>
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [6, 10], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x10xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x27x55xi32>
// CHECK: return [[RES_ATTR]] : tensor<1x5x27x55xi32>
}

/// With strides attribute.

func @test_convinteger_7(%arg0 : tensor<1x2x32x64xi8>, %arg1 : tensor<5x2x6x7xi8>, %arg2 : tensor<i8>, %arg3 : tensor<i8>) -> tensor<*xi32> {
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", group = 1 : i64, strides = [2, 3]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", group = 1 : si64, strides = [2, 3]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
"std.return"(%0) : (tensor<*xi32>) -> ()

// CHECK-LABEL: test_convinteger_7
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [6, 7], pads = [0, 0, 0, 0], strides = [2, 3]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x14x20xi32>
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [6, 7], pads = [0, 0, 0, 0], strides = [2, 3]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x14x20xi32>
// CHECK: return [[RES_ATTR]] : tensor<1x5x14x20xi32>
}
@ -1125,44 +1125,44 @@ func @test_convinteger_7(%arg0 : tensor<1x2x32x64xi8>, %arg1 : tensor<5x2x6x7xi8
/// The auto_pad will pad as if stride is equal to 1.

func @test_convinteger_8(%arg0 : tensor<1x2x32x64xi8>, %arg1 : tensor<5x2x6x7xi8>, %arg2 : tensor<i8>, %arg3 : tensor<i8>) -> tensor<*xi32> {
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "SAME_UPPER", group = 1 : i64, strides = [2, 3]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "SAME_UPPER", group = 1 : si64, strides = [2, 3]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
"std.return"(%0) : (tensor<*xi32>) -> ()

// CHECK-LABEL: test_convinteger_8
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : i64, kernel_shape = [6, 7], pads = [2, 3, 2, 3], strides = [2, 3]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x16x22xi32>
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [1, 1], group = 1 : si64, kernel_shape = [6, 7], pads = [2, 3, 2, 3], strides = [2, 3]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x16x22xi32>
// CHECK: return [[RES_ATTR]] : tensor<1x5x16x22xi32>
}

/// dilations attribute.

func @test_convinteger_9(%arg0 : tensor<1x2x32x64xi8>, %arg1 : tensor<5x2x6x7xi8>, %arg2 : tensor<i8>, %arg3 : tensor<i8>) -> tensor<*xi32> {
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", group = 1 : i64, dilations = [2, 3]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", group = 1 : si64, dilations = [2, 3]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
"std.return"(%0) : (tensor<*xi32>) -> ()

// CHECK-LABEL: test_convinteger_9
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [2, 3], group = 1 : i64, kernel_shape = [6, 7], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x22x46xi32>
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [2, 3], group = 1 : si64, kernel_shape = [6, 7], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x22x46xi32>
// CHECK: return [[RES_ATTR]] : tensor<1x5x22x46xi32>
}

/// dilations attribute with stride.

func @test_convinteger_10(%arg0 : tensor<1x2x32x64xi8>, %arg1 : tensor<5x2x6x7xi8>, %arg2 : tensor<i8>, %arg3 : tensor<i8>) -> tensor<*xi32> {
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", group = 1 : i64, dilations = [2, 3], strides = [2, 2]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", group = 1 : si64, dilations = [2, 3], strides = [2, 2]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
"std.return"(%0) : (tensor<*xi32>) -> ()

// CHECK-LABEL: test_convinteger_10
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [2, 3], group = 1 : i64, kernel_shape = [6, 7], pads = [0, 0, 0, 0], strides = [2, 2]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x11x23xi32>
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [2, 3], group = 1 : si64, kernel_shape = [6, 7], pads = [0, 0, 0, 0], strides = [2, 2]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x11x23xi32>
// CHECK: return [[RES_ATTR]] : tensor<1x5x11x23xi32>
}

/// dilations attribute with auto_pad set to SAME_UPPER.

func @test_convinteger_11(%arg0 : tensor<1x2x32x64xi8>, %arg1 : tensor<5x2x6x7xi8>, %arg2 : tensor<i8>, %arg3 : tensor<i8>) -> tensor<*xi32> {
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "SAME_UPPER", group = 1 : i64, dilations = [2, 3]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
%0 = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "SAME_UPPER", group = 1 : si64, dilations = [2, 3]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<*xi32>
"std.return"(%0) : (tensor<*xi32>) -> ()

// CHECK-LABEL: test_convinteger_11
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [2, 3], group = 1 : i64, kernel_shape = [6, 7], pads = [5, 9, 5, 9], strides = [1, 1]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x32x64xi32>
// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) {auto_pad = "NOTSET", dilations = [2, 3], group = 1 : si64, kernel_shape = [6, 7], pads = [5, 9, 5, 9], strides = [1, 1]} : (tensor<1x2x32x64xi8>, tensor<5x2x6x7xi8>, tensor<i8>, tensor<i8>) -> tensor<1x5x32x64xi32>
// CHECK: return [[RES_ATTR]] : tensor<1x5x32x64xi32>
}
@ -1203,33 +1203,33 @@ func @test_tile_constant(%arg0 : tensor<5x5x1x32xf32>) -> tensor<*xf32> {
// -----

func @test_gather_axis0(%arg0 : tensor<3x3xf32>, %arg1 : tensor<1x2xi64>) -> tensor<*xf32> {
%0 = "onnx.Gather"(%arg0, %arg1) {axis = 0} : (tensor<3x3xf32>, tensor<1x2xi64>) -> tensor<*xf32>
%0 = "onnx.Gather"(%arg0, %arg1) {axis = 0 : si64} : (tensor<3x3xf32>, tensor<1x2xi64>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-LABEL: test_gather_axis0
// CHECK: [[RES:%.+]] = "onnx.Gather"(%arg0, %arg1) {axis = 0 : i64} : (tensor<3x3xf32>, tensor<1x2xi64>) -> tensor<1x2x3xf32>
// CHECK: [[RES:%.+]] = "onnx.Gather"(%arg0, %arg1) {axis = 0 : si64} : (tensor<3x3xf32>, tensor<1x2xi64>) -> tensor<1x2x3xf32>
// CHECK: return [[RES]] : tensor<1x2x3xf32>
}

// -----

func @test_gather_axis1(%arg0 : tensor<3x3xf32>, %arg1 : tensor<1x2xi64>) -> tensor<*xf32> {
%0 = "onnx.Gather"(%arg0, %arg1) {axis = 1} : (tensor<3x3xf32>, tensor<1x2xi64>) -> tensor<*xf32>
%0 = "onnx.Gather"(%arg0, %arg1) {axis = 1 : si64} : (tensor<3x3xf32>, tensor<1x2xi64>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-LABEL: test_gather_axis1
// CHECK: [[RES:%.+]] = "onnx.Gather"(%arg0, %arg1) {axis = 1 : i64} : (tensor<3x3xf32>, tensor<1x2xi64>) -> tensor<3x1x2xf32>
// CHECK: [[RES:%.+]] = "onnx.Gather"(%arg0, %arg1) {axis = 1 : si64} : (tensor<3x3xf32>, tensor<1x2xi64>) -> tensor<3x1x2xf32>
// CHECK: return [[RES]] : tensor<3x1x2xf32>
}

// -----

func @test_gather_negative_axis(%arg0 : tensor<3x3xf32>, %arg1 : tensor<1x2xi64>) -> tensor<*xf32> {
%0 = "onnx.Gather"(%arg0, %arg1) {axis = -1} : (tensor<3x3xf32>, tensor<1x2xi64>) -> tensor<*xf32>
%0 = "onnx.Gather"(%arg0, %arg1) {axis = -1 : si64} : (tensor<3x3xf32>, tensor<1x2xi64>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-LABEL: test_gather_negative_axis
// CHECK: [[RES:%.+]] = "onnx.Gather"(%arg0, %arg1) {axis = 1 : i64} : (tensor<3x3xf32>, tensor<1x2xi64>) -> tensor<3x1x2xf32>
// CHECK: [[RES:%.+]] = "onnx.Gather"(%arg0, %arg1) {axis = 1 : si64} : (tensor<3x3xf32>, tensor<1x2xi64>) -> tensor<3x1x2xf32>
// CHECK: return [[RES]] : tensor<3x1x2xf32>
}
@ -1473,33 +1473,33 @@ func @test_expand_with_shape(%arg0 : tensor<2x1x6x1xf32>, %arg1: tensor<6x2xf32>
//===----------------------------------------------------------------------===//

func @test_reduce_mean_1(%arg0: tensor<1x2x3x4xf32>) -> tensor<*xf32> {
%0 = "onnx.ReduceMean"(%arg0) {axes = [-1], keepdims = 1 : i64} : (tensor<1x2x3x4xf32>) -> tensor<*xf32>
%0 = "onnx.ReduceMean"(%arg0) {axes = [-1], keepdims = 1 : si64} : (tensor<1x2x3x4xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-LABEL: test_reduce_mean_1
// CHECK: [[RES:%.+]] = "onnx.ReduceMean"(%arg0) {axes = [-1], keepdims = 1 : i64} : (tensor<1x2x3x4xf32>) -> tensor<1x2x3x1xf32>
// CHECK: [[RES:%.+]] = "onnx.ReduceMean"(%arg0) {axes = [-1], keepdims = 1 : si64} : (tensor<1x2x3x4xf32>) -> tensor<1x2x3x1xf32>
// CHECK: return [[RES]] : tensor<1x2x3x1xf32>
}

// -----

func @test_reduce_mean_2(%arg0: tensor<1x2x3x4xf32>) -> tensor<*xf32> {
%0 = "onnx.ReduceMean"(%arg0) {axes = [2], keepdims = 1 : i64} : (tensor<1x2x3x4xf32>) -> tensor<*xf32>
%0 = "onnx.ReduceMean"(%arg0) {axes = [2], keepdims = 1 : si64} : (tensor<1x2x3x4xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-LABEL: test_reduce_mean_2
// CHECK: [[RES:%.+]] = "onnx.ReduceMean"(%arg0) {axes = [2], keepdims = 1 : i64} : (tensor<1x2x3x4xf32>) -> tensor<1x2x1x4xf32>
// CHECK: [[RES:%.+]] = "onnx.ReduceMean"(%arg0) {axes = [2], keepdims = 1 : si64} : (tensor<1x2x3x4xf32>) -> tensor<1x2x1x4xf32>
// CHECK: return [[RES]] : tensor<1x2x1x4xf32>
}

// -----

func @test_reduce_mean_3(%arg0: tensor<1x2x3x4xf32>) -> tensor<*xf32> {
%0 = "onnx.ReduceMean"(%arg0) {axes = [-1], keepdims = 0 : i64} : (tensor<1x2x3x4xf32>) -> tensor<*xf32>
%0 = "onnx.ReduceMean"(%arg0) {axes = [-1], keepdims = 0 : si64} : (tensor<1x2x3x4xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-LABEL: test_reduce_mean_3
// CHECK: [[RES:%.+]] = "onnx.ReduceMean"(%arg0) {axes = [-1], keepdims = 0 : i64} : (tensor<1x2x3x4xf32>) -> tensor<1x2x3xf32>
// CHECK: [[RES:%.+]] = "onnx.ReduceMean"(%arg0) {axes = [-1], keepdims = 0 : si64} : (tensor<1x2x3x4xf32>) -> tensor<1x2x3xf32>
// CHECK: return [[RES]] : tensor<1x2x3xf32>
}
@ -1525,44 +1525,44 @@ func @test_dropout(%arg0: tensor<1x2x3x4xf32>) -> (tensor<*xf32>, tensor<*xi1>)
//===----------------------------------------------------------------------===//

func @test_onehotencoder_string1 (%arg0: tensor<20x1x!onnx.String>) -> tensor<*xf32> {
%0 = "onnx.OneHotEncoder"(%arg0) {cats_strings = ["female", "male"], zeros = 1 : i64} : (tensor<20x1x!onnx.String>) -> tensor<*xf32>
%0 = "onnx.OneHotEncoder"(%arg0) {cats_strings = ["female", "male"], zeros = 1 : si64} : (tensor<20x1x!onnx.String>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-LABEL: test_onehotencoder_string1
// CHECK: [[RES:%.+]] = "onnx.OneHotEncoder"(%arg0) {cats_strings = ["female", "male"], zeros = 1 : i64} : (tensor<20x1x!onnx.String>) -> tensor<20x1x2xf32>
// CHECK: [[RES:%.+]] = "onnx.OneHotEncoder"(%arg0) {cats_strings = ["female", "male"], zeros = 1 : si64} : (tensor<20x1x!onnx.String>) -> tensor<20x1x2xf32>
// CHECK: return [[RES]] : tensor<20x1x2xf32>
}

// -----

func @test_onehotencoder_string2 (%arg0: tensor<20x2x!onnx.String>) -> tensor<*xf32> {
%0 = "onnx.OneHotEncoder"(%arg0) {cats_strings = ["female", "male"], zeros = 1 : i64} : (tensor<20x2x!onnx.String>) -> tensor<*xf32>
%0 = "onnx.OneHotEncoder"(%arg0) {cats_strings = ["female", "male"], zeros = 1 : si64} : (tensor<20x2x!onnx.String>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-LABEL: test_onehotencoder_string2
// CHECK: [[RES:%.+]] = "onnx.OneHotEncoder"(%arg0) {cats_strings = ["female", "male"], zeros = 1 : i64} : (tensor<20x2x!onnx.String>) -> tensor<20x2x2xf32>
// CHECK: [[RES:%.+]] = "onnx.OneHotEncoder"(%arg0) {cats_strings = ["female", "male"], zeros = 1 : si64} : (tensor<20x2x!onnx.String>) -> tensor<20x2x2xf32>
// CHECK: return [[RES]] : tensor<20x2x2xf32>
}

// -----

func @test_onehotencoder_float1(%arg0: tensor<20x1xf32>) -> tensor<*xf32> {
%0 = "onnx.OneHotEncoder"(%arg0) {cats_strings = ["female", "male"], cats_int64s = [1, 2, 4], zeros = 1 : i64} : (tensor<20x1xf32>) -> tensor<*xf32>
%0 = "onnx.OneHotEncoder"(%arg0) {cats_strings = ["female", "male"], cats_int64s = [1, 2, 4], zeros = 1 : si64} : (tensor<20x1xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-LABEL: test_onehotencoder_float1
// CHECK: [[RES:%.+]] = "onnx.OneHotEncoder"(%arg0) {cats_int64s = [1, 2, 4], cats_strings = ["female", "male"], zeros = 1 : i64} : (tensor<20x1xf32>) -> tensor<20x1x3xf32>
// CHECK: [[RES:%.+]] = "onnx.OneHotEncoder"(%arg0) {cats_int64s = [1, 2, 4], cats_strings = ["female", "male"], zeros = 1 : si64} : (tensor<20x1xf32>) -> tensor<20x1x3xf32>
// CHECK: return [[RES]] : tensor<20x1x3xf32>
}

// -----

func @test_onehotencoder_float2(%arg0: tensor<20x2x3xf32>) -> tensor<*xf32> {
%0 = "onnx.OneHotEncoder"(%arg0) {cats_strings = ["female", "male"], cats_int64s = [1, 2, 4], zeros = 1 : i64} : (tensor<20x2x3xf32>) -> tensor<*xf32>
%0 = "onnx.OneHotEncoder"(%arg0) {cats_strings = ["female", "male"], cats_int64s = [1, 2, 4], zeros = 1 : si64} : (tensor<20x2x3xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-LABEL: test_onehotencoder_float2
// CHECK: [[RES:%.+]] = "onnx.OneHotEncoder"(%arg0) {cats_int64s = [1, 2, 4], cats_strings = ["female", "male"], zeros = 1 : i64} : (tensor<20x2x3xf32>) -> tensor<20x2x3x3xf32>
// CHECK: [[RES:%.+]] = "onnx.OneHotEncoder"(%arg0) {cats_int64s = [1, 2, 4], cats_strings = ["female", "male"], zeros = 1 : si64} : (tensor<20x2x3xf32>) -> tensor<20x2x3x3xf32>
// CHECK: return [[RES]] : tensor<20x2x3x3xf32>
}
@ -4,11 +4,11 @@

/// Test the default behavior of Max Pool with no padding (pads are set but should be ignored)
func @test_default_maxpoolsingleout(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "VALID", ceil_mode = 0, kernel_shape = [3,3], pads = [1, 1, 1, 1] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "VALID", ceil_mode = 0 : si64, kernel_shape = [3,3], pads = [1, 1, 1, 1] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
}
// CHECK-LABEL: test_default_maxpoolsingleout
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [1, 1], kernel_shape = [3, 3], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x30x30xf32>
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, dilations = [1, 1], kernel_shape = [3, 3], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x30x30xf32>
// CHECK: return [[RES]] : tensor<5x5x30x30xf32>

@ -16,11 +16,11 @@ func @test_default_maxpoolsingleout(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf

/// Test the default behavior of Max Pool with no padding (pads are not set, default to zero)
func @test_default_maxpoolsingleout_defpad(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0, kernel_shape = [3,3]} : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [3,3]} : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
}
// CHECK-LABEL: test_default_maxpoolsingleout_defpad
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [1, 1], kernel_shape = [3, 3], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x30x30xf32>
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, dilations = [1, 1], kernel_shape = [3, 3], pads = [0, 0, 0, 0], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x30x30xf32>
// CHECK: return [[RES]] : tensor<5x5x30x30xf32>

@ -28,11 +28,11 @@ func @test_default_maxpoolsingleout_defpad(%arg0 : tensor<5x5x32x32xf32>) -> ten

/// Test the default behavior of Max Pool with uniform padding
func @test_default_maxpoolsingleout_pad(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0, kernel_shape = [3,3], pads = [1, 1, 1, 1] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [3,3], pads = [1, 1, 1, 1] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
}
// CHECK-LABEL: test_default_maxpoolsingleout_pad
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>
// CHECK: return [[RES]] : tensor<5x5x32x32xf32>
@ -40,44 +40,44 @@ func @test_default_maxpoolsingleout_pad(%arg0 : tensor<5x5x32x32xf32>) -> tensor

/// Test the default behavior of Max Pool with non uniform padding
func @test_default_maxpoolsingleout_pad_nonunif(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0, kernel_shape = [5,3], pads = [2, 1, 1, 0] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [5,3], pads = [2, 1, 1, 0] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
}
// CHECK-LABEL: test_default_maxpoolsingleout_pad_nonunif
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [1, 1], kernel_shape = [5, 3], pads = [2, 1, 1, 0], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x31x31xf32>
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, dilations = [1, 1], kernel_shape = [5, 3], pads = [2, 1, 1, 0], strides = [1, 1]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x31x31xf32>
// CHECK: return [[RES]] : tensor<5x5x31x31xf32>

// -----

/// Test the default behavior of Max Pool with non uniform padding
func @test_default_maxpoolsingleout_strides(%arg0 : tensor<5x5x32x32xf32>) -> tensor<*xf32> {
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0, kernel_shape = [3,3], pads = [1, 1, 1, 1], strides = [2, 2] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [3,3], pads = [1, 1, 1, 1], strides = [2, 2] } : (tensor<5x5x32x32xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
}
// CHECK-LABEL: test_default_maxpoolsingleout_strides
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [2, 2]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x16x16xf32>
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, dilations = [1, 1], kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [2, 2]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x16x16xf32>
// CHECK: return [[RES]] : tensor<5x5x16x16xf32>

// -----

/// Test the default behavior of Max Pool with non uniform padding
func @test_default_maxpoolsingleout_strides_nonunifpad(%arg0 : tensor<5x5x30x32xf32>) -> tensor<*xf32> {
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0, kernel_shape = [2,2], pads = [1, 0, 0, 0], strides = [2, 2] } : (tensor<5x5x30x32xf32>) -> tensor<*xf32>
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [2,2], pads = [1, 0, 0, 0], strides = [2, 2] } : (tensor<5x5x30x32xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
}
// CHECK-LABEL: test_default_maxpoolsingleout_strides_nonunifpad
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [1, 1], kernel_shape = [2, 2], pads = [1, 0, 0, 0], strides = [2, 2]} : (tensor<5x5x30x32xf32>) -> tensor<5x5x15x16xf32>
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, dilations = [1, 1], kernel_shape = [2, 2], pads = [1, 0, 0, 0], strides = [2, 2]} : (tensor<5x5x30x32xf32>) -> tensor<5x5x15x16xf32>
// CHECK: return [[RES]] : tensor<5x5x15x16xf32>

// -----

/// Test the default behavior of Max Pool with non uniform padding
func @test_default_maxpoolsingleout_strides_nonunifpad_ceil(%arg0 : tensor<5x5x30x32xf32>) -> tensor<*xf32> {
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 1, kernel_shape = [2,2], pads = [1, 0, 0, 0], strides = [2, 2] } : (tensor<5x5x30x32xf32>) -> tensor<*xf32>
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 1 : si64, kernel_shape = [2,2], pads = [1, 0, 0, 0], strides = [2, 2] } : (tensor<5x5x30x32xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
}
// CHECK-LABEL: test_default_maxpoolsingleout_strides_nonunifpad_ceil
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 1 : i64, dilations = [1, 1], kernel_shape = [2, 2], pads = [1, 0, 0, 0], strides = [2, 2]} : (tensor<5x5x30x32xf32>) -> tensor<5x5x16x16xf32>
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 1 : si64, dilations = [1, 1], kernel_shape = [2, 2], pads = [1, 0, 0, 0], strides = [2, 2]} : (tensor<5x5x30x32xf32>) -> tensor<5x5x16x16xf32>
// CHECK: return [[RES]] : tensor<5x5x16x16xf32>
@ -85,22 +85,22 @@ func @test_default_maxpoolsingleout_strides_nonunifpad_ceil(%arg0 : tensor<5x5x3
|
|||
|
||||
/// Test the default behavior of Max Pool with dilation
func @test_default_maxpoolsingleout_strides_dilatation(%arg0 : tensor<5x5x8x8xf32>) -> tensor<*xf32> {
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0, kernel_shape = [2,2], dilations = [2, 2], strides = [3, 3] } : (tensor<5x5x8x8xf32>) -> tensor<*xf32>
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [2,2], dilations = [2, 2], strides = [3, 3] } : (tensor<5x5x8x8xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
}
// CHECK-LABEL: test_default_maxpoolsingleout_strides_dilatation
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [2, 2], kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [3, 3]} : (tensor<5x5x8x8xf32>) -> tensor<5x5x2x2xf32>
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, dilations = [2, 2], kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [3, 3]} : (tensor<5x5x8x8xf32>) -> tensor<5x5x2x2xf32>
// CHECK: return [[RES]] : tensor<5x5x2x2xf32>
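The expected result shapes in the CHECK lines of these MaxPool tests follow the standard ONNX pooling output-size formula. The sketch below is an illustration only and is not part of this change; the helper name `pooledDim` and the standalone test driver are assumptions made for the example.

```cpp
#include <cassert>
#include <cmath>

// Hypothetical helper (illustration only): output size of one pooled spatial
// dimension, following the ONNX MaxPool shape formula.
static int pooledDim(int input, int kernel, int padBegin, int padEnd,
                     int stride, int dilation, bool ceilMode) {
  // Effective kernel extent once dilation is applied.
  int effectiveKernel = (kernel - 1) * dilation + 1;
  double span =
      static_cast<double>(input + padBegin + padEnd - effectiveKernel) / stride;
  int out = ceilMode ? static_cast<int>(std::ceil(span))
                     : static_cast<int>(std::floor(span));
  return out + 1;
}

int main() {
  // test_default_maxpoolsingleout_strides_nonunifpad_ceil: 30x32 input,
  // kernel 2x2, pads [1, 0, 0, 0], strides [2, 2], ceil_mode = 1 -> 16x16.
  assert(pooledDim(30, 2, /*padBegin=*/1, /*padEnd=*/0, 2, 1, /*ceilMode=*/true) == 16);
  assert(pooledDim(32, 2, 0, 0, 2, 1, true) == 16);
  // test_default_maxpoolsingleout_strides_dilatation: 8x8 input, kernel 2x2,
  // dilations [2, 2], strides [3, 3], ceil_mode = 0 -> 2x2.
  assert(pooledDim(8, 2, 0, 0, 3, 2, false) == 2);
  return 0;
}
```

The only difference between `ceil_mode = 0` and `ceil_mode = 1` is whether that division rounds down or up.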

// -----

/// Test the default behavior of Max Pool with auto padding set to SAME_UPPER
func @test_default_maxpoolsingleout_upper(%arg0 : tensor<5x5x16x13xf32>) -> tensor<*xf32> {
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "SAME_UPPER", ceil_mode = 0, kernel_shape = [4,4], strides = [4, 4] } : (tensor<5x5x16x13xf32>) -> tensor<*xf32>
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "SAME_UPPER", ceil_mode = 0 : si64, kernel_shape = [4,4], strides = [4, 4] } : (tensor<5x5x16x13xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
}
// CHECK-LABEL: test_default_maxpoolsingleout_upper
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [1, 1], kernel_shape = [4, 4], pads = [0, 1, 0, 2], strides = [4, 4]} : (tensor<5x5x16x13xf32>) -> tensor<5x5x4x4xf32>
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, dilations = [1, 1], kernel_shape = [4, 4], pads = [0, 1, 0, 2], strides = [4, 4]} : (tensor<5x5x16x13xf32>) -> tensor<5x5x4x4xf32>
// CHECK: return [[RES]] : tensor<5x5x4x4xf32>
@@ -108,10 +108,10 @@ func @test_default_maxpoolsingleout_upper(%arg0 : tensor<5x5x16x13xf32>) -> tens

/// Test the default behavior of Max Pool with auto padding set to SAME_LOWER
func @test_default_maxpoolsingleout_lower(%arg0 : tensor<5x5x16x13xf32>) -> tensor<*xf32> {
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "SAME_LOWER", ceil_mode = 0, kernel_shape = [4,4], strides = [4, 4] } : (tensor<5x5x16x13xf32>) -> tensor<*xf32>
%0 = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "SAME_LOWER", ceil_mode = 0 : si64, kernel_shape = [4,4], strides = [4, 4] } : (tensor<5x5x16x13xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
}
// CHECK-LABEL: test_default_maxpoolsingleout_lower
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : i64, dilations = [1, 1], kernel_shape = [4, 4], pads = [0, 2, 0, 1], strides = [4, 4]} : (tensor<5x5x16x13xf32>) -> tensor<5x5x4x4xf32>
// CHECK: [[RES:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, dilations = [1, 1], kernel_shape = [4, 4], pads = [0, 2, 0, 1], strides = [4, 4]} : (tensor<5x5x16x13xf32>) -> tensor<5x5x4x4xf32>
// CHECK: return [[RES]] : tensor<5x5x4x4xf32>
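The pads in the two `SAME_*` CHECK lines above come from the ONNX auto_pad convention: the output size is `ceil(input / stride)`, and whatever padding is needed to reach it is split between the two sides, with the odd element going to the end for `SAME_UPPER` and to the beginning for `SAME_LOWER`. The sketch below only illustrates that rule for dilation 1; the helper name `samePads` is an assumption made for the example.

```cpp
#include <algorithm>
#include <cassert>
#include <utility>

// Hypothetical helper (illustration only): begin/end padding of one spatial
// dimension under ONNX auto_pad SAME_UPPER or SAME_LOWER (dilation 1).
static std::pair<int, int> samePads(int input, int kernel, int stride, bool upper) {
  // SAME padding targets an output size of ceil(input / stride).
  int output = (input + stride - 1) / stride;
  int total = std::max(0, (output - 1) * stride + kernel - input);
  int small = total / 2;
  int large = total - small;
  // SAME_UPPER puts the extra pad at the end, SAME_LOWER at the beginning.
  return upper ? std::make_pair(small, large) : std::make_pair(large, small);
}

int main() {
  // Width 13, kernel 4, stride 4: total pad 3, matching the tests above.
  assert(samePads(13, 4, 4, /*upper=*/true) == std::make_pair(1, 2));   // pads [0, 1, 0, 2]
  assert(samePads(13, 4, 4, /*upper=*/false) == std::make_pair(2, 1));  // pads [0, 2, 0, 1]
  // Height 16, kernel 4, stride 4: divides evenly, no padding needed.
  assert(samePads(16, 4, 4, true) == std::make_pair(0, 0));
  return 0;
}
```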

@@ -3,9 +3,9 @@
//===----------------------------------------------------------------------===//
// CHECK-LABEL: @check_map1(%arg0: tuple<i64, f32>) -> tensor<*xf32> {
func @check_map1(%arg0: tuple<i64, f32>) -> tensor<*xf32> {
%0 = "onnx.CastMap"(%arg0) {cast_to = "TO_FLOAT", map_form = "DENSE", max_map = 1 : i64} : (tuple<i64, f32>) -> tensor<*xf32>
%0 = "onnx.CastMap"(%arg0) {cast_to = "TO_FLOAT", map_form = "DENSE", max_map = 1 : si64} : (tuple<i64, f32>) -> tensor<*xf32>
return %0 : tensor<*xf32>
// CHECK-NEXT: %0 = "onnx.CastMap"(%arg0) {cast_to = "TO_FLOAT", map_form = "DENSE", max_map = 1 : i64} : (tuple<i64, f32>) -> tensor<*xf32>
// CHECK-NEXT: %0 = "onnx.CastMap"(%arg0) {cast_to = "TO_FLOAT", map_form = "DENSE", max_map = 1 : si64} : (tuple<i64, f32>) -> tensor<*xf32>
}

// CHECK-LABEL: @check_string(%arg0: tensor<10x20x!onnx.String>) -> tensor<10x20x!onnx.String> {
@@ -62,7 +62,10 @@ bool isOMConvTheSameAsNaiveImplFor(const int N, const int C, const int H,
/*Y=*/yType,
/*X=*/xVal, /*W=*/wVal, /*B=*/bVal,
/*auto_pad=*/builder.getStringAttr("NOTSET"),
/*dilations=*/dilations, /*group=*/builder.getI64IntegerAttr(1),
/*dilations=*/dilations,
/*group=*/
IntegerAttr::get(builder.getIntegerType(64, /*isSigned=*/true),
APInt(64, 1, /*isSigned=*/true)),
/*kernel_shape=*/kernel_shape, /*pads=*/pads,
/*strides=*/strides);
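Building the explicitly signed `group` attribute inline is fairly verbose. If the same pattern recurs in other tests, a small wrapper like the hypothetical one below (not part of this change) would keep the builder call readable; it simply packages the `IntegerAttr::get` / `APInt` construction used above.

```cpp
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "llvm/ADT/APInt.h"
#include <cstdint>

// Hypothetical convenience wrapper (illustration only): build an explicitly
// signed 64-bit IntegerAttr, mirroring the construction in the diff above.
static mlir::IntegerAttr getSI64IntegerAttr(mlir::OpBuilder &builder, int64_t value) {
  return mlir::IntegerAttr::get(
      builder.getIntegerType(64, /*isSigned=*/true),
      llvm::APInt(64, value, /*isSigned=*/true));
}
```

A call site would then read `/*group=*/getSI64IntegerAttr(builder, 1)`.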

@@ -1,3 +1,3 @@
git clone https://github.com/llvm/llvm-project.git
# Check out a specific branch that is known to work with ONNX MLIR.
cd llvm-project && git checkout 1d01fc100bb5bef5f5eaf92520b2e52f64ee1d6e && cd ..
cd llvm-project && git checkout 91671e13efbc5dbd17b832d7973401350d0a6ee6 && cd ..
@@ -345,7 +345,7 @@ OpsWithResultTypeInference = {
resultTypes.push_back(attr.getType());
}''',
"Cast":
'''auto toAttr = to().getSExtValue();
'''auto toAttr = to();
auto builder = mlir::OpBuilder(getContext());
resultTypes.push_back(mlir::UnrankedTensorType::get(
convertONNXTypeToMLIRType(builder, static_cast<onnx::TensorProto_DataType>(toAttr))));''',
@@ -440,7 +440,7 @@ def onnx_attr_type_to_mlir_attr_type(t):
    onnx_attr_type = onnx_attr_type[onnx_attr_type.rfind('.') + 1:].lower()

    if onnx_attr_type == 'int':
        mlir_attr_type = 'I64Attr'
        mlir_attr_type = 'SI64Attr'
    elif onnx_attr_type == 'float':
        mlir_attr_type = 'F32Attr'
    elif onnx_attr_type == 'ints':