diff --git a/.circleci/config.yml b/.circleci/config.yml
index e578e10..f5e283c 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -18,7 +18,7 @@ jobs:
             git submodule update --init --recursive
       # Use cached mlir installation if possible.
       - restore_cache:
-          key: V13-LLVM-PROJECT-{{ arch }}
+          key: V15-LLVM-PROJECT-{{ arch }}
       - run:
           name: Install MLIR
           command: |
@@ -29,7 +29,7 @@ jobs:
               source onnx-mlir/utils/install-mlir.sh
             fi
       - save_cache:
-          key: V13-LLVM-PROJECT-{{ arch }}
+          key: V15-LLVM-PROJECT-{{ arch }}
          paths:
            - llvm-project
       - run:
@@ -60,4 +60,4 @@ jobs:
             diff docs/Dialects ../docs/Dialects
       - run:
           name: Print the Current Time
-          command: date
\ No newline at end of file
+          command: date
diff --git a/MLIR.cmake b/MLIR.cmake
index 815bfd5..45a3995 100644
--- a/MLIR.cmake
+++ b/MLIR.cmake
@@ -165,6 +165,7 @@ find_mlir_lib(MLIRTransformUtils)
 find_mlir_lib(MLIRSupport)
 find_mlir_lib(MLIROpenMP)
 find_mlir_lib(MLIROptLib)
+find_mlir_lib(MLIRTableGen)
 find_mlir_lib(MLIRTargetLLVMIRModuleTranslation)
 find_mlir_lib(MLIRTargetLLVMIR)
 find_mlir_lib(MLIRTransformUtils)
@@ -181,7 +182,6 @@ find_mlir_lib(LLVMAsmParser)
 find_mlir_lib(LLVMBinaryFormat)
 find_mlir_lib(LLVMRemarks)
 find_mlir_lib(LLVMIRReader)
-find_mlir_lib(LLVMMLIRTableGen)
 find_mlir_lib(LLVMTransformUtils)
 find_mlir_lib(LLVMBitstreamReader)
 find_mlir_lib(LLVMAnalysis)
diff --git a/README.md b/README.md
index bd41385..20e24f7 100644
--- a/README.md
+++ b/README.md
@@ -58,7 +58,7 @@ Firstly, install MLIR (as a part of LLVM-Project):
 ``` bash
 git clone https://github.com/llvm/llvm-project.git
 # Check out a specific branch that is known to work with ONNX MLIR.
-cd llvm-project && git checkout 0dc91bfd11e6cced0c46c1a25cc96edea0d8fc22 && cd ..
+cd llvm-project && git checkout 32791937d7aceb0a5e1eaabf1bb1a6dbe1639792 && cd ..
 ```

 [same-as-file]: <> (utils/build-mlir.sh)
@@ -148,7 +148,7 @@ Install MLIR (as a part of LLVM-Project):
 ```shell
 git clone https://github.com/llvm/llvm-project.git
 # Check out a specific branch that is known to work with ONNX MLIR.
-cd llvm-project && git checkout 0dc91bfd11e6cced0c46c1a25cc96edea0d8fc22 && cd ..
+cd llvm-project && git checkout 32791937d7aceb0a5e1eaabf1bb1a6dbe1639792 && cd ..
 ```

 [same-as-file]: <> (utils/build-mlir.cmd)
diff --git a/docs/Dialects/onnx.md b/docs/Dialects/onnx.md
index 6f7ddf5..f1c5d6e 100644
--- a/docs/Dialects/onnx.md
+++ b/docs/Dialects/onnx.md
@@ -111,8 +111,8 @@ ONNX ArgMax operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
-`keepdims` | IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -139,8 +139,8 @@ ONNX ArgMin operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
-`keepdims` | IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -284,12 +284,12 @@ ONNX AveragePool operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`auto_pad` | StringAttr | string attribute
-`ceil_mode` | IntegerAttr | 64-bit signless integer attribute
-`count_include_pad` | IntegerAttr | 64-bit signless integer attribute
-`kernel_shape` | ArrayAttr | 64-bit integer array attribute
-`pads` | ArrayAttr | 64-bit integer array attribute
-`strides` | ArrayAttr | 64-bit integer array attribute
+`auto_pad` | ::mlir::StringAttr | string attribute
+`ceil_mode` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`count_include_pad` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`kernel_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`pads` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`strides` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Operands:

@@ -322,8 +322,8 @@ ONNX BatchNormalization operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`epsilon` | FloatAttr | 32-bit float attribute
-`momentum` | FloatAttr | 32-bit float attribute
+`epsilon` | ::mlir::FloatAttr | 32-bit float attribute
+`momentum` | ::mlir::FloatAttr | 32-bit float attribute

#### Operands:

@@ -364,8 +364,8 @@ ONNX BatchNormalization operation in test mode

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`epsilon` | FloatAttr | 32-bit float attribute
-`momentum` | FloatAttr | 32-bit float attribute
+`epsilon` | ::mlir::FloatAttr | 32-bit float attribute
+`momentum` | ::mlir::FloatAttr | 32-bit float attribute

#### Operands:

@@ -393,7 +393,7 @@ ONNX Binarizer operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`threshold` | FloatAttr | 32-bit float attribute
+`threshold` | ::mlir::FloatAttr | 32-bit float attribute

#### Operands:

@@ -428,7 +428,7 @@ ONNX BitShift operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`direction` | StringAttr | string attribute
+`direction` | ::mlir::StringAttr | string attribute

#### Operands:

@@ -455,9 +455,9 @@ ONNX CastMap operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`cast_to` | StringAttr | string attribute
-`map_form` | StringAttr | string attribute
-`max_map` | IntegerAttr | 64-bit signless integer attribute
+`cast_to` | ::mlir::StringAttr | string attribute
+`map_form` | ::mlir::StringAttr | string attribute
+`max_map` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -499,7 +499,7 @@ ONNX Cast operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`to` | IntegerAttr | 64-bit signless integer attribute
+`to` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -530,10 +530,10 @@ ONNX CategoryMapper operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`cats_int64s` | ArrayAttr | 64-bit integer array attribute
-`cats_strings` | ArrayAttr | string array attribute
-`default_int64` | IntegerAttr | 64-bit signless integer attribute
-`default_string` | StringAttr | string attribute
+`cats_int64s` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`cats_strings` | ::mlir::ArrayAttr | string array attribute
+`default_int64` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`default_string` | ::mlir::StringAttr | string attribute

#### Operands:

@@ -602,7 +602,7 @@ ONNX Compress operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -630,8 +630,8 @@ ONNX ConcatFromSequence operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
-`new_axis` | IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`new_axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -655,7 +655,7 @@ ONNX Concat operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -679,7 +679,7 @@ ONNX ConstantOfShape operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`value` | Attribute | any attribute
+`value` | ::mlir::Attribute | any attribute

#### Operands:

@@ -704,8 +704,8 @@ ONNX Constant operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`sparse_value` | Attribute | any attribute
-`value` | Attribute | any attribute
+`sparse_value` | ::mlir::Attribute | any attribute
+`value` | ::mlir::Attribute | any attribute

#### Results:

@@ -724,12 +724,12 @@ ONNX ConvInteger operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`auto_pad` | StringAttr | string attribute
-`dilations` | ArrayAttr | 64-bit integer array attribute
-`group` | IntegerAttr | 64-bit signless integer attribute
-`kernel_shape` | ArrayAttr | 64-bit integer array attribute
-`pads` | ArrayAttr | 64-bit integer array attribute
-`strides` | ArrayAttr | 64-bit integer array attribute
+`auto_pad` | ::mlir::StringAttr | string attribute
+`dilations` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`group` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`kernel_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`pads` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`strides` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Operands:

@@ -757,12 +757,12 @@ ONNX Conv operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`auto_pad` | StringAttr | string attribute
-`dilations` | ArrayAttr | 64-bit integer array attribute
-`group` | IntegerAttr | 64-bit signless integer attribute
-`kernel_shape` | ArrayAttr | 64-bit integer array attribute
-`pads` | ArrayAttr | 64-bit integer array attribute
-`strides` | ArrayAttr | 64-bit integer array attribute
+`auto_pad` | ::mlir::StringAttr | string attribute
+`dilations` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`group` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`kernel_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`pads` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`strides` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Operands:

@@ -801,14 +801,14 @@ ONNX ConvTranspose operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`auto_pad` | StringAttr | string attribute
-`dilations` | ArrayAttr | 64-bit integer array attribute
-`group` | IntegerAttr | 64-bit signless integer attribute
-`kernel_shape` | ArrayAttr | 64-bit integer array attribute
-`output_padding` | ArrayAttr | 64-bit integer array attribute
-`output_shape` | ArrayAttr | 64-bit integer array attribute
-`pads` | ArrayAttr | 64-bit integer array attribute
-`strides` | ArrayAttr | 64-bit integer array attribute
+`auto_pad` | ::mlir::StringAttr | string attribute
+`dilations` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`group` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`kernel_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`output_padding` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`output_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`pads` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`strides` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Operands:

@@ -889,8 +889,8 @@ ONNX CumSum operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`exclusive` | IntegerAttr | 64-bit signless integer attribute
-`reverse` | IntegerAttr | 64-bit signless integer attribute
+`exclusive` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`reverse` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -941,8 +941,8 @@ ONNX DepthToSpace operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`blocksize` | IntegerAttr | 64-bit signless integer attribute
-`mode` | StringAttr | string attribute
+`blocksize` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`mode` | ::mlir::StringAttr | string attribute

#### Operands:

@@ -1022,8 +1022,8 @@ ONNX DictVectorizer operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`int64_vocabulary` | ArrayAttr | 64-bit integer array attribute
-`string_vocabulary` | ArrayAttr | string array attribute
+`int64_vocabulary` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`string_vocabulary` | ::mlir::ArrayAttr | string array attribute

#### Operands:

@@ -1073,7 +1073,7 @@ ONNX Dropout operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`ratio` | FloatAttr | 32-bit float attribute
+`ratio` | ::mlir::FloatAttr | 32-bit float attribute

#### Operands:

@@ -1142,7 +1142,7 @@ ONNX Elu operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`alpha` | FloatAttr | 32-bit float attribute
+`alpha` | ::mlir::FloatAttr | 32-bit float attribute

#### Operands:

@@ -1262,8 +1262,8 @@ ONNX EyeLike operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`dtype` | IntegerAttr | 64-bit signless integer attribute
-`k` | IntegerAttr | 64-bit signless integer attribute
+`dtype` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`k` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -1290,7 +1290,7 @@ ONNX FeatureVectorizer operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`inputdimensions` | ArrayAttr | 64-bit integer array attribute
+`inputdimensions` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Operands:

@@ -1316,7 +1316,7 @@ ONNX Flatten operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -1432,13 +1432,13 @@ ONNX GRU operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`activation_alpha` | ArrayAttr | 32-bit float array attribute
-`activation_beta` | ArrayAttr | 32-bit float array attribute
-`activations` | ArrayAttr | string array attribute
-`clip` | FloatAttr | 32-bit float attribute
-`direction` | StringAttr | string attribute
-`hidden_size` | IntegerAttr | 64-bit signless integer attribute
-`linear_before_reset` | IntegerAttr | 64-bit signless integer attribute
+`activation_alpha` | ::mlir::ArrayAttr | 32-bit float array attribute
+`activation_beta` | ::mlir::ArrayAttr | 32-bit float array attribute
+`activations` | ::mlir::ArrayAttr | string array attribute
+`clip` | ::mlir::FloatAttr | 32-bit float attribute
+`direction` | ::mlir::StringAttr | string attribute
+`hidden_size` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`linear_before_reset` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -1522,7 +1522,7 @@ ONNX GatherElements operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -1686,7 +1686,7 @@ ONNX Gather operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -1723,10 +1723,10 @@ ONNX Gemm operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`alpha` | FloatAttr | 32-bit float attribute
-`beta` | FloatAttr | 32-bit float attribute
-`transA` | IntegerAttr | 64-bit signless integer attribute
-`transB` | IntegerAttr | 64-bit signless integer attribute
+`alpha` | ::mlir::FloatAttr | 32-bit float attribute
+`beta` | ::mlir::FloatAttr | 32-bit float attribute
+`transA` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`transB` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -1774,7 +1774,7 @@ ONNX GlobalLpPool operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`p` | IntegerAttr | 64-bit signless integer attribute
+`p` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -1842,8 +1842,8 @@ ONNX HardSigmoid operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`alpha` | FloatAttr | 32-bit float attribute
-`beta` | FloatAttr | 32-bit float attribute
+`alpha` | ::mlir::FloatAttr | 32-bit float attribute
+`beta` | ::mlir::FloatAttr | 32-bit float attribute

#### Operands:

@@ -1880,7 +1880,7 @@ ONNX Hardmax operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -1922,8 +1922,8 @@ ONNX If operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`else_branch` | Attribute | any attribute
-`then_branch` | Attribute | any attribute
+`else_branch` | ::mlir::Attribute | any attribute
+`then_branch` | ::mlir::Attribute | any attribute

#### Operands:

@@ -1954,10 +1954,10 @@ ONNX Imputer operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`imputed_value_floats` | ArrayAttr | 32-bit float array attribute
-`imputed_value_int64s` | ArrayAttr | 64-bit integer array attribute
-`replaced_value_float` | FloatAttr | 32-bit float attribute
-`replaced_value_int64` | IntegerAttr | 64-bit signless integer attribute
+`imputed_value_floats` | ::mlir::ArrayAttr | 32-bit float array attribute
+`imputed_value_int64s` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`replaced_value_float` | ::mlir::FloatAttr | 32-bit float attribute
+`replaced_value_int64` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -1986,7 +1986,7 @@ ONNX InstanceNormalization operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`epsilon` | FloatAttr | 32-bit float attribute
+`epsilon` | ::mlir::FloatAttr | 32-bit float attribute

#### Operands:

@@ -2012,8 +2012,8 @@ ONNX IsInf operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`detect_negative` | IntegerAttr | 64-bit signless integer attribute
-`detect_positive` | IntegerAttr | 64-bit signless integer attribute
+`detect_negative` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`detect_positive` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -2064,10 +2064,10 @@ ONNX LRN operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`alpha` | FloatAttr | 32-bit float attribute
-`beta` | FloatAttr | 32-bit float attribute
-`bias` | FloatAttr | 32-bit float attribute
-`size` | IntegerAttr | 64-bit signless integer attribute
+`alpha` | ::mlir::FloatAttr | 32-bit float attribute
+`beta` | ::mlir::FloatAttr | 32-bit float attribute
+`bias` | ::mlir::FloatAttr | 32-bit float attribute
+`size` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -2171,13 +2171,13 @@ ONNX LSTM operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`activation_alpha` | ArrayAttr | 32-bit float array attribute
-`activation_beta` | ArrayAttr | 32-bit float array attribute
-`activations` | ArrayAttr | string array attribute
-`clip` | FloatAttr | 32-bit float attribute
-`direction` | StringAttr | string attribute
-`hidden_size` | IntegerAttr | 64-bit signless integer attribute
-`input_forget` | IntegerAttr | 64-bit signless integer attribute
+`activation_alpha` | ::mlir::ArrayAttr | 32-bit float array attribute
+`activation_beta` | ::mlir::ArrayAttr | 32-bit float array attribute
+`activations` | ::mlir::ArrayAttr | string array attribute
+`clip` | ::mlir::FloatAttr | 32-bit float attribute
+`direction` | ::mlir::StringAttr | string attribute
+`hidden_size` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`input_forget` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -2226,15 +2226,15 @@ ONNX LabelEncoder operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`default_float` | FloatAttr | 32-bit float attribute
-`default_int64` | IntegerAttr | 64-bit signless integer attribute
-`default_string` | StringAttr | string attribute
-`keys_floats` | ArrayAttr | 32-bit float array attribute
-`keys_int64s` | ArrayAttr | 64-bit integer array attribute
-`keys_strings` | ArrayAttr | string array attribute
-`values_floats` | ArrayAttr | 32-bit float array attribute
-`values_int64s` | ArrayAttr | 64-bit integer array attribute
-`values_strings` | ArrayAttr | string array attribute
+`default_float` | ::mlir::FloatAttr | 32-bit float attribute
+`default_int64` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`default_string` | ::mlir::StringAttr | string attribute
+`keys_floats` | ::mlir::ArrayAttr | 32-bit float array attribute
+`keys_int64s` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`keys_strings` | ::mlir::ArrayAttr | string array attribute
+`values_floats` | ::mlir::ArrayAttr | 32-bit float array attribute
+`values_int64s` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`values_strings` | ::mlir::ArrayAttr | string array attribute

#### Operands:

@@ -2260,7 +2260,7 @@ ONNX LeakyRelu operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`alpha` | FloatAttr | 32-bit float attribute
+`alpha` | ::mlir::FloatAttr | 32-bit float attribute

#### Operands:

@@ -2306,12 +2306,12 @@ ONNX LinearClassifier operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`classlabels_ints` | ArrayAttr | 64-bit integer array attribute
-`classlabels_strings` | ArrayAttr | string array attribute
-`coefficients` | ArrayAttr | 32-bit float array attribute
-`intercepts` | ArrayAttr | 32-bit float array attribute
-`multi_class` | IntegerAttr | 64-bit signless integer attribute
-`post_transform` | StringAttr | string attribute
+`classlabels_ints` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`classlabels_strings` | ::mlir::ArrayAttr | string array attribute
+`coefficients` | ::mlir::ArrayAttr | 32-bit float array attribute
+`intercepts` | ::mlir::ArrayAttr | 32-bit float array attribute
+`multi_class` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`post_transform` | ::mlir::StringAttr | string attribute

#### Operands:

@@ -2341,10 +2341,10 @@ ONNX LinearRegressor operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`coefficients` | ArrayAttr | 32-bit float array attribute
-`intercepts` | ArrayAttr | 32-bit float array attribute
-`post_transform` | StringAttr | string attribute
-`targets` | IntegerAttr | 64-bit signless integer attribute
+`coefficients` | ::mlir::ArrayAttr | 32-bit float array attribute
+`intercepts` | ::mlir::ArrayAttr | 32-bit float array attribute
+`post_transform` | ::mlir::StringAttr | string attribute
+`targets` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -2399,7 +2399,7 @@ ONNX LogSoftmax operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -2535,7 +2535,7 @@ ONNX Loop operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`body` | Attribute | any attribute
+`body` | ::mlir::Attribute | any attribute

#### Operands:

@@ -2561,8 +2561,8 @@ ONNX LpNormalization operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
-`p` | IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`p` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -2590,11 +2590,11 @@ ONNX LpPool operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`auto_pad` | StringAttr | string attribute
-`kernel_shape` | ArrayAttr | 64-bit integer array attribute
-`p` | IntegerAttr | 64-bit signless integer attribute
-`pads` | ArrayAttr | 64-bit integer array attribute
-`strides` | ArrayAttr | 64-bit integer array attribute
+`auto_pad` | ::mlir::StringAttr | string attribute
+`kernel_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`p` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`pads` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`strides` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Operands:

@@ -2707,13 +2707,13 @@ ONNX MaxPool operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`auto_pad` | StringAttr | string attribute
-`ceil_mode` | IntegerAttr | 64-bit signless integer attribute
-`dilations` | ArrayAttr | 64-bit integer array attribute
-`kernel_shape` | ArrayAttr | 64-bit integer array attribute
-`pads` | ArrayAttr | 64-bit integer array attribute
-`storage_order` | IntegerAttr | 64-bit signless integer attribute
-`strides` | ArrayAttr | 64-bit integer array attribute
+`auto_pad` | ::mlir::StringAttr | string attribute
+`ceil_mode` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`dilations` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`kernel_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`pads` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`storage_order` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`strides` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Operands:

@@ -2739,13 +2739,13 @@ ONNX MaxPool operation with a single output.

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`auto_pad` | StringAttr | string attribute
-`ceil_mode` | IntegerAttr | 64-bit signless integer attribute
-`dilations` | ArrayAttr | 64-bit integer array attribute
-`kernel_shape` | ArrayAttr | 64-bit integer array attribute
-`pads` | ArrayAttr | 64-bit integer array attribute
-`storage_order` | IntegerAttr | 64-bit signless integer attribute
-`strides` | ArrayAttr | 64-bit integer array attribute
+`auto_pad` | ::mlir::StringAttr | string attribute
+`ceil_mode` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`dilations` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`kernel_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`pads` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`storage_order` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`strides` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Operands:

@@ -2771,8 +2771,8 @@ ONNX MaxRoiPool operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`pooled_shape` | ArrayAttr | 64-bit integer array attribute
-`spatial_scale` | FloatAttr | 32-bit float attribute
+`pooled_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`spatial_scale` | ::mlir::FloatAttr | 32-bit float attribute

#### Operands:

@@ -2814,9 +2814,9 @@ ONNX MaxUnpool operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`kernel_shape` | ArrayAttr | 64-bit integer array attribute
-`pads` | ArrayAttr | 64-bit integer array attribute
-`strides` | ArrayAttr | 64-bit integer array attribute
+`kernel_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`pads` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`strides` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Operands:

@@ -2863,7 +2863,7 @@ ONNX MeanVarianceNormalization operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axes` | ArrayAttr | 64-bit integer array attribute
+`axes` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Operands:

@@ -2919,7 +2919,7 @@ ONNX Mod operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`fmod` | IntegerAttr | 64-bit signless integer attribute
+`fmod` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -2966,9 +2966,9 @@ ONNX Multinomial operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`dtype` | IntegerAttr | 64-bit signless integer attribute
-`sample_size` | IntegerAttr | 64-bit signless integer attribute
-`seed` | FloatAttr | 32-bit float attribute
+`dtype` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`sample_size` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`seed` | ::mlir::FloatAttr | 32-bit float attribute

#### Operands:

@@ -3018,7 +3018,7 @@ ONNX NonMaxSuppression operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`center_point_box` | IntegerAttr | 64-bit signless integer attribute
+`center_point_box` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -3076,7 +3076,7 @@ ONNX Normalizer operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`norm` | StringAttr | string attribute
+`norm` | ::mlir::StringAttr | string attribute

#### Operands:

@@ -3125,9 +3125,9 @@ ONNX OneHotEncoder operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`cats_int64s` | ArrayAttr | 64-bit integer array attribute
-`cats_strings` | ArrayAttr | string array attribute
-`zeros` | IntegerAttr | 64-bit signless integer attribute
+`cats_int64s` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`cats_strings` | ::mlir::ArrayAttr | string array attribute
+`zeros` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -3169,7 +3169,7 @@ ONNX OneHot operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -3241,8 +3241,8 @@ ONNX Pad operation with constant padding value

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`pads` | ArrayAttr | 64-bit integer array attribute
-`mode` | StringAttr | string attribute
+`pads` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`mode` | ::mlir::StringAttr | string attribute

#### Operands:

@@ -3271,8 +3271,8 @@ ONNX Pad operation with constant padding value

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`constant_value` | FloatAttr | 32-bit float attribute
-`mode` | StringAttr | string attribute
+`constant_value` | ::mlir::FloatAttr | 32-bit float attribute
+`mode` | ::mlir::StringAttr | string attribute

#### Operands:

@@ -3299,9 +3299,9 @@ ONNX Pad operation with constant padding value

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`pads` | ArrayAttr | 64-bit integer array attribute
-`constant_value` | FloatAttr | 32-bit float attribute
-`mode` | StringAttr | string attribute
+`pads` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`constant_value` | ::mlir::FloatAttr | 32-bit float attribute
+`mode` | ::mlir::StringAttr | string attribute

#### Operands:

@@ -3405,7 +3405,7 @@ ONNX Pad operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`mode` | StringAttr | string attribute
+`mode` | ::mlir::StringAttr | string attribute

#### Operands:

@@ -3457,12 +3457,12 @@ ONNX QLinearConv operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`auto_pad` | StringAttr | string attribute
-`dilations` | ArrayAttr | 64-bit integer array attribute
-`group` | IntegerAttr | 64-bit signless integer attribute
-`kernel_shape` | ArrayAttr | 64-bit integer array attribute
-`pads` | ArrayAttr | 64-bit integer array attribute
-`strides` | ArrayAttr | 64-bit integer array attribute
+`auto_pad` | ::mlir::StringAttr | string attribute
+`dilations` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`group` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`kernel_shape` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`pads` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`strides` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Operands:

@@ -3608,12 +3608,12 @@ ONNX RNN operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`activation_alpha` | ArrayAttr | 32-bit float array attribute
-`activation_beta` | ArrayAttr | 32-bit float array attribute
-`activations` | ArrayAttr | string array attribute
-`clip` | FloatAttr | 32-bit float attribute
-`direction` | StringAttr | string attribute
-`hidden_size` | IntegerAttr | 64-bit signless integer attribute
+`activation_alpha` | ::mlir::ArrayAttr | 32-bit float array attribute
+`activation_beta` | ::mlir::ArrayAttr | 32-bit float array attribute
+`activations` | ::mlir::ArrayAttr | string array attribute
+`clip` | ::mlir::FloatAttr | 32-bit float attribute
+`direction` | ::mlir::StringAttr | string attribute
+`hidden_size` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -3649,10 +3649,10 @@ ONNX RandomNormalLike operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`dtype` | IntegerAttr | 64-bit signless integer attribute
-`mean` | FloatAttr | 32-bit float attribute
-`scale` | FloatAttr | 32-bit float attribute
-`seed` | FloatAttr | 32-bit float attribute
+`dtype` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`mean` | ::mlir::FloatAttr | 32-bit float attribute
+`scale` | ::mlir::FloatAttr | 32-bit float attribute
+`seed` | ::mlir::FloatAttr | 32-bit float attribute

#### Operands:

@@ -3682,11 +3682,11 @@ ONNX RandomNormal operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`dtype` | IntegerAttr | 64-bit signless integer attribute
-`mean` | FloatAttr | 32-bit float attribute
-`scale` | FloatAttr | 32-bit float attribute
-`seed` | FloatAttr | 32-bit float attribute
-`shape` | ArrayAttr | 64-bit integer array attribute
+`dtype` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`mean` | ::mlir::FloatAttr | 32-bit float attribute
+`scale` | ::mlir::FloatAttr | 32-bit float attribute
+`seed` | ::mlir::FloatAttr | 32-bit float attribute
+`shape` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Results:

@@ -3710,10 +3710,10 @@ ONNX RandomUniformLike operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`dtype` | IntegerAttr | 64-bit signless integer attribute
-`high` | FloatAttr | 32-bit float attribute
-`low` | FloatAttr | 32-bit float attribute
-`seed` | FloatAttr | 32-bit float attribute
+`dtype` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`high` | ::mlir::FloatAttr | 32-bit float attribute
+`low` | ::mlir::FloatAttr | 32-bit float attribute
+`seed` | ::mlir::FloatAttr | 32-bit float attribute

#### Operands:

@@ -3742,11 +3742,11 @@ ONNX RandomUniform operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`dtype` | IntegerAttr | 64-bit signless integer attribute
-`high` | FloatAttr | 32-bit float attribute
-`low` | FloatAttr | 32-bit float attribute
-`seed` | FloatAttr | 32-bit float attribute
-`shape` | ArrayAttr | 64-bit integer array attribute
+`dtype` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`high` | ::mlir::FloatAttr | 32-bit float attribute
+`low` | ::mlir::FloatAttr | 32-bit float attribute
+`seed` | ::mlir::FloatAttr | 32-bit float attribute
+`shape` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Results:

@@ -3833,8 +3833,8 @@ ONNX ReduceL1 operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axes` | ArrayAttr | 64-bit integer array attribute
-`keepdims` | IntegerAttr | 64-bit signless integer attribute
+`axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -3863,8 +3863,8 @@ ONNX ReduceL2 operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axes` | ArrayAttr | 64-bit integer array attribute
-`keepdims` | IntegerAttr | 64-bit signless integer attribute
+`axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -3893,8 +3893,8 @@ ONNX ReduceLogSumExp operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axes` | ArrayAttr | 64-bit integer array attribute
-`keepdims` | IntegerAttr | 64-bit signless integer attribute
+`axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -3923,8 +3923,8 @@ ONNX ReduceLogSum operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axes` | ArrayAttr | 64-bit integer array attribute
-`keepdims` | IntegerAttr | 64-bit signless integer attribute
+`axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -3953,8 +3953,8 @@ ONNX ReduceMax operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axes` | ArrayAttr | 64-bit integer array attribute
-`keepdims` | IntegerAttr | 64-bit signless integer attribute
+`axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -3983,8 +3983,8 @@ ONNX ReduceMean operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axes` | ArrayAttr | 64-bit integer array attribute
-`keepdims` | IntegerAttr | 64-bit signless integer attribute
+`axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -4013,8 +4013,8 @@ ONNX ReduceMin operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axes` | ArrayAttr | 64-bit integer array attribute
-`keepdims` | IntegerAttr | 64-bit signless integer attribute
+`axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -4043,8 +4043,8 @@ ONNX ReduceProd operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axes` | ArrayAttr | 64-bit integer array attribute
-`keepdims` | IntegerAttr | 64-bit signless integer attribute
+`axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -4073,8 +4073,8 @@ ONNX ReduceSum operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axes` | ArrayAttr | 64-bit integer array attribute
-`keepdims` | IntegerAttr | 64-bit signless integer attribute
+`axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -4103,8 +4103,8 @@ ONNX ReduceSumSquare operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axes` | ArrayAttr | 64-bit integer array attribute
-`keepdims` | IntegerAttr | 64-bit signless integer attribute
+`axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -4174,12 +4174,12 @@ ONNX Resize operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`coordinate_transformation_mode` | StringAttr | string attribute
-`cubic_coeff_a` | FloatAttr | 32-bit float attribute
-`exclude_outside` | IntegerAttr | 64-bit signless integer attribute
-`extrapolation_value` | FloatAttr | 32-bit float attribute
-`mode` | StringAttr | string attribute
-`nearest_mode` | StringAttr | string attribute
+`coordinate_transformation_mode` | ::mlir::StringAttr | string attribute
+`cubic_coeff_a` | ::mlir::FloatAttr | 32-bit float attribute
+`exclude_outside` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`extrapolation_value` | ::mlir::FloatAttr | 32-bit float attribute
+`mode` | ::mlir::StringAttr | string attribute
+`nearest_mode` | ::mlir::StringAttr | string attribute

#### Operands:

@@ -4238,8 +4238,8 @@ ONNX ReverseSequence operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`batch_axis` | IntegerAttr | 64-bit signless integer attribute
-`time_axis` | IntegerAttr | 64-bit signless integer attribute
+`batch_axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`time_axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -4274,11 +4274,11 @@ ONNX RoiAlign operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`mode` | StringAttr | string attribute
-`output_height` | IntegerAttr | 64-bit signless integer attribute
-`output_width` | IntegerAttr | 64-bit signless integer attribute
-`sampling_ratio` | IntegerAttr | 64-bit signless integer attribute
-`spatial_scale` | FloatAttr | 32-bit float attribute
+`mode` | ::mlir::StringAttr | string attribute
+`output_height` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`output_width` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`sampling_ratio` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`spatial_scale` | ::mlir::FloatAttr | 32-bit float attribute

#### Operands:

@@ -4334,17 +4334,17 @@ ONNX SVMClassifier operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`classlabels_ints` | ArrayAttr | 64-bit integer array attribute
-`classlabels_strings` | ArrayAttr | string array attribute
-`coefficients` | ArrayAttr | 32-bit float array attribute
-`kernel_params` | ArrayAttr | 32-bit float array attribute
-`kernel_type` | StringAttr | string attribute
-`post_transform` | StringAttr | string attribute
-`prob_a` | ArrayAttr | 32-bit float array attribute
-`prob_b` | ArrayAttr | 32-bit float array attribute
-`rho` | ArrayAttr | 32-bit float array attribute
-`support_vectors` | ArrayAttr | 32-bit float array attribute
-`vectors_per_class` | ArrayAttr | 64-bit integer array attribute
+`classlabels_ints` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`classlabels_strings` | ::mlir::ArrayAttr | string array attribute
+`coefficients` | ::mlir::ArrayAttr | 32-bit float array attribute
+`kernel_params` | ::mlir::ArrayAttr | 32-bit float array attribute
+`kernel_type` | ::mlir::StringAttr | string attribute
+`post_transform` | ::mlir::StringAttr | string attribute
+`prob_a` | ::mlir::ArrayAttr | 32-bit float array attribute
+`prob_b` | ::mlir::ArrayAttr | 32-bit float array attribute
+`rho` | ::mlir::ArrayAttr | 32-bit float array attribute
+`support_vectors` | ::mlir::ArrayAttr | 32-bit float array attribute
+`vectors_per_class` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Operands:

@@ -4369,14 +4369,14 @@ ONNX SVMRegressor operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`coefficients` | ArrayAttr | 32-bit float array attribute
-`kernel_params` | ArrayAttr | 32-bit float array attribute
-`kernel_type` | StringAttr | string attribute
-`n_supports` | IntegerAttr | 64-bit signless integer attribute
-`one_class` | IntegerAttr | 64-bit signless integer attribute
-`post_transform` | StringAttr | string attribute
-`rho` | ArrayAttr | 32-bit float array attribute
-`support_vectors` | ArrayAttr | 32-bit float array attribute
+`coefficients` | ::mlir::ArrayAttr | 32-bit float array attribute
+`kernel_params` | ::mlir::ArrayAttr | 32-bit float array attribute
+`kernel_type` | ::mlir::StringAttr | string attribute
+`n_supports` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`one_class` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`post_transform` | ::mlir::StringAttr | string attribute
+`rho` | ::mlir::ArrayAttr | 32-bit float array attribute
+`support_vectors` | ::mlir::ArrayAttr | 32-bit float array attribute

#### Operands:

@@ -4400,8 +4400,8 @@ ONNX Scaler operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`offset` | ArrayAttr | 32-bit float array attribute
-`scale` | ArrayAttr | 32-bit float array attribute
+`offset` | ::mlir::ArrayAttr | 32-bit float array attribute
+`scale` | ::mlir::ArrayAttr | 32-bit float array attribute

#### Operands:

@@ -4545,12 +4545,12 @@ ONNX Scan operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`body` | Attribute | any attribute
-`num_scan_inputs` | IntegerAttr | 64-bit signless integer attribute
-`scan_input_axes` | ArrayAttr | 64-bit integer array attribute
-`scan_input_directions` | ArrayAttr | 64-bit integer array attribute
-`scan_output_axes` | ArrayAttr | 64-bit integer array attribute
-`scan_output_directions` | ArrayAttr | 64-bit integer array attribute
+`body` | ::mlir::Attribute | any attribute
+`num_scan_inputs` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`scan_input_axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`scan_input_directions` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`scan_output_axes` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`scan_output_directions` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Operands:

@@ -4624,7 +4624,7 @@ ONNX ScatterElements operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -4777,7 +4777,7 @@ ONNX Scatter operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -4806,8 +4806,8 @@ ONNX Selu operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`alpha` | FloatAttr | 32-bit float attribute
-`gamma` | FloatAttr | 32-bit float attribute
+`alpha` | ::mlir::FloatAttr | 32-bit float attribute
+`gamma` | ::mlir::FloatAttr | 32-bit float attribute

#### Operands:

@@ -4871,7 +4871,7 @@ ONNX SequenceEmpty operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`dtype` | IntegerAttr | 64-bit signless integer attribute
+`dtype` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Results:

@@ -4974,8 +4974,8 @@ ONNX Shrink operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`bias` | FloatAttr | 32-bit float attribute
-`lambd` | FloatAttr | 32-bit float attribute
+`bias` | ::mlir::FloatAttr | 32-bit float attribute
+`lambd` | ::mlir::FloatAttr | 32-bit float attribute

#### Operands:

@@ -5160,7 +5160,7 @@ ONNX Softmax operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -5224,7 +5224,7 @@ ONNX SpaceToDepth operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`blocksize` | IntegerAttr | 64-bit signless integer attribute
+`blocksize` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -5250,8 +5250,8 @@ ONNX Split operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
-`split` | ArrayAttr | 64-bit integer array attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`split` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Operands:

@@ -5284,8 +5284,8 @@ ONNX SplitToSequence operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
-`keepdims` | IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`keepdims` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -5333,7 +5333,7 @@ ONNX Squeeze operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axes` | ArrayAttr | 64-bit integer array attribute
+`axes` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Operands:

@@ -5365,10 +5365,10 @@ ONNX StringNormalizer operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`case_change_action` | StringAttr | string attribute
-`is_case_sensitive` | IntegerAttr | 64-bit signless integer attribute
-`locale` | StringAttr | string attribute
-`stopwords` | ArrayAttr | string array attribute
+`case_change_action` | ::mlir::StringAttr | string attribute
+`is_case_sensitive` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`locale` | ::mlir::StringAttr | string attribute
+`stopwords` | ::mlir::ArrayAttr | string array attribute

#### Operands:

@@ -5495,15 +5495,15 @@ ONNX TfIdfVectorizer operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`max_gram_length` | IntegerAttr | 64-bit signless integer attribute
-`max_skip_count` | IntegerAttr | 64-bit signless integer attribute
-`min_gram_length` | IntegerAttr | 64-bit signless integer attribute
-`mode` | StringAttr | string attribute
-`ngram_counts` | ArrayAttr | 64-bit integer array attribute
-`ngram_indexes` | ArrayAttr | 64-bit integer array attribute
-`pool_int64s` | ArrayAttr | 64-bit integer array attribute
-`pool_strings` | ArrayAttr | string array attribute
-`weights` | ArrayAttr | 32-bit float array attribute
+`max_gram_length` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`max_skip_count` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`min_gram_length` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`mode` | ::mlir::StringAttr | string attribute
+`ngram_counts` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`ngram_indexes` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`pool_int64s` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`pool_strings` | ::mlir::ArrayAttr | string array attribute
+`weights` | ::mlir::ArrayAttr | 32-bit float array attribute

#### Operands:

@@ -5529,7 +5529,7 @@ ONNX ThresholdedRelu operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`alpha` | FloatAttr | 32-bit float attribute
+`alpha` | ::mlir::FloatAttr | 32-bit float attribute

#### Operands:

@@ -5587,9 +5587,9 @@ ONNX TopK operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
-`largest` | IntegerAttr | 64-bit signless integer attribute
-`sorted` | IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`largest` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`sorted` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -5617,7 +5617,7 @@ ONNX Transpose operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`perm` | ArrayAttr | 64-bit integer array attribute
+`perm` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Operands:

@@ -5649,23 +5649,23 @@ ONNX TreeEnsembleClassifier operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`base_values` | ArrayAttr | 32-bit float array attribute
-`class_ids` | ArrayAttr | 64-bit integer array attribute
-`class_nodeids` | ArrayAttr | 64-bit integer array attribute
-`class_treeids` | ArrayAttr | 64-bit integer array attribute
-`class_weights` | ArrayAttr | 32-bit float array attribute
-`classlabels_int64s` | ArrayAttr | 64-bit integer array attribute
-`classlabels_strings` | ArrayAttr | string array attribute
-`nodes_falsenodeids` | ArrayAttr | 64-bit integer array attribute
-`nodes_featureids` | ArrayAttr | 64-bit integer array attribute
-`nodes_hitrates` | ArrayAttr | 32-bit float array attribute
-`nodes_missing_value_tracks_true` | ArrayAttr | 64-bit integer array attribute
-`nodes_modes` | ArrayAttr | string array attribute
-`nodes_nodeids` | ArrayAttr | 64-bit integer array attribute
-`nodes_treeids` | ArrayAttr | 64-bit integer array attribute
-`nodes_truenodeids` | ArrayAttr | 64-bit integer array attribute
-`nodes_values` | ArrayAttr | 32-bit float array attribute
-`post_transform` | StringAttr | string attribute
+`base_values` | ::mlir::ArrayAttr | 32-bit float array attribute
+`class_ids` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`class_nodeids` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`class_treeids` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`class_weights` | ::mlir::ArrayAttr | 32-bit float array attribute
+`classlabels_int64s` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`classlabels_strings` | ::mlir::ArrayAttr | string array attribute
+`nodes_falsenodeids` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`nodes_featureids` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`nodes_hitrates` | ::mlir::ArrayAttr | 32-bit float array attribute
+`nodes_missing_value_tracks_true` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`nodes_modes` | ::mlir::ArrayAttr | string array attribute
+`nodes_nodeids` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`nodes_treeids` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`nodes_truenodeids` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`nodes_values` | ::mlir::ArrayAttr | 32-bit float array attribute
+`post_transform` | ::mlir::StringAttr | string attribute

#### Operands:

@@ -5699,23 +5699,23 @@ ONNX TreeEnsembleRegressor operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`aggregate_function` | StringAttr | string attribute
-`base_values` | ArrayAttr | 32-bit float array attribute
-`n_targets` | IntegerAttr | 64-bit signless integer attribute
-`nodes_falsenodeids` | ArrayAttr | 64-bit integer array attribute
-`nodes_featureids` | ArrayAttr | 64-bit integer array attribute
-`nodes_hitrates` | ArrayAttr | 32-bit float array attribute
-`nodes_missing_value_tracks_true` | ArrayAttr | 64-bit integer array attribute
-`nodes_modes` | ArrayAttr | string array attribute
-`nodes_nodeids` | ArrayAttr | 64-bit integer array attribute
-`nodes_treeids` | ArrayAttr | 64-bit integer array attribute
-`nodes_truenodeids` | ArrayAttr | 64-bit integer array attribute
-`nodes_values` | ArrayAttr | 32-bit float array attribute
-`post_transform` | StringAttr | string attribute
-`target_ids` | ArrayAttr | 64-bit integer array attribute
-`target_nodeids` | ArrayAttr | 64-bit integer array attribute
-`target_treeids` | ArrayAttr | 64-bit integer array attribute
-`target_weights` | ArrayAttr | 32-bit float array attribute
+`aggregate_function` | ::mlir::StringAttr | string attribute
+`base_values` | ::mlir::ArrayAttr | 32-bit float array attribute
+`n_targets` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`nodes_falsenodeids` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`nodes_featureids` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`nodes_hitrates` | ::mlir::ArrayAttr | 32-bit float array attribute
+`nodes_missing_value_tracks_true` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`nodes_modes` | ::mlir::ArrayAttr | string array attribute
+`nodes_nodeids` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`nodes_treeids` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`nodes_truenodeids` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`nodes_values` | ::mlir::ArrayAttr | 32-bit float array attribute
+`post_transform` | ::mlir::StringAttr | string attribute
+`target_ids` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`target_nodeids` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`target_treeids` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`target_weights` | ::mlir::ArrayAttr | 32-bit float array attribute

#### Operands:

@@ -5813,8 +5813,8 @@ ONNX Unique operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axis` | IntegerAttr | 64-bit signless integer attribute
-`sorted` | IntegerAttr | 64-bit signless integer attribute
+`axis` | ::mlir::IntegerAttr | 64-bit signless integer attribute
+`sorted` | ::mlir::IntegerAttr | 64-bit signless integer attribute

#### Operands:

@@ -5852,7 +5852,7 @@ ONNX Unsqueeze operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`axes` | ArrayAttr | 64-bit integer array attribute
+`axes` | ::mlir::ArrayAttr | 64-bit integer array attribute

#### Operands:

@@ -5878,7 +5878,7 @@ ONNX Upsample operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`mode` | StringAttr | string attribute
+`mode` | ::mlir::StringAttr | string attribute

#### Operands:

@@ -5951,8 +5951,8 @@ ONNX ZipMap operation

| Attribute | MLIR Type | Description |
| :-------: | :-------: | ----------- |
-`classlabels_int64s` | ArrayAttr | 64-bit integer array attribute
-`classlabels_strings` | ArrayAttr | string array attribute
+`classlabels_int64s` | ::mlir::ArrayAttr | 64-bit integer array attribute
+`classlabels_strings` | ::mlir::ArrayAttr | string array attribute

#### Operands:
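The onnx.md hunks above are purely notational: the generated dialect documentation now prints each attribute type with its full `::mlir::` namespace qualification instead of the bare class name. For orientation, here is a minimal sketch of how the attribute kinds named in these tables are built with the standard MLIR C++ builder API (the function name and values are illustrative, not taken from the repository):

```cpp
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"

void buildDocumentedAttrKinds() {
  mlir::MLIRContext context;
  mlir::Builder builder(&context);
  // Fully qualified names, matching the updated tables:
  ::mlir::IntegerAttr axis = builder.getI64IntegerAttr(1);         // 64-bit signless integer attribute
  ::mlir::FloatAttr epsilon = builder.getF32FloatAttr(1e-5f);      // 32-bit float attribute
  ::mlir::StringAttr autoPad = builder.getStringAttr("NOTSET");    // string attribute
  ::mlir::ArrayAttr pads = builder.getI64ArrayAttr({0, 0, 1, 1});  // 64-bit integer array attribute
  (void)axis; (void)epsilon; (void)autoPad; (void)pads;
}
```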
bash git clone https://github.com/llvm/llvm-project.git # Check out a specific branch that is known to work with ONNX MLIR. -cd llvm-project && git checkout 0dc91bfd11e6cced0c46c1a25cc96edea0d8fc22 && cd .. +cd llvm-project && git checkout 32791937d7aceb0a5e1eaabf1bb1a6dbe1639792 && cd .. ``` [same-as-file]: <> (utils/build-mlir.sh) @@ -110,7 +110,7 @@ Install MLIR (as a part of LLVM-Project): ```shell git clone https://github.com/llvm/llvm-project.git # Check out a specific branch that is known to work with ONNX MLIR. -cd llvm-project && git checkout 0dc91bfd11e6cced0c46c1a25cc96edea0d8fc22 && cd .. +cd llvm-project && git checkout 32791937d7aceb0a5e1eaabf1bb1a6dbe1639792 && cd .. ``` [same-as-file]: <> (utils/build-mlir.cmd) diff --git a/src/Conversion/ONNXToKrnl/Math/Gemm.cpp b/src/Conversion/ONNXToKrnl/Math/Gemm.cpp index 4748721..0ba8396 100644 --- a/src/Conversion/ONNXToKrnl/Math/Gemm.cpp +++ b/src/Conversion/ONNXToKrnl/Math/Gemm.cpp @@ -24,7 +24,7 @@ struct ONNXGemmOpLowering : public ConversionPattern { bool hasBias = !op->getOperand(2).getType().isa<NoneType>(); Value A, B, C; - ONNXGemmOpOperandAdaptor operandAdaptor(operands); + ONNXGemmOpAdaptor operandAdaptor(operands); A = operandAdaptor.A(); B = operandAdaptor.B(); if (hasBias) diff --git a/src/Conversion/ONNXToKrnl/Math/MatMul.cpp b/src/Conversion/ONNXToKrnl/Math/MatMul.cpp index 6eccdb6..def7f4f 100644 --- a/src/Conversion/ONNXToKrnl/Math/MatMul.cpp +++ b/src/Conversion/ONNXToKrnl/Math/MatMul.cpp @@ -20,7 +20,7 @@ struct ONNXMatMulOpLowering : public ConversionPattern { ConversionPatternRewriter &rewriter) const final { auto loc = op->getLoc(); - ONNXMatMulOpOperandAdaptor operandAdaptor(operands); + ONNXMatMulOpAdaptor operandAdaptor(operands); Value A = operandAdaptor.A(); Value B = operandAdaptor.B(); auto AShape = A.getType().cast<MemRefType>().getShape(); diff --git a/src/Conversion/ONNXToKrnl/Math/Softmax.cpp b/src/Conversion/ONNXToKrnl/Math/Softmax.cpp index 84927aa..1f9ec3f 100644 --- a/src/Conversion/ONNXToKrnl/Math/Softmax.cpp +++ b/src/Conversion/ONNXToKrnl/Math/Softmax.cpp @@ -28,7 +28,7 @@ struct ONNXSoftmaxOpLowering : public ConversionPattern { assert(axis >= -rank && axis <= rank - 1); auto loc = op->getLoc(); - ONNXSoftmaxOpOperandAdaptor operandAdaptor(operands); + ONNXSoftmaxOpAdaptor operandAdaptor(operands); Value input = operandAdaptor.input(); // Insert an allocation and deallocation for the result of this operation. auto elementType = memRefType.getElementType(); diff --git a/src/Conversion/ONNXToKrnl/NN/Conv.cpp b/src/Conversion/ONNXToKrnl/NN/Conv.cpp index 3c006a2..c11b93c 100644 --- a/src/Conversion/ONNXToKrnl/NN/Conv.cpp +++ b/src/Conversion/ONNXToKrnl/NN/Conv.cpp @@ -20,7 +20,7 @@ struct ONNXConvOpLowering : public ConversionPattern { LogicalResult matchAndRewrite(Operation *op, ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) const final { auto loc = op->getLoc(); - ONNXConvOpOperandAdaptor operandAdaptor(operands); + ONNXConvOpAdaptor operandAdaptor(operands); // Insert an allocation and deallocation for the result of this operation.
auto memRefType = convertToMemRefType(*op->result_type_begin()); Value alloc; diff --git a/src/Conversion/ONNXToKrnl/NN/Normalization.cpp b/src/Conversion/ONNXToKrnl/NN/Normalization.cpp index 5d5ca65..144bfc9 100644 --- a/src/Conversion/ONNXToKrnl/NN/Normalization.cpp +++ b/src/Conversion/ONNXToKrnl/NN/Normalization.cpp @@ -21,7 +21,7 @@ struct ONNXBatchNormalizationTestModeOpLowering : public ConversionPattern { ConversionPatternRewriter &rewriter) const final { // batchnorm{epsilon}(x, scale, bias, mean, variance) = // scale * (x - mean) / sqrt(variance + epsilon) + bias - ONNXBatchNormalizationTestModeOpOperandAdaptor operandAdaptor(operands); + ONNXBatchNormalizationTestModeOpAdaptor operandAdaptor(operands); auto loc = op->getLoc(); auto memRefType = convertToMemRefType(*op->result_type_begin()); diff --git a/src/Conversion/ONNXToKrnl/NN/Pooling.cpp b/src/Conversion/ONNXToKrnl/NN/Pooling.cpp index f006525..14738a0 100644 --- a/src/Conversion/ONNXToKrnl/NN/Pooling.cpp +++ b/src/Conversion/ONNXToKrnl/NN/Pooling.cpp @@ -190,7 +190,7 @@ struct ONNXPoolOpLowering : public ConversionPattern { LogicalResult matchAndRewrite(Operation *op, ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) const final { - ONNXMaxPoolSingleOutOpOperandAdaptor operandAdaptor(operands); + ONNXMaxPoolSingleOutOpAdaptor operandAdaptor(operands); auto loc = op->getLoc(); PoolOp poolOp = llvm::dyn_cast<PoolOp>(op); diff --git a/src/Conversion/ONNXToKrnl/RNN/LSTM.cpp b/src/Conversion/ONNXToKrnl/RNN/LSTM.cpp index 0a934e1..47752b0 100644 --- a/src/Conversion/ONNXToKrnl/RNN/LSTM.cpp +++ b/src/Conversion/ONNXToKrnl/RNN/LSTM.cpp @@ -159,7 +159,7 @@ getActivationPack(ONNXLSTMOp *op) { template <> LstmState allocAndInitializeStates<ONNXLSTMOp, LstmState>( ConversionPatternRewriter &rewriter, Location loc, ONNXLSTMOp *op, - OperandAdaptor<ONNXLSTMOp> operandAdaptor) { + typename ONNXLSTMOp::Adaptor operandAdaptor) { LstmState state; // Insert allocation and deallocation for the results of this operation. @@ -239,7 +239,7 @@ LstmState allocAndInitializeStates<ONNXLSTMOp, LstmState>( template <> void calculateState<ONNXLSTMOp, LstmState, LstmActivationPack>( ConversionPatternRewriter &rewriter, Location loc, - OperandAdaptor<ONNXLSTMOp> operandAdaptor, LstmState state, + typename ONNXLSTMOp::Adaptor operandAdaptor, LstmState state, LstmActivationPack activationPack, Value directionIV, Value sequenceIV) { bool hasBiasForInput = false, hasPeepholes = false; diff --git a/src/Conversion/ONNXToKrnl/RNN/RNNBase.hpp b/src/Conversion/ONNXToKrnl/RNN/RNNBase.hpp index 3d748e4..eeebcd9 100644 --- a/src/Conversion/ONNXToKrnl/RNN/RNNBase.hpp +++ b/src/Conversion/ONNXToKrnl/RNN/RNNBase.hpp @@ -51,12 +51,12 @@ std::tuple<A, A> getActivationPack(RNNOp *op); // Allocate memory for RNN states and initialize them. template <typename RNNOp, typename S> S allocAndInitializeStates(ConversionPatternRewriter &rewriter, Location loc, - RNNOp *op, OperandAdaptor<RNNOp> operandAdaptor); + RNNOp *op, typename RNNOp::Adaptor operandAdaptor); // Calculate new states from the current input and states. template <typename RNNOp, typename S, typename A> void calculateState(ConversionPatternRewriter &rewriter, Location loc, - OperandAdaptor<RNNOp> operandAdaptor, S state, A activationSet, + typename RNNOp::Adaptor operandAdaptor, S state, A activationSet, Value directionIV, Value sequenceIV); // Write states to the RNN's outputs.
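A note on the rename that recurs through these source hunks: the newer MLIR no longer generates a standalone `FooOpOperandAdaptor` class (or the `OperandAdaptor<Op>` template) for each ODS-defined operation; the adaptor is now emitted as a class nested inside the op itself, `FooOp::Adaptor`. In templated code such as the RNN lowering above, the adaptor therefore becomes a dependent type and must be spelled `typename RNNOp::Adaptor`. A minimal sketch under those assumptions (the helper name `allocStates` is hypothetical, not from the patch):

```cpp
// Hedged sketch, not code from this patch: RNNOp is the ONNX op class
// (e.g. ONNXLSTMOp) and S the state struct (e.g. LstmState). The adaptor
// wraps the already type-converted `operands` of a ConversionPattern and
// exposes them under the op's ODS operand names (X(), W(), R(), ...).
template <typename RNNOp, typename S>
S allocStates(ConversionPatternRewriter &rewriter, Location loc, RNNOp *op,
    typename RNNOp::Adaptor operandAdaptor) { // `typename`: dependent type
  S state;
  Value input = operandAdaptor.X(); // remapped operand, not op->getOperand
  (void)input;
  return state;
}
```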
@@ -74,7 +74,7 @@ struct ONNXRNNOpLowering : public ConversionPattern { auto loc = op->getLoc(); RNNOp rnnOp = llvm::dyn_cast<RNNOp>(op); - OperandAdaptor<RNNOp> operandAdaptor(operands); + typename RNNOp::Adaptor operandAdaptor(operands); if (hasAllNoneOutput(&rnnOp)) { rewriter.eraseOp(op); diff --git a/src/Conversion/ONNXToKrnl/Tensor/Identity.cpp b/src/Conversion/ONNXToKrnl/Tensor/Identity.cpp index e31a68f..3f0b305 100644 --- a/src/Conversion/ONNXToKrnl/Tensor/Identity.cpp +++ b/src/Conversion/ONNXToKrnl/Tensor/Identity.cpp @@ -18,7 +18,7 @@ struct ONNXIdentityOpLowering : public ConversionPattern { LogicalResult matchAndRewrite(Operation *op, ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) const final { - ONNXIdentityOpOperandAdaptor operandAdaptor(operands); + ONNXIdentityOpAdaptor operandAdaptor(operands); rewriter.replaceOp(op, operandAdaptor.input()); return success(); } diff --git a/src/Conversion/ONNXToKrnl/Tensor/Pad.cpp b/src/Conversion/ONNXToKrnl/Tensor/Pad.cpp index 81e9105..2ac65d2 100644 --- a/src/Conversion/ONNXToKrnl/Tensor/Pad.cpp +++ b/src/Conversion/ONNXToKrnl/Tensor/Pad.cpp @@ -19,7 +19,7 @@ struct ONNXPadOpLowering : public ConversionPattern { LogicalResult matchAndRewrite(Operation *op, ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) const final { ONNXPadOp myOp = llvm::dyn_cast<ONNXPadOp>(op); - ONNXPadOpOperandAdaptor operandAdaptor(operands); + ONNXPadOpAdaptor operandAdaptor(operands); auto tensorType = myOp.output().getType(); auto loc = op->getLoc(); diff --git a/src/Conversion/ONNXToKrnl/Tensor/PadConstantValuePad.cpp b/src/Conversion/ONNXToKrnl/Tensor/PadConstantValuePad.cpp index a201107..c932cae 100644 --- a/src/Conversion/ONNXToKrnl/Tensor/PadConstantValuePad.cpp +++ b/src/Conversion/ONNXToKrnl/Tensor/PadConstantValuePad.cpp @@ -20,7 +20,7 @@ struct ONNXPadConstantValuePadOpLowering : public ConversionPattern { LogicalResult matchAndRewrite(Operation *op, ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) const final { auto tensorType = (*op->result_type_begin()); - ONNXPadConstantValuePadOpOperandAdaptor operandAdaptor(operands); + ONNXPadConstantValuePadOpAdaptor operandAdaptor(operands); auto loc = op->getLoc(); // Only constant padding is supported now.
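The Identity hunk above is small enough to show why these patterns read inputs through an adaptor at all: inside a `ConversionPattern`, the operands of the matched op still carry their original tensor types, while the `operands` array holds the values remapped by earlier conversions (memrefs, by this point in the pipeline). A hedged restatement of the Identity lowering from the patch:

```cpp
// Mirrors ONNXIdentityOpLowering above; sketch only. The accessor name
// input() follows the op's ODS operand name.
LogicalResult matchAndRewrite(Operation *op, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const final {
  // op->getOperand(0) would still be the tensor-typed SSA value here; the
  // adaptor hands back the converted (memref-typed) replacement instead.
  ONNXIdentityOpAdaptor operandAdaptor(operands);
  rewriter.replaceOp(op, operandAdaptor.input());
  return success();
}
```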
diff --git a/src/Conversion/ONNXToKrnl/Tensor/Reshape.cpp b/src/Conversion/ONNXToKrnl/Tensor/Reshape.cpp index eeabde7..d7032a2 100644 --- a/src/Conversion/ONNXToKrnl/Tensor/Reshape.cpp +++ b/src/Conversion/ONNXToKrnl/Tensor/Reshape.cpp @@ -18,7 +18,7 @@ struct ONNXReshapeOpLowering : public ConversionPattern { LogicalResult matchAndRewrite(Operation *op, ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) const final { - ONNXReshapeOpOperandAdaptor operandAdaptor(operands); + ONNXReshapeOpAdaptor operandAdaptor(operands); auto loc = op->getLoc(); Value data = operandAdaptor.data(); auto inputShape = data.getType().cast<MemRefType>().getShape(); diff --git a/src/Conversion/ONNXToKrnl/Tensor/Squeeze.cpp b/src/Conversion/ONNXToKrnl/Tensor/Squeeze.cpp index 1e69f10..87ca2fe 100644 --- a/src/Conversion/ONNXToKrnl/Tensor/Squeeze.cpp +++ b/src/Conversion/ONNXToKrnl/Tensor/Squeeze.cpp @@ -18,7 +18,7 @@ struct ONNXSqueezeOpLowering : public ConversionPattern { LogicalResult matchAndRewrite(Operation *op, ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) const final { - ONNXSqueezeOpOperandAdaptor operandAdaptor(operands); + ONNXSqueezeOpAdaptor operandAdaptor(operands); auto loc = op->getLoc(); auto memRefType = convertToMemRefType(*op->result_type_begin()); auto memRefShape = memRefType.getShape(); diff --git a/src/Conversion/ONNXToKrnl/Tensor/Transpose.cpp b/src/Conversion/ONNXToKrnl/Tensor/Transpose.cpp index d4f48e6..7ecf946 100644 --- a/src/Conversion/ONNXToKrnl/Tensor/Transpose.cpp +++ b/src/Conversion/ONNXToKrnl/Tensor/Transpose.cpp @@ -18,7 +18,7 @@ struct ONNXTransposeOpLowering : public ConversionPattern { LogicalResult matchAndRewrite(Operation *op, ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) const final { - ONNXTransposeOpOperandAdaptor operandAdaptor(operands); + ONNXTransposeOpAdaptor operandAdaptor(operands); auto loc = op->getLoc(); // Insert an allocation and deallocation for the result of this operation. auto memRefType = convertToMemRefType(*op->result_type_begin()); diff --git a/src/Conversion/ONNXToKrnl/Tensor/Unsqueeze.cpp b/src/Conversion/ONNXToKrnl/Tensor/Unsqueeze.cpp index d9215e2..254ddcf 100644 --- a/src/Conversion/ONNXToKrnl/Tensor/Unsqueeze.cpp +++ b/src/Conversion/ONNXToKrnl/Tensor/Unsqueeze.cpp @@ -18,7 +18,7 @@ struct ONNXUnsqueezeOpLowering : public ConversionPattern { LogicalResult matchAndRewrite(Operation *op, ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) const final { - ONNXUnsqueezeOpOperandAdaptor operandAdaptor(operands); + ONNXUnsqueezeOpAdaptor operandAdaptor(operands); auto loc = op->getLoc(); auto memRefType = convertToMemRefType(*op->result_type_begin()); int outRank = memRefType.getRank(); diff --git a/src/Transform/LowerToLLVM.cpp b/src/Transform/LowerToLLVM.cpp index 0305c36..42f13da 100644 --- a/src/Transform/LowerToLLVM.cpp +++ b/src/Transform/LowerToLLVM.cpp @@ -139,7 +139,7 @@ public: op->getContext()->getRegisteredDialect<LLVM::LLVMDialect>(); assert(llvmDialect && "expected llvm dialect to be registered"); - KrnlGetRefOpOperandAdaptor operandAdaptor(operands); + KrnlGetRefOpAdaptor operandAdaptor(operands); // This is the type of the krnl.getref output. This type is used // for the type of the internal MemRef.
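Besides the adaptor rename visible in the hunk above, LowerToLLVM.cpp has to track two further upstream API changes, both visible in the hunks that follow: the boolean flags of the std-to-LLVM lowering move into a `LowerToLLVMOptions` struct, and `applyFullConversion` drops its `TypeConverter` parameter. Condensed from those hunks (not additional code):

```cpp
// The emitCWrappers flag now travels in an options struct instead of a
// positional bool argument, and the full conversion no longer takes the
// type converter explicitly.
LowerToLLVMOptions options;
options.emitCWrappers = true;
LLVMTypeConverter typeConverter(&getContext());
OwningRewritePatternList patterns;
populateStdToLLVMConversionPatterns(typeConverter, patterns, options);
if (failed(applyFullConversion(getOperation(), target, patterns)))
  signalPassFailure();
```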
@@ -340,7 +340,7 @@ public: LogicalResult matchAndRewrite(Operation *op, ArrayRef<Value> operands, ConversionPatternRewriter &rewriter) const override { auto *context = op->getContext(); - KrnlMemcpyOpOperandAdaptor operandAdaptor(operands); + KrnlMemcpyOpAdaptor operandAdaptor(operands); auto loc = op->getLoc(); auto *llvmDialect = op->getContext()->getRegisteredDialect<LLVM::LLVMDialect>(); @@ -878,6 +878,8 @@ void KrnlToLLVMLoweringPass::runOnOperation() { target.addLegalOp(); // Lower the MemRef types to a representation in LLVM. + LowerToLLVMOptions options; + options.emitCWrappers = true; LLVMTypeConverter typeConverter(&getContext()); // We have a combination of `krnl`, `affine`, and `std` operations. We @@ -885,9 +887,7 @@ void KrnlToLLVMLoweringPass::runOnOperation() { OwningRewritePatternList patterns; populateAffineToStdConversionPatterns(patterns, &getContext()); populateLoopToStdConversionPatterns(patterns, &getContext()); - populateStdToLLVMConversionPatterns(typeConverter, patterns, - /*emitCWrapperS=*/true, - /*useAlignedAlloc=*/false); + populateStdToLLVMConversionPatterns(typeConverter, patterns, options); patterns.insert( &getContext(), typeConverter); @@ -899,8 +899,7 @@ void KrnlToLLVMLoweringPass::runOnOperation() { // We want to completely lower to LLVM, so we use a `FullConversion`. This // ensures that only legal operations will remain after the conversion. - if (failed(applyFullConversion( getOperation(), target, patterns, &typeConverter))) { + if (failed(applyFullConversion(getOperation(), target, patterns))) { signalPassFailure(); } } diff --git a/test/mlir/krnl/constant.mlir b/test/mlir/krnl/constant.mlir index 6068ecb..8f71c7c 100644 --- a/test/mlir/krnl/constant.mlir +++ b/test/mlir/krnl/constant.mlir @@ -24,7 +24,7 @@ func @test_constant(%arg0 : tensor<1xf32>) -> tensor<*xf32> { // CHECK: [[GLOBAL_SIZE_BYTES:%.+]] = llvm.sext [[CONST_MUL1]] : !llvm.i64 to !llvm.i64 /// Volatile flag - // CHECK: [[CONST0:%.+]] = llvm.mlir.constant(0 : i1) : !llvm.i1 + // CHECK: [[CONST0:%.+]] = llvm.mlir.constant(false) : !llvm.i1 // CHECK: llvm.call @llvm.memcpy.p0i8.p0i8.i64([[I8ALLOCA]], [[I8GLOBAL]], [[GLOBAL_SIZE_BYTES]], [[CONST0]]) : (!llvm<"i8*">, !llvm<"i8*">, !llvm.i64, !llvm.i1) -> !llvm.void diff --git a/test/mlir/krnl/reshape.mlir b/test/mlir/krnl/reshape.mlir index 8a055b5..dc105d2 100644 --- a/test/mlir/krnl/reshape.mlir +++ b/test/mlir/krnl/reshape.mlir @@ -21,7 +21,7 @@ func @test_reshape(%arg0 : tensor, %arg1 : tensor<4xi64>) -> tensor<*xf32> { // CHECK: [[EXT_VAL_1:%.+]] = llvm.extractvalue [[TMP1]][1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> // CHECK: [[SRC:%.+]] = llvm.bitcast [[EXT_VAL_1]] : !llvm<"float*"> to !llvm<"i8*"> // CHECK: [[SIZE:%.+]] = llvm.sext %{{.*}} : !llvm.i64 to !llvm.i64 - // CHECK: [[VOLATILE:%.+]] = llvm.mlir.constant(0 : i1) : !llvm.i1 + // CHECK: [[VOLATILE:%.+]] = llvm.mlir.constant(false) : !llvm.i1 // CHECK: llvm.call @llvm.memcpy.p0i8.p0i8.i64([[DST]], [[SRC]], [[SIZE]], [[VOLATILE]]) : (!llvm<"i8*">, !llvm<"i8*">, !llvm.i64, !llvm.i1) -> !llvm.void // CHECK: llvm.return [[RES]] : !llvm<"{ float*, float*, i64, [4 x i64], [4 x i64] }"> } diff --git a/test/mlir/onnx/onnx_lowering.mlir b/test/mlir/onnx/onnx_lowering.mlir index 7b8152b..f0c20cd 100644 --- a/test/mlir/onnx/onnx_lowering.mlir +++ b/test/mlir/onnx/onnx_lowering.mlir @@ -209,13 +209,15 @@ func @test_exp(%arg0 : tensor) -> tensor<*xf32> { "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: test_exp - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] =
constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32 @@ -230,13 +232,15 @@ func @test_tanh(%arg0 : tensor) -> tensor<*xf32> { "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: test_tanh - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -257,13 +261,15 @@ func @test_sinh(%arg0 : tensor) -> tensor<*xf32> { "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: test_sinh - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -284,13 +290,15 @@ func @test_cosh(%arg0 : tensor) -> tensor<*xf32> { "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: test_cosh - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load 
%arg0[%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -311,13 +319,15 @@ func @test_cos(%arg0 : tensor) -> tensor<*xf32> { "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: test_cos - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[COS:%.+]] = cos [[LOAD]] : f32 @@ -332,13 +342,15 @@ func @test_log(%arg0 : tensor) -> tensor<*xf32> { "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: test_log - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[LOG:%.+]] = log [[LOAD]] : f32 @@ -353,13 +365,15 @@ func @test_sigmoid(%arg0 : tensor) -> tensor<*xf32> { "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: test_sigmoid - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -379,13 +393,15 @@ func @test_relu(%arg0 : tensor) -> tensor<*xf32> { "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: test_relu - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : 
memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -403,7 +419,8 @@ func @test_reshape(%arg0 : tensor, %arg1 : tensor<4xi64>) -> tensor<*x // CHECK-LABEL: test_reshape // CHECK: [[TYPE_IN_BYTES_0:%.+]] = constant 4 : i64 - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[DIM_0_CAST:%.+]] = index_cast [[DIM_0]] : index to i64 // CHECK: [[MUL_0:%.+]] = muli [[TYPE_IN_BYTES_0]], [[DIM_0_CAST]] : i64 // CHECK: [[CONSTANT_0:%.+]] = constant 10 : i64 @@ -412,7 +429,8 @@ func @test_reshape(%arg0 : tensor, %arg1 : tensor<4xi64>) -> tensor<*x // CHECK: [[TYPE_IN_BYTES_1:%.+]] = constant 4 : i64 // CHECK: %[[CONSTANT_1:.+]] = constant 0 : index // CHECK: [[LOAD_0:%.+]] = affine.load %arg1[%[[CONSTANT_1]]] : memref<4xi64> - // CHECK: [[DIM_1:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_1:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: [[DIM_1_CAST:%.+]] = index_cast [[DIM_1]] : index to i64 // CHECK: [[CONSTANT_2:%.+]] = constant 0 : i64 // CHECK: [[CMP_0:%.+]] = cmpi "eq", [[LOAD_0]], [[CONSTANT_2]] : i64 @@ -533,13 +551,15 @@ func @test_elu(%arg0 : tensor) -> tensor<*xf32> { "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: test_elu - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -561,13 +581,15 @@ func @test_leakyrelu(%arg0 : tensor) -> tensor<*xf32> { "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: test_leakyrelu - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -586,13 +608,15 @@ func @test_selu(%arg0 : tensor) -> tensor<*xf32> { "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: 
test_selu - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -615,13 +639,15 @@ func @test_hardsigmoid(%arg0 : tensor) -> tensor<*xf32> { "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: test_hardsigmoid - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -645,13 +671,15 @@ func @test_reciprocal(%arg0 : tensor) -> tensor<*xf32> { "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: test_reciprocal - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ONE:%.+]] = constant {{1.+}} : f32 @@ -667,13 +695,15 @@ func @test_softplus(%arg0 : tensor) -> tensor<*xf32> { "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: test_softplus - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with 
([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32 @@ -691,13 +721,15 @@ func @test_softsign(%arg0 : tensor) -> tensor<*xf32> { "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: test_softsign - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ABS:%.+]] = absf [[LOAD]] : f32 @@ -715,16 +747,19 @@ func @test_add_with_broadcasting(%arg0 : tensor, %arg1 : tensor "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: test_add_with_broadcasting - // CHECK: [[DIM1:%.+]] = dim %arg1, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM1:%.+]] = dim %arg1, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM1]]) : memref - // CHECK: [[DIM2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: [[ONE:%.+]] = constant 1 : index // CHECK: [[IS_ONE:%.+]] = cmpi "eq", [[DIM2]], [[ONE]] : index // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM3:%.+]] = dim [[RES]], 0 : memref + // CHECK: [[C0_1:%.+]] = constant 0 : index + // CHECK: [[DIM3:%.+]] = dim [[RES]], [[C0_1]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg2 = 0 to [[DIM3]], [[DEF_LOOPS]]#1 -> %arg3 = 0 to 10) { // CHECK: [[ZERO:%.+]] = constant 0 : index // CHECK: %[[SELECT1:.+]] = select [[IS_ONE]], [[ZERO]], %arg3 : index @@ -943,13 +978,15 @@ func @test_sqrt(%arg0 : tensor) -> tensor<*xf32> { "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: test_sqrt - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[SQRT:%.+]] = sqrt [[LOAD]] : f32 @@ -1026,13 +1063,15 @@ func @test_sign_f(%arg0 : tensor) -> tensor<*xf32> { "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: test_sign_f - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : 
memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -1053,13 +1092,15 @@ func @test_sign_i(%arg0 : tensor) -> tensor<*xi32> { "std.return"(%0) : (tensor<*xi32>) -> () // CHECK-LABEL: test_sign_i - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant 0 : i32 @@ -1214,13 +1255,15 @@ func @test_matmul5(%arg0 : tensor<5xf32>, %arg1 : tensor) -> tensor< // CHECK-LABEL: test_matmul5 // CHECK: [[CONSTANT:%.+]] = constant 0.000000e+00 : f32 - // CHECK: [[DIM_0:%.+]] = dim %arg1, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg1, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[LOOPS]]#0, [[LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_1:%.+]] = dim [[RES]], 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_1:%.+]] = dim [[RES]], [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0) with ([[LOOPS]]#0 -> %arg2 = 0 to [[DIM_1]]) { // CHECK: krnl.iterate([[OPT_LOOPS]]#1) with ([[LOOPS]]#1 -> %arg3 = 0 to 10) { // CHECK: affine.store [[CONSTANT]], [[RES]][%arg2, %arg3] : memref @@ -1250,13 +1293,15 @@ func @test_matmul6(%arg0 : tensor, %arg1 : tensor<5xf32>) -> tensor< // CHECK-LABEL: test_matmul6 // CHECK: [[CONSTANT:%.+]] = constant 0.000000e+00 : f32 - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[LOOPS]]#0, [[LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_1:%.+]] = dim [[RES]], 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_1:%.+]] = dim [[RES]], [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0) with ([[LOOPS]]#0 -> %arg2 = 0 to [[DIM_1]]) { // CHECK: 
krnl.iterate([[OPT_LOOPS]]#1) with ([[LOOPS]]#1 -> %arg3 = 0 to 10) { // CHECK: affine.store [[CONSTANT]], [[RES]][%arg2, %arg3] : memref @@ -1562,13 +1607,15 @@ func @test_abs_float(%arg0 : tensor) -> tensor<*xf32> { "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: test_abs_float - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ABS:%.+]] = absf [[LOAD]] : f32 @@ -1583,13 +1630,15 @@ func @test_abs_int(%arg0 : tensor) -> tensor<*xi32> { "std.return"(%0) : (tensor<*xi32>) -> () // CHECK-LABEL: test_abs_int - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant 0 : i32 @@ -1745,7 +1794,8 @@ func @test_pool_unknown_dimensions(%arg0 : tensor<1x3x?x32xf32>) -> tensor<*xf32 // CHECK-DAG: #[[AFFINE_MAP:.+]] = affine_map<(d0)[s0, s1, s2, s3] -> ((d0 + s1 - (s0 - 1) * s3 - 1) floordiv s2 + 1)> // CHECK-LABEL: test_pool_unknown_dimensions - // CHECK: [[DIM:%.+]] = dim %arg0, 2 : memref<1x3x?x32xf32> + // CHECK: [[C0:%.+]] = constant 2 : index + // CHECK: [[DIM:%.+]] = dim %arg0, [[C0]] : memref<1x3x?x32xf32> // CHECK: [[KERNEL:%.+]] = constant 2 : index // CHECK: [[PAD:%.+]] = constant 0 : index // CHECK: [[STRIDE:%.+]] = constant 1 : index @@ -2146,7 +2196,8 @@ func @test_squeeze_unknown_dimensions(%arg0 : tensor) -> tensor "std.return"(%0) : (tensor<*xf32>) -> () // CHECK-LABEL: @test_squeeze_unknown_dimensions - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[TENSOR_SIZE_0:%.+]] = constant 8192 : i64 // CHECK: [[DIM_0_i64:%.+]] = index_cast [[DIM_0]] : index to i64 @@ -2226,15 +2277,18 @@ func @test_split_unknown_dimension(%arg0 : tensor) -> (tensor<*xf32> // CHECK: [[INDEX_MAP:#.+]] = affine_map<(d0) -> (d0 + 2)> // CHECK-LABEL: @test_split_unknown_dimension - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES_0:%.+]] = alloc([[DIM_0]]) : memref - // CHECK: 
[[DIM_1:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_1:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: [[RES_1:%.+]] = alloc([[DIM_1]]) : memref // CHECK: [[DEF_LOOP_0:%.+]]:3 = krnl.define_loops 3 // CHECK: [[OPT_LOOP_0:%.+]]:3 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOP_0]]#0, [[DEF_LOOP_0]]#1, [[DEF_LOOP_0]]#2 // CHECK: } : () -> (!krnl.loop, !krnl.loop, !krnl.loop) - // CHECK: [[DIM_0:%.+]] = dim [[RES_0]], 0 : memref + // CHECK: [[C0_2:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim [[RES_0]], [[C0_2]] : memref // CHECK: krnl.iterate([[OPT_LOOP_0]]#0, [[OPT_LOOP_0]]#1, [[OPT_LOOP_0]]#2) with ([[DEF_LOOP_0]]#0 -> %arg1 = 0 to [[DIM_0]], [[DEF_LOOP_0]]#1 -> %arg2 = 0 to 2, [[DEF_LOOP_0]]#2 -> %arg3 = 0 to 64) { // CHECK: [[LOAD_0:%.+]] = affine.load %arg0[%arg1, %arg2, %arg3] : memref // CHECK: affine.store [[LOAD_0]], [[RES_0]][%arg1, %arg2, %arg3] : memref @@ -2243,7 +2297,8 @@ func @test_split_unknown_dimension(%arg0 : tensor) -> (tensor<*xf32> // CHECK: [[OPT_LOOP_1:%.+]]:3 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOP_1]]#0, [[DEF_LOOP_1]]#1, [[DEF_LOOP_1]]#2 // CHECK: } : () -> (!krnl.loop, !krnl.loop, !krnl.loop) - // CHECK: [[DIM_1:%.+]] = dim [[RES_1]], 0 : memref + // CHECK: [[C0_3:%.+]] = constant 0 : index + // CHECK: [[DIM_1:%.+]] = dim [[RES_1]], [[C0_3]] : memref // CHECK: krnl.iterate([[OPT_LOOP_1]]#0, [[OPT_LOOP_1]]#1, [[OPT_LOOP_1]]#2) with ([[DEF_LOOP_1]]#0 -> %arg1 = 0 to [[DIM_1]], [[DEF_LOOP_1]]#1 -> %arg2 = 0 to 30, [[DEF_LOOP_1]]#2 -> %arg3 = 0 to 64) { // CHECK: %[[INDEX:.+]] = affine.apply [[INDEX_MAP]](%arg2) // CHECK: [[LOAD_1:%.+]] = affine.load %arg0[%arg1, %[[INDEX]], %arg3] : memref diff --git a/test/mlir/onnx/onnx_lowering_with_dealloc.mlir b/test/mlir/onnx/onnx_lowering_with_dealloc.mlir index d931fc8..86fa0df 100644 --- a/test/mlir/onnx/onnx_lowering_with_dealloc.mlir +++ b/test/mlir/onnx/onnx_lowering_with_dealloc.mlir @@ -282,26 +282,30 @@ func @test_exp_exp(%arg0 : tensor) -> tensor<*xf32> { // CHECK-LABEL: test_exp_exp /// First Exp - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32 // CHECK: affine.store [[EXP]], [[RES]][%arg1, %arg2] : memref /// Second Exp - // CHECK: [[DIM_0:%.+]] = dim [[RES]], 0 : memref + // CHECK: [[C0_1:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim [[RES]], [[C0_1]] : memref // CHECK: [[RET_RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim [[RES]], 0 : memref + // CHECK: [[C0_2:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim 
[[RES]], [[C0_2]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load [[RES]][%arg1, %arg2] : memref // CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32 @@ -323,13 +327,15 @@ func @test_tanh_tanh(%arg0 : tensor) -> tensor<*xf32> { // CHECK-LABEL: test_tanh_tanh /// First Tanh - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -342,13 +348,15 @@ func @test_tanh_tanh(%arg0 : tensor) -> tensor<*xf32> { // CHECK: affine.store [[TANH]], [[RES]][%arg1, %arg2] : memref /// Second Tanh - // CHECK: [[DIM_0:%.+]] = dim [[RES]], 0 : memref + // CHECK: [[C0_1:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim [[RES]], [[C0_1]] : memref // CHECK: [[RET_RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim [[RES]], 0 : memref + // CHECK: [[C0_2:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim [[RES]], [[C0_2]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load [[RES]][%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -376,13 +384,15 @@ func @test_sinh_sinh(%arg0 : tensor) -> tensor<*xf32> { // CHECK-LABEL: test_sinh_sinh /// First Sinh - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -395,13 +405,15 @@ func @test_sinh_sinh(%arg0 : tensor) -> tensor<*xf32> { // CHECK: affine.store [[SINH_RES]], [[RES]][%arg1, %arg2] : memref /// Second Sinh - // CHECK: [[DIM_0:%.+]] = dim [[RES]], 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim [[RES]], [[C0_0]] : memref // CHECK: [[RET_RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: 
[[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim [[RES]], 0 : memref + // CHECK: [[C0_2:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim [[RES]], [[C0_2]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load [[RES]][%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -429,13 +441,15 @@ func @test_cosh_cosh(%arg0 : tensor) -> tensor<*xf32> { // CHECK-LABEL: test_cosh_cosh /// First Cosh - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -448,13 +462,15 @@ func @test_cosh_cosh(%arg0 : tensor) -> tensor<*xf32> { // CHECK: affine.store [[COSH_RES]], [[RES]][%arg1, %arg2] : memref /// Second Cosh - // CHECK: [[DIM_0:%.+]] = dim [[RES]], 0 : memref + // CHECK: [[C0_1:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim [[RES]], [[C0_1]] : memref // CHECK: [[RET_RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim [[RES]], 0 : memref + // CHECK: [[C0_2:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim [[RES]], [[C0_2]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load [[RES]][%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -482,13 +498,15 @@ func @test_sigmoid_sigmoid(%arg0 : tensor) -> tensor<*xf32> { // CHECK-LABEL: test_sigmoid_sigmoid /// First Sigmoid - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -500,13 +518,15 @@ func 
@test_sigmoid_sigmoid(%arg0 : tensor) -> tensor<*xf32> { // CHECK: affine.store [[SIGMOID_RES]], [[RES]][%arg1, %arg2] : memref /// Second Sigmoid - // CHECK: [[DIM_0:%.+]] = dim [[RES]], 0 : memref + // CHECK: [[C0_1:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim [[RES]], [[C0_1]] : memref // CHECK: [[RET_RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim [[RES]], 0 : memref + // CHECK: [[C0_2:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim [[RES]], [[C0_2]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load [[RES]][%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -533,13 +553,15 @@ func @test_relu_relu(%arg0 : tensor) -> tensor<*xf32> { // CHECK-LABEL: test_relu_relu /// First Relu - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -548,13 +570,15 @@ func @test_relu_relu(%arg0 : tensor) -> tensor<*xf32> { // CHECK: affine.store [[RELU_RES]], [[RES]][%arg1, %arg2] : memref /// Second Relu - // CHECK: [[DIM_0:%.+]] = dim [[RES]], 0 : memref + // CHECK: [[C0_1:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim [[RES]], [[C0_1]] : memref // CHECK: [[RET_RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim [[RES]], 0 : memref + // CHECK: [[C0_2:%.+]] = constant 0 : index + // CHECK: [[DIM_2:%.+]] = dim [[RES]], [[C0_2]] : memref // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) { // CHECK: [[LOAD:%.+]] = affine.load [[RES]][%arg1, %arg2] : memref // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32 @@ -699,13 +723,15 @@ func @test_elu_elu(%arg0 : tensor) -> tensor<*xf32> { // CHECK-LABEL: test_elu_elu /// First Elu - // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0:%.+]] = constant 0 : index + // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2 // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops { // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1 // CHECK: } : () -> (!krnl.loop, !krnl.loop) - // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref + // CHECK: [[C0_0:%.+]] = constant 0 : 
index
+  // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref<?x10xf32>
   // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
   // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref<?x10xf32>
   // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
@@ -719,13 +745,15 @@ func @test_elu_elu(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
   // CHECK: affine.store [[SELECT]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
 
   /// Second Elu
-  // CHECK: [[DIM_0:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
+  // CHECK: [[C0_1:%.+]] = constant 0 : index
+  // CHECK: [[DIM_0:%.+]] = dim [[RES]], [[C0_1]] : memref<?x10xf32>
   // CHECK: [[RET_RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
   // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
   // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
   // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
   // CHECK: } : () -> (!krnl.loop, !krnl.loop)
-  // CHECK: [[DIM_2:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
+  // CHECK: [[C0_2:%.+]] = constant 0 : index
+  // CHECK: [[DIM_2:%.+]] = dim [[RES]], [[C0_2]] : memref<?x10xf32>
   // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
   // CHECK: [[LOAD:%.+]] = affine.load [[RES]][%arg1, %arg2] : memref<?x10xf32>
   // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
@@ -754,13 +782,15 @@ func @test_leakyrelu_leakyrelu(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
 
   // CHECK-LABEL: test_leakyrelu_leakyrelu
   /// First LeakyRelu
-  // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref<?x10xf32>
+  // CHECK: [[C0:%.+]] = constant 0 : index
+  // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref<?x10xf32>
   // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
   // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
   // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
   // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
   // CHECK: } : () -> (!krnl.loop, !krnl.loop)
-  // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
+  // CHECK: [[C0_0:%.+]] = constant 0 : index
+  // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref<?x10xf32>
   // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
   // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref<?x10xf32>
   // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
@@ -771,13 +801,15 @@ func @test_leakyrelu_leakyrelu(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
   // CHECK: affine.store [[SELECT]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
 
   /// Second LeakyRelu
-  // CHECK: [[DIM_0:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
+  // CHECK: [[C0_1:%.+]] = constant 0 : index
+  // CHECK: [[DIM_0:%.+]] = dim [[RES]], [[C0_1]] : memref<?x10xf32>
   // CHECK: [[RET_RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
   // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
   // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
   // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
   // CHECK: } : () -> (!krnl.loop, !krnl.loop)
-  // CHECK: [[DIM_2:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
+  // CHECK: [[C0_2:%.+]] = constant 0 : index
+  // CHECK: [[DIM_2:%.+]] = dim [[RES]], [[C0_2]] : memref<?x10xf32>
   // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
   // CHECK: [[LOAD:%.+]] = affine.load [[RES]][%arg1, %arg2] : memref<?x10xf32>
   // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
@@ -803,13 +835,15 @@ func @test_selu_selu(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
 
   // CHECK-LABEL: test_selu_selu
   /// First Selu
-  // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref<?x10xf32>
+  // CHECK: [[C0:%.+]] = constant 0 : index
+  // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref<?x10xf32>
   // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
   // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
   // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
   // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
   // CHECK: } : () -> (!krnl.loop, !krnl.loop)
-  // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
+  // CHECK: [[C0_0:%.+]] = constant 0 : index
+  // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref<?x10xf32>
   // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
   // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref<?x10xf32>
   // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
@@ -824,13 +858,15 @@ func @test_selu_selu(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
   // CHECK: affine.store [[SELU_RES]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
 
   /// Second Selu
-  // CHECK: [[DIM_0:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
+  // CHECK: [[C0_1:%.+]] = constant 0 : index
+  // CHECK: [[DIM_0:%.+]] = dim [[RES]], [[C0_1]] : memref<?x10xf32>
   // CHECK: [[RET_RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
   // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
   // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
   // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
   // CHECK: } : () -> (!krnl.loop, !krnl.loop)
-  // CHECK: [[DIM_2:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
+  // CHECK: [[C0_2:%.+]] = constant 0 : index
+  // CHECK: [[DIM_2:%.+]] = dim [[RES]], [[C0_2]] : memref<?x10xf32>
   // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
   // CHECK: [[LOAD:%.+]] = affine.load [[RES]][%arg1, %arg2] : memref<?x10xf32>
   // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
@@ -860,13 +896,15 @@ func @test_hardsigmoid_hardsigmoid(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
 
   // CHECK-LABEL: test_hardsigmoid_hardsigmoid
   /// First HardSigmoid
-  // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref<?x10xf32>
+  // CHECK: [[C0:%.+]] = constant 0 : index
+  // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref<?x10xf32>
   // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
   // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
   // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
   // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
   // CHECK: } : () -> (!krnl.loop, !krnl.loop)
-  // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
+  // CHECK: [[C0_0:%.+]] = constant 0 : index
+  // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref<?x10xf32>
   // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
   // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref<?x10xf32>
   // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
@@ -882,13 +920,15 @@ func @test_hardsigmoid_hardsigmoid(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
   // CHECK: affine.store [[SELECT2]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
 
   /// Second HardSigmoid
-  // CHECK: [[DIM_0:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
+  // CHECK: [[C0_1:%.+]] = constant 0 : index
+  // CHECK: [[DIM_0:%.+]] = dim [[RES]], [[C0_1]] : memref<?x10xf32>
   // CHECK: [[RET_RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
   // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
   // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
   // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
   // CHECK: } : () -> (!krnl.loop, !krnl.loop)
-  // CHECK: [[DIM_2:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
+  // CHECK: [[C0_2:%.+]] = constant 0 : index
+  // CHECK: [[DIM_2:%.+]] = dim [[RES]], [[C0_2]] : memref<?x10xf32>
   // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
   // CHECK: [[LOAD:%.+]] = affine.load [[RES]][%arg1, %arg2] : memref<?x10xf32>
   // CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
@@ -919,13 +959,15 @@ func @test_reciprocal_reciprocal(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
 
   // CHECK-LABEL: test_reciprocal_reciprocal
   /// First Reciprocal
-  // CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref<?x10xf32>
+  // CHECK: [[C0:%.+]] = constant 0 : index
+  // CHECK: [[DIM_0:%.+]] = dim %arg0, [[C0]] : memref<?x10xf32>
   // CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
   // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
   // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
   // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
   // CHECK: } : () -> (!krnl.loop, !krnl.loop)
-  // CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
+  // CHECK: [[C0_0:%.+]] = constant 0 : index
+  // CHECK: [[DIM_2:%.+]] = dim %arg0, [[C0_0]] : memref<?x10xf32>
   // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
   // CHECK: [[LOAD:%.+]] = affine.load %arg0[%arg1, %arg2] : memref<?x10xf32>
   // CHECK: [[ONE:%.+]] = constant {{1.+}} : f32
@@ -933,13 +975,15 @@ func @test_reciprocal_reciprocal(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
   // CHECK: affine.store [[RECIPROCAL_RES]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
 
   /// Second Reciprocal
-  // CHECK: [[DIM_0:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
+  // CHECK: [[C0_1:%.+]] = constant 0 : index
+  // CHECK: [[DIM_0:%.+]] = dim [[RES]], [[C0_1]] : memref<?x10xf32>
   // CHECK: [[RET_RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
   // CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
   // CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
   // CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
   // CHECK: } : () -> (!krnl.loop, !krnl.loop)
-  // CHECK: [[DIM_2:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
+  // CHECK: [[C0_2:%.+]] = constant 0 : index
+  // CHECK: [[DIM_2:%.+]] = dim [[RES]], [[C0_2]] : memref<?x10xf32>
   // CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
   // CHECK: [[LOAD:%.+]] = affine.load [[RES]][%arg1, %arg2] : memref<?x10xf32>
   // CHECK: [[ONE:%.+]] = constant {{1.+}} : f32
diff --git a/utils/clone-mlir.sh b/utils/clone-mlir.sh
index efd2fb5..4abfc92 100644
--- a/utils/clone-mlir.sh
+++ b/utils/clone-mlir.sh
@@ -1,3 +1,3 @@
 git clone https://github.com/llvm/llvm-project.git
 # Check out a specific branch that is known to work with ONNX MLIR.
-cd llvm-project && git checkout 0dc91bfd11e6cced0c46c1a25cc96edea0d8fc22 && cd ..
+cd llvm-project && git checkout 32791937d7aceb0a5e1eaabf1bb1a6dbe1639792 && cd ..