diff --git a/src/builder/frontend_dialect_transformer.cpp b/src/builder/frontend_dialect_transformer.cpp
index 6065cd3..5f60809 100644
--- a/src/builder/frontend_dialect_transformer.cpp
+++ b/src/builder/frontend_dialect_transformer.cpp
@@ -226,7 +226,7 @@ private:
   std::string _name;

   mlir::NamedAttribute operator()(int64_t const &r) {
-    auto val = _builder.getI32IntegerAttr(r);
+    auto val = _builder.getI64IntegerAttr(r);
     return _builder.getNamedAttr(_name, val);
   }
@@ -288,21 +288,12 @@ private:
   }

  std::vector<mlir::NamedAttribute> ImportNodeAttributes(
-      const onnx::NodeProto &node,
-      std::initializer_list<std::pair<std::string, AttrValueType>>
-          defaultAttrList) {
+      const onnx::NodeProto &node) {
    std::vector<mlir::NamedAttribute> attributes;
-    std::set<std::string> definedAttributeSet;
    for (int i = 0; i < node.attribute_size(); ++i) {
      auto attr = node.attribute(i);
      auto nameValPair = convertAttributeProtoToNameValuePair(attr);
      attributes.push_back(convertNameValuePairToNamedAttribute(nameValPair));
-      definedAttributeSet.insert(attr.name());
-    }
-    for (const auto &defaultAttr : defaultAttrList) {
-      if (definedAttributeSet.find(defaultAttr.first) ==
-          definedAttributeSet.end())
-        attributes.push_back(convertNameValuePairToNamedAttribute(defaultAttr));
    }
    return attributes;
  }
@@ -340,9 +331,7 @@ private:
   */
  template <typename T>
  void
-  ImportNodeOneOut(const onnx::NodeProto &node, int nIn, int nOut,
-                   std::initializer_list<std::pair<std::string, AttrValueType>>
-                       defaultAttrList) {
+  ImportNodeOneOut(const onnx::NodeProto &node, int nIn, int nOut) {
    std::vector<mlir::Value> inputs;
    for (const auto &item : node.input()) {
      if (frontend_symbols_.ContainKey(legalize_name(item))) {
@@ -356,7 +345,7 @@ private:
           mlir::UnrankedTensorType::get(builder_.getF32Type()));
    }

-    auto attributes = ImportNodeAttributes(node, defaultAttrList);
+    auto attributes = ImportNodeAttributes(node);

    llvm::StringRef OpName = node.op_type();
@@ -372,9 +361,7 @@ private:

  template <typename T>
  void ImportNodeMultipleOuts(
-      const onnx::NodeProto &node, int nIn, int nOut,
-      std::initializer_list<std::pair<std::string, AttrValueType>>
-          defaultAttrList) {
+      const onnx::NodeProto &node, int nIn, int nOut) {
    std::vector<mlir::Value> inputs;
    for (const auto &item : node.input()) {
      if (frontend_symbols_.ContainKey(legalize_name(item))) {
@@ -388,7 +375,7 @@ private:
           mlir::UnrankedTensorType::get(builder_.getF32Type()));
    }

-    auto attributes = ImportNodeAttributes(node, defaultAttrList);
+    auto attributes = ImportNodeAttributes(node);

    llvm::StringRef OpName = node.op_type();
@@ -410,9 +397,7 @@ private:
   * a specialized function is used
   */
  void
-  ImportNodeConv(onnx::NodeProto node, int nIn, int nOut,
-                 std::initializer_list<std::pair<std::string, AttrValueType>>
-                     defaultAttrList) {
+  ImportNodeConv(onnx::NodeProto node, int nIn, int nOut) {
    // Conv has attribute dilations, kernel_shape, pads, the default value of
    // which is determined by the shape of first argument. However, since the
    // shape is unknown now, these attributes can be not generated auto
@@ -427,25 +412,23 @@ private:
    if (nOps == 2)
      ImportNodeOneOut<mlir::ONNXConvNoBiasOp>(
-          node, nOps, nOut, defaultAttrList);
+          node, nOps, nOut);
    else
-      ImportNodeOneOut<mlir::ONNXConvOp>(node, nOps, nOut, defaultAttrList);
+      ImportNodeOneOut<mlir::ONNXConvOp>(node, nOps, nOut);
  }

  /*!
   * Special handler for MaxPool operations.
*/ void ImportNodeMaxPool( - onnx::NodeProto node, int nIn, int nOut, - std::initializer_list> - defaultAttrList) { + onnx::NodeProto node, int nIn, int nOut) { int nOuts = node.output().size(); if (nOuts == 1) { ImportNodeOneOut( - node, nIn, nOuts, defaultAttrList); + node, nIn, nOuts); } else { ImportNodeMultipleOuts( - node, nIn, nOuts, defaultAttrList); + node, nIn, nOuts); } } diff --git a/src/builder/op_build_table.inc b/src/builder/op_build_table.inc index 0e7f20e..7771473 100644 --- a/src/builder/op_build_table.inc +++ b/src/builder/op_build_table.inc @@ -1,592 +1,314 @@ if (OpName == "DUMMY") { }else if (OpName == "Abs") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Acos") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Acosh") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Add") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "And") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "ArgMax") { - ImportNodeOneOut(node, 1, 1, { - {"axis", 0} - ,{"keepdims", 1} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "ArgMin") { - ImportNodeOneOut(node, 1, 1, { - {"axis", 0} - ,{"keepdims", 1} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Asin") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Asinh") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Atan") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Atanh") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "AveragePool") { - ImportNodeOneOut(node, 1, 1, { - {"auto_pad", "NOTSET"} - ,{"ceil_mode", 0} - ,{"count_include_pad", 0} - ,{"kernel_shape", std::vector {}} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "BatchNormalization") { - ImportNodeMultipleOuts(node, 5, 5, { - {"epsilon", (float)1e-05} - ,{"momentum", (float)0.9} - }); + ImportNodeMultipleOuts(node, 5, 5); }else if (OpName == "BitShift") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "Cast") { - ImportNodeOneOut(node, 1, 1, { - {"to", 0} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Ceil") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Clip") { - ImportNodeOneOut(node, 3, 1, { - }); + ImportNodeOneOut(node, 3, 1); }else if (OpName == "Compress") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "Concat") { - ImportNodeOneOut(node, 1, 1, { - {"axis", 0} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "ConcatFromSequence") { - ImportNodeOneOut(node, 1, 1, { - {"new_axis", 0} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Constant") { - ImportNodeOneOut(node, 0, 1, { - }); + ImportNodeOneOut(node, 0, 1); }else if (OpName == "ConstantOfShape") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Conv") { - ImportNodeConv(node, 3, 1, { - {"auto_pad", "NOTSET"} - ,{"group", 1} - }); + ImportNodeConv(node, 3, 1); }else if (OpName == "ConvInteger") { - ImportNodeOneOut(node, 4, 1, { - {"auto_pad", "NOTSET"} - ,{"group", 1} - }); + ImportNodeOneOut(node, 4, 1); }else if (OpName == "ConvTranspose") { - ImportNodeOneOut(node, 3, 1, { - 
{"auto_pad", "NOTSET"} - ,{"group", 1} - }); + ImportNodeOneOut(node, 3, 1); }else if (OpName == "Cos") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Cosh") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "CumSum") { - ImportNodeOneOut(node, 2, 1, { - {"exclusive", 0} - ,{"reverse", 0} - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "DepthToSpace") { - ImportNodeOneOut(node, 1, 1, { - {"mode", "DCR"} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "DequantizeLinear") { - ImportNodeOneOut(node, 3, 1, { - }); + ImportNodeOneOut(node, 3, 1); }else if (OpName == "Det") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Div") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "Dropout") { - ImportNodeMultipleOuts(node, 1, 2, { - {"ratio", (float)0.5} - }); + ImportNodeMultipleOuts(node, 1, 2); }else if (OpName == "DynamicQuantizeLinear") { - ImportNodeMultipleOuts(node, 1, 3, { - }); + ImportNodeMultipleOuts(node, 1, 3); }else if (OpName == "Elu") { - ImportNodeOneOut(node, 1, 1, { - {"alpha", (float)1.0} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Equal") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "Erf") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Exp") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Expand") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "EyeLike") { - ImportNodeOneOut(node, 1, 1, { - {"k", 0} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Flatten") { - ImportNodeOneOut(node, 1, 1, { - {"axis", 1} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Floor") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "GRU") { - ImportNodeMultipleOuts(node, 6, 2, { - {"direction", "forward"} - ,{"linear_before_reset", 0} - }); + ImportNodeMultipleOuts(node, 6, 2); }else if (OpName == "Gather") { - ImportNodeOneOut(node, 2, 1, { - {"axis", 0} - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "GatherElements") { - ImportNodeOneOut(node, 2, 1, { - {"axis", 0} - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "GatherND") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "Gemm") { - ImportNodeOneOut(node, 3, 1, { - {"alpha", (float)1.0} - ,{"beta", (float)1.0} - ,{"transA", 0} - ,{"transB", 0} - }); + ImportNodeOneOut(node, 3, 1); }else if (OpName == "GlobalAveragePool") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "GlobalLpPool") { - ImportNodeOneOut(node, 1, 1, { - {"p", 2} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "GlobalMaxPool") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Greater") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "HardSigmoid") { - ImportNodeOneOut(node, 1, 1, { - {"alpha", (float)0.2} - ,{"beta", (float)0.5} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Hardmax") { - ImportNodeOneOut(node, 1, 1, { - {"axis", 1} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Identity") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "If") { - 
ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "InstanceNormalization") { - ImportNodeOneOut(node, 3, 1, { - {"epsilon", (float)1e-05} - }); + ImportNodeOneOut(node, 3, 1); }else if (OpName == "IsInf") { - ImportNodeOneOut(node, 1, 1, { - {"detect_negative", 1} - ,{"detect_positive", 1} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "IsNaN") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "LRN") { - ImportNodeOneOut(node, 1, 1, { - {"alpha", (float)0.0001} - ,{"beta", (float)0.75} - ,{"bias", (float)1.0} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "LSTM") { - ImportNodeMultipleOuts(node, 8, 3, { - {"direction", "forward"} - ,{"input_forget", 0} - }); + ImportNodeMultipleOuts(node, 8, 3); }else if (OpName == "LeakyRelu") { - ImportNodeOneOut(node, 1, 1, { - {"alpha", (float)0.01} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Less") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "Log") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "LogSoftmax") { - ImportNodeOneOut(node, 1, 1, { - {"axis", 1} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Loop") { - ImportNodeOneOut(node, 3, 1, { - }); + ImportNodeOneOut(node, 3, 1); }else if (OpName == "LpNormalization") { - ImportNodeOneOut(node, 1, 1, { - {"axis", -1} - ,{"p", 2} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "LpPool") { - ImportNodeOneOut(node, 1, 1, { - {"auto_pad", "NOTSET"} - ,{"p", 2} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "MatMul") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "MatMulInteger") { - ImportNodeOneOut(node, 4, 1, { - }); + ImportNodeOneOut(node, 4, 1); }else if (OpName == "Max") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "MaxPool") { - ImportNodeMaxPool(node, 1, 2, { - {"auto_pad", "NOTSET"} - ,{"ceil_mode", 0} - ,{"kernel_shape", std::vector {}} - ,{"storage_order", 0} - }); + ImportNodeMaxPool(node, 1, 2); }else if (OpName == "MaxRoiPool") { - ImportNodeOneOut(node, 2, 1, { - {"spatial_scale", (float)1.0} - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "MaxUnpool") { - ImportNodeOneOut(node, 3, 1, { - }); + ImportNodeOneOut(node, 3, 1); }else if (OpName == "Mean") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "MeanVarianceNormalization") { - ImportNodeOneOut(node, 1, 1, { - {"axes", std::vector{0, 2, 3}} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Min") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Mod") { - ImportNodeOneOut(node, 2, 1, { - {"fmod", 0} - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "Mul") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "Multinomial") { - ImportNodeOneOut(node, 1, 1, { - {"dtype", 6} - ,{"sample_size", 1} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Neg") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "NonMaxSuppression") { - ImportNodeOneOut(node, 5, 1, { - {"center_point_box", 0} - }); + ImportNodeOneOut(node, 5, 1); }else if (OpName == "NonZero") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Not") { - ImportNodeOneOut(node, 1, 1, { - }); + 
ImportNodeOneOut(node, 1, 1); }else if (OpName == "OneHot") { - ImportNodeOneOut(node, 3, 1, { - {"axis", -1} - }); + ImportNodeOneOut(node, 3, 1); }else if (OpName == "Or") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "PRelu") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "Pad") { - ImportNodeOneOut(node, 3, 1, { - {"mode", "constant"} - }); + ImportNodeOneOut(node, 3, 1); }else if (OpName == "Pow") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "QLinearConv") { - ImportNodeOneOut(node, 9, 1, { - {"auto_pad", "NOTSET"} - ,{"group", 1} - }); + ImportNodeOneOut(node, 9, 1); }else if (OpName == "QLinearMatMul") { - ImportNodeOneOut(node, 8, 1, { - }); + ImportNodeOneOut(node, 8, 1); }else if (OpName == "QuantizeLinear") { - ImportNodeOneOut(node, 3, 1, { - }); + ImportNodeOneOut(node, 3, 1); }else if (OpName == "RNN") { - ImportNodeMultipleOuts(node, 6, 2, { - {"activation_alpha", std::vector {}} - ,{"activation_beta", std::vector {}} - ,{"activations", std::vector{"Tanh", "Tanh"}} - ,{"direction", "forward"} - }); + ImportNodeMultipleOuts(node, 6, 2); }else if (OpName == "RandomNormal") { - ImportNodeOneOut(node, 0, 1, { - {"dtype", 1} - ,{"mean", (float)0.0} - ,{"scale", (float)1.0} - }); + ImportNodeOneOut(node, 0, 1); }else if (OpName == "RandomNormalLike") { - ImportNodeOneOut(node, 1, 1, { - {"mean", (float)0.0} - ,{"scale", (float)1.0} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "RandomUniform") { - ImportNodeOneOut(node, 0, 1, { - {"dtype", 1} - ,{"high", (float)1.0} - ,{"low", (float)0.0} - }); + ImportNodeOneOut(node, 0, 1); }else if (OpName == "RandomUniformLike") { - ImportNodeOneOut(node, 1, 1, { - {"high", (float)1.0} - ,{"low", (float)0.0} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Range") { - ImportNodeOneOut(node, 3, 1, { - }); + ImportNodeOneOut(node, 3, 1); }else if (OpName == "Reciprocal") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "ReduceL1") { - ImportNodeOneOut(node, 1, 1, { - {"keepdims", 1} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "ReduceL2") { - ImportNodeOneOut(node, 1, 1, { - {"keepdims", 1} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "ReduceLogSum") { - ImportNodeOneOut(node, 1, 1, { - {"keepdims", 1} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "ReduceLogSumExp") { - ImportNodeOneOut(node, 1, 1, { - {"keepdims", 1} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "ReduceMax") { - ImportNodeOneOut(node, 1, 1, { - {"keepdims", 1} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "ReduceMean") { - ImportNodeOneOut(node, 1, 1, { - {"keepdims", 1} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "ReduceMin") { - ImportNodeOneOut(node, 1, 1, { - {"keepdims", 1} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "ReduceProd") { - ImportNodeOneOut(node, 1, 1, { - {"keepdims", 1} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "ReduceSum") { - ImportNodeOneOut(node, 1, 1, { - {"keepdims", 1} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "ReduceSumSquare") { - ImportNodeOneOut(node, 1, 1, { - {"keepdims", 1} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Relu") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Reshape") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if 
(OpName == "Resize") { - ImportNodeOneOut(node, 4, 1, { - {"coordinate_transformation_mode", "half_pixel"} - ,{"cubic_coeff_a", (float)-0.75} - ,{"exclude_outside", 0} - ,{"extrapolation_value", (float)0.0} - ,{"mode", "nearest"} - ,{"nearest_mode", "round_prefer_floor"} - }); + ImportNodeOneOut(node, 4, 1); }else if (OpName == "ReverseSequence") { - ImportNodeOneOut(node, 2, 1, { - {"batch_axis", 1} - ,{"time_axis", 0} - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "RoiAlign") { - ImportNodeOneOut(node, 3, 1, { - {"mode", "avg"} - ,{"output_height", 1} - ,{"output_width", 1} - ,{"sampling_ratio", 0} - ,{"spatial_scale", (float)1.0} - }); + ImportNodeOneOut(node, 3, 1); }else if (OpName == "Round") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Scan") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Scatter") { - ImportNodeOneOut(node, 3, 1, { - {"axis", 0} - }); + ImportNodeOneOut(node, 3, 1); }else if (OpName == "ScatterElements") { - ImportNodeOneOut(node, 3, 1, { - {"axis", 0} - }); + ImportNodeOneOut(node, 3, 1); }else if (OpName == "ScatterND") { - ImportNodeOneOut(node, 3, 1, { - }); + ImportNodeOneOut(node, 3, 1); }else if (OpName == "Selu") { - ImportNodeOneOut(node, 1, 1, { - {"alpha", (float)1.67326} - ,{"gamma", (float)1.0507} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "SequenceAt") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "SequenceConstruct") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "SequenceEmpty") { - ImportNodeOneOut(node, 0, 1, { - }); + ImportNodeOneOut(node, 0, 1); }else if (OpName == "SequenceErase") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "SequenceInsert") { - ImportNodeOneOut(node, 3, 1, { - }); + ImportNodeOneOut(node, 3, 1); }else if (OpName == "SequenceLength") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Shape") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Shrink") { - ImportNodeOneOut(node, 1, 1, { - {"bias", (float)0.0} - ,{"lambd", (float)0.5} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Sigmoid") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Sign") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Sin") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Sinh") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Size") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Slice") { - ImportNodeOneOut(node, 5, 1, { - }); + ImportNodeOneOut(node, 5, 1); }else if (OpName == "Softmax") { - ImportNodeOneOut(node, 1, 1, { - {"axis", 1} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Softplus") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Softsign") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "SpaceToDepth") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Split") { - ImportNodeOneOut(node, 1, 1, { - {"axis", 0} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "SplitToSequence") { - ImportNodeOneOut(node, 2, 1, { - 
{"axis", 0} - ,{"keepdims", 1} - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "Sqrt") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Squeeze") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "StringNormalizer") { - ImportNodeOneOut(node, 1, 1, { - {"case_change_action", "NONE"} - ,{"is_case_sensitive", 0} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Sub") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "Sum") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Tan") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Tanh") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "TfIdfVectorizer") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "ThresholdedRelu") { - ImportNodeOneOut(node, 1, 1, { - {"alpha", (float)1.0} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Tile") { - ImportNodeOneOut(node, 2, 1, { - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "TopK") { - ImportNodeMultipleOuts(node, 2, 2, { - {"axis", -1} - ,{"largest", 1} - ,{"sorted", 1} - }); + ImportNodeMultipleOuts(node, 2, 2); }else if (OpName == "Transpose") { - ImportNodeOneOut(node, 1, 1, { - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Unique") { - ImportNodeMultipleOuts(node, 1, 4, { - {"sorted", 1} - }); + ImportNodeMultipleOuts(node, 1, 4); }else if (OpName == "Unsqueeze") { - ImportNodeOneOut(node, 1, 1, { - {"axes", std::vector {}} - }); + ImportNodeOneOut(node, 1, 1); }else if (OpName == "Upsample") { - ImportNodeOneOut(node, 2, 1, { - {"mode", "nearest"} - }); + ImportNodeOneOut(node, 2, 1); }else if (OpName == "Where") { - ImportNodeOneOut(node, 3, 1, { - }); + ImportNodeOneOut(node, 3, 1); }else if (OpName == "Xor") { - ImportNodeOneOut(node, 2, 1, { - }); - } + ImportNodeOneOut(node, 2, 1); + } \ No newline at end of file diff --git a/src/dialect/onnx/gen_doc.py b/src/dialect/onnx/gen_doc.py index bdf236a..9b4356a 100644 --- a/src/dialect/onnx/gen_doc.py +++ b/src/dialect/onnx/gen_doc.py @@ -270,6 +270,12 @@ def gen_schema(schema) : 'Identity', 'Cos', 'Log', 'Transpose', 'Softmax', 'Softplus', 'Softsign'] CanonicalList=['Add', 'Identity'] + manual_code = dict([ + ('DummyExample', ' let extraClassDeclaration = [{ \n'+ + ' static StringRef getPermAttrName() { return "perm"; }\n'+ + ' }];\n') + ]) + skip_attr_gen = [] line_indent = ' ' #s = 'def ONNX'+schema.name+str(schema.since_version)+'Op:ONNX_Op<"'+schema.name+'", \n' @@ -303,21 +309,23 @@ def gen_schema(schema) : #input s+= '\n'+line_indent+'let arguments = (ins ' + isfirst = True if schema.inputs: + isfirst = False for input in schema.inputs: if input != schema.inputs[0] : - s+= ', ' + s+= ',\n ' etypes=collect_types(schema, input) if OpSchema.FormalParameterOption.Optional == input.option: #TODO: handle optional - print("optional ", input.name) + print("warning: optional input for"+schema.name+' '+input.name) elif OpSchema.FormalParameterOption.Variadic == input.option: if input.isHomogeneous: s+= 'Variadic<' else: #TODO handle (variadic, heterogeneous)" - print('variadic, heterogeneous', input.name) + print("warning: (variadic, heterogeneous) for"+schema.name+' '+input.name) if etypes == '': s+= 'AnyTypeOf<[AnyMemRef, AnyTensor]>' else: @@ -333,6 +341,8 @@ def gen_schema(schema) : #TODO handle 
(variadic, heterogeneous)" t='' s+=':$'+input.name + if not schema.name in skip_attr_gen : + s += gen_attr_ins(schema, isfirst) s+= ');' #output @@ -347,11 +357,15 @@ def gen_schema(schema) : s+= 'AnyTypeOf<[AnyMemRef, AnyTensor]>' else: s+= 'TensorOf<['+etypes+']>' - s+= ');' + s += ':$o_'+output.name + s+= ');\n' #s+= 'let hasCanonicalizer = 1;' + #add special code + if schema.name in manual_code : + s += manual_code[schema.name] - s += '\n}\n\n' + s += '}\n\n' return s @@ -369,44 +383,91 @@ def gen_code(schema,fefile) : ("MaxPool", "ImportNodeMaxPool"), #("Transpose", "ImportNodeTranspose") ]) - list_str = 'std::vector' - empty_ints = list_str+' {}' - empty_floats = list_str+' {}' - special_default = dict([ - ("AveragePool "+"kernel_shape", empty_ints), - ("MaxPool "+"kernel_shape", empty_ints), - ("Cast "+"to", '0'), - ("Concat "+"axis", '0'), - ("Unsqueeze "+"axes", empty_ints), - ("RNN "+"activation_alpha", empty_floats), - ("RNN "+"activation_beta", empty_floats) - ]) + line_indent = ' ' fefile.write(' '+'}else if (OpName == "'+schema.name+'") {\n') op_type_str='mlir::ONNX'+schema.name+'Op' if schema.name in special_handler : fefile.write(' '+special_handler[schema.name]+'(node, ' +str(len(schema.inputs)) - +', ' +str(len(schema.outputs))+', {\n') + +', ' +str(len(schema.outputs))) elif len(schema.outputs) > 1 : fefile.write(' '+'ImportNodeMultipleOuts<'+op_type_str+'>(node, ' +str(len(schema.inputs)) - +', ' +str(len(schema.outputs))+', {\n') + +', ' +str(len(schema.outputs))) else : fefile.write(' '+'ImportNodeOneOut<'+op_type_str+'>(node, ' +str(len(schema.inputs)) - +', ' +str(len(schema.outputs))+', {\n') + +', ' +str(len(schema.outputs))) + fefile.write(');\n') + +def gen_attr_ins(schema, isfirst) : + special_defaults = dict([ + ("AveragePool "+"kernel_shape", ('ints', '{}')), + ("MaxPool "+"kernel_shape", ('ints', '{}')), + ("Cast "+"to", ('int', '0')), + ("Concat "+"axis", ('int', '0')), + ("Conv "+"group", ('int', '1')), + ("Unsqueeze "+"axes", ('ints', '{}')), + ("RNN "+"activation_alpha", ('floats', '{}')), + ("RNN "+"activation_beta", ('floats', '{}')), + ]) + def get_attr_type_basic(attr_type) : + if attr_type == 'int' : + mytype = 'I64Attr' + elif attr_type == 'float' : + mytype = 'F32Attr' + elif attr_type == 'ints' : + mytype = 'I64ArrayAttr' + elif attr_type == 'floats' : + mytype = 'F32ArrayAttr' + elif attr_type == "string" : + mytype = 'StrAttr' + elif attr_type == "strings" : + mytype = 'StrArrayAttr' + else : + mytype ='AnyAttr' + #TODO: tensor and sparse tensor + return mytype + def get_attr_type_optional(attr_type) : + mytype = 'OptionalAttr<' + mytype += get_attr_type_basic(attr_type) + mytype += '>' + return mytype + + def get_attr_type_with_default(attr_type, attr_default) : + mytype = 'DefaultValuedAttr<' + mytype += get_attr_type_basic(attr_type) + mytype += ', "'+attr_default+'">' + return mytype + + attr_line = '' if schema.attributes: - first_attr = True for _, attr in sorted(schema.attributes.items()): - #only generate default attr list - if schema.name+' '+attr.name in special_default: - attr_value = special_default[schema.name+' '+attr.name] - elif attr.default_value.name: - default_value = helper.get_attribute_value(attr.default_value) + #attr_line = line_indent+line_indent+line_indent+line_indent + if not isfirst: + attr_line += ',\n ' + else : + isfirst = False + + if schema.name+' '+attr.name in special_defaults: + (attr_type_str, attr_default_str) = special_defaults[schema.name+' '+attr.name] + attr_line += 
get_attr_type_with_default(attr_type_str, attr_default_str)
+                attr_line += ':$'+attr.name
+            elif attr.required:
+                s = Text(attr.type)
+                attr_type_str = s[s.rfind('.') + 1:].lower()
+                attr_line += get_attr_type_basic(attr_type_str)
+                attr_line += ':$'+attr.name
+            # option holds either required or default value
+            elif attr.default_value.name:
+                s = Text(attr.type)
+                attr_type_str = s[s.rfind('.') + 1:].lower()
+
+                default_value = helper.get_attribute_value(attr.default_value)
                 def format_value(value):  # type: (Any) -> Text
                     if isinstance(value, float):
                         formatted = str(np.round(value, 5))
@@ -419,66 +480,25 @@ def gen_code(schema,fefile) :
                     return str(value)

                 if isinstance(default_value, list):
-
-                    value = default_value[0]
                     default_value = [format_value(val) for val in default_value]
                     attr_option_str = '{}'.format(default_value)
                     attr_option_str = attr_option_str.replace('[', '{', 1)
                     attr_option_str = attr_option_str.replace(']', '}', 1)
-                    # TODO the list type is homogenous or htergeneous?
-
-                    if isinstance(value, float) :
-                        attr_type_str = list_str+'<float>'
-                        attr_option_str = attr_option_str.replace("'", '')
-                    elif isinstance(value, int) :
-                        attr_type_str = list_str+'<int64_t>'
-                        attr_option_str = attr_option_str.replace("'", '')
-                    elif isinstance(value, str) :
-                        attr_type_str = list_str+'<std::string>'
-                        attr_option_str = attr_option_str.replace("'", '"')
-                    elif isinstance(value, (bytes, bytearray)) :
-                        attr_type_str = list_str+'<std::string>'
-                        attr_option_str = attr_option_str.replace("'", '"')
+                    if attr_type_str == 'strings' :
+                        attr_option_str = attr_option_str.replace("'", '\\"')
                     else :
-                        attr_type_str = '"unknowns"'
+                        attr_option_str = attr_option_str.replace("'", '')
                 else:
-                    if isinstance(default_value, float) :
-                        attr_type_str = '(float)'
-                        attr_option_str = default_value
-                    elif isinstance(default_value, int) :
-                        attr_option_str = default_value
-                        attr_type_str=''
-                    elif isinstance(default_value, str) :
-                        attr_type_str = '"str"'
-                    elif isinstance(default_value, (bytes, bytearray)) :
-                        attr_type_str = '"str"'
-                    else :
-                        attr_type_str = '"unknown"'
                     default_value = format_value(default_value)
-                    if attr_type_str == '"str"' :
-                        attr_option_str = '"'+default_value+'"'
-                        attr_type_str=''
-                    else :
-                        attr_option_str = default_value
-                attr_value = attr_type_str+attr_option_str
+                    attr_option_str = default_value
+                attr_line += get_attr_type_with_default(attr_type_str, attr_option_str)
+                attr_line += ':$'+attr.name
             else:
-                #no default value
-                continue
-
-            attr_line = line_indent+line_indent+line_indent+line_indent
-            if not first_attr:
-                attr_line += ',{'
-            else :
-                attr_line += ' {'
-            first_attr = False
-
-            attr_line += '"'+attr.name+'", '
-            attr_line += attr_value
-            attr_line += '}\n'
-            fefile.write(attr_line)
-        fefile.write(line_indent+line_indent+line_indent+'});\n')
-
-
+                s = Text(attr.type)
+                attr_type_str = s[s.rfind('.') + 1:].lower()
+                attr_line += get_attr_type_optional(attr_type_str)
+                attr_line += ':$'+attr.name
+    return attr_line

 def main(args):  # type: (Type[Args]) -> None
     with io.open(args.changelog, 'w', newline='') as fout:
@@ -496,7 +516,6 @@ def main(args):  # type: (Type[Args]) -> None
         fout.write('\n')

         for domain, versionmap in sorted(dv_index.items()):
-            print("domain", domain)
             if not should_render_domain(domain):
                 continue
@@ -633,6 +652,6 @@ if __name__ == '__main__':
     class Args(object):
         output = os.path.join(docs_dir, 'Operators' + ext)
         changelog = os.path.join(docs_dir, 'Changelog' + ext)
-        tdfile = os.path.join(docs_dir, 'onnxop.inc')
+        tdfile = os.path.join(base_dir, 'onnxop.inc')
     print(Args)
     main(Args)
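For reviewers tracing what `gen_attr_ins` produces: a schema attribute with a default becomes a declaration like `DefaultValuedAttr<I64Attr, "1">:$group`, and one without becomes `OptionalAttr<I64ArrayAttr>:$pads`; ODS then emits typed accessors for each. A minimal sketch of the consumer side, hedged: `readConvAttrs` is illustrative and assumes the dialect's generated header is included, but the accessor behavior shown matches how the onnx_ops.cpp hunks below use `group()`, `padsAttr()`, and friends:

```cpp
// Sketch, not project code: consuming accessors ODS generates from
//   DefaultValuedAttr<I64Attr, "1">:$group,
//   OptionalAttr<I64ArrayAttr>:$pads
void readConvAttrs(mlir::ONNXConvNoBiasOp op) {
  // Defaulted attribute: yields 1 when the importer omitted it.
  int64_t group = op.group().getSExtValue();
  (void)group;
  // Optional attribute: the *Attr accessor returns a null ArrayAttr
  // when the attribute is absent, so it can be tested directly.
  if (mlir::ArrayAttr pads = op.padsAttr()) {
    for (mlir::Attribute p : pads.getValue())
      (void)p.cast<mlir::IntegerAttr>().getInt();
  }
}
```

This is what lets the hand-written verifiers below be deleted: "attribute not specified" can no longer happen for defaulted attributes, because the accessor itself supplies the default.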
a/src/dialect/onnx/onnx.td b/src/dialect/onnx/onnx.td
index 710f3af..8fd0e09 100644
--- a/src/dialect/onnx/onnx.td
+++ b/src/dialect/onnx/onnx.td
@@ -99,8 +99,14 @@ def ONNXGemmNoBiasOp: ONNX_Op<"GemmNoBias",
   }];

-  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$lhs_in, AnyTypeOf<[AnyMemRef, AnyTensor]>:$rhs_in);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A,
+           AnyTypeOf<[AnyMemRef, AnyTensor]>:$B,
+           DefaultValuedAttr<F32Attr, "1.0">:$alpha,
+           DefaultValuedAttr<F32Attr, "1.0">:$beta,
+           DefaultValuedAttr<I64Attr, "0">:$transA,
+           DefaultValuedAttr<I64Attr, "0">:$transB);
+
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y);
 }

 def ONNXConvNoBiasOp:ONNX_Op<"ConvNoBias",
@@ -110,10 +116,15 @@ def ONNXConvNoBiasOp:ONNX_Op<"ConvNoBias",
     "The convolution operator consumes an input tensor and a filter, and"
     "computes the output."
   }];
-  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, AnyTypeOf<[AnyMemRef, AnyTensor]>:$W);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
-
-  let verifier = [{ return ::verify(*this); }];
+  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X,
+           AnyTypeOf<[AnyMemRef, AnyTensor]>:$W,
+           DefaultValuedAttr<StrAttr, "NOTSET">:$auto_pad,
+           OptionalAttr<I64ArrayAttr>:$dilations,
+           DefaultValuedAttr<I64Attr, "1">:$group,
+           OptionalAttr<I64ArrayAttr>:$kernel_shape,
+           OptionalAttr<I64ArrayAttr>:$pads,
+           OptionalAttr<I64ArrayAttr>:$strides);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y);
 }

 def ONNXMaxPoolSingleOutOp: ONNX_Op<"MaxPoolSingleOut",
@@ -123,8 +134,15 @@ def ONNXMaxPoolSingleOutOp: ONNX_Op<"MaxPoolSingleOut",
     "ONNX MaxPool operation with a single output."
     "See ONNXMaxPoolOp for a full description of the MaxPool semantics."
   }];
-  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X,
+           DefaultValuedAttr<StrAttr, "NOTSET">:$auto_pad,
+           DefaultValuedAttr<I64Attr, "0">:$ceil_mode,
+           OptionalAttr<I64ArrayAttr>:$dilations,
+           DefaultValuedAttr<I64ArrayAttr, "{}">:$kernel_shape,
+           OptionalAttr<I64ArrayAttr>:$pads,
+           DefaultValuedAttr<I64Attr, "0">:$storage_order,
+           OptionalAttr<I64ArrayAttr>:$strides);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y);
 }

 #endif // ONNX_OPS
diff --git a/src/dialect/onnx/onnx_ops.cpp b/src/dialect/onnx/onnx_ops.cpp
index 369d517..33468f9 100644
--- a/src/dialect/onnx/onnx_ops.cpp
+++ b/src/dialect/onnx/onnx_ops.cpp
@@ -435,8 +435,8 @@ void ONNXTransposeOp::inferShapes() {
   auto arrayTy = getOperand().getType().cast<RankedTensorType>();
   SmallVector<int64_t, 2> dims;

-  if (auto permutation = getAttrOfType<ArrayAttr>(
-          ONNXTransposeOp::getPermAttrName())) {
+  auto permutation = ONNXTransposeOp::permAttr();
+  if (permutation) {
     // Perform transposition according to perm attribute.
    for (auto perm : permutation.getValue())
       dims.emplace_back(arrayTy.getShape()[perm.cast<IntegerAttr>().getInt()]);
@@ -449,20 +451,6 @@ void ONNXTransposeOp::inferShapes() {
   getResult().setType(RankedTensorType::get(dims, arrayTy.getElementType()));
 }

-LogicalResult verify(ONNXTransposeOp op) {
-  auto module = op.getParentOfType<ModuleOp>();
-  if (!module)
-    op.emitError("Expected to belong to a module.");
-
-  if (auto permutation = op.getAttrOfType<ArrayAttr>(
-          ONNXTransposeOp::getPermAttrName())) {
-    for (auto perm : permutation.getValue())
-      if (perm.cast<IntegerAttr>().getInt() < 0)
-        op.emitError("Cannot tranpose, permuation contains negative index.");
-  }
-
-  return success();
-}

 //===----------------------------------------------------------------------===//

@@ -491,11 +479,9 @@ void ONNXConvNoBiasOp::inferShapes() {
     emitError("Weight size not compatible with data size.");

   // Required attribute auto_pad defaults to NOTSET.
-  auto autoPad = getAttrOfType<StringAttr>(
-      ONNXConvOp::getAutoPadAttrName()).getValue();
+  auto autoPad = auto_pad();
   // Group is a required attribute and should have default value of 1.
-  int64_t group = getAttrOfType<IntegerAttr>(
-      ONNXConvOp::getGroupAttrName()).getInt();
+  int64_t group = ONNXConvNoBiasOp::group().getSExtValue();
   // Check that the X.shape[1] == (W.shape[1] * group) == C condition holds.
   if (dataShape[1] != (weightShape[1] * group))
     emitError("Channel dimension mismatch.");
@@ -527,8 +513,7 @@ void ONNXConvNoBiasOp::inferShapes() {
   // Use kernel_shape attribute if present otherwise use size from weight
   // argument.
   SmallVector<int64_t, 2> kernelDims;
-  if (auto kernelShape = getAttrOfType<ArrayAttr>(
-          ONNXConvOp::getKernelShapeAttrName())) {
+  if (auto kernelShape = kernel_shapeAttr()) {
     if (kernelShape.getValue().size() != nDims)
       emitError("kernel_shape length incompatible with spatial dimensions.");
     for (int i = 0; i < nDims; ++i)
@@ -550,8 +535,7 @@ void ONNXConvNoBiasOp::inferShapes() {
   //
   // From a dimensionality perspective the kernel size becomes the dilated
   // kernel size.
-  if (auto dilations = getAttrOfType<ArrayAttr>(
-          ONNXConvOp::getDilationsAttrName())) {
+  if (auto dilations = dilationsAttr()) {
     if (dilations.getValue().size() != nDims)
       emitError("dilations length incompatible with spatial dimensions.");
     for (int i = 0; i < nDims; ++i)
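Between the dilations hunk above and the pads/strides hunks below, the quantity being computed is the usual ONNX output-extent arithmetic. A self-contained sketch for one spatial axis (`convOutputDim` is illustrative, not code from this patch):

```cpp
#include <cmath>
#include <cstdint>

// Illustrative helper: the standard ONNX output extent for one spatial
// axis with explicit pads, a dilated kernel extent
// dk = (kernel - 1) * dilation + 1, and a given stride:
//   out = floor((input + padBegin + padEnd - dk) / stride) + 1
int64_t convOutputDim(int64_t input, int64_t padBegin, int64_t padEnd,
                      int64_t dilatedKernel, int64_t stride) {
  double numer = static_cast<double>(input + padBegin + padEnd - dilatedKernel);
  return static_cast<int64_t>(std::floor(numer / stride)) + 1;
}

// Example: a 5-wide input, 3-wide kernel, no dilation or padding, and
// stride 1 gives convOutputDim(5, 0, 0, 3, 1) == 3.
```

The hunks below realize the same formula in pieces: the pads hunk folds `padBegin + padEnd` into the running extent when `auto_pad` is `NOTSET`, and the strides hunk applies the final `floor(out / stride)` step.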
@@ -567,8 +551,7 @@ void ONNXConvNoBiasOp::inferShapes() {
   if (autoPad == "NOTSET") {
     // Use pads to determine the padding. If attribute is not
     // present then pads is considered to be all zeros (no padding).
-    if (auto pads = getAttrOfType<ArrayAttr>(
-            ONNXConvOp::getPadsAttrName())) {
+    if (auto pads = padsAttr()) {
       // pads consists of two entries for each spatial axis.
       if (pads.getValue().size() != 2 * nDims)
         emitError("pads size is not twice the spatial size.");
@@ -599,13 +582,12 @@ void ONNXConvNoBiasOp::inferShapes() {
   }

   // Strides
-  if (auto strides = getAttrOfType<ArrayAttr>(
-          ONNXConvOp::getStridesAttrName())) {
+  if (auto strides = ONNXConvNoBiasOp::stridesAttr()) {
     if (strides.getValue().size() != nDims)
       emitError("strides length incompatible with spatial dimensions.");
     for (int i = 0; i < nDims; ++i) {
       int64_t stride =
-          (strides.getValue()[i]).cast<IntegerAttr>().getInt();
+          strides.getValue()[i].cast<IntegerAttr>().getInt();
       outSpatialDims[i] = floor(outSpatialDims[i] / stride);
     }
   }
@@ -617,28 +599,6 @@ void ONNXConvNoBiasOp::inferShapes() {
   getResult().setType(RankedTensorType::get(dims, dataTy.getElementType()));
 }

-LogicalResult verify(ONNXConvNoBiasOp op) {
-  auto module = op.getParentOfType<ModuleOp>();
-  if (!module)
-    op.emitError("expected to belong to a module");
-
-  auto autoPadAttr = op.getAttrOfType<StringAttr>(
-      ONNXConvOp::getAutoPadAttrName());
-  if (!autoPadAttr)
-    op.emitError("auto_pad attribute not specified.");
-  if (autoPadAttr.getValue() != "NOTSET")
-    if (auto pads = op.getAttrOfType<ArrayAttr>(
-            ONNXConvOp::getPadsAttrName()))
-      op.emitError("auto_pad and pads are both set.");
-
-  auto groupAttr =
-      op.getAttrOfType<IntegerAttr>(ONNXConvOp::getGroupAttrName());
-  if (!groupAttr)
-    op.emitError("group attribute not specified.");
-
-  return success();
-}
-
 //===----------------------------------------------------------------------===//
 // TableGen'd op method definitions
 //===----------------------------------------------------------------------===//
diff --git a/src/dialect/onnx/onnxop.inc b/src/dialect/onnx/onnxop.inc
index f377772..5db12e9 100644
--- a/src/dialect/onnx/onnxop.inc
+++ b/src/dialect/onnx/onnxop.inc
@@ -7,7 +7,7 @@ def ONNXAbsOp:ONNX_Op<"Abs",
     "the tensor elementwise."
   }];
   let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y);
 }

 def ONNXAcosOp:ONNX_Op<"Acos",
@@ -17,7 +17,7 @@ def ONNXAcosOp:ONNX_Op<"Acos",
     "Calculates the arccosine (inverse of cosine) of the given input tensor, element-wise."
   }];
   let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output);
 }

 def ONNXAcoshOp:ONNX_Op<"Acosh",
@@ -27,7 +27,7 @@ def ONNXAcoshOp:ONNX_Op<"Acosh",
     "Calculates the hyperbolic arccosine of the given input tensor element-wise."
   }];
   let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output);
 }

 def ONNXAddOp:ONNX_Op<"Add",
@@ -39,8 +39,9 @@ def ONNXAddOp:ONNX_Op<"Add",
     ""
     "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)."
   }];
-  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A,
+           AnyTypeOf<[AnyMemRef, AnyTensor]>:$B);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_C);
 }

 def ONNXAndOp:ONNX_Op<"And",
@@ -52,8 +53,9 @@ def ONNXAndOp:ONNX_Op<"And",
     ""
     "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)."
}]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_C); } def ONNXArgMaxOp:ONNX_Op<"ArgMax", @@ -61,15 +63,14 @@ def ONNXArgMaxOp:ONNX_Op<"ArgMax", let summary = "ONNX ArgMax operation"; let description = [{ "Computes the indices of the max elements of the input tensor's element along the " - "provided axis. The resulting tensor has the same rank as the input if keepdims equal 1. " - "If keepdims equal 0, then the resulting tensor have the reduced dimension pruned. " - "If select_last_index is True (default False), the index of the last occurence of the max " - "is selected if the max appears more than once in the input. Otherwise the index of the " - "first occurence is selected." + "provided axis. The resulted tensor has the same rank as the input if keepdims equal 1." + "If keepdims equal 0, then the resulted tensor have the reduced dimension pruned. " "The type of the output tensor is integer." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + DefaultValuedAttr:$axis, + DefaultValuedAttr:$keepdims); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_reduced); } def ONNXArgMinOp:ONNX_Op<"ArgMin", @@ -77,15 +78,14 @@ def ONNXArgMinOp:ONNX_Op<"ArgMin", let summary = "ONNX ArgMin operation"; let description = [{ "Computes the indices of the min elements of the input tensor's element along the " - "provided axis. The resulting tensor has the same rank as the input if keepdims equal 1. " - "If keepdims equal 0, then the resulting tensor have the reduced dimension pruned. " - "If select_last_index is True (default False), the index of the last occurence of the min " - "is selected if the min appears more than once in the input. Otherwise the index of the " - "first occurence is selected." + "provided axis. The resulted tensor has the same rank as the input if keepdims equal 1." + "If keepdims equal 0, then the resulted tensor have the reduced dimension pruned. " "The type of the output tensor is integer." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + DefaultValuedAttr:$axis, + DefaultValuedAttr:$keepdims); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_reduced); } def ONNXAsinOp:ONNX_Op<"Asin", @@ -95,7 +95,7 @@ def ONNXAsinOp:ONNX_Op<"Asin", "Calculates the arcsine (inverse of sine) of the given input tensor, element-wise." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXAsinhOp:ONNX_Op<"Asinh", @@ -105,7 +105,7 @@ def ONNXAsinhOp:ONNX_Op<"Asinh", "Calculates the hyperbolic arcsine of the given input tensor element-wise." 
}]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXAtanOp:ONNX_Op<"Atan", @@ -115,7 +115,7 @@ def ONNXAtanOp:ONNX_Op<"Atan", "Calculates the arctangent (inverse of tangent) of the given input tensor, element-wise." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXAtanhOp:ONNX_Op<"Atanh", @@ -125,7 +125,7 @@ def ONNXAtanhOp:ONNX_Op<"Atanh", "Calculates the hyperbolic arctangent of the given input tensor element-wise." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXAveragePoolOp:ONNX_Op<"AveragePool", @@ -162,8 +162,14 @@ def ONNXAveragePoolOp:ONNX_Op<"AveragePool", " The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero)." " " }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + DefaultValuedAttr:$auto_pad, + DefaultValuedAttr:$ceil_mode, + DefaultValuedAttr:$count_include_pad, + DefaultValuedAttr:$kernel_shape, + OptionalAttr:$pads, + OptionalAttr:$strides); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXBatchNormalizationOp:ONNX_Op<"BatchNormalization", @@ -181,8 +187,14 @@ def ONNXBatchNormalizationOp:ONNX_Op<"BatchNormalization", "to flatten the input shape to (N x C*D1*D2 ..*Dn) before a BatchNormalization Op." "This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, AnyTypeOf<[AnyMemRef, AnyTensor]>:$scale, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B, AnyTypeOf<[AnyMemRef, AnyTensor]>:$mean, AnyTypeOf<[AnyMemRef, AnyTensor]>:$var); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>, AnyTypeOf<[AnyMemRef, AnyTensor]>, AnyTypeOf<[AnyMemRef, AnyTensor]>, AnyTypeOf<[AnyMemRef, AnyTensor]>, AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$scale, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$mean, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$var, + DefaultValuedAttr:$epsilon, + DefaultValuedAttr:$momentum); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y, AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_mean, AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_var, AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_saved_mean, AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_saved_var); } def ONNXBitShiftOp:ONNX_Op<"BitShift", @@ -202,8 +214,10 @@ def ONNXBitShiftOp:ONNX_Op<"BitShift", " not necessarily identical." "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)." 
}]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, AnyTypeOf<[AnyMemRef, AnyTensor]>:$Y); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$Y, + StrAttr:$direction); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Z); } def ONNXCastOp:ONNX_Op<"Cast", @@ -230,8 +244,9 @@ def ONNXCastOp:ONNX_Op<"Cast", "For example, a 64-bit float 3.1415926459 may be round to a 32-bit float 3.141592. Similarly, converting" "an integer 36 to Boolean may produce 1 because we truncate bits which can't be stored in the targeted type." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, + DefaultValuedAttr:$to); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXCeilOp:ONNX_Op<"Ceil", @@ -243,7 +258,7 @@ def ONNXCeilOp:ONNX_Op<"Ceil", "the tensor elementwise." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXClipOp:ONNX_Op<"Clip", @@ -254,8 +269,10 @@ def ONNXClipOp:ONNX_Op<"Clip", "specified by the inputs 'min' and 'max'. They default to" "numeric_limits::lowest() and numeric_limits::max(), respectively." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, AnyTypeOf<[AnyMemRef, AnyTensor]>:$min, AnyTypeOf<[AnyMemRef, AnyTensor]>:$max); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$min, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$max); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXCompressOp:ONNX_Op<"Compress", @@ -267,8 +284,10 @@ def ONNXCompressOp:ONNX_Op<"Compress", " Compress behaves like numpy.compress: https://docs.scipy.org/doc/numpy/reference/generated/numpy.compress.html" " " }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, AnyTypeOf<[AnyMemRef, AnyTensor]>:$condition); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$condition, + OptionalAttr:$axis); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXConcatOp:ONNX_Op<"Concat", @@ -277,8 +296,9 @@ def ONNXConcatOp:ONNX_Op<"Concat", let description = [{ "Concatenate a list of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on." }]; - let arguments = (ins Variadic>:$inputs); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins Variadic>:$inputs, + DefaultValuedAttr:$axis); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_concat_result); } def ONNXConcatFromSequenceOp:ONNX_Op<"ConcatFromSequence", @@ -290,8 +310,10 @@ def ONNXConcatFromSequenceOp:ONNX_Op<"ConcatFromSequence", "By default 'new_axis' is 0, the behavior is similar to numpy.concatenate." "When 'new_axis' is 1, the behavior is similar to numpy.stack." 
}]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input_sequence); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input_sequence, + I64Attr:$axis, + DefaultValuedAttr:$new_axis); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_concat_result); } def ONNXConstantOp:ONNX_Op<"Constant", @@ -301,8 +323,9 @@ def ONNXConstantOp:ONNX_Op<"Constant", "A constant tensor. Exactly one of the two attributes, either value or sparse_value," "must be specified." }]; - let arguments = (ins ); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins OptionalAttr:$sparse_value, + OptionalAttr:$value); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXConstantOfShapeOp:ONNX_Op<"ConstantOfShape", @@ -311,28 +334,28 @@ def ONNXConstantOfShapeOp:ONNX_Op<"ConstantOfShape", let description = [{ "Generate a tensor with given value and shape." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, + OptionalAttr:$value); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } -def ONNXConvOp:ONNX_Op<"Conv", +def ONNXConvOp:ONNX_Op<"Conv", [NoSideEffect]> { let summary = "ONNX Conv operation"; let description = [{ "The convolution operator consumes an input tensor and a filter, and" "computes the output." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, AnyTypeOf<[AnyMemRef, AnyTensor]>:$W, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); - - let extraClassDeclaration = [{ - static StringRef getAutoPadAttrName() { return "auto_pad"; } - static StringRef getDilationsAttrName() { return "dilations"; } - static StringRef getGroupAttrName() { return "group"; } - static StringRef getKernelShapeAttrName() { return "kernel_shape"; } - static StringRef getPadsAttrName() { return "pads"; } - static StringRef getStridesAttrName() { return "strides"; } - }]; + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$W, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B, + DefaultValuedAttr:$auto_pad, + OptionalAttr:$dilations, + DefaultValuedAttr:$group, + OptionalAttr:$kernel_shape, + OptionalAttr:$pads, + OptionalAttr:$strides); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXConvIntegerOp:ONNX_Op<"ConvInteger", @@ -342,8 +365,17 @@ def ONNXConvIntegerOp:ONNX_Op<"ConvInteger", "The integer convolution operator consumes an input tensor, its zero-point, a filter, and its zero-point," "and computes the output. The production MUST never overflow. The accumulation may overflow if and only if in 32 bits." 
}]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$x, AnyTypeOf<[AnyMemRef, AnyTensor]>:$w, AnyTypeOf<[AnyMemRef, AnyTensor]>:$x_zero_point, AnyTypeOf<[AnyMemRef, AnyTensor]>:$w_zero_point); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$x, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$w, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$x_zero_point, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$w_zero_point, + DefaultValuedAttr:$auto_pad, + OptionalAttr:$dilations, + DefaultValuedAttr:$group, + OptionalAttr:$kernel_shape, + OptionalAttr:$pads, + OptionalAttr:$strides); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_y); } def ONNXConvTransposeOp:ONNX_Op<"ConvTranspose", @@ -365,8 +397,18 @@ def ONNXConvTransposeOp:ONNX_Op<"ConvTranspose", "" " " }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, AnyTypeOf<[AnyMemRef, AnyTensor]>:$W, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$W, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B, + DefaultValuedAttr:$auto_pad, + OptionalAttr:$dilations, + DefaultValuedAttr:$group, + OptionalAttr:$kernel_shape, + OptionalAttr:$output_padding, + OptionalAttr:$output_shape, + OptionalAttr:$pads, + OptionalAttr:$strides); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXCosOp:ONNX_Op<"Cos", @@ -376,7 +418,7 @@ def ONNXCosOp:ONNX_Op<"Cos", "Calculates the cosine of the given input tensor, element-wise." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXCoshOp:ONNX_Op<"Cosh", @@ -386,7 +428,7 @@ def ONNXCoshOp:ONNX_Op<"Cosh", "Calculates the hyperbolic cosine of the given input tensor element-wise." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXCumSumOp:ONNX_Op<"CumSum", @@ -414,8 +456,11 @@ def ONNXCumSumOp:ONNX_Op<"CumSum", "```" " " }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$x, AnyTypeOf<[AnyMemRef, AnyTensor]>:$axis); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$x, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$axis, + DefaultValuedAttr:$exclusive, + DefaultValuedAttr:$reverse); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_y); } def ONNXDepthToSpaceOp:ONNX_Op<"DepthToSpace", @@ -450,8 +495,10 @@ def ONNXDepthToSpaceOp:ONNX_Op<"DepthToSpace", "y = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize])" "" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, + I64Attr:$blocksize, + DefaultValuedAttr:$mode); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXDequantizeLinearOp:ONNX_Op<"DequantizeLinear", @@ -463,8 +510,10 @@ def ONNXDequantizeLinearOp:ONNX_Op<"DequantizeLinear", "'x_zero_point' and 'x' must have same type. 'x' and 'y' must have same shape. In the case of dequantizing int32," "there's no zero point (zero point is supposed to be 0)." 
}]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$x, AnyTypeOf<[AnyMemRef, AnyTensor]>:$x_scale, AnyTypeOf<[AnyMemRef, AnyTensor]>:$x_zero_point); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$x, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$x_scale, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$x_zero_point); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_y); } def ONNXDetOp:ONNX_Op<"Det", @@ -478,7 +527,7 @@ def ONNXDetOp:ONNX_Op<"Det", "e.g., When the input is 2-D, the output is a scalar(shape is empty: `[]`)." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXDivOp:ONNX_Op<"Div", @@ -489,8 +538,9 @@ def ONNXDivOp:ONNX_Op<"Div", "" "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_C); } def ONNXDropoutOp:ONNX_Op<"Dropout", @@ -504,8 +554,9 @@ def ONNXDropoutOp:ONNX_Op<"Dropout", "the training phase, so during testing nothing needs to be done." "This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>, AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + DefaultValuedAttr:$ratio); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output, AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_mask); } def ONNXDynamicQuantizeLinearOp:ONNX_Op<"DynamicQuantizeLinear", @@ -536,7 +587,7 @@ def ONNXDynamicQuantizeLinearOp:ONNX_Op<"DynamicQuantizeLinear", "```" }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$x); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>, AnyTypeOf<[AnyMemRef, AnyTensor]>, AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_y, AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_y_scale, AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_y_zero_point); } def ONNXEluOp:ONNX_Op<"Elu", @@ -548,8 +599,9 @@ def ONNXEluOp:ONNX_Op<"Elu", "0`, `f(x) = x for x >= 0`., is applied to the tensor elementwise." "" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + DefaultValuedAttr:$alpha); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXEqualOp:ONNX_Op<"Equal", @@ -561,8 +613,9 @@ def ONNXEqualOp:ONNX_Op<"Equal", "" "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)." 
}]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_C); } def ONNXErfOp:ONNX_Op<"Erf", @@ -572,7 +625,7 @@ def ONNXErfOp:ONNX_Op<"Erf", "Computes the error function of the given input tensor element-wise." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXExpOp:ONNX_Op<"Exp", @@ -582,7 +635,7 @@ def ONNXExpOp:ONNX_Op<"Exp", "Calculates the exponential of the given input tensor, element-wise." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXExpandOp:ONNX_Op<"Expand", @@ -598,8 +651,9 @@ def ONNXExpandOp:ONNX_Op<"Expand", "It is possible that the output.shape is not equal to shape, when some dimensions in shape is equal to 1," "or the shape.ndim < input.shape.ndim." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, AnyTypeOf<[AnyMemRef, AnyTensor]>:$shape); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$shape); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXEyeLikeOp:ONNX_Op<"EyeLike", @@ -614,8 +668,10 @@ def ONNXEyeLikeOp:ONNX_Op<"EyeLike", "The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the" "TensorProto message and be valid as an output type." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, + OptionalAttr:$dtype, + DefaultValuedAttr:$k); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXFlattenOp:ONNX_Op<"Flatten", @@ -626,8 +682,9 @@ def ONNXFlattenOp:ONNX_Op<"Flatten", "(d_0, d_1, ... d_n) then the output will have shape" "(d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn)." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, + DefaultValuedAttr:$axis); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXFloorOp:ONNX_Op<"Floor", @@ -639,7 +696,7 @@ def ONNXFloorOp:ONNX_Op<"Floor", "the tensor elementwise." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXGRUOp:ONNX_Op<"GRU", @@ -720,8 +777,20 @@ def ONNXGRUOp:ONNX_Op<"GRU", " - Ht = (1 - zt) (.) ht + zt (.) Ht-1" "This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted." 
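The Flatten description above gives the output shape as (d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn); a numpy sketch of exactly that reshape, assuming the spec default axis = 1:

```
import numpy as np

def flatten(x, axis=1):
    # Output shape: (d_0 * ... * d_(axis-1), d_axis * ... * d_n).
    outer = int(np.prod(x.shape[:axis], dtype=np.int64))
    inner = int(np.prod(x.shape[axis:], dtype=np.int64))
    return x.reshape(outer, inner)

x = np.zeros((2, 3, 4, 5))
print(flatten(x).shape)     # (2, 60)
print(flatten(x, 3).shape)  # (24, 5)
```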
}]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, AnyTypeOf<[AnyMemRef, AnyTensor]>:$W, AnyTypeOf<[AnyMemRef, AnyTensor]>:$R, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B, AnyTypeOf<[AnyMemRef, AnyTensor]>:$sequence_lens, AnyTypeOf<[AnyMemRef, AnyTensor]>:$initial_h); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>, AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$W, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$R, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$sequence_lens, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$initial_h, + OptionalAttr:$activation_alpha, + OptionalAttr:$activation_beta, + OptionalAttr:$activations, + OptionalAttr:$clip, + DefaultValuedAttr:$direction, + OptionalAttr:$hidden_size, + DefaultValuedAttr:$linear_before_reset); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y, AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y_h); } def ONNXGatherOp:ONNX_Op<"Gather", @@ -786,8 +855,10 @@ def ONNXGatherOp:ONNX_Op<"Gather", " ]" "```" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, AnyTypeOf<[AnyMemRef, AnyTensor]>:$indices); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$indices, + DefaultValuedAttr:$axis); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXGatherElementsOp:ONNX_Op<"GatherElements", @@ -850,8 +921,10 @@ def ONNXGatherElementsOp:ONNX_Op<"GatherElements", " ]" "```" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, AnyTypeOf<[AnyMemRef, AnyTensor]>:$indices); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$indices, + DefaultValuedAttr:$axis); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXGatherNDOp:ONNX_Op<"GatherND", @@ -924,8 +997,9 @@ def ONNXGatherNDOp:ONNX_Op<"GatherND", " output = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2] " "" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, AnyTypeOf<[AnyMemRef, AnyTensor]>:$indices); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$indices); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXGemmOp:ONNX_Op<"Gemm", @@ -946,8 +1020,14 @@ def ONNXGemmOp:ONNX_Op<"Gemm", "This operator supports **unidirectional broadcasting** (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check [the doc](Broadcasting.md)." "This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted." 
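The new axis attribute on Gather behaves like numpy's take along that axis; the values below are borrowed from the ONNX spec example the Gather description quotes:

```
import numpy as np

data = np.array([[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]])
indices = np.array([[0, 1], [1, 2]])

# Gather along axis 0 indexes whole rows of data.
output = np.take(data, indices, axis=0)
print(output.shape)        # (2, 2, 2)
print(output[0].tolist())  # [[1.0, 1.2], [2.3, 3.4]]
```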
}]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B, AnyTypeOf<[AnyMemRef, AnyTensor]>:$C); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$C, + DefaultValuedAttr:$alpha, + DefaultValuedAttr:$beta, + DefaultValuedAttr:$transA, + DefaultValuedAttr:$transB); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXGlobalAveragePoolOp:ONNX_Op<"GlobalAveragePool", @@ -959,7 +1039,7 @@ def ONNXGlobalAveragePoolOp:ONNX_Op<"GlobalAveragePool", " equal to the spatial dimension of input tensor." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXGlobalLpPoolOp:ONNX_Op<"GlobalLpPool", @@ -970,8 +1050,9 @@ def ONNXGlobalLpPoolOp:ONNX_Op<"GlobalLpPool", " the values in the same channel. This is equivalent to LpPool with kernel size" " equal to the spatial dimension of input tensor." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + DefaultValuedAttr:$p); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXGlobalMaxPoolOp:ONNX_Op<"GlobalMaxPool", @@ -983,7 +1064,7 @@ def ONNXGlobalMaxPoolOp:ONNX_Op<"GlobalMaxPool", " equal to the spatial dimension of input tensor." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXGreaterOp:ONNX_Op<"Greater", @@ -995,8 +1076,9 @@ def ONNXGreaterOp:ONNX_Op<"Greater", "" "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_C); } def ONNXHardSigmoidOp:ONNX_Op<"HardSigmoid", @@ -1007,8 +1089,10 @@ def ONNXHardSigmoidOp:ONNX_Op<"HardSigmoid", "(Tensor) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta))," "is applied to the tensor elementwise." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + DefaultValuedAttr:$alpha, + DefaultValuedAttr:$beta); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXHardmaxOp:ONNX_Op<"Hardmax", @@ -1030,8 +1114,9 @@ def ONNXHardmaxOp:ONNX_Op<"Hardmax", "will throw errors. The output tensor has the same shape" "and contains the hardmax values of the corresponding input." 
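Gemm's new alpha, beta, transA and transB attributes realize Y = alpha * A' * B' + beta * C, where A' and B' are the optionally transposed inputs; a compact numpy sketch with illustrative shapes:

```
import numpy as np

def gemm(A, B, C, alpha=1.0, beta=1.0, transA=0, transB=0):
    Ap = A.T if transA else A
    Bp = B.T if transB else B
    # Y = alpha * A' * B' + beta * C; C is unidirectionally broadcast to A' @ B'.
    return alpha * (Ap @ Bp) + beta * C

A = np.ones((3, 4), dtype=np.float32)
B = np.ones((4, 2), dtype=np.float32)
C = np.zeros((1, 2), dtype=np.float32)
print(gemm(A, B, C, alpha=0.5).shape)  # (3, 2), every entry 2.0
```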
}]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, + DefaultValuedAttr:$axis); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXIdentityOp:ONNX_Op<"Identity", @@ -1042,7 +1127,7 @@ def ONNXIdentityOp:ONNX_Op<"Identity", "Identity operator" }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXIfOp:ONNX_Op<"If", @@ -1051,8 +1136,10 @@ def ONNXIfOp:ONNX_Op<"If", let description = [{ "If conditional" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$cond); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$cond, + AnyAttr:$else_branch, + AnyAttr:$then_branch); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_outputs); } def ONNXInstanceNormalizationOp:ONNX_Op<"InstanceNormalization", @@ -1066,8 +1153,11 @@ def ONNXInstanceNormalizationOp:ONNX_Op<"InstanceNormalization", "where mean and variance are computed per instance per channel." "" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, AnyTypeOf<[AnyMemRef, AnyTensor]>:$scale, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$scale, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B, + DefaultValuedAttr:$epsilon); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXIsInfOp:ONNX_Op<"IsInf", @@ -1076,8 +1166,10 @@ def ONNXIsInfOp:ONNX_Op<"IsInf", let description = [{ "Map infinity to true and other values to false." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + DefaultValuedAttr:$detect_negative, + DefaultValuedAttr:$detect_positive); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXIsNaNOp:ONNX_Op<"IsNaN", @@ -1087,7 +1179,7 @@ def ONNXIsNaNOp:ONNX_Op<"IsNaN", "Returns which elements of the input are NaN." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXLRNOp:ONNX_Op<"LRN", @@ -1105,8 +1197,12 @@ def ONNXLRNOp:ONNX_Op<"LRN", "" "Y[n, c, d1, ..., dk] = X[n, c, d1, ..., dk] / (bias + alpha / size * square_sum[n, c, d1, ..., dk] ) ^ beta" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + DefaultValuedAttr:$alpha, + DefaultValuedAttr:$beta, + DefaultValuedAttr:$bias, + I64Attr:$size); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXLSTMOp:ONNX_Op<"LSTM", @@ -1195,8 +1291,22 @@ def ONNXLSTMOp:ONNX_Op<"LSTM", " - Ht = ot (.) h(Ct)" "This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted." 
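LRN above is the one definition in this stretch that takes a required I64Attr (size) alongside three defaulted floats; its formula sums squares over a window of size channels. A reference-style numpy loop, assuming the ONNX defaults alpha = 0.0001, beta = 0.75, bias = 1.0:

```
import numpy as np

def lrn(X, size, alpha=0.0001, beta=0.75, bias=1.0):
    # square_sum[n, c, ...] sums X^2 over a window of `size` channels around c.
    C = X.shape[1]
    half_lo, half_hi = (size - 1) // 2, int(np.ceil((size - 1) / 2))
    Y = np.empty_like(X)
    for c in range(C):
        lo, hi = max(0, c - half_lo), min(C - 1, c + half_hi)
        square_sum = np.sum(X[:, lo:hi + 1] ** 2, axis=1)
        Y[:, c] = X[:, c] / (bias + alpha / size * square_sum) ** beta
    return Y

X = np.random.rand(1, 5, 4, 4).astype(np.float32)
print(lrn(X, size=3).shape)  # (1, 5, 4, 4)
```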
}]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, AnyTypeOf<[AnyMemRef, AnyTensor]>:$W, AnyTypeOf<[AnyMemRef, AnyTensor]>:$R, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B, AnyTypeOf<[AnyMemRef, AnyTensor]>:$sequence_lens, AnyTypeOf<[AnyMemRef, AnyTensor]>:$initial_h, AnyTypeOf<[AnyMemRef, AnyTensor]>:$initial_c, AnyTypeOf<[AnyMemRef, AnyTensor]>:$P); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>, AnyTypeOf<[AnyMemRef, AnyTensor]>, AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$W, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$R, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$sequence_lens, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$initial_h, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$initial_c, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$P, + OptionalAttr:$activation_alpha, + OptionalAttr:$activation_beta, + OptionalAttr:$activations, + OptionalAttr:$clip, + DefaultValuedAttr:$direction, + OptionalAttr:$hidden_size, + DefaultValuedAttr:$input_forget); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y, AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y_h, AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y_c); } def ONNXLeakyReluOp:ONNX_Op<"LeakyRelu", @@ -1207,8 +1317,9 @@ def ONNXLeakyReluOp:ONNX_Op<"LeakyRelu", "output data (Tensor) where the function `f(x) = alpha * x for x < 0`," "`f(x) = x for x >= 0`, is applied to the data tensor elementwise." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + DefaultValuedAttr:$alpha); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXLessOp:ONNX_Op<"Less", @@ -1220,8 +1331,9 @@ def ONNXLessOp:ONNX_Op<"Less", "" "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_C); } def ONNXLogOp:ONNX_Op<"Log", @@ -1231,7 +1343,7 @@ def ONNXLogOp:ONNX_Op<"Log", "Calculates the natural log of the given input tensor, element-wise." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXLogSoftmaxOp:ONNX_Op<"LogSoftmax", @@ -1253,8 +1365,9 @@ def ONNXLogSoftmaxOp:ONNX_Op<"LogSoftmax", "will throw errors. The output tensor has the same shape" "and contains the logsoftmax values of the corresponding input." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, + DefaultValuedAttr:$axis); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXLoopOp:ONNX_Op<"Loop", @@ -1395,8 +1508,11 @@ def ONNXLoopOp:ONNX_Op<"Loop", "the scan_outputs from the previous layer, possibly going through several" "point-wise operators (e.g. dropout, residual connections, linear layer)." 
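LogSoftmax above (like Softmax and Hardmax) first coerces the input into a 2D matrix [a_0 * ... * a_{axis-1}, a_axis * ... * a_{n-1}] and then normalizes each row; a numpy sketch of that coercion, assuming the spec default axis = 1:

```
import numpy as np

def logsoftmax(x, axis=1):
    rows = int(np.prod(x.shape[:axis], dtype=np.int64))
    x2 = x.reshape(rows, -1)
    # Stable log-softmax per row: x - (m + log(sum(exp(x - m)))).
    m = x2.max(axis=1, keepdims=True)
    log_z = m + np.log(np.exp(x2 - m).sum(axis=1, keepdims=True))
    return (x2 - log_z).reshape(x.shape)

x = np.array([[1.0, 2.0, 3.0]])
print(np.round(np.exp(logsoftmax(x)).sum(), 6))  # 1.0
```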
}]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$M, AnyTypeOf<[AnyMemRef, AnyTensor]>:$cond, AnyTypeOf<[AnyMemRef, AnyTensor]>:$v_initial); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$M, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$cond, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$v_initial, + AnyAttr:$body); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_v_final_and_scan_outputs); } def ONNXLpNormalizationOp:ONNX_Op<"LpNormalization", @@ -1405,8 +1521,10 @@ def ONNXLpNormalizationOp:ONNX_Op<"LpNormalization", let description = [{ "Given a matrix, apply Lp-normalization along the provided axis." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, + DefaultValuedAttr:$axis, + DefaultValuedAttr:$p); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXLpPoolOp:ONNX_Op<"LpPool", @@ -1419,8 +1537,13 @@ def ONNXLpPoolOp:ONNX_Op<"LpPool", " of the input tensor according to the kernel size and downsampling the" " data into the output tensor Y for further processing." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + DefaultValuedAttr:$auto_pad, + I64ArrayAttr:$kernel_shape, + DefaultValuedAttr:$p, + OptionalAttr:$pads, + OptionalAttr:$strides); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXMatMulOp:ONNX_Op<"MatMul", @@ -1429,8 +1552,9 @@ def ONNXMatMulOp:ONNX_Op<"MatMul", let description = [{ "Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXMatMulIntegerOp:ONNX_Op<"MatMulInteger", @@ -1440,8 +1564,11 @@ def ONNXMatMulIntegerOp:ONNX_Op<"MatMulInteger", "Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html." "The production MUST never overflow. The accumulation may overflow if and only if in 32 bits." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B, AnyTypeOf<[AnyMemRef, AnyTensor]>:$a_zero_point, AnyTypeOf<[AnyMemRef, AnyTensor]>:$b_zero_point); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$a_zero_point, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$b_zero_point); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXMaxOp:ONNX_Op<"Max", @@ -1453,7 +1580,7 @@ def ONNXMaxOp:ONNX_Op<"Max", "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)." 
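MatMulInteger above is quantized matmul: both zero points are subtracted first and the accumulation happens in 32 bits, which is why the description insists the product itself must never overflow. A numpy sketch with illustrative uint8 operands:

```
import numpy as np

A = np.array([[11, 7, 3], [10, 6, 2]], dtype=np.uint8)
B = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8)
a_zero_point = np.uint8(12)
b_zero_point = np.uint8(0)

# Subtract the zero points, then accumulate in int32.
Y = (A.astype(np.int32) - a_zero_point) @ (B.astype(np.int32) - b_zero_point)
print(Y.tolist())  # [[-38, -83], [-44, -98]]
```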
}]; let arguments = (ins Variadic>:$data_0); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_max); } def ONNXMaxPoolOp:ONNX_Op<"MaxPool", @@ -1490,8 +1617,15 @@ def ONNXMaxPoolOp:ONNX_Op<"MaxPool", " The output of each pooling window is maximum number of elements exclude pad." " " }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>, AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + DefaultValuedAttr:$auto_pad, + DefaultValuedAttr:$ceil_mode, + OptionalAttr:$dilations, + DefaultValuedAttr:$kernel_shape, + OptionalAttr:$pads, + DefaultValuedAttr:$storage_order, + OptionalAttr:$strides); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y, AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Indices); } def ONNXMaxRoiPoolOp:ONNX_Op<"MaxRoiPool", @@ -1502,8 +1636,11 @@ def ONNXMaxRoiPoolOp:ONNX_Op<"MaxRoiPool", " apply max pooling across each RoI, to produce output 4-D tensor of shape" " (num_rois, channels, pooled_shape[0], pooled_shape[1])." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, AnyTypeOf<[AnyMemRef, AnyTensor]>:$rois); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$rois, + I64ArrayAttr:$pooled_shape, + DefaultValuedAttr:$spatial_scale); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXMaxUnpoolOp:ONNX_Op<"MaxUnpool", @@ -1529,8 +1666,13 @@ def ONNXMaxUnpoolOp:ONNX_Op<"MaxUnpool", " which define the exact unpooling op. The attributes typically have the same values as the corrsponding" " pooling op that the unpooling op is trying to invert." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, AnyTypeOf<[AnyMemRef, AnyTensor]>:$I, AnyTypeOf<[AnyMemRef, AnyTensor]>:$output_shape); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$I, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$output_shape, + I64ArrayAttr:$kernel_shape, + OptionalAttr:$pads, + OptionalAttr:$strides); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXMeanOp:ONNX_Op<"Mean", @@ -1542,7 +1684,7 @@ def ONNXMeanOp:ONNX_Op<"Mean", "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)." }]; let arguments = (ins Variadic>:$data_0); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_mean); } def ONNXMeanVarianceNormalizationOp:ONNX_Op<"MeanVarianceNormalization", @@ -1552,8 +1694,9 @@ def ONNXMeanVarianceNormalizationOp:ONNX_Op<"MeanVarianceNormalization", "A MeanVarianceNormalization Function: Perform mean variance normalization" " on the input tensor X using formula:
``` (X-EX)/sqrt(E(X-EX)^2) ```" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + DefaultValuedAttr:$axes); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXMinOp:ONNX_Op<"Min", @@ -1565,7 +1708,7 @@ def ONNXMinOp:ONNX_Op<"Min", "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)." }]; let arguments = (ins Variadic>:$data_0); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_min); } def ONNXModOp:ONNX_Op<"Mod", @@ -1586,8 +1729,10 @@ def ONNXModOp:ONNX_Op<"Mod", "" " This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B, + DefaultValuedAttr:$fmod); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_C); } def ONNXMulOp:ONNX_Op<"Mul", @@ -1598,8 +1743,9 @@ def ONNXMulOp:ONNX_Op<"Mul", "" "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_C); } def ONNXMultinomialOp:ONNX_Op<"Multinomial", @@ -1609,8 +1755,11 @@ def ONNXMultinomialOp:ONNX_Op<"Multinomial", "Generate a tensor of samples from a multinomial distribution according to the probabilities" "of each of the possible outcomes." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, + DefaultValuedAttr:$dtype, + DefaultValuedAttr:$sample_size, + OptionalAttr:$seed); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXNegOp:ONNX_Op<"Neg", @@ -1622,7 +1771,7 @@ def ONNXNegOp:ONNX_Op<"Neg", "the tensor elementwise." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXNonMaxSuppressionOp:ONNX_Op<"NonMaxSuppression", @@ -1637,8 +1786,13 @@ def ONNXNonMaxSuppressionOp:ONNX_Op<"NonMaxSuppression", "The selected_indices output is a set of integers indexing into the input collection of bounding boxes representing the selected boxes." "The bounding box coordinates corresponding to the selected indices can then be obtained using the Gather or GatherND operation." 
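The fmod attribute added to Mod above selects between the two remainder conventions its description contrasts: with fmod = 0 the sign follows the divisor (numpy's mod, integer inputs only), with fmod = 1 it follows the dividend (C-style fmod). For example:

```
import numpy as np

A = np.array([-4, 7, 5], dtype=np.int64)
B = np.array([2, -3, 8], dtype=np.int64)

print(np.mod(A, B))   # [ 0 -2  5]  (fmod = 0: sign of the divisor)
print(np.fmod(A, B))  # [0 1 5]     (fmod = 1: sign of the dividend)
```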
}]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$boxes, AnyTypeOf<[AnyMemRef, AnyTensor]>:$scores, AnyTypeOf<[AnyMemRef, AnyTensor]>:$max_output_boxes_per_class, AnyTypeOf<[AnyMemRef, AnyTensor]>:$iou_threshold, AnyTypeOf<[AnyMemRef, AnyTensor]>:$score_threshold); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$boxes, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$scores, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$max_output_boxes_per_class, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$iou_threshold, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$score_threshold, + DefaultValuedAttr:$center_point_box); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_selected_indices); } def ONNXNonZeroOp:ONNX_Op<"NonZero", @@ -1651,7 +1805,7 @@ def ONNXNonZeroOp:ONNX_Op<"NonZero", " https://docs.scipy.org/doc/numpy/reference/generated/numpy.nonzero.html" }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXNotOp:ONNX_Op<"Not", @@ -1661,7 +1815,7 @@ def ONNXNotOp:ONNX_Op<"Not", "Returns the negation of the input tensor element-wise." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXOneHotOp:ONNX_Op<"OneHot", @@ -1688,8 +1842,11 @@ def ONNXOneHotOp:ONNX_Op<"OneHot", " output[i, j, k, input[i, j, k]] = 1 for all i, j, k and 0 otherwise." "" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$indices, AnyTypeOf<[AnyMemRef, AnyTensor]>:$depth, AnyTypeOf<[AnyMemRef, AnyTensor]>:$values); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$indices, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$depth, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$values, + DefaultValuedAttr:$axis); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXOrOp:ONNX_Op<"Or", @@ -1701,8 +1858,9 @@ def ONNXOrOp:ONNX_Op<"Or", "" "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_C); } def ONNXPReluOp:ONNX_Op<"PRelu", @@ -1714,8 +1872,9 @@ def ONNXPReluOp:ONNX_Op<"PRelu", "`f(x) = x for x >= 0`., is applied to the data tensor elementwise." "This operator supports **unidirectional broadcasting** (tensor slope should be unidirectional broadcastable to input tensor X); for more details please check [the doc](Broadcasting.md)." 
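OneHot above sets output[i, j, k, indices[i, j, k]] to the on-value along the inserted depth axis; a hedged numpy sketch, assuming (per the ONNX spec) that values packs [off_value, on_value] and that a negative index counts back from depth:

```
import numpy as np

def one_hot(indices, depth, values, axis=-1):
    off_value, on_value = values
    out = np.full(indices.shape + (depth,), off_value, dtype=np.float32)
    # Place the on-value where the depth coordinate equals the index.
    np.put_along_axis(out, (indices % depth)[..., None], on_value, axis=-1)
    return out if axis == -1 else np.moveaxis(out, -1, axis)

idx = np.array([1, 9, 2, 4])
print(one_hot(idx, 10, [0.0, 3.0]).shape)  # (4, 10)
```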
}]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, AnyTypeOf<[AnyMemRef, AnyTensor]>:$slope); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$slope); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXPadOp:ONNX_Op<"Pad", @@ -1804,8 +1963,11 @@ def ONNXPadOp:ONNX_Op<"Pad", " ]" "" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, AnyTypeOf<[AnyMemRef, AnyTensor]>:$pads, AnyTypeOf<[AnyMemRef, AnyTensor]>:$constant_value); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$pads, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$constant_value, + DefaultValuedAttr:$mode); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXPowOp:ONNX_Op<"Pow", @@ -1817,8 +1979,9 @@ def ONNXPowOp:ONNX_Op<"Pow", "is applied to the data tensor elementwise." "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, AnyTypeOf<[AnyMemRef, AnyTensor]>:$Y); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$Y); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Z); } def ONNXQLinearConvOp:ONNX_Op<"QLinearConv", @@ -1830,11 +1993,23 @@ def ONNXQLinearConvOp:ONNX_Op<"QLinearConv", "and computes the quantized output. Each scale and zero-point pair must have same shape." "It means they must be either scalars (per tensor) or 1-D tensors (per output channel)." "Each input or output and its related zero point must have same type." - "When bias is present it must be quantized using scale = input scale * weight scale and " - "zero point as 0." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$x, AnyTypeOf<[AnyMemRef, AnyTensor]>:$x_scale, AnyTypeOf<[AnyMemRef, AnyTensor]>:$x_zero_point, AnyTypeOf<[AnyMemRef, AnyTensor]>:$w, AnyTypeOf<[AnyMemRef, AnyTensor]>:$w_scale, AnyTypeOf<[AnyMemRef, AnyTensor]>:$w_zero_point, AnyTypeOf<[AnyMemRef, AnyTensor]>:$y_scale, AnyTypeOf<[AnyMemRef, AnyTensor]>:$y_zero_point, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$x, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$x_scale, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$x_zero_point, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$w, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$w_scale, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$w_zero_point, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$y_scale, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$y_zero_point, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B, + DefaultValuedAttr:$auto_pad, + OptionalAttr:$dilations, + DefaultValuedAttr:$group, + OptionalAttr:$kernel_shape, + OptionalAttr:$pads, + OptionalAttr:$strides); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_y); } def ONNXQLinearMatMulOp:ONNX_Op<"QLinearMatMul", @@ -1850,8 +2025,15 @@ def ONNXQLinearMatMulOp:ONNX_Op<"QLinearMatMul", "and the number of elements of scale and zero point tensor of input 'b' should be equal to the number of columns of input 'b'." "Production must never overflow, and accumulation may overflow if and only if in 32 bits." 
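Pad's new mode attribute mirrors numpy's constant, reflect and edge modes; the only wrinkle is the layout of the pads input, which the ONNX spec packs as [x1_begin, x2_begin, ..., x1_end, x2_end] rather than per-axis pairs. A sketch of that translation:

```
import numpy as np

def onnx_pad(data, pads, constant_value=0.0, mode="constant"):
    # ONNX: [x1_begin, x2_begin, ..., x1_end, x2_end];
    # numpy: ((x1_begin, x1_end), (x2_begin, x2_end), ...).
    n = data.ndim
    pad_width = list(zip(pads[:n], pads[n:]))
    if mode == "constant":
        return np.pad(data, pad_width, mode="constant",
                      constant_values=constant_value)
    return np.pad(data, pad_width, mode=mode)  # "reflect" or "edge"

data = np.array([[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]])
print(onnx_pad(data, [0, 2, 0, 0]).shape)  # (3, 4)
```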
}]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$a, AnyTypeOf<[AnyMemRef, AnyTensor]>:$a_scale, AnyTypeOf<[AnyMemRef, AnyTensor]>:$a_zero_point, AnyTypeOf<[AnyMemRef, AnyTensor]>:$b, AnyTypeOf<[AnyMemRef, AnyTensor]>:$b_scale, AnyTypeOf<[AnyMemRef, AnyTensor]>:$b_zero_point, AnyTypeOf<[AnyMemRef, AnyTensor]>:$y_scale, AnyTypeOf<[AnyMemRef, AnyTensor]>:$y_zero_point); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$a, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$a_scale, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$a_zero_point, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$b, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$b_scale, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$b_zero_point, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$y_scale, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$y_zero_point); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_y); } def ONNXQuantizeLinearOp:ONNX_Op<"QuantizeLinear", @@ -1862,8 +2044,10 @@ def ONNXQuantizeLinearOp:ONNX_Op<"QuantizeLinear", "The quantization formula is y = saturate ((x / y_scale) + y_zero_point). For saturation, it saturates to [0, 255] if it's uint8, or [-128, 127] if it's int8." "For (x / y_scale), it's rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. 'y_zero_point' and 'y' must have same type." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$x, AnyTypeOf<[AnyMemRef, AnyTensor]>:$y_scale, AnyTypeOf<[AnyMemRef, AnyTensor]>:$y_zero_point); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$x, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$y_scale, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$y_zero_point); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_y); } def ONNXRNNOp:ONNX_Op<"RNN", @@ -1932,8 +2116,19 @@ def ONNXRNNOp:ONNX_Op<"RNN", " - Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)" "This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, AnyTypeOf<[AnyMemRef, AnyTensor]>:$W, AnyTypeOf<[AnyMemRef, AnyTensor]>:$R, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B, AnyTypeOf<[AnyMemRef, AnyTensor]>:$sequence_lens, AnyTypeOf<[AnyMemRef, AnyTensor]>:$initial_h); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>, AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$W, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$R, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$B, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$sequence_lens, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$initial_h, + DefaultValuedAttr:$activation_alpha, + DefaultValuedAttr:$activation_beta, + DefaultValuedAttr:$activations, + OptionalAttr:$clip, + DefaultValuedAttr:$direction, + OptionalAttr:$hidden_size); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y, AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y_h); } def ONNXRandomNormalOp:ONNX_Op<"RandomNormal", @@ -1948,8 +2143,12 @@ def ONNXRandomNormalOp:ONNX_Op<"RandomNormal", "be one of the data types specified in the 'DataType' enum field in the" "TensorProto message." 
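QuantizeLinear above spells out the forward formula, y = saturate((x / y_scale) + y_zero_point), with round-half-to-even; numpy's rint implements exactly that rounding, so a uint8 sketch is short:

```
import numpy as np

def quantize_linear(x, y_scale, y_zero_point):
    # Round half to even, add the zero point, saturate to [0, 255] for uint8.
    y = np.rint(x / y_scale).astype(np.int32) + np.int32(y_zero_point)
    return np.clip(y, 0, 255).astype(np.uint8)

x = np.array([0.0, 2.0, 500.0, -3.0], dtype=np.float32)
print(quantize_linear(x, np.float32(2.0), np.uint8(128)))  # [128 129 255 126]
```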
}]; - let arguments = (ins ); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins DefaultValuedAttr:$dtype, + DefaultValuedAttr:$mean, + DefaultValuedAttr:$scale, + OptionalAttr:$seed, + I64ArrayAttr:$shape); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXRandomNormalLikeOp:ONNX_Op<"RandomNormalLike", @@ -1964,8 +2163,12 @@ def ONNXRandomNormalLikeOp:ONNX_Op<"RandomNormalLike", "The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the" "TensorProto message, and be valid as an output type." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, + OptionalAttr:$dtype, + DefaultValuedAttr:$mean, + DefaultValuedAttr:$scale, + OptionalAttr:$seed); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXRandomUniformOp:ONNX_Op<"RandomUniform", @@ -1979,8 +2182,12 @@ def ONNXRandomUniformOp:ONNX_Op<"RandomUniform", "be one of the data types specified in the 'DataType' enum field in the" "TensorProto message." }]; - let arguments = (ins ); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins DefaultValuedAttr:$dtype, + DefaultValuedAttr:$high, + DefaultValuedAttr:$low, + OptionalAttr:$seed, + I64ArrayAttr:$shape); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXRandomUniformLikeOp:ONNX_Op<"RandomUniformLike", @@ -1995,8 +2202,12 @@ def ONNXRandomUniformLikeOp:ONNX_Op<"RandomUniformLike", "The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the" "TensorProto message and be valid as an output type." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, + OptionalAttr:$dtype, + DefaultValuedAttr:$high, + DefaultValuedAttr:$low, + OptionalAttr:$seed); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXRangeOp:ONNX_Op<"Range", @@ -2029,8 +2240,10 @@ def ONNXRangeOp:ONNX_Op<"Range", "Output: [10, 8, 6]" "" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$start, AnyTypeOf<[AnyMemRef, AnyTensor]>:$limit, AnyTypeOf<[AnyMemRef, AnyTensor]>:$delta); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$start, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$limit, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$delta); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXReciprocalOp:ONNX_Op<"Reciprocal", @@ -2042,7 +2255,7 @@ def ONNXReciprocalOp:ONNX_Op<"Reciprocal", "the tensor elementwise." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXReduceL1Op:ONNX_Op<"ReduceL1", @@ -2056,8 +2269,10 @@ def ONNXReduceL1Op:ONNX_Op<"ReduceL1", "The above behavior is similar to numpy, with the exception that numpy default keepdims to" "False instead of True." 
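Range, defined just above, matches numpy's arange exactly, including the negative-delta case from its second example:

```
import numpy as np

print(np.arange(3, 9, 3))    # [3 6]
print(np.arange(10, 4, -2))  # [10  8  6], the "Output: [10, 8, 6]" case above
```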
}]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + OptionalAttr:$axes, + DefaultValuedAttr:$keepdims); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_reduced); } def ONNXReduceL2Op:ONNX_Op<"ReduceL2", @@ -2071,8 +2286,10 @@ def ONNXReduceL2Op:ONNX_Op<"ReduceL2", "The above behavior is similar to numpy, with the exception that numpy default keepdims to" "False instead of True." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + OptionalAttr:$axes, + DefaultValuedAttr:$keepdims); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_reduced); } def ONNXReduceLogSumOp:ONNX_Op<"ReduceLogSum", @@ -2086,8 +2303,10 @@ def ONNXReduceLogSumOp:ONNX_Op<"ReduceLogSum", "The above behavior is similar to numpy, with the exception that numpy default keepdims to" "False instead of True." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + OptionalAttr:$axes, + DefaultValuedAttr:$keepdims); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_reduced); } def ONNXReduceLogSumExpOp:ONNX_Op<"ReduceLogSumExp", @@ -2101,8 +2320,10 @@ def ONNXReduceLogSumExpOp:ONNX_Op<"ReduceLogSumExp", "The above behavior is similar to numpy, with the exception that numpy default keepdims to" "False instead of True." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + OptionalAttr:$axes, + DefaultValuedAttr:$keepdims); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_reduced); } def ONNXReduceMaxOp:ONNX_Op<"ReduceMax", @@ -2116,8 +2337,10 @@ def ONNXReduceMaxOp:ONNX_Op<"ReduceMax", "The above behavior is similar to numpy, with the exception that numpy default keepdims to" "False instead of True." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + OptionalAttr:$axes, + DefaultValuedAttr:$keepdims); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_reduced); } def ONNXReduceMeanOp:ONNX_Op<"ReduceMean", @@ -2131,8 +2354,10 @@ def ONNXReduceMeanOp:ONNX_Op<"ReduceMean", "The above behavior is similar to numpy, with the exception that numpy default keepdims to" "False instead of True." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + OptionalAttr:$axes, + DefaultValuedAttr:$keepdims); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_reduced); } def ONNXReduceMinOp:ONNX_Op<"ReduceMin", @@ -2146,8 +2371,10 @@ def ONNXReduceMinOp:ONNX_Op<"ReduceMin", "The above behavior is similar to numpy, with the exception that numpy default keepdims to" "False instead of True." 
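Every Reduce* op in this run gains the same pair of attributes, optional axes plus keepdims, and each description repeats the same caveat: ONNX defaults keepdims to 1 where numpy defaults to False. In numpy terms:

```
import numpy as np

data = np.ones((2, 3, 4), dtype=np.float32)

print(np.sum(data, axis=(1,), keepdims=True).shape)   # (2, 1, 4), keepdims = 1
print(np.sum(data, axis=(1,), keepdims=False).shape)  # (2, 4),    keepdims = 0
```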
}]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + OptionalAttr:$axes, + DefaultValuedAttr:$keepdims); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_reduced); } def ONNXReduceProdOp:ONNX_Op<"ReduceProd", @@ -2161,8 +2388,10 @@ def ONNXReduceProdOp:ONNX_Op<"ReduceProd", "The above behavior is similar to numpy, with the exception that numpy default keepdims to" "False instead of True." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + OptionalAttr:$axes, + DefaultValuedAttr:$keepdims); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_reduced); } def ONNXReduceSumOp:ONNX_Op<"ReduceSum", @@ -2176,8 +2405,10 @@ def ONNXReduceSumOp:ONNX_Op<"ReduceSum", "The above behavior is similar to numpy, with the exception that numpy default keepdims to" "False instead of True." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + OptionalAttr:$axes, + DefaultValuedAttr:$keepdims); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_reduced); } def ONNXReduceSumSquareOp:ONNX_Op<"ReduceSumSquare", @@ -2191,8 +2422,10 @@ def ONNXReduceSumSquareOp:ONNX_Op<"ReduceSumSquare", "The above behavior is similar to numpy, with the exception that numpy default keepdims to" "False instead of True." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + OptionalAttr:$axes, + DefaultValuedAttr:$keepdims); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_reduced); } def ONNXReluOp:ONNX_Op<"Relu", @@ -2204,7 +2437,7 @@ def ONNXReluOp:ONNX_Op<"Relu", "the tensor elementwise." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXReshapeOp:ONNX_Op<"Reshape", @@ -2218,8 +2451,9 @@ def ONNXReshapeOp:ONNX_Op<"Reshape", "could also be 0, in which case the actual dimension value is unchanged (i.e. taken" "from the input tensor)." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, AnyTypeOf<[AnyMemRef, AnyTensor]>:$shape); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$shape); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_reshaped); } def ONNXResizeOp:ONNX_Op<"Resize", @@ -2230,8 +2464,17 @@ def ONNXResizeOp:ONNX_Op<"Resize", "Each dimension value of the output tensor is:" " output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) if input \"sizes\" is not specified." 
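Reshape above gives 0 and -1 special meanings in the shape input: 0 copies the corresponding input dimension and at most one -1 is inferred from the remaining elements. A small resolver sketch:

```
import numpy as np

def onnx_reshape(data, shape):
    # 0 keeps the input dimension; numpy resolves the single -1 itself.
    out = [data.shape[i] if d == 0 else d for i, d in enumerate(shape)]
    return data.reshape(out)

data = np.zeros((2, 3, 4))
print(onnx_reshape(data, [0, -1]).shape)     # (2, 12)
print(onnx_reshape(data, [0, 0, -1]).shape)  # (2, 3, 4)
```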
}]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, AnyTypeOf<[AnyMemRef, AnyTensor]>:$roi, AnyTypeOf<[AnyMemRef, AnyTensor]>:$scales, AnyTypeOf<[AnyMemRef, AnyTensor]>:$sizes); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$roi, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$scales, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$sizes, + DefaultValuedAttr:$coordinate_transformation_mode, + DefaultValuedAttr:$cubic_coeff_a, + DefaultValuedAttr:$exclude_outside, + DefaultValuedAttr:$extrapolation_value, + DefaultValuedAttr:$mode, + DefaultValuedAttr:$nearest_mode); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXReverseSequenceOp:ONNX_Op<"ReverseSequence", @@ -2272,8 +2515,11 @@ def ONNXReverseSequenceOp:ONNX_Op<"ReverseSequence", " [10.0, 9.0, 8.0, 11.0]," " [15.0, 14.0, 13.0, 12.0]]" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, AnyTypeOf<[AnyMemRef, AnyTensor]>:$sequence_lens); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$sequence_lens, + DefaultValuedAttr:$batch_axis, + DefaultValuedAttr:$time_axis); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXRoiAlignOp:ONNX_Op<"RoiAlign", @@ -2292,8 +2538,15 @@ def ONNXRoiAlignOp:ONNX_Op<"RoiAlign", "the value of the sampled locations are computed directly" "through bilinear interpolation." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, AnyTypeOf<[AnyMemRef, AnyTensor]>:$rois, AnyTypeOf<[AnyMemRef, AnyTensor]>:$batch_indices); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$rois, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$batch_indices, + DefaultValuedAttr:$mode, + DefaultValuedAttr:$output_height, + DefaultValuedAttr:$output_width, + DefaultValuedAttr:$sampling_ratio, + DefaultValuedAttr:$spatial_scale); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXRoundOp:ONNX_Op<"Round", @@ -2315,7 +2568,7 @@ def ONNXRoundOp:ONNX_Op<"Round", "```" }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXScanOp:ONNX_Op<"Scan", @@ -2444,8 +2697,14 @@ def ONNXScanOp:ONNX_Op<"Scan", " }" "" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$initial_state_and_scan_inputs); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$initial_state_and_scan_inputs, + AnyAttr:$body, + I64Attr:$num_scan_inputs, + OptionalAttr:$scan_input_axes, + OptionalAttr:$scan_input_directions, + OptionalAttr:$scan_output_axes, + OptionalAttr:$scan_output_directions); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_final_state_and_scan_outputs); } def ONNXScatterOp:ONNX_Op<"Scatter", @@ -2506,8 +2765,11 @@ def ONNXScatterOp:ONNX_Op<"Scatter", " output = [[1.0, 1.1, 3.0, 2.1, 5.0]]" "```" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, AnyTypeOf<[AnyMemRef, AnyTensor]>:$indices, AnyTypeOf<[AnyMemRef, AnyTensor]>:$updates); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$indices, + AnyTypeOf<[AnyMemRef, 
AnyTensor]>:$updates, + DefaultValuedAttr:$axis); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXScatterElementsOp:ONNX_Op<"ScatterElements", @@ -2566,8 +2828,11 @@ def ONNXScatterElementsOp:ONNX_Op<"ScatterElements", " output = [[1.0, 1.1, 3.0, 2.1, 5.0]]" "```" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, AnyTypeOf<[AnyMemRef, AnyTensor]>:$indices, AnyTypeOf<[AnyMemRef, AnyTensor]>:$updates); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$indices, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$updates, + DefaultValuedAttr:$axis); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXScatterNDOp:ONNX_Op<"ScatterND", @@ -2631,8 +2896,10 @@ def ONNXScatterNDOp:ONNX_Op<"ScatterND", " [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]" "```" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, AnyTypeOf<[AnyMemRef, AnyTensor]>:$indices, AnyTypeOf<[AnyMemRef, AnyTensor]>:$updates); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$indices, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$updates); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXSeluOp:ONNX_Op<"Selu", @@ -2644,8 +2911,10 @@ def ONNXSeluOp:ONNX_Op<"Selu", "`y = gamma * (alpha * e^x - alpha) for x <= 0`, `y = gamma * x for x > 0`," "is applied to the tensor elementwise." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + DefaultValuedAttr:$alpha, + DefaultValuedAttr:$gamma); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXSequenceAtOp:ONNX_Op<"SequenceAt", @@ -2656,8 +2925,9 @@ def ONNXSequenceAtOp:ONNX_Op<"SequenceAt", "Accepted range for 'position' is in `[-n, n - 1]`, where `n` is the number of tensors in 'input_sequence'." "Negative value means counting positions from the back." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input_sequence, AnyTypeOf<[AnyMemRef, AnyTensor]>:$position); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input_sequence, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$position); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_tensor); } def ONNXSequenceConstructOp:ONNX_Op<"SequenceConstruct", @@ -2668,7 +2938,7 @@ def ONNXSequenceConstructOp:ONNX_Op<"SequenceConstruct", "All tensors in 'inputs' must have the same data type." }]; let arguments = (ins Variadic>:$inputs); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output_sequence); } def ONNXSequenceEmptyOp:ONNX_Op<"SequenceEmpty", @@ -2677,8 +2947,8 @@ def ONNXSequenceEmptyOp:ONNX_Op<"SequenceEmpty", let description = [{ "Construct an empty tensor sequence, with given data type." }]; - let arguments = (ins ); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins OptionalAttr:$dtype); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXSequenceEraseOp:ONNX_Op<"SequenceErase", @@ -2690,8 +2960,9 @@ def ONNXSequenceEraseOp:ONNX_Op<"SequenceErase", "Negative value means counting positions from the back." 
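ScatterElements above, with its new axis attribute, writes output[indices[i][j]][j] = updates[i][j] when axis = 0; numpy's put_along_axis reproduces the spec's first example:

```
import numpy as np

data = np.zeros((3, 3), dtype=np.float32)
indices = np.array([[1, 0, 2], [0, 2, 1]])
updates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)

output = data.copy()
np.put_along_axis(output, indices, updates, axis=0)
print(output.tolist())
# [[2.0, 1.1, 0.0], [1.0, 0.0, 2.2], [0.0, 2.1, 1.2]]
```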
"'position' is optional, by default it erases the last tensor from 'input_sequence'." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input_sequence, AnyTypeOf<[AnyMemRef, AnyTensor]>:$position); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input_sequence, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$position); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output_sequence); } def ONNXSequenceInsertOp:ONNX_Op<"SequenceInsert", @@ -2704,8 +2975,10 @@ def ONNXSequenceInsertOp:ONNX_Op<"SequenceInsert", "Negative value means counting positions from the back." "'position' is optional, by default it inserts 'tensor' to the back of 'input_sequence'." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input_sequence, AnyTypeOf<[AnyMemRef, AnyTensor]>:$tensor, AnyTypeOf<[AnyMemRef, AnyTensor]>:$position); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input_sequence, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$tensor, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$position); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output_sequence); } def ONNXSequenceLengthOp:ONNX_Op<"SequenceLength", @@ -2715,7 +2988,7 @@ def ONNXSequenceLengthOp:ONNX_Op<"SequenceLength", "Produces a scalar(tensor of empty shape) containing the number of tensors in 'input_sequence'." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input_sequence); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_length); } def ONNXShapeOp:ONNX_Op<"Shape", @@ -2725,7 +2998,7 @@ def ONNXShapeOp:ONNX_Op<"Shape", "Takes a tensor as input and outputs an 1D int64 tensor containing the shape of the input tensor." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_shape); } def ONNXShrinkOp:ONNX_Op<"Shrink", @@ -2737,8 +3010,10 @@ def ONNXShrinkOp:ONNX_Op<"Shrink", "bias. The formula of this operator is: If x < -lambd, y = x + bias;" "If x > lambd, y = x - bias; Otherwise, y = 0." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, + DefaultValuedAttr:$bias, + DefaultValuedAttr:$lambd); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXSigmoidOp:ONNX_Op<"Sigmoid", @@ -2750,7 +3025,7 @@ def ONNXSigmoidOp:ONNX_Op<"Sigmoid", "tensor elementwise." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y); } def ONNXSignOp:ONNX_Op<"Sign", @@ -2761,7 +3036,7 @@ def ONNXSignOp:ONNX_Op<"Sign", "If input > 0, output 1. if input < 0, output -1. if input == 0, output 0." }]; let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXSinOp:ONNX_Op<"Sin", @@ -2771,7 +3046,7 @@ def ONNXSinOp:ONNX_Op<"Sin", "Calculates the sine of the given input tensor, element-wise." 
}];
let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input);
- let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+ let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output);
}

def ONNXSinhOp:ONNX_Op<"Sinh",
@@ -2781,7 +3056,7 @@ def ONNXSinhOp:ONNX_Op<"Sinh",
"Calculates the hyperbolic sine of the given input tensor element-wise."
}];
let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input);
- let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+ let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output);
}

def ONNXSizeOp:ONNX_Op<"Size",
@@ -2791,7 +3066,7 @@ def ONNXSizeOp:ONNX_Op<"Size",
"Takes a tensor as input and outputs a int64 scalar that equals to the total number of elements of the input tensor."
}];
let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data);
- let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+ let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_size);
}

def ONNXSliceOp:ONNX_Op<"Slice",
@@ -2803,13 +3078,11 @@ def ONNXSliceOp:ONNX_Op<"Slice",
"Slices uses `starts`, `ends`, `axes` and `steps` inputs to specify the start and end"
"dimension and step for each axis in the list of axes, it uses this information to"
"slice the input `data` tensor. If a negative value is passed for any of the"
"start or end indices, it represents number of elements before the end of that"
"dimension. If the value passed to start or end is larger than the `n` (the"
"number of elements in this dimension), it represents `n`. For slicing to the"
- "end of a dimension with unknown size, it is recommended to pass in `INT_MAX` "
- "when sclicing forward and 'INT_MIN' when slicing backward."
- "If a negative value is passed for step, it represents slicing backward. "
- "However step value cannot be 0."
+ "end of a dimension with unknown size, it is recommended to pass in `INT_MAX`."
+ "If a negative value is passed for step, it represents slicing backward."
"If `axes` are omitted, they are set to `[0, ..., ndim-1]`."
"If `steps` are omitted, they are set to `[1, ..., 1]` of length `len(starts)`"
"Example 1:"
" data = ["
" [1, 2, 3, 4],"
" [5, 6, 7, 8],"
" ]"
" axes = [0, 1]"
" starts = [1, 0]"
" ends = [2, 3]"
" steps = [1, 2]"
" result = ["
" [5, 7],"
" ]"
"Example 2:"
" data = ["
" [1, 2, 3, 4],"
" [5, 6, 7, 8],"
" ]"
" starts = [0, 1]"
" ends = [-1, 1000]"
" result = ["
" [2, 3, 4],"
" ]"
}];
- let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, AnyTypeOf<[AnyMemRef, AnyTensor]>:$starts, AnyTypeOf<[AnyMemRef, AnyTensor]>:$ends, AnyTypeOf<[AnyMemRef, AnyTensor]>:$axes, AnyTypeOf<[AnyMemRef, AnyTensor]>:$steps);
- let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+ let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data,
+ AnyTypeOf<[AnyMemRef, AnyTensor]>:$starts,
+ AnyTypeOf<[AnyMemRef, AnyTensor]>:$ends,
+ AnyTypeOf<[AnyMemRef, AnyTensor]>:$axes,
+ AnyTypeOf<[AnyMemRef, AnyTensor]>:$steps);
+ let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output);
}

def ONNXSoftmaxOp:ONNX_Op<"Softmax",
@@ -2858,8 +3135,9 @@ def ONNXSoftmaxOp:ONNX_Op<"Softmax",
"will throw errors. The output tensor has the same shape"
"and contains the softmax values of the corresponding input."
}];
- let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input);
- let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+ let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input,
+ DefaultValuedAttr:$axis);
+ let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output);
}

def ONNXSoftplusOp:ONNX_Op<"Softplus",
@@ -2871,7 +3149,7 @@ def ONNXSoftplusOp:ONNX_Op<"Softplus",
"Softplus takes one input data (Tensor) and produces one output data"
"(Tensor) where the softplus function, y = ln(e^x + 1), is applied to"
"the tensor elementwise."
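The Slice rules above map directly onto Python slicing, which makes the op's examples cheap to sanity-check (Example 2 shown here, plus a negative step):

```
import numpy as np

data = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])

print(data[0:-1, 1:1000])  # [[2 3 4]], Example 2: ends past n clamp to n
print(data[::-1, :])       # [[5 6 7 8] [1 2 3 4]], negative step slices backward
```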
 def ONNXSoftplusOp:ONNX_Op<"Softplus",
@@ -2871,7 +3149,7 @@ def ONNXSoftplusOp:ONNX_Op<"Softplus",
     "the tensor elementwise."
   }];
   let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y);
 }
 
 def ONNXSoftsignOp:ONNX_Op<"Softsign",
@@ -2881,7 +3159,7 @@ def ONNXSoftsignOp:ONNX_Op<"Softsign",
     "Calculates the softsign (x/(1+|x|)) of the given input tensor element-wise."
   }];
   let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output);
 }
 
 def ONNXSpaceToDepthOp:ONNX_Op<"SpaceToDepth",
@@ -2892,8 +3170,9 @@ def ONNXSpaceToDepthOp:ONNX_Op<"SpaceToDepth",
     "this op outputs a copy of the input tensor where values from the height and width dimensions"
     "are moved to the depth dimension."
   }];
-  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input,
+    I64Attr:$blocksize);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output);
 }
 
 def ONNXSplitOp:ONNX_Op<"Split",
@@ -2904,8 +3183,10 @@ def ONNXSplitOp:ONNX_Op<"Split",
     "'axis'. Lengths of the parts can be specified using argument 'split'."
    "Otherwise, the tensor is split to equal sized parts."
   }];
-  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input,
+    DefaultValuedAttr<I64Attr, "0">:$axis,
+    OptionalAttr<I64ArrayAttr>:$split);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_outputs);
 }
 
 def ONNXSplitToSequenceOp:ONNX_Op<"SplitToSequence",
@@ -2923,8 +3204,11 @@ def ONNXSplitToSequenceOp:ONNX_Op<"SplitToSequence",
     "specified in 'split'. In this scenario, the sum of entries in 'split' must be equal to the"
     "dimension size of input tensor on 'axis'."
   }];
-  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, AnyTypeOf<[AnyMemRef, AnyTensor]>:$split);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input,
+    AnyTypeOf<[AnyMemRef, AnyTensor]>:$split,
+    DefaultValuedAttr<I64Attr, "0">:$axis,
+    DefaultValuedAttr<I64Attr, "1">:$keepdims);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output_sequence);
 }
 
 def ONNXSqrtOp:ONNX_Op<"Sqrt",
@@ -2936,7 +3220,7 @@ def ONNXSqrtOp:ONNX_Op<"Sqrt",
     "the tensor elementwise. If x is negative, then it will return NaN."
   }];
   let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y);
 }
 
 def ONNXSqueezeOp:ONNX_Op<"Squeeze",
@@ -2948,8 +3232,9 @@ def ONNXSqueezeOp:ONNX_Op<"Squeeze",
     "If `axes` is not provided, all the single dimensions will be removed from"
     "the shape. If an axis is selected with shape entry not equal to one, an error is raised."
   }];
-  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data,
+    OptionalAttr<I64ArrayAttr>:$axes);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_squeezed);
 }
 
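For reference, the part sizes Split produces as described above can be computed with a tiny helper; this is a hedged sketch that assumes the axis dimension divides evenly when `split` is omitted:

#include <cassert>
#include <cstdint>
#include <vector>

// Computes the sizes of the output parts for Split as described above:
// if `split` is given its entries are used verbatim, otherwise the axis
// dimension is divided into `numOutputs` equal parts.
std::vector<int64_t> splitSizes(int64_t dimSize, int64_t numOutputs,
                                const std::vector<int64_t> &split = {}) {
  if (!split.empty())
    return split; // caller guarantees sum(split) == dimSize
  return std::vector<int64_t>(numOutputs, dimSize / numOutputs);
}

int main() {
  auto even = splitSizes(6, 3);          // no `split`: three equal parts
  assert(even == std::vector<int64_t>({2, 2, 2}));
  auto given = splitSizes(6, 2, {4, 2}); // explicit `split` lengths
  assert(given == std::vector<int64_t>({4, 2}));
  return 0;
}
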
 def ONNXStringNormalizerOp:ONNX_Op<"StringNormalizer",
@@ -2966,8 +3251,12 @@ def ONNXStringNormalizerOp:ONNX_Op<"StringNormalizer",
     "If all elements in X are dropped, the output will be the empty value of string tensor with shape [1]"
     "if input shape is [C] and shape [1, 1] if input shape is [1, C]."
   }];
-  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X,
+    DefaultValuedAttr<StrAttr, "NONE">:$case_change_action,
+    DefaultValuedAttr<I64Attr, "0">:$is_case_sensitive,
+    OptionalAttr<StrAttr>:$locale,
+    OptionalAttr<StrArrayAttr>:$stopwords);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y);
 }
 
 def ONNXSubOp:ONNX_Op<"Sub",
@@ -2978,8 +3267,9 @@ def ONNXSubOp:ONNX_Op<"Sub",
     ""
     "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)."
   }];
-  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A,
+    AnyTypeOf<[AnyMemRef, AnyTensor]>:$B);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_C);
 }
 
 def ONNXSumOp:ONNX_Op<"Sum",
@@ -2991,7 +3281,7 @@ def ONNXSumOp:ONNX_Op<"Sum",
     "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)."
   }];
   let arguments = (ins Variadic<AnyTypeOf<[AnyMemRef, AnyTensor]>>:$data_0);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_sum);
 }
 
 def ONNXTanOp:ONNX_Op<"Tan",
@@ -3001,7 +3291,7 @@ def ONNXTanOp:ONNX_Op<"Tan",
     "Calculates the tangent of the given input tensor, element-wise."
   }];
   let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output);
 }
 
 def ONNXTanhOp:ONNX_Op<"Tanh",
@@ -3011,7 +3301,7 @@ def ONNXTanhOp:ONNX_Op<"Tanh",
     "Calculates the hyperbolic tangent of the given input tensor element-wise."
   }];
   let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output);
 }
 
 def ONNXTfIdfVectorizerOp:ONNX_Op<"TfIdfVectorizer",
@@ -3046,8 +3336,17 @@ def ONNXTfIdfVectorizerOp:ONNX_Op<"TfIdfVectorizer",
     "Only one of pool_strings and pool_int64s can be set. If pool_int64s is set, the input should be an integer tensor."
     "If pool_strings is set, the input must be a string tensor."
   }];
-  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X,
+    I64Attr:$max_gram_length,
+    I64Attr:$max_skip_count,
+    I64Attr:$min_gram_length,
+    StrAttr:$mode,
+    I64ArrayAttr:$ngram_counts,
+    I64ArrayAttr:$ngram_indexes,
+    OptionalAttr<I64ArrayAttr>:$pool_int64s,
+    OptionalAttr<StrArrayAttr>:$pool_strings,
+    OptionalAttr<F32ArrayAttr>:$weights);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y);
 }
 
 def ONNXThresholdedReluOp:ONNX_Op<"ThresholdedRelu",
@@ -3058,8 +3357,9 @@ def ONNXThresholdedReluOp:ONNX_Op<"ThresholdedRelu",
     "(Tensor) where the rectified linear function, y = x for x > alpha, y = 0 otherwise,"
     "is applied to the tensor elementwise."
   }];
-  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X,
+    DefaultValuedAttr<F32Attr, "1.0">:$alpha);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y);
 }
 
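A quick check of the ThresholdedRelu rule above, using the ONNX-spec default alpha = 1.0 that the new attribute encodes (illustrative only, not part of the patch):

#include <cassert>

// ThresholdedRelu as described above: y = x for x > alpha, else 0.
float thresholdedRelu(float x, float alpha = 1.0f) {
  return x > alpha ? x : 0.0f;
}

int main() {
  assert(thresholdedRelu(2.0f) == 2.0f);
  assert(thresholdedRelu(1.0f) == 0.0f); // x == alpha is not > alpha
  return 0;
}
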
"For example A = [[1, 2], [3, 4]], B = [1, 2], tile(A, B) = [[1, 2, 1, 2], [3, 4, 3, 4]]" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, AnyTypeOf<[AnyMemRef, AnyTensor]>:$repeats); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$input, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$repeats); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output); } def ONNXTopKOp:ONNX_Op<"TopK", @@ -3093,8 +3394,12 @@ def ONNXTopKOp:ONNX_Op<"TopK", "Given two equivalent values, this operator uses the indices along the axis as" " a tiebreaker. That is, the element with the lower index will appear first." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, AnyTypeOf<[AnyMemRef, AnyTensor]>:$K); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>, AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + AnyTypeOf<[AnyMemRef, AnyTensor]>:$K, + DefaultValuedAttr:$axis, + DefaultValuedAttr:$largest, + DefaultValuedAttr:$sorted); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Values, AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Indices); } def ONNXTransposeOp:ONNX_Op<"Transpose", @@ -3105,14 +3410,9 @@ def ONNXTransposeOp:ONNX_Op<"Transpose", "perm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output shape" "will be (2, 1, 3)." }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); - - let extraClassDeclaration = [{ - static StringRef getPermAttrName() { return "perm"; } - }]; - - let verifier = [{ return ::verify(*this); }]; + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + OptionalAttr:$perm); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_transposed); } def ONNXUniqueOp:ONNX_Op<"Unique", @@ -3195,8 +3495,10 @@ def ONNXUniqueOp:ONNX_Op<"Unique", "" " output_counts = [2 1 1]" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>, AnyTypeOf<[AnyMemRef, AnyTensor]>, AnyTypeOf<[AnyMemRef, AnyTensor]>, AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, + OptionalAttr:$axis, + DefaultValuedAttr:$sorted); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y, AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_indices, AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_inverse_indices, AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_counts); } def ONNXUnsqueezeOp:ONNX_Op<"Unsqueeze", @@ -3216,8 +3518,9 @@ def ONNXUnsqueezeOp:ONNX_Op<"Unsqueeze", "The order of values in `axes` does not matter and can come in any order. " "" }]; - let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data); - let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>); + let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$data, + DefaultValuedAttr:$axes); + let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_expanded); } def ONNXUpsampleOp:ONNX_Op<"Upsample", @@ -3228,8 +3531,10 @@ def ONNXUpsampleOp:ONNX_Op<"Upsample", "Each dimension value of the output tensor is:" " output_dimension = floor(input_dimension * scale)." 
 def ONNXUpsampleOp:ONNX_Op<"Upsample",
@@ -3228,8 +3531,10 @@ def ONNXUpsampleOp:ONNX_Op<"Upsample",
     "Each dimension value of the output tensor is:"
     "  output_dimension = floor(input_dimension * scale)."
   }];
-  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, AnyTypeOf<[AnyMemRef, AnyTensor]>:$scales);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$X,
+    AnyTypeOf<[AnyMemRef, AnyTensor]>:$scales,
+    DefaultValuedAttr<StrAttr, "nearest">:$mode);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_Y);
 }
 
 def ONNXWhereOp:ONNX_Op<"Where",
@@ -3241,8 +3546,10 @@ def ONNXWhereOp:ONNX_Op<"Where",
     " Where behaves like numpy.where with three parameters:"
     " https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html"
   }];
-  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$condition, AnyTypeOf<[AnyMemRef, AnyTensor]>:$X, AnyTypeOf<[AnyMemRef, AnyTensor]>:$Y);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$condition,
+    AnyTypeOf<[AnyMemRef, AnyTensor]>:$X,
+    AnyTypeOf<[AnyMemRef, AnyTensor]>:$Y);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_output);
 }
 
 def ONNXXorOp:ONNX_Op<"Xor",
@@ -3254,7 +3561,8 @@ def ONNXXorOp:ONNX_Op<"Xor",
     ""
     "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)."
   }];
-  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A, AnyTypeOf<[AnyMemRef, AnyTensor]>:$B);
-  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>);
+  let arguments = (ins AnyTypeOf<[AnyMemRef, AnyTensor]>:$A,
+    AnyTypeOf<[AnyMemRef, AnyTensor]>:$B);
+  let results = (outs AnyTypeOf<[AnyMemRef, AnyTensor]>:$o_C);
 }
diff --git a/src/pass/onnx_combine.td b/src/pass/onnx_combine.td
index 25a4656..c1a8385 100644
--- a/src/pass/onnx_combine.td
+++ b/src/pass/onnx_combine.td
@@ -30,9 +30,14 @@ def HasOneUse : Constraint<CPred<"$0.hasOneUse()">>;
 // Pattern-Match and Rewrite
 //===----------------------------------------------------------------------===//
 
+def GemmAlpha : NativeCodeCall<"$_builder.getF32FloatAttr(1.0)">;
+def GemmBeta : NativeCodeCall<"$_builder.getF32FloatAttr(1.0)">;
+def GemmTransA : NativeCodeCall<"$_builder.getI64IntegerAttr(0)">;
+def GemmTransB : NativeCodeCall<"$_builder.getI64IntegerAttr(0)">;
+
 // onnx.add(onnx.matmul(%X, %Y), %Z) = onnx.Gemm(%X, %Y, %Z)
 def MulAddToGemmOptPattern : Pat<(ONNXAddOp (ONNXMatMulOp:$res $m1, $m2), $m3),
-                                 (ONNXGemmOp $m1, $m2, $m3),
+                                 (ONNXGemmOp $m1, $m2, $m3, (GemmAlpha), (GemmBeta), (GemmTransA), (GemmTransB)),
                                  [(HasOneUse $res)]>;
 
 // ONNX_Op (onnx.Identity (%X)) = ONNX_Op (%X)
diff --git a/test/mlir/onnx/onnx_canonicalization.mlir b/test/mlir/onnx/onnx_canonicalization.mlir
index 75fd4a5..9697c8e 100644
--- a/test/mlir/onnx/onnx_canonicalization.mlir
+++ b/test/mlir/onnx/onnx_canonicalization.mlir
@@ -2,7 +2,7 @@
 
 func @test_matmul_add_simplification(%a0: tensor<10x10xf32>, %a1: tensor<10x10xf32>, %a2: tensor<10x10xf32>) -> tensor<10x10xf32> {
   // CHECK-LABEL: test_matmul_add_simplification
-  // CHECK: %{{[0-9]+}} = "onnx.Gemm"(%{{.*}}, %{{.*}}, %{{.*}}) : (tensor<10x10xf32>, tensor<10x10xf32>, tensor<10x10xf32>) -> tensor<10x10xf32>
+  // CHECK: %{{[0-9]+}} = "onnx.Gemm"(%{{.*}}, %{{.*}}, %{{.*}}) {alpha = 1.000000e+00 : f32, beta = 1.000000e+00 : f32, transA = 0 : i64, transB = 0 : i64} : (tensor<10x10xf32>, tensor<10x10xf32>, tensor<10x10xf32>) -> tensor<10x10xf32>
   %0 = "onnx.MatMul"(%a0, %a1) : (tensor<10x10xf32>, tensor<10x10xf32>) -> tensor<10x10xf32>
   %1 = "onnx.Add"(%0, %a2) : (tensor<10x10xf32>, tensor<10x10xf32>) -> tensor<10x10xf32>
   "std.return"(%1) : (tensor<10x10xf32>) -> ()
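The rewrite above now materializes Gemm's ONNX-spec defaults (alpha = beta = 1.0, transA = transB = 0), as the updated CHECK line verifies. Under these defaults Gemm(A, B, C) computes alpha * (A x B) + beta * C, which reduces to the MatMul + Add pair being replaced, so the pattern is value-preserving. A scalar-level illustration (hypothetical helper, not part of the patch):

#include <cassert>

// Gemm on 1x1 operands: alpha * (A x B) + beta * C.
float gemm1x1(float a, float b, float c, float alpha = 1.0f,
              float beta = 1.0f) {
  return alpha * (a * b) + beta * c;
}

int main() {
  float a = 2.0f, b = 3.0f, c = 4.0f;
  // With the defaults, Gemm equals the MatMul + Add it replaces.
  assert(gemm1x1(a, b, c) == a * b + c);
  return 0;
}
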
diff --git a/test/mlir/onnx/onnx_lowering.mlir b/test/mlir/onnx/onnx_lowering.mlir
index 65120b6..2b8c52e 100644
--- a/test/mlir/onnx/onnx_lowering.mlir
+++ b/test/mlir/onnx/onnx_lowering.mlir
@@ -579,7 +579,7 @@ func @test_add_with_broadcasting(%arg0 : tensor, %arg1 : tensor
 }
 
 func @test_softmax(%arg0 : tensor<10x10xf32>) -> tensor<*xf32> {
-  %0 = "onnx.Softmax"(%arg0) {axis=1:i32} : (tensor<10x10xf32>) -> tensor<*xf32>
+  %0 = "onnx.Softmax"(%arg0) {axis=1:i64} : (tensor<10x10xf32>) -> tensor<*xf32>
   "std.return"(%0) : (tensor<*xf32>) -> ()
 
   // CHECK-LABEL: test_softmax
diff --git a/test/mlir/onnx/onnx_shape_inference.mlir b/test/mlir/onnx/onnx_shape_inference.mlir
index 656866e..6334c5f 100644
--- a/test/mlir/onnx/onnx_shape_inference.mlir
+++ b/test/mlir/onnx/onnx_shape_inference.mlir
@@ -32,120 +32,120 @@ func @test_transpose(%arg0 : tensor<5x5x1x32xf32>) -> tensor<*xf32> {
 
 /// Default and required attributes.
 
 func @test_conv_no_bias_1(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7xf32>) -> tensor<*xf32> {
-  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i32} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<*xf32>
+  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<*xf32>
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_conv_no_bias_1
-// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i32} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<1x5x27x58xf32>
+// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<1x5x27x58xf32>
 // CHECK: return [[RES_ATTR]] : tensor<1x5x27x58xf32>
 
 /// kernel_shape attribute.
 func @test_conv_no_bias_2(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7xf32>) -> tensor<*xf32> {
-  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i32, kernel_shape = [8, 9]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<*xf32>
+  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i64, kernel_shape = [8, 9]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<*xf32>
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_conv_no_bias_2
-// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i32, kernel_shape = [8, 9]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<1x5x25x56xf32>
+// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i64, kernel_shape = [8, 9]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<1x5x25x56xf32>
 // CHECK: return [[RES_ATTR]] : tensor<1x5x25x56xf32>
 
 /// pads attribute.
 /// Use pads to make output size equal to input size by adding K - 1 to the result.
 func @test_conv_no_bias_3(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x10xf32>) -> tensor<*xf32> {
-  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i32, pads = [2, 4, 3, 5]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>) -> tensor<*xf32>
+  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i64, pads = [2, 4, 3, 5]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>) -> tensor<*xf32>
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_conv_no_bias_3
-// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i32, pads = [2, 4, 3, 5]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>) -> tensor<1x5x32x64xf32>
+// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i64, pads = [2, 4, 3, 5]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>) -> tensor<1x5x32x64xf32>
 // CHECK: return [[RES_ATTR]] : tensor<1x5x32x64xf32>
 
 /// auto_pad set to SAME_UPPER and SAME_LOWER.
 func @test_conv_no_bias_4(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x10xf32>) -> tensor<*xf32> {
-  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "SAME_UPPER", group = 1 : i32} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>) -> tensor<*xf32>
+  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "SAME_UPPER", group = 1 : i64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>) -> tensor<*xf32>
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_conv_no_bias_4
-// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "SAME_UPPER", group = 1 : i32} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>) -> tensor<1x5x32x64xf32>
+// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "SAME_UPPER", group = 1 : i64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>) -> tensor<1x5x32x64xf32>
 // CHECK: return [[RES_ATTR]] : tensor<1x5x32x64xf32>
 
 func @test_conv_no_bias_5(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x10xf32>) -> tensor<*xf32> {
-  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "SAME_LOWER", group = 1 : i32} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>) -> tensor<*xf32>
+  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "SAME_LOWER", group = 1 : i64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>) -> tensor<*xf32>
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_conv_no_bias_5
-// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "SAME_LOWER", group = 1 : i32} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>) -> tensor<1x5x32x64xf32>
+// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "SAME_LOWER", group = 1 : i64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>) -> tensor<1x5x32x64xf32>
 // CHECK: return [[RES_ATTR]] : tensor<1x5x32x64xf32>
 
 /// auto_pad set to VALID.
 func @test_conv_no_bias_6(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x10xf32>) -> tensor<*xf32> {
-  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "VALID", group = 1 : i32} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>) -> tensor<*xf32>
+  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "VALID", group = 1 : i64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>) -> tensor<*xf32>
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_conv_no_bias_6
-// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "VALID", group = 1 : i32} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>) -> tensor<1x5x27x55xf32>
+// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "VALID", group = 1 : i64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x10xf32>) -> tensor<1x5x27x55xf32>
 // CHECK: return [[RES_ATTR]] : tensor<1x5x27x55xf32>
 
 /// With strides attribute.
 func @test_conv_no_bias_7(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7xf32>) -> tensor<*xf32> {
-  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i32, strides = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<*xf32>
+  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i64, strides = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<*xf32>
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_conv_no_bias_7
-// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i32, strides = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<1x5x14x20xf32>
+// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i64, strides = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<1x5x14x20xf32>
 // CHECK: return [[RES_ATTR]] : tensor<1x5x14x20xf32>
 
 /// auto_pad set to SAME_UPPER with strides attribute.
 /// The auto_pad will pad as if stride is equal to 1.
 func @test_conv_no_bias_8(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7xf32>) -> tensor<*xf32> {
-  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "SAME_UPPER", group = 1 : i32, strides = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<*xf32>
+  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "SAME_UPPER", group = 1 : i64, strides = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<*xf32>
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_conv_no_bias_8
-// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "SAME_UPPER", group = 1 : i32, strides = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<1x5x16x22xf32>
+// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "SAME_UPPER", group = 1 : i64, strides = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<1x5x16x22xf32>
 // CHECK: return [[RES_ATTR]] : tensor<1x5x16x22xf32>
 
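The expected output shapes in the tests above follow the usual convolution size arithmetic: out = floor((in + pad_begin + pad_end - kernel) / stride) + 1, and for SAME_* auto_pad the padding is chosen as if the stride were 1, which yields ceil(in / stride). A small standalone C++ check (illustrative only, not part of the test suite):

#include <cassert>
#include <cstdint>

// Standard convolution output size for one spatial dimension:
// out = floor((in + padBegin + padEnd - kernel) / stride) + 1.
int64_t convOutDim(int64_t in, int64_t kernel, int64_t stride = 1,
                   int64_t padBegin = 0, int64_t padEnd = 0) {
  return (in + padBegin + padEnd - kernel) / stride + 1;
}

int main() {
  // test_conv_no_bias_1: 32x64 input, 6x7 kernel -> 27x58.
  assert(convOutDim(32, 6) == 27 && convOutDim(64, 7) == 58);
  // test_conv_no_bias_3: pads [2, 4, 3, 5] restore the 32x64 input size.
  assert(convOutDim(32, 6, 1, 2, 3) == 32 && convOutDim(64, 10, 1, 4, 5) == 64);
  // test_conv_no_bias_7: strides [2, 3] -> 14x20.
  assert(convOutDim(32, 6, 2) == 14 && convOutDim(64, 7, 3) == 20);
  // test_conv_no_bias_8: SAME_UPPER pads as if stride were 1, so the
  // output is ceil(in / stride): 16x22.
  assert((32 + 2 - 1) / 2 == 16 && (64 + 3 - 1) / 3 == 22);
  return 0;
}
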
 /// dilations attribute.
 func @test_conv_no_bias_9(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7xf32>) -> tensor<*xf32> {
-  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i32, dilations = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<*xf32>
+  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i64, dilations = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<*xf32>
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_conv_no_bias_9
-// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", dilations = [2, 3], group = 1 : i32} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<1x5x20x42xf32>
+// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", dilations = [2, 3], group = 1 : i64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<1x5x20x42xf32>
 // CHECK: return [[RES_ATTR]] : tensor<1x5x20x42xf32>
 
 /// dilations attribute with stride.
 func @test_conv_no_bias_10(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7xf32>) -> tensor<*xf32> {
-  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i32, dilations = [2, 3], strides = [2, 2]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<*xf32>
+  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", group = 1 : i64, dilations = [2, 3], strides = [2, 2]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<*xf32>
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_conv_no_bias_10
-// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", dilations = [2, 3], group = 1 : i32, strides = [2, 2]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<1x5x10x21xf32>
+// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "NOTSET", dilations = [2, 3], group = 1 : i64, strides = [2, 2]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<1x5x10x21xf32>
 // CHECK: return [[RES_ATTR]] : tensor<1x5x10x21xf32>
 
 /// dilations attribute with auto_pad set to SAME_UPPER.
 func @test_conv_no_bias_11(%arg0 : tensor<1x2x32x64xf32>, %arg1 : tensor<5x2x6x7xf32>) -> tensor<*xf32> {
-  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "SAME_UPPER", group = 1 : i32, dilations = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<*xf32>
+  %0 = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "SAME_UPPER", group = 1 : i64, dilations = [2, 3]} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<*xf32>
   "std.return"(%0) : (tensor<*xf32>) -> ()
 }
 
 // CHECK-LABEL: test_conv_no_bias_11
-// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "SAME_UPPER", dilations = [2, 3], group = 1 : i32} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<1x5x32x64xf32>
+// CHECK: [[RES_ATTR:%.+]] = "onnx.ConvNoBias"(%arg0, %arg1) {auto_pad = "SAME_UPPER", dilations = [2, 3], group = 1 : i64} : (tensor<1x2x32x64xf32>, tensor<5x2x6x7xf32>) -> tensor<1x5x32x64xf32>
 // CHECK: return [[RES_ATTR]] : tensor<1x5x32x64xf32>