//********************************************************
// Do not modify this file directly.
// This file is automatically generated via script.
// Details can be found in docs/ImportONNXDefs.md.
//********************************************************

def ONNXAbsOp : ONNX_Op<"Abs",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX Abs operation";
  let description = [{
  "Absolute takes one input data (Tensor<T>) and produces one output data"
  "(Tensor<T>) where the absolute value, y = abs(x), is applied to"
  "the tensor elementwise."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X);
  let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$Y);
  let builders = [
    OpBuilder<"OpBuilder &builder, OperationState &state, Value X", [{
      auto elementType = X.getType().cast<TensorType>().getElementType();
      build(builder, state, UnrankedTensorType::get(elementType), X);
    }]>,
    OpBuilder<"OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
      auto elementType = operands[0].getType().cast<TensorType>().getElementType();
      std::vector<mlir::Type> outputTypes;
      outputTypes.emplace_back(UnrankedTensorType::get(elementType));
      build(builder, state, outputTypes, operands, attributes);
    }]>
  ];
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}
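
// Illustrative usage (editor's note, not part of the generated spec):
// onnx.Abs in generic MLIR assembly form; the tensor shape is assumed.
//
//   %Y = "onnx.Abs"(%X) : (tensor<3x2xf32>) -> tensor<3x2xf32>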

def ONNXAcosOp : ONNX_Op<"Acos",
  [NoSideEffect]> {
  let summary = "ONNX Acos operation";
  let description = [{
  "Calculates the arccosine (inverse of cosine) of the given input tensor, element-wise."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXAcoshOp : ONNX_Op<"Acosh",
  [NoSideEffect]> {
  let summary = "ONNX Acosh operation";
  let description = [{
  "Calculates the hyperbolic arccosine of the given input tensor element-wise."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXAddOp : ONNX_Op<"Add",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let hasCanonicalizer = 1;
  let summary = "ONNX Add operation";
  let description = [{
  "Performs element-wise binary addition (with Numpy-style broadcasting support)."
  ""
  "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$A,
    AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$B);
  let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$C);
  let builders = [
    OpBuilder<"OpBuilder &builder, OperationState &state, Value A, Value B", [{
      auto lhsTy = A.getType();
      auto rhsTy = B.getType();
      auto elementType = getBroadcastedType(lhsTy, rhsTy);
      auto shapedType = elementType.dyn_cast_or_null<ShapedType>();
      if (!shapedType || !shapedType.hasStaticShape()) {
        elementType = A.getType().cast<TensorType>().getElementType();
        elementType = UnrankedTensorType::get(elementType);
      }
      build(builder, state, elementType, A, B);
    }]>,
    OpBuilder<"OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
      auto lhsTy = operands[0].getType();
      auto rhsTy = operands[1].getType();
      auto elementType = getBroadcastedType(lhsTy, rhsTy);
      auto shapedType = elementType.dyn_cast_or_null<ShapedType>();
      if (!shapedType || !shapedType.hasStaticShape()) {
        elementType = operands[0].getType().cast<TensorType>().getElementType();
        elementType = UnrankedTensorType::get(elementType);
      }
      std::vector<mlir::Type> outputTypes;
      outputTypes.emplace_back(elementType);
      build(builder, state, outputTypes, operands, attributes);
    }]>
  ];
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 2;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}
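
// Illustrative usage (editor's note, shapes assumed): Numpy-style
// multidirectional broadcasting combines a 3x1 operand with a 1x4 operand.
//
//   %C = "onnx.Add"(%A, %B) : (tensor<3x1xf32>, tensor<1x4xf32>) -> tensor<3x4xf32>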

def ONNXAndOp : ONNX_Op<"And",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX And operation";
  let description = [{
  "Returns the tensor resulting from performing the `and` logical operation"
  "elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support)."
  ""
  "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[I1]>, AnyMemRef]>:$A,
    AnyTypeOf<[TensorOf<[I1]>, AnyMemRef]>:$B);
  let results = (outs AnyTypeOf<[TensorOf<[I1]>, AnyMemRef]>:$C);
  let builders = [
    OpBuilder<"OpBuilder &builder, OperationState &state, Value A, Value B", [{
      auto lhsTy = A.getType();
      auto rhsTy = B.getType();
      auto elementType = getBroadcastedType(lhsTy, rhsTy);
      auto shapedType = elementType.dyn_cast_or_null<ShapedType>();
      if (!shapedType || !shapedType.hasStaticShape()) {
        elementType = A.getType().cast<TensorType>().getElementType();
        elementType = UnrankedTensorType::get(elementType);
      }
      build(builder, state, elementType, A, B);
    }]>,
    OpBuilder<"OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
      auto lhsTy = operands[0].getType();
      auto rhsTy = operands[1].getType();
      auto elementType = getBroadcastedType(lhsTy, rhsTy);
      auto shapedType = elementType.dyn_cast_or_null<ShapedType>();
      if (!shapedType || !shapedType.hasStaticShape()) {
        elementType = operands[0].getType().cast<TensorType>().getElementType();
        elementType = UnrankedTensorType::get(elementType);
      }
      std::vector<mlir::Type> outputTypes;
      outputTypes.emplace_back(elementType);
      build(builder, state, outputTypes, operands, attributes);
    }]>
  ];
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 2;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {0};
    }
  }];
}

def ONNXArgMaxOp : ONNX_Op<"ArgMax",
  [NoSideEffect]> {
  let summary = "ONNX ArgMax operation";
  let description = [{
  "Computes the indices of the max elements of the input tensor along the"
  "provided axis. The resulting tensor has the same rank as the input if keepdims equals 1."
  "If keepdims equals 0, the resulting tensor has the reduced dimension pruned."
  "The type of the output tensor is integer."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$data,
    DefaultValuedAttr<I64Attr, "0">:$axis,
    DefaultValuedAttr<I64Attr, "1">:$keepdims);
  let results = (outs TensorOf<[I64]>:$reduced);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {4};
    }
  }];
}
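
// Illustrative usage (editor's note, shapes and attribute syntax assumed):
// with axis = 1 and keepdims = 1, a 3x4 input yields a 3x1 tensor of int64 indices.
//
//   %reduced = "onnx.ArgMax"(%data) {axis = 1 : i64, keepdims = 1 : i64}
//       : (tensor<3x4xf32>) -> tensor<3x1xi64>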

def ONNXArgMinOp : ONNX_Op<"ArgMin",
  [NoSideEffect]> {
  let summary = "ONNX ArgMin operation";
  let description = [{
  "Computes the indices of the min elements of the input tensor along the"
  "provided axis. The resulting tensor has the same rank as the input if keepdims equals 1."
  "If keepdims equals 0, the resulting tensor has the reduced dimension pruned."
  "The type of the output tensor is integer."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$data,
    DefaultValuedAttr<I64Attr, "0">:$axis,
    DefaultValuedAttr<I64Attr, "1">:$keepdims);
  let results = (outs TensorOf<[I64]>:$reduced);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {4};
    }
  }];
}

def ONNXAsinOp : ONNX_Op<"Asin",
  [NoSideEffect]> {
  let summary = "ONNX Asin operation";
  let description = [{
  "Calculates the arcsine (inverse of sine) of the given input tensor, element-wise."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXAsinhOp : ONNX_Op<"Asinh",
  [NoSideEffect]> {
  let summary = "ONNX Asinh operation";
  let description = [{
  "Calculates the hyperbolic arcsine of the given input tensor element-wise."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXAtanOp : ONNX_Op<"Atan",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX Atan operation";
  let description = [{
  "Calculates the arctangent (inverse of tangent) of the given input tensor, element-wise."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXAtanhOp : ONNX_Op<"Atanh",
  [NoSideEffect]> {
  let summary = "ONNX Atanh operation";
  let description = [{
  "Calculates the hyperbolic arctangent of the given input tensor element-wise."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXAveragePoolOp : ONNX_Op<"AveragePool",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX AveragePool operation";
  let description = [{
  "AveragePool consumes an input tensor X and applies average pooling across"
  "the tensor according to kernel sizes, stride sizes, and pad lengths."
  "Average pooling consists of computing the average over all values of a"
  "subset of the input tensor according to the kernel size and downsampling the"
  "data into the output tensor Y for further processing. The output spatial shape will be the following:"
  "```"
  "output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)"
  "```"
  "or"
  "```"
  "output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)"
  "```"
  "if ceil_mode is enabled."
  ""
  "```"
  "* pad_shape[i] is the sum of pads along axis i"
  "```"
  ""
  "`auto_pad` is a DEPRECATED attribute. If you are using it currently, the output spatial shape will be the following:"
  "```"
  "VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])"
  "SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])"
  "```"
  "And the pad shape will be the following if `SAME_UPPER` or `SAME_LOWER`:"
  "```"
  "pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]"
  "```"
  "The output of each pooling window is divided by the number of elements (excluding pads when the attribute count_include_pad is zero)."
  ""
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X,
    DefaultValuedAttr<StrAttr, "NOTSET">:$auto_pad,
    DefaultValuedAttr<I64Attr, "0">:$ceil_mode,
    DefaultValuedAttr<I64Attr, "0">:$count_include_pad,
    I64ArrayAttr:$kernel_shape,
    OptionalAttr<I64ArrayAttr>:$pads,
    OptionalAttr<I64ArrayAttr>:$strides);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$Y);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}
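
// Worked example (editor's note, values assumed): for input_spatial_shape = [32],
// kernel_spatial_shape = [3], pad_shape = [2] (one on each side),
// strides_spatial_shape = [2], and ceil_mode disabled:
//   output_spatial_shape[0] = floor((32 + 2 - 3) / 2 + 1) = floor(16.5) = 16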

def ONNXBatchNormalizationOp : ONNX_Op<"BatchNormalization",
  [NoSideEffect]> {
  let summary = "ONNX BatchNormalization operation";
  let description = [{
  "Carries out batch normalization as described in the paper"
  "https://arxiv.org/abs/1502.03167. Depending on the mode in which it is being run,"
  "there are multiple cases for the number of outputs, which we list below:"
  ""
  "Output case #1: Y, mean, var, saved_mean, saved_var (training mode)"
  "Output case #2: Y (test mode)"
  ""
  "For previous (deprecated) non-spatial cases, implementors are suggested"
  "to flatten the input shape to (N x C*D1*D2*...*Dn) before a BatchNormalization Op."
  "This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X,
    AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$scale,
    AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$B,
    AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$mean,
    AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$var,
    DefaultValuedAttr<F32Attr, "1e-05">:$epsilon,
    DefaultValuedAttr<F32Attr, "0.9">:$momentum);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$Y,
    AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$out_mean,
    AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$out_var,
    AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$saved_mean,
    AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$saved_var);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 5;
    }
    static int getNumberOfResults() {
      return 5;
    }
    static std::vector<int> getTypeMap() {
      return {20, 20, 20, 20, 20};
    }
  }];
}
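
// Illustrative usage (editor's note, shapes assumed): in test mode only the
// first output Y is used; the remaining optional results are none-typed.
//
//   %Y, %om, %ov, %sm, %sv = "onnx.BatchNormalization"(%X, %scale, %B, %mean, %var)
//       {epsilon = 1.0e-05 : f32}
//       : (tensor<1x8x16x16xf32>, tensor<8xf32>, tensor<8xf32>, tensor<8xf32>,
//          tensor<8xf32>) -> (tensor<1x8x16x16xf32>, none, none, none, none)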

def ONNXBitShiftOp : ONNX_Op<"BitShift",
  [NoSideEffect]> {
  let summary = "ONNX BitShift operation";
  let description = [{
  "Bitwise shift operator performs element-wise operation. For each input element, if the"
  "attribute \"direction\" is \"RIGHT\", this operator moves its binary representation toward"
  "the right side so that the input value is effectively decreased. If the attribute \"direction\""
  "is \"LEFT\", bits of the binary representation move toward the left side, which results in an"
  "increase of its actual value. The input X is the tensor to be shifted and another input"
  "Y specifies the amounts of shifting. For example, if \"direction\" is \"RIGHT\", X is [1, 4],"
  "and Y is [1, 1], the corresponding output Z would be [0, 2]. If \"direction\" is \"LEFT\" with"
  "X=[1, 2] and Y=[1, 2], the corresponding output Z would be [2, 8]."
  ""
  "Because this operator supports Numpy-style broadcasting, X's and Y's shapes are"
  "not necessarily identical."
  "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, AnyMemRef]>:$X,
    AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, AnyMemRef]>:$Y,
    StrAttr:$direction);
  let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, AnyMemRef]>:$Z);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 2;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXCastOp : ONNX_Op<"Cast",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>, OpInterface<"ResultTypeInferenceOpInterface">]> {
  let hasCanonicalizer = 1;
  let summary = "ONNX Cast operation";
  let description = [{
  "The operator casts the elements of a given input tensor to a data type"
  "specified by the 'to' argument and returns an output tensor of the same size in"
  "the converted type. The 'to' argument must be one of the data types specified"
  "in the 'DataType' enum field in the TensorProto message."
  ""
  "Casting from string tensors in plain (e.g., \"3.14\" and \"1000\") and scientific numeric representations"
  "(e.g., \"1e-5\" and \"1E8\") to float types is supported. For example, converting string \"100.5\" to an integer may"
  "result in 100. There are some string literals reserved for special floating-point values;"
  "\"+INF\" (and \"INF\"), \"-INF\", and \"NaN\" are positive infinity, negative infinity, and not-a-number, respectively."
  "Any string which exactly matches \"+INF\" in a case-insensitive way is mapped to positive infinity. Similarly,"
  "this case-insensitive rule is applied to \"INF\" and \"NaN\". When casting from numeric tensors"
  "to string tensors, the plain floating-point representation (such as \"314.15926\") is used."
  "Converting a non-numeric-literal string such as \"Hello World!\" is undefined behavior. Converting a string"
  "representing a floating-point value, such as \"2.718\", to INT is likewise undefined behavior."
  ""
  "Conversion from a numerical type to any numerical type is always allowed."
  "Users must be aware of precision loss and value change caused by range differences between the two types."
  "For example, a 64-bit float 3.1415926459 may be rounded to a 32-bit float 3.141592. Similarly, converting"
  "an integer 36 to Boolean may produce 1 because we truncate bits which can't be stored in the targeted type."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I1]>, TensorOf<[StringType]>, AnyMemRef]>:$input,
    I64Attr:$to);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I1]>, TensorOf<[StringType]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {-1};
    }
    std::vector<mlir::Type> resultTypeInference() {
      std::vector<mlir::Type> resultTypes;
      auto toAttr = to().getSExtValue();
      auto builder = mlir::OpBuilder(getContext());
      resultTypes.push_back(mlir::UnrankedTensorType::get(
        convertONNXTypeToMLIRType(builder, static_cast<onnx::TensorProto_DataType>(toAttr))));
      return resultTypes;
    }
  }];
  let builders = [
    OpBuilder<"OpBuilder &builder, OperationState &state, Value input, IntegerAttr to", [{
      auto toAttr = to.getValue().getSExtValue();
      auto resultType = mlir::UnrankedTensorType::get(
        convertONNXTypeToMLIRType(builder, static_cast<onnx::TensorProto_DataType>(toAttr)));
      build(builder, state, resultType, input, to);
    }]>
  ];
}
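
// Illustrative usage (editor's note): to = 1 selects FLOAT in the ONNX
// TensorProto_DataType enum, so this casts an int64 tensor to float32.
//
//   %output = "onnx.Cast"(%input) {to = 1 : i64} : (tensor<4xi64>) -> tensor<4xf32>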

def ONNXCeilOp : ONNX_Op<"Ceil",
  [NoSideEffect]> {
  let summary = "ONNX Ceil operation";
  let description = [{
  "Ceil takes one input data (Tensor<T>) and produces one output data"
  "(Tensor<T>) where the ceiling, y = ceil(x), is applied to"
  "the tensor elementwise."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$Y);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXClipOp : ONNX_Op<"Clip",
  [NoSideEffect]> {
  let summary = "ONNX Clip operation";
  let description = [{
  "Clip operator limits the given input within an interval. The interval is"
  "specified by the inputs 'min' and 'max'. They default to"
  "numeric_limits::lowest() and numeric_limits::max(), respectively."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input,
    AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$min,
    AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$max);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 3;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}
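
// Illustrative usage (editor's note, values assumed): clamping to [0, 6] with
// scalar min and max tensors (a ReLU6-style clip).
//
//   %output = "onnx.Clip"(%input, %min, %max)
//       : (tensor<5xf32>, tensor<f32>, tensor<f32>) -> tensor<5xf32>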

def ONNXCompressOp : ONNX_Op<"Compress",
  [NoSideEffect]> {
  let summary = "ONNX Compress operation";
  let description = [{
  "Selects slices from an input tensor along a given axis where condition evaluates to True for each axis index."
  "In case axis is not provided, input is flattened before elements are selected."
  "Compress behaves like numpy.compress: https://docs.scipy.org/doc/numpy/reference/generated/numpy.compress.html"
  ""
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$input,
    AnyTypeOf<[TensorOf<[I1]>, AnyMemRef]>:$condition,
    OptionalAttr<I64Attr>:$axis);
  let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 2;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}
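
// Illustrative usage (editor's note, values assumed): with axis = 0 and a
// condition of [false, true, true], rows 1 and 2 of a 3x2 input are kept. The
// compressed dimension is only known at runtime, hence the dynamic result dim.
//
//   %output = "onnx.Compress"(%input, %condition) {axis = 0 : i64}
//       : (tensor<3x2xf32>, tensor<3xi1>) -> tensor<?x2xf32>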

def ONNXConcatOp : ONNX_Op<"Concat",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX Concat operation";
  let description = [{
  "Concatenate a list of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on."
  }];
  let arguments = (ins Variadic<AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>>:$inputs,
    I64Attr:$axis);
  let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$concat_result);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return -1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}
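
// Illustrative usage (editor's note, shapes assumed): concatenation along
// axis 0; all dimensions except the concatenated one must match.
//
//   %concat = "onnx.Concat"(%a, %b) {axis = 0 : i64}
//       : (tensor<2x4xf32>, tensor<3x4xf32>) -> tensor<5x4xf32>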

def ONNXConcatFromSequenceOp : ONNX_Op<"ConcatFromSequence",
  [NoSideEffect]> {
  let summary = "ONNX ConcatFromSequence operation";
  let description = [{
  "Concatenate a sequence of tensors into a single tensor."
  "All input tensors must have the same shape, except for the dimension size of the axis to concatenate on."
  "By default 'new_axis' is 0, and the behavior is similar to numpy.concatenate."
  "When 'new_axis' is 1, the behavior is similar to numpy.stack."
  }];
  let arguments = (ins AnyTypeOf<[SeqOf<[TensorOf<[UI8]>]>, SeqOf<[TensorOf<[UI16]>]>, SeqOf<[TensorOf<[UI32]>]>, SeqOf<[TensorOf<[UI64]>]>, SeqOf<[TensorOf<[I8]>]>, SeqOf<[TensorOf<[I16]>]>, SeqOf<[TensorOf<[I32]>]>, SeqOf<[TensorOf<[I64]>]>, SeqOf<[TensorOf<[F16]>]>, SeqOf<[TensorOf<[F32]>]>, SeqOf<[TensorOf<[F64]>]>, SeqOf<[TensorOf<[StringType]>]>, SeqOf<[TensorOf<[I1]>]>, SeqOf<[TensorOf<[Complex<F32>]>]>, SeqOf<[TensorOf<[Complex<F64>]>]>, AnyMemRef]>:$input_sequence,
    I64Attr:$axis,
    DefaultValuedAttr<I64Attr, "0">:$new_axis);
  let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$concat_result);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {-1};
    }
  }];
}

def ONNXConstantOp : ONNX_Op<"Constant",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>, OpInterface<"ResultTypeInferenceOpInterface">]> {
  let summary = "ONNX Constant operation";
  let description = [{
  "A constant tensor. Exactly one of the two attributes, either value or sparse_value,"
  "must be specified."
  }];
  let arguments = (ins OptionalAttr<AnyAttr>:$sparse_value,
    OptionalAttr<AnyAttr>:$value);
  let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 0;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {-1};
    }
    std::vector<mlir::Type> resultTypeInference() {
      std::vector<mlir::Type> resultTypes;
      if (auto attr = valueAttr()) {
        resultTypes.push_back(attr.getType());
      } else if (auto attr = sparse_valueAttr()) {
        resultTypes.push_back(attr.getType());
      }
      return resultTypes;
    }
  }];
  let builders = [
    OpBuilder<"OpBuilder &builder, OperationState &state, Attribute sparse_value, Attribute value", [{
      if (value) {
        auto tensorType = value.getType();
        build(builder, state, tensorType, sparse_value, value);
      } else {
        auto tensorType = sparse_value.getType();
        build(builder, state, tensorType, sparse_value, value);
      }
    }]>
  ];
}

def ONNXConstantOfShapeOp : ONNX_Op<"ConstantOfShape",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>, OpInterface<"ResultTypeInferenceOpInterface">]> {
  let summary = "ONNX ConstantOfShape operation";
  let description = [{
  "Generate a tensor with given value and shape."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[I64]>, AnyMemRef]>:$input,
    OptionalAttr<AnyAttr>:$value);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I1]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {-1};
    }
    std::vector<mlir::Type> resultTypeInference() {
      std::vector<mlir::Type> resultTypes;
      if (auto attr = valueAttr()) {
        resultTypes.push_back(mlir::UnrankedTensorType::get(
          attr.getType().cast<ShapedType>().getElementType()));
      } else {
        resultTypes.push_back(mlir::UnrankedTensorType::get(
          FloatType::getF32(getContext())));
      }
      return resultTypes;
    }
  }];
}
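
// Illustrative usage (editor's note, attribute syntax assumed): 'input' holds
// the target shape; 'value' supplies the one-element fill tensor, defaulting
// to float32 zero when omitted.
//
//   %output = "onnx.ConstantOfShape"(%shape) {value = dense<1.0> : tensor<1xf32>}
//       : (tensor<2xi64>) -> tensor<?x?xf32>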

def ONNXConvOp : ONNX_Op<"Conv",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let hasCanonicalizer = 1;
  let summary = "ONNX Conv operation";
  let description = [{
  "The convolution operator consumes an input tensor and a filter, and"
  "computes the output."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X,
    AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$W,
    AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$B,
    DefaultValuedAttr<StrAttr, "NOTSET">:$auto_pad,
    OptionalAttr<I64ArrayAttr>:$dilations,
    DefaultValuedAttr<I64Attr, "1">:$group,
    OptionalAttr<I64ArrayAttr>:$kernel_shape,
    OptionalAttr<I64ArrayAttr>:$pads,
    OptionalAttr<I64ArrayAttr>:$strides);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$Y);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 3;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}
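
// Shape example (editor's note, NCHW layout, values assumed): a 3x3 kernel
// with unit strides, unit dilations, and no padding maps 32x32 spatial dims
// to 30x30 (32 - 3 + 1 = 30).
//
//   %Y = "onnx.Conv"(%X, %W, %B) {kernel_shape = [3, 3]}
//       : (tensor<1x3x32x32xf32>, tensor<8x3x3x3xf32>, tensor<8xf32>)
//         -> tensor<1x8x30x30xf32>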

def ONNXConvIntegerOp : ONNX_Op<"ConvInteger",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX ConvInteger operation";
  let description = [{
  "The integer convolution operator consumes an input tensor, its zero-point, a filter, and its zero-point,"
  "and computes the output. The production MUST never overflow. The accumulation may overflow if performed in 32 bits."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, AnyMemRef]>:$x,
    AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, AnyMemRef]>:$w,
    AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, AnyMemRef, NoneType]>:$x_zero_point,
    AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, AnyMemRef, NoneType]>:$w_zero_point,
    DefaultValuedAttr<StrAttr, "NOTSET">:$auto_pad,
    OptionalAttr<I64ArrayAttr>:$dilations,
    DefaultValuedAttr<I64Attr, "1">:$group,
    OptionalAttr<I64ArrayAttr>:$kernel_shape,
    OptionalAttr<I64ArrayAttr>:$pads,
    OptionalAttr<I64ArrayAttr>:$strides);
  let results = (outs AnyTypeOf<[TensorOf<[I32]>, AnyMemRef]>:$y);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 4;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {3};
    }
  }];
}

def ONNXConvTransposeOp : ONNX_Op<"ConvTranspose",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX ConvTranspose operation";
  let description = [{
  "The convolution transpose operator consumes an input tensor and a filter,"
  "and computes the output."
  ""
  "If the pads parameter is provided, the shape of the output is calculated via the following equation:"
  ""
  "output_shape[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - pads[start_i] - pads[end_i]"
  ""
  "output_shape can also be explicitly specified, in which case pads values are auto-generated using these equations:"
  ""
  "total_padding[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - output_shape[i]"
  "If (auto_pads != SAME_UPPER): pads[start_i] = total_padding[i]/2; pads[end_i] = total_padding[i] - (total_padding[i]/2)"
  "Else: pads[start_i] = total_padding[i] - (total_padding[i]/2); pads[end_i] = (total_padding[i]/2)."
  ""
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X,
    AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$W,
    AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$B,
    DefaultValuedAttr<StrAttr, "NOTSET">:$auto_pad,
    OptionalAttr<I64ArrayAttr>:$dilations,
    DefaultValuedAttr<I64Attr, "1">:$group,
    OptionalAttr<I64ArrayAttr>:$kernel_shape,
    OptionalAttr<I64ArrayAttr>:$output_padding,
    OptionalAttr<I64ArrayAttr>:$output_shape,
    OptionalAttr<I64ArrayAttr>:$pads,
    OptionalAttr<I64ArrayAttr>:$strides);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$Y);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 3;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}
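
// Worked example (editor's note, values assumed): stride = 2, input_size = 3,
// kernel_shape = 3, dilations = 1, output_padding = 0, pads = [0, 0]:
//   output_shape = 2 * (3 - 1) + 0 + ((3 - 1) * 1 + 1) - 0 - 0 = 7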

def ONNXCosOp : ONNX_Op<"Cos",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX Cos operation";
  let description = [{
  "Calculates the cosine of the given input tensor, element-wise."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXCoshOp : ONNX_Op<"Cosh",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX Cosh operation";
  let description = [{
  "Calculates the hyperbolic cosine of the given input tensor element-wise."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXCumSumOp : ONNX_Op<"CumSum",
  [NoSideEffect]> {
  let summary = "ONNX CumSum operation";
  let description = [{
  "Performs cumulative sum of the input elements along the given axis."
  "By default, it will do the sum inclusively, meaning the first element is copied as is."
  "Through an `exclusive` attribute, this behavior can change to exclude the first element."
  "It can also perform summation in the opposite direction of the axis. For that, set the `reverse` attribute to 1."
  ""
  "Example:"
  "```"
  "input_x = [1, 2, 3]"
  "axis=0"
  "output = [1, 3, 6]"
  "exclusive=1"
  "output = [0, 1, 3]"
  "exclusive=0"
  "reverse=1"
  "output = [6, 5, 3]"
  "exclusive=1"
  "reverse=1"
  "output = [5, 3, 0]"
  "```"
  ""
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$x,
    AnyTypeOf<[TensorOf<[I32]>, TensorOf<[I64]>, AnyMemRef]>:$axis,
    DefaultValuedAttr<I64Attr, "0">:$exclusive,
    DefaultValuedAttr<I64Attr, "0">:$reverse);
  let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$y);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 2;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXDepthToSpaceOp : ONNX_Op<"DepthToSpace",
  [NoSideEffect]> {
  let summary = "ONNX DepthToSpace operation";
  let description = [{
  "DepthToSpace rearranges (permutes) data from depth into blocks of spatial data."
  "This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of"
  "the input tensor where values from the depth dimension are moved in spatial blocks to the height"
  "and width dimensions. By default, `mode` = `DCR`."
  "In the DCR mode, elements along the depth dimension from the input tensor are rearranged in the"
  "following order: depth, column, and then row. The output y is computed from the input x as below:"
  ""
  "b, c, h, w = x.shape"
  ""
  "tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])"
  ""
  "tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])"
  ""
  "y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])"
  ""
  ""
  "In the CRD mode, elements along the depth dimension from the input tensor are rearranged in the"
  "following order: column, row, and then depth. The output y is computed from the input x as below:"
  ""
  "b, c, h, w = x.shape"
  ""
  "tmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w])"
  ""
  "tmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3])"
  ""
  "y = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize])"
  ""
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$input,
    I64Attr:$blocksize,
    DefaultValuedAttr<StrAttr, "DCR">:$mode);
  let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}
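
// Shape example (editor's note, values assumed): with blocksize = 2, an input
// of shape [1, 8, 2, 3] becomes [1, 8/4, 2*2, 3*2] = [1, 2, 4, 6].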

def ONNXDequantizeLinearOp : ONNX_Op<"DequantizeLinear",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX DequantizeLinear operation";
  let description = [{
  "The linear dequantization operator. It consumes a quantized tensor, a scale, and a zero point to compute the full-precision tensor."
  "The dequantization formula is y = (x - x_zero_point) * x_scale. 'x_scale' and 'x_zero_point' must have the same shape."
  "'x_zero_point' and 'x' must have the same type. 'x' and 'y' must have the same shape. In the case of dequantizing int32,"
  "there's no zero point (the zero point is supposed to be 0)."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, TensorOf<[I32]>, AnyMemRef]>:$x,
    TensorOf<[F32]>:$x_scale,
    AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, TensorOf<[I32]>, AnyMemRef, NoneType]>:$x_zero_point);
  let results = (outs TensorOf<[F32]>:$y);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 3;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {7};
    }
  }];
}
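
// Worked example (editor's note, values assumed): for a uint8 element
// x = 130 with x_scale = 0.5 and x_zero_point = 128:
//   y = (130 - 128) * 0.5 = 1.0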

def ONNXDetOp : ONNX_Op<"Det",
  [NoSideEffect]> {
  let summary = "ONNX Det operation";
  let description = [{
  "Det calculates the determinant of a square matrix or batches of square matrices."
  "Det takes one input tensor of shape `[*, M, M]`, where `*` is zero or more batch dimensions,"
  "and the inner-most 2 dimensions form square matrices."
  "The output is a tensor of shape `[*]`, containing the determinants of all input submatrices."
  "e.g., When the input is 2-D, the output is a scalar (shape is empty: `[]`)."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$Y);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXDivOp : ONNX_Op<"Div",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX Div operation";
  let description = [{
  "Performs element-wise binary division (with Numpy-style broadcasting support)."
  ""
  "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$A,
    AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$B);
  let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$C);
  let builders = [
    OpBuilder<"OpBuilder &builder, OperationState &state, Value A, Value B", [{
      auto lhsTy = A.getType();
      auto rhsTy = B.getType();
      auto elementType = getBroadcastedType(lhsTy, rhsTy);
      auto shapedType = elementType.dyn_cast_or_null<ShapedType>();
      if (!shapedType || !shapedType.hasStaticShape()) {
        elementType = A.getType().cast<TensorType>().getElementType();
        elementType = UnrankedTensorType::get(elementType);
      }
      build(builder, state, elementType, A, B);
    }]>,
    OpBuilder<"OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
      auto lhsTy = operands[0].getType();
      auto rhsTy = operands[1].getType();
      auto elementType = getBroadcastedType(lhsTy, rhsTy);
      auto shapedType = elementType.dyn_cast_or_null<ShapedType>();
      if (!shapedType || !shapedType.hasStaticShape()) {
        elementType = operands[0].getType().cast<TensorType>().getElementType();
        elementType = UnrankedTensorType::get(elementType);
      }
      std::vector<mlir::Type> outputTypes;
      outputTypes.emplace_back(elementType);
      build(builder, state, outputTypes, operands, attributes);
    }]>
  ];
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 2;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}
2020-02-24 23:46:48 +08:00
def ONNXDropoutOp : ONNX_Op < " Dropout " ,
2020-08-07 13:08:00 +08:00
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
2019-11-19 10:08:21 +08:00
let summary = " ONNX Dropout operation " ;
let description = [{
2020-02-24 23:46:48 +08:00
" Dropout takes one input floating tensor and produces two tensor outputs, "
" output (floating tensor) and mask (`Tensor<bool>`). Depending on whether it is "
" in test mode or not, the output Y will either be a random dropout, or a simple "
" copy of the input. Note that our implementation of Dropout does scaling in "
" the training phase, so during testing nothing needs to be done. "
" This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $data ,
DefaultValuedAttr < F32Attr , " 0.5 " >: $ratio );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $output ,
AnyTypeOf < [ TensorOf < [ I1 ] > , AnyMemRef , NoneType ] >: $mask );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 2 ;
}
static std :: vector < int > getTypeMap () {
return { 20 , 0 };
}
}];
}
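// Illustrative commentary, not generated code: a training-mode dropout sketch
// matching the description above (scaling is done in the training phase, so
// inference is a plain copy). The RNG setup and mask convention (true = kept)
// are assumptions of this sketch.
//
//   #include <random>
//   #include <vector>
//   void dropoutTrain(std::vector<float> &x, std::vector<bool> &mask,
//                     float ratio, std::mt19937 &rng) {
//     std::bernoulli_distribution keep(1.0f - ratio);
//     mask.resize(x.size());
//     for (size_t i = 0; i < x.size(); ++i) {
//       mask[i] = keep(rng);
//       // Inverted dropout: scale kept values now so nothing is needed at test time.
//       x[i] = mask[i] ? x[i] / (1.0f - ratio) : 0.0f;
//     }
//   }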
def ONNXDynamicQuantizeLinearOp : ONNX_Op < " DynamicQuantizeLinear " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX DynamicQuantizeLinear operation " ;
let description = [{
" A Function to fuse calculation for Scale, Zero Point and FP32->8Bit convertion of FP32 Input data. "
" Outputs Scale, ZeroPoint and Quantized Input for a given FP32 Input. "
" Scale is calculated as: "
" ``` "
" y_scale = (max(x) - min(x))/(qmax - qmin) "
" * where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8 "
" * data range is adjusted to include 0. "
" ``` "
" Zero point is calculated as: "
" ``` "
" intermediate_zero_point = (qmin - min(x))/(qmax - qmin) "
" y_zero_point = cast(round(saturate(itermediate_zero_point))) "
" * where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8 "
" * for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported. "
" * rounding to nearest ties to even. "
" ``` "
" Data quantization formula is: "
" ``` "
" y = saturate (round (x / y_scale) + y_zero_point) "
" * for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported. "
" * rounding to nearest ties to even. "
" ``` "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F32 ] > , AnyMemRef ] >: $x );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , AnyMemRef ] >: $y ,
TensorOf < [ F32 ] >: $y_scale ,
AnyTypeOf < [ TensorOf < [ UI8 ] > , AnyMemRef ] >: $y_zero_point );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 3 ;
}
static std :: vector < int > getTypeMap () {
return { 1 , 7 , 1 };
}
}];
}
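// Illustrative commentary, not generated code: the quoted scale/zero-point
// formulas for the uint8 case as a C++ sketch (assumes C++17 for std::clamp;
// std::nearbyint rounds to nearest, ties to even, under the default FP mode).
//
//   #include <algorithm>
//   #include <cmath>
//   #include <cstdint>
//   #include <vector>
//   void dynamicQuantize(const std::vector<float> &x, std::vector<uint8_t> &y,
//                        float &scale, uint8_t &zeroPoint) {
//     const float qmin = 0.0f, qmax = 255.0f;
//     // The data range is adjusted so that it always includes 0.
//     float xMin = std::min(0.0f, *std::min_element(x.begin(), x.end()));
//     float xMax = std::max(0.0f, *std::max_element(x.begin(), x.end()));
//     scale = (xMax - xMin) / (qmax - qmin);
//     if (scale == 0.0f) scale = 1.0f; // guard the all-zero-input corner case
//     float zp = std::nearbyint(qmin - xMin / scale);
//     zeroPoint = (uint8_t)std::clamp(zp, qmin, qmax); // saturate
//     y.resize(x.size());
//     for (size_t i = 0; i < x.size(); ++i)
//       y[i] = (uint8_t)std::clamp(
//           std::nearbyint(x[i] / scale) + (float)zeroPoint, qmin, qmax);
//   }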
def ONNXEluOp : ONNX_Op < " Elu " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Elu operation " ;
let description = [{
" Elu takes one input data (Tensor<T>) and produces one output data "
" (Tensor<T>) where the function `f(x) = alpha * (exp(x) - 1.) for x < "
" 0`, `f(x) = x for x >= 0`., is applied to the tensor elementwise. "
" "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X ,
DefaultValuedAttr < F32Attr , " 1.0 " >: $alpha );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
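// Illustrative commentary, not generated code: the Elu formula above as a
// short C++ sketch.
//
//   #include <cmath>
//   float elu(float x, float alpha = 1.0f) {
//     return x >= 0.0f ? x : alpha * (std::exp(x) - 1.0f);
//   }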
def ONNXEqualOp : ONNX_Op < " Equal " ,
[ NoSideEffect ] > {
let summary = " ONNX Equal operation " ;
let description = [{
" Returns the tensor resulted from performing the `equal` logical operation "
" elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). "
" "
" This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ I1 ] > , TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $A ,
AnyTypeOf < [ TensorOf < [ I1 ] > , TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $B );
let results = ( outs AnyTypeOf < [ TensorOf < [ I1 ] > , AnyMemRef ] >: $C );
let builders = [
OpBuilder < " OpBuilder &builder, OperationState &state, Value A, Value B " , [{
auto lhsTy = A . getType ();
auto rhsTy = B . getType ();
auto elementType = getBroadcastedType ( lhsTy , rhsTy );
auto shapedType = elementType . dyn_cast_or_null < ShapedType > ();
if ( ! shapedType || ! shapedType . hasStaticShape ()) {
elementType = A . getType () . cast < TensorType > () . getElementType ();
elementType = UnrankedTensorType :: get ( elementType );
}
build ( builder , state , elementType , A , B );
}] > ,
OpBuilder < " OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes " , [{
auto lhsTy = operands [ 0 ] . getType ();
auto rhsTy = operands [ 1 ] . getType ();
auto elementType = getBroadcastedType ( lhsTy , rhsTy );
auto shapedType = elementType . dyn_cast_or_null < ShapedType > ();
if ( ! shapedType || ! shapedType . hasStaticShape ()) {
elementType = operands [ 0 ] . getType () . cast < TensorType > () . getElementType ();
elementType = UnrankedTensorType :: get ( elementType );
}
std :: vector < mlir :: Type > outputTypes ;
outputTypes . emplace_back ( elementType );
build ( builder , state , outputTypes , operands , attributes );
}] >
];
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 2 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 0 };
}
}];
}
def ONNXErfOp : ONNX_Op < " Erf " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Erf operation " ;
let description = [{
" Computes the error function of the given input tensor element-wise. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $input );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXExpOp : ONNX_Op < " Exp " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Exp operation " ;
let description = [{
" Calculates the exponential of the given input tensor, element-wise. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $input );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $output );
let builders = [
OpBuilder < " OpBuilder &builder, OperationState &state, Value input " , [{
auto elementType = input . getType () . cast < TensorType > () . getElementType ();
build ( builder , state , UnrankedTensorType :: get ( elementType ), input );
}] > ,
OpBuilder < " OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes " , [{
auto elementType = operands [ 0 ] . getType () . cast < TensorType > () . getElementType ();
std :: vector < mlir :: Type > outputTypes ;
outputTypes . emplace_back ( UnrankedTensorType :: get ( elementType ));
build ( builder , state , outputTypes , operands , attributes );
}] >
];
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXExpandOp : ONNX_Op < " Expand " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Expand operation " ;
let description = [{
" Broadcast the input tensor following the given shape and the broadcast rule. "
" The broadcast rule is similar to numpy.array(input) * numpy.ones(shape): "
" Dimensions are right alignment; "
" Two corresponding dimension must have the same value, or one of them is equal to 1. "
" Also, this operator is similar to numpy.broadcast_to(input, shape), "
" but the major difference is numpy.broadcast_to() does not allow shape to be smaller than input.size(). "
" It is possible that the output.shape is not equal to shape, when some dimensions in shape is equal to 1, "
" or the shape.ndim < input.shape.ndim. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $input ,
TensorOf < [ I64 ] >: $shape );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 2 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
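// Illustrative commentary, not generated code: the right-aligned broadcast
// rule above, as a C++ sketch that computes the output shape (hypothetical
// helper; throws on incompatible dimensions).
//
//   #include <algorithm>
//   #include <cstdint>
//   #include <stdexcept>
//   #include <vector>
//   std::vector<int64_t> broadcastShape(std::vector<int64_t> a,
//                                       std::vector<int64_t> b) {
//     // Right-align by padding the shorter shape with leading 1s.
//     while (a.size() < b.size()) a.insert(a.begin(), 1);
//     while (b.size() < a.size()) b.insert(b.begin(), 1);
//     std::vector<int64_t> out(a.size());
//     for (size_t i = 0; i < a.size(); ++i) {
//       if (a[i] != b[i] && a[i] != 1 && b[i] != 1)
//         throw std::runtime_error("incompatible dimensions");
//       out[i] = std::max(a[i], b[i]);
//     }
//     return out;
//   }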
def ONNXEyeLikeOp : ONNX_Op < " EyeLike " ,
[ NoSideEffect ] > {
let summary = " ONNX EyeLike operation " ;
let description = [{
" Generate a 2D tensor (matrix) with ones on the diagonal and zeros everywhere else. Only 2D "
" tensors are supported, i.e. input T1 must be of rank 2. The shape of the output tensor is the "
" same as the input tensor. The data type can be specified by the 'dtype' argument. If "
" 'dtype' is not specified, then the type of input tensor is used. By default, the main diagonal "
" is populated with ones, but attribute 'k' can be used to populate upper or lower diagonals. "
" The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the "
" TensorProto message and be valid as an output type. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I1 ] > , AnyMemRef ] >: $input ,
OptionalAttr < I64Attr >: $dtype ,
DefaultValuedAttr < I64Attr , " 0 " >: $k );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I1 ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { - 1 };
}
}];
}
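// Illustrative commentary, not generated code: the diagonal placement with
// offset k described above, as a C++ sketch over a row-major rank-2 buffer.
//
//   #include <cstdint>
//   #include <vector>
//   std::vector<float> eyeLike(int64_t rows, int64_t cols, int64_t k) {
//     std::vector<float> out(rows * cols, 0.0f);
//     for (int64_t i = 0; i < rows; ++i) {
//       int64_t j = i + k; // k > 0: upper diagonal, k < 0: lower diagonal
//       if (j >= 0 && j < cols)
//         out[i * cols + j] = 1.0f;
//     }
//     return out;
//   }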
def ONNXFlattenOp : ONNX_Op < " Flatten " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Flatten operation " ;
let description = [{
" Flattens the input tensor into a 2D matrix. If input tensor has shape "
" (d_0, d_1, ... d_n) then the output will have shape "
" (d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn). "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $input ,
DefaultValuedAttr < I64Attr , " 1 " >: $axis );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
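// Illustrative commentary, not generated code: the 2-D output shape
// computation described above, as a C++ sketch.
//
//   #include <cstdint>
//   #include <utility>
//   #include <vector>
//   std::pair<int64_t, int64_t> flattenShape(const std::vector<int64_t> &dims,
//                                            int64_t axis) {
//     int64_t outer = 1, inner = 1;
//     for (int64_t i = 0; i < axis; ++i)
//       outer *= dims[i]; // d_0 x d_1 ... x d_(axis-1)
//     for (int64_t i = axis; i < (int64_t)dims.size(); ++i)
//       inner *= dims[i]; // d_axis x d_(axis+1) ... x d_n
//     return { outer , inner };
//   }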
def ONNXFloorOp : ONNX_Op < " Floor " ,
[ NoSideEffect ] > {
let summary = " ONNX Floor operation " ;
let description = [{
" Floor takes one input data (Tensor<T>) and produces one output data "
" (Tensor<T>) where the floor is, y = floor(x), is applied to "
" the tensor elementwise. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXGRUOp : ONNX_Op < " GRU " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX GRU operation " ;
let description = [{
" Computes an one-layer GRU. This operator is usually supported via some custom "
" implementation such as CuDNN. "
" "
" Notations: "
" "
" `X` - input tensor "
" "
" `z` - update gate "
" "
" `r` - reset gate "
" "
" `h` - hidden gate "
" "
" `t` - time step (t-1 means previous time step) "
" "
" `W[zrh]` - W parameter weight matrix for update, reset, and hidden gates "
" "
" `R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates "
" "
" `Wb[zrh]` - W bias vectors for update, reset, and hidden gates "
" "
" `Rb[zrh]` - R bias vectors for update, reset, and hidden gates "
" "
" `WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates "
" "
" `RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates "
" "
" `WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates "
" "
" `RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates "
" "
" `H` - Hidden state "
" "
" `num_directions` - 2 if direction == bidirectional else 1 "
" "
" Activation functions: "
" "
" Relu(x) - max(0, x) "
" "
" Tanh(x) - (1 - e^ { -2x})/(1 + e^ { -2x}) "
" "
" Sigmoid(x) - 1/(1 + e^ { -x}) "
" "
" (NOTE: Below are optional) "
" "
" Affine(x) - alpha*x + beta "
" "
" LeakyRelu(x) - x if x >= 0 else alpha * x "
" "
" ThresholdedRelu(x) - x if x >= alpha else 0 "
" "
" ScaledTanh(x) - alpha*Tanh(beta*x) "
" "
" HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) "
" "
" Elu(x) - x if x >= 0 else alpha*(e^x - 1) "
" "
" Softsign(x) - x/(1 + |x|) "
" "
" Softplus(x) - log(1 + e^x) "
" "
" Equations (Default: f=Sigmoid, g=Tanh): "
" "
" - zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz) "
" "
" - rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr) "
" "
" - ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0 "
" "
" - ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0 "
" "
" - Ht = (1 - zt) (.) ht + zt (.) Ht-1 "
" This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $W ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $R ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef , NoneType ] >: $B ,
AnyTypeOf < [ TensorOf < [ I32 ] > , AnyMemRef , NoneType ] >: $sequence_lens ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef , NoneType ] >: $initial_h ,
OptionalAttr < F32ArrayAttr >: $activation_alpha ,
OptionalAttr < F32ArrayAttr >: $activation_beta ,
OptionalAttr < StrArrayAttr >: $activations ,
OptionalAttr < F32Attr >: $clip ,
DefaultValuedAttr < StrAttr , " forward " >: $direction ,
OptionalAttr < I64Attr >: $hidden_size ,
DefaultValuedAttr < I64Attr , " 0 " >: $linear_before_reset );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef , NoneType ] >: $Y ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef , NoneType ] >: $Y_h );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 6 ;
}
static int getNumberOfResults () {
return 2 ;
}
static std :: vector < int > getTypeMap () {
return { 20 , 20 };
}
}];
}
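// Illustrative commentary, not generated code: the default-mode GRU equations
// above (f=Sigmoid, g=Tanh, linear_before_reset = 0) for a single scalar lane
// of the hidden state. The xW*/hR* arguments stand in for the already computed
// products Xt*(W^T) and Ht-1*(R^T) plus their biases; names are hypothetical.
//
//   #include <cmath>
//   float sigmoidFn(float v) { return 1.0f / (1.0f + std::exp(-v)); }
//   float gruCell(float xWz, float hRz, float xWr, float hRr,
//                 float xWh, float hRh, float hPrev) {
//     float z = sigmoidFn(xWz + hRz);       // update gate  zt
//     float r = sigmoidFn(xWr + hRr);       // reset gate   rt
//     float h = std::tanh(xWh + r * hRh);   // hidden gate  ht
//     return (1.0f - z) * h + z * hPrev;    // Ht = (1 - zt) (.) ht + zt (.) Ht-1
//   }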
def ONNXGatherOp : ONNX_Op < " Gather " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Gather operation " ;
let description = [{
" Given `data` tensor of rank r >= 1, and `indices` tensor of rank q, gather "
" entries of the axis dimension of `data` (by default outer-most one as axis=0) indexed by `indices`, and concatenates "
" them in an output tensor of rank q + (r - 1). "
" "
" axis = 0 : "
" "
" Let "
" k = indices[i_ { 0}, ..., i_ { q-1 \ } \ ] "
" Then "
" output[i_ { 0}, ..., i_ { q-1}, j_ { 0}, ..., j_ { r-2 \ } \ ] = input[k , j_ { 0}, ..., j_ { r-2 \ } \ ] "
" "
" ``` "
" data = [ "
" [1.0, 1.2], "
" [2.3, 3.4], "
" [4.5, 5.7], "
" ] "
" indices = [ "
" [0, 1], "
" [1, 2], "
" ] "
" output = [ "
" [ "
" [1.0, 1.2], "
" [2.3, 3.4], "
" ], "
" [ "
" [2.3, 3.4], "
" [4.5, 5.7], "
" ], "
" ] "
" ``` "
" axis = 1 : "
" "
" Let "
" k = indices[i_ { 0}, ..., i_ { q-1 \ } \ ] "
" Then "
" output[i_ { 0}, ..., i_ { q-1}, j_ { 0}, ..., j_ { r-2 \ } \ ] = input[j_ { 0}, k, j_ { 1}, ..., j_ { r-2 \ } \ ] "
" "
" ``` "
" data = [ "
" [1.0, 1.2, 1.9], "
" [2.3, 3.4, 3.9], "
" [4.5, 5.7, 5.9], "
" ] "
" indices = [ "
" [0, 2], "
" ] "
" axis = 1, "
" output = [ "
" [ "
" [1.0, 1.9], "
" [2.3, 3.9], "
" [4.5, 5.9], "
" ], "
" ] "
" ``` "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $data ,
AnyTypeOf < [ TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $indices ,
DefaultValuedAttr < I64Attr , " 0 " >: $axis );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 2 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
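// Illustrative commentary, not generated code: the axis = 0 case from the
// examples above, as a C++ sketch over a rank-2 `data` tensor.
//
//   #include <cstdint>
//   #include <vector>
//   std::vector<std::vector<float>>
//   gatherAxis0(const std::vector<std::vector<float>> &data,
//               const std::vector<int64_t> &indices) {
//     std::vector<std::vector<float>> output;
//     for (int64_t k : indices)
//       output.push_back(data[k]); // output[i, :] = data[indices[i], :]
//     return output;
//   }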
def ONNXGatherElementsOp : ONNX_Op < " GatherElements " ,
[ NoSideEffect ] > {
let summary = " ONNX GatherElements operation " ;
let description = [{
" GatherElements takes two inputs `data` and `indices` of the same rank r >= 1 "
" and an optional attribute `axis` that identifies an axis of `data` "
" (by default, the outer-most axis, that is axis 0). It is an indexing operation "
" that produces its output by indexing into the input data tensor at index "
" positions determined by elements of the `indices` tensor. "
" Its output shape is the same as the shape of `indices` and consists of one value "
" (gathered from the `data`) for each element in `indices`. "
" "
" For instance, in the 3-D case (r = 3), the output produced is determined "
" by the following equations: "
" ``` "
" out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0, "
" out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1, "
" out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2, "
" ``` "
" "
" This operator is also the inverse of ScatterElements. It is similar to Torch's gather operation. "
" "
" Example 1: "
" ``` "
" data = [ "
" [1, 2], "
" [3, 4], "
" ] "
" indices = [ "
" [0, 0], "
" [1, 0], "
" ] "
" axis = 1 "
" output = [ "
" [ "
" [1, 1], "
" [4, 3], "
" ], "
" ] "
" ``` "
" Example 2: "
" ``` "
" data = [ "
" [1, 2, 3], "
" [4, 5, 6], "
" [7, 8, 9], "
" ] "
" indices = [ "
" [1, 2, 0], "
" [2, 0, 0], "
" ] "
" axis = 0 "
" output = [ "
" [ "
" [4, 8, 3], "
" [7, 2, 3], "
" ], "
" ] "
" ``` "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $data ,
AnyTypeOf < [ TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $indices ,
DefaultValuedAttr < I64Attr , " 0 " >: $axis );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 2 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
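// Illustrative commentary, not generated code: the rank-2, axis = 0 variant of
// the indexing equations above, as a C++ sketch.
//
//   #include <cstdint>
//   #include <vector>
//   // out[i][j] = data[indices[i][j]][j] (axis = 0); the output shape equals
//   // the shape of `indices`.
//   std::vector<std::vector<float>>
//   gatherElementsAxis0(const std::vector<std::vector<float>> &data,
//                       const std::vector<std::vector<int64_t>> &indices) {
//     std::vector<std::vector<float>> out(indices.size());
//     for (size_t i = 0; i < indices.size(); ++i)
//       for (size_t j = 0; j < indices[i].size(); ++j)
//         out[i].push_back(data[indices[i][j]][j]);
//     return out;
//   }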
def ONNXGatherNDOp : ONNX_Op < " GatherND " ,
[ NoSideEffect ] > {
let summary = " ONNX GatherND operation " ;
let description = [{
" Given `data` tensor of rank `r` >= 1, and `indices` tensor of rank `q` >= 1, this operator gathers "
" slices of `data` into an output tensor of rank `q + r - indices_shape[-1] - 1`. "
" "
" `indices` is an q-dimensional integer tensor, best thought of as a `(q-1)`-dimensional tensor of index-tuples into `data`, "
" where each element defines a slice of `data` "
" "
" Some salient points about the inputs' rank and shape: "
" "
" 1) r >= 1 and q >= 1 are to be honored. There is no dependency condition to be met between ranks `r` and `q` "
" "
" 2) The `indices_shape[-1]` should have a value between 1 (inclusive) and rank `r` (inclusive) "
" "
" 3) All values in `indices` are expected to be within bounds [-s, s-1] along axis of size `s` (i.e.) `-data_shape[i] <= indices[...,i] <= data_shape[i] - 1`. "
" It is an error if any of the index values are out of bounds. "
" "
" The output is computed as follows: "
" "
" The output tensor is obtained by mapping each index-tuple in the `indices` tensor to the corresponding slice of the input `data`. "
" "
" 1) If `indices_shape[-1] > r` => error condition "
" "
" 2) If `indices_shape[-1] == r`, since the rank of `indices` is `q`, `indices` can be thought of as a `(q-1)`-dimensional tensor "
" containing 1-D tensors of dimension `r`. Let us think of each such `r` ranked tensor as `indices_slice`. "
" Each *scalar value* corresponding to `data[indices_slice]` is filled into the corresponding location of the `(q-1)`-dimensional tensor "
" to form the `output` tensor (Example 1 below) "
" "
" 3) If `indices_shape[-1] < r`, since the rank of `indices` is `q`, `indices` can be thought of as a `(q-1)`-dimensional tensor "
" containing 1-D tensors of dimension `< r`. Let us think of each such tensors as `indices_slice`. "
" Each *tensor slice* corresponding to `data[indices_slice , :]` is filled into the corresponding location of the `(q-1)`-dimensional tensor "
" to form the `output` tensor (Examples 2, 3, and 4 below) "
" "
" This operator is the inverse of `ScatterND`. "
" "
" `Example 1` "
" "
" data = [[0,1],[2,3]] # data_shape = [2, 2] "
" "
" indices = [[0,0],[1,1]] # indices_shape = [2, 2] "
" "
" output = [0,3] # output_shape = [2] "
" "
" `Example 2` "
" "
" data = [[0,1],[2,3]] # data_shape = [2, 2] "
" "
" indices = [[1],[0]] # indices_shape = [2, 1] "
" "
" output = [[2,3],[0,1]] # output_shape = [2, 2] "
" "
" `Example 3` "
" "
" data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2] "
" "
" indices = [[0,1],[1,0]] # indices_shape = [2, 2] "
" "
" output = [[2,3],[4,5]] # output_shape = [2, 2] "
" "
" `Example 4` "
" "
" data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2] "
" "
" indices = [[[0,1]],[[1,0]]] # indices_shape = [2, 1, 2] "
" "
" output = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2] "
" "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $data ,
TensorOf < [ I64 ] >: $indices );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 2 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
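// Illustrative commentary, not generated code: Example 1 above
// (r = 2, indices_shape[-1] == r), where every index-tuple selects a scalar.
//
//   #include <array>
//   #include <cstdint>
//   #include <vector>
//   std::vector<float>
//   gatherNDScalars(const std::vector<std::vector<float>> &data,
//                   const std::vector<std::array<int64_t, 2>> &indices) {
//     std::vector<float> output;
//     for (const auto &tuple : indices)
//       output.push_back(data[tuple[0]][tuple[1]]); // data[indices_slice]
//     return output;
//   }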
def ONNXGemmOp : ONNX_Op < " Gemm " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let hasCanonicalizer = 1 ;
let summary = " ONNX Gemm operation " ;
let description = [{
" General Matrix multiplication: "
" https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 "
" "
" A' = transpose(A) if transA else A "
" "
" B' = transpose(B) if transB else B "
" "
" Compute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M), "
" input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), "
" and output tensor Y has shape (M, N). A will be transposed before doing the "
" computation if attribute transA is non-zero, same for B and transB. "
" This operator supports **unidirectional broadcasting** (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check [the doc](Broadcasting.md). "
" This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $A ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $B ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef , NoneType ] >: $C ,
DefaultValuedAttr < F32Attr , " 1.0 " >: $alpha ,
DefaultValuedAttr < F32Attr , " 1.0 " >: $beta ,
DefaultValuedAttr < I64Attr , " 0 " >: $transA ,
DefaultValuedAttr < I64Attr , " 0 " >: $transB );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef , NoneType ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 3 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
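// Illustrative commentary, not generated code: the Y = alpha * A' * B' +
// beta * C formula above for row-major, non-transposed inputs with a
// pre-broadcast C; a naive triple loop, not a tuned kernel.
//
//   #include <vector>
//   void gemm(int M, int N, int K, float alpha, const std::vector<float> &A,
//             const std::vector<float> &B, float beta,
//             const std::vector<float> &C, std::vector<float> &Y) {
//     Y.assign(M * N, 0.0f);
//     for (int m = 0; m < M; ++m)
//       for (int n = 0; n < N; ++n) {
//         float acc = 0.0f;
//         for (int k = 0; k < K; ++k)
//           acc += A[m * K + k] * B[k * N + n]; // A' * B'
//         Y[m * N + n] = alpha * acc + beta * C[m * N + n];
//       }
//   }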
def ONNXGlobalAveragePoolOp : ONNX_Op < " GlobalAveragePool " ,
[ NoSideEffect ] > {
let summary = " ONNX GlobalAveragePool operation " ;
let description = [{
" GlobalAveragePool consumes an input tensor X and applies average pooling across "
" the values in the same channel. This is equivalent to AveragePool with kernel size "
" equal to the spatial dimension of input tensor. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
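// Illustrative commentary, not generated code: per-channel averaging over all
// spatial positions of an NCHW buffer, matching the description above.
//
//   #include <vector>
//   // x has N*C*H*W elements; the result has one mean per (n, c) pair.
//   std::vector<float> globalAveragePool(const std::vector<float> &x,
//                                        int N, int C, int H, int W) {
//     std::vector<float> y(N * C, 0.0f);
//     for (int n = 0; n < N; ++n)
//       for (int c = 0; c < C; ++c) {
//         float sum = 0.0f;
//         for (int i = 0; i < H * W; ++i)
//           sum += x[(n * C + c) * H * W + i];
//         y[n * C + c] = sum / (H * W); // spatial dims collapse to 1x1
//       }
//     return y;
//   }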
def ONNXGlobalLpPoolOp : ONNX_Op < " GlobalLpPool " ,
[ NoSideEffect ] > {
let summary = " ONNX GlobalLpPool operation " ;
let description = [{
" GlobalLpPool consumes an input tensor X and applies lp pool pooling across "
" the values in the same channel. This is equivalent to LpPool with kernel size "
" equal to the spatial dimension of input tensor. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X ,
DefaultValuedAttr < I64Attr , " 2 " >: $p );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXGlobalMaxPoolOp : ONNX_Op < " GlobalMaxPool " ,
[ NoSideEffect ] > {
let summary = " ONNX GlobalMaxPool operation " ;
let description = [{
" GlobalMaxPool consumes an input tensor X and applies max pooling across "
" the values in the same channel. This is equivalent to MaxPool with kernel size "
" equal to the spatial dimension of input tensor. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXGreaterOp : ONNX_Op < " Greater " ,
[ NoSideEffect ] > {
let summary = " ONNX Greater operation " ;
let description = [{
" Returns the tensor resulted from performing the `greater` logical operation "
" elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). "
" "
" This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $A ,
AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $B );
let results = ( outs AnyTypeOf < [ TensorOf < [ I1 ] > , AnyMemRef ] >: $C );
let builders = [
OpBuilder < " OpBuilder &builder, OperationState &state, Value A, Value B " , [{
auto lhsTy = A . getType ();
auto rhsTy = B . getType ();
auto elementType = getBroadcastedType ( lhsTy , rhsTy );
auto shapedType = elementType . dyn_cast_or_null < ShapedType > ();
if ( ! shapedType || ! shapedType . hasStaticShape ()) {
elementType = A . getType () . cast < TensorType > () . getElementType ();
elementType = UnrankedTensorType :: get ( elementType );
}
build ( builder , state , elementType , A , B );
}] > ,
OpBuilder < " OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes " , [{
auto lhsTy = operands [ 0 ] . getType ();
auto rhsTy = operands [ 1 ] . getType ();
auto elementType = getBroadcastedType ( lhsTy , rhsTy );
auto shapedType = elementType . dyn_cast_or_null < ShapedType > ();
if ( ! shapedType || ! shapedType . hasStaticShape ()) {
elementType = operands [ 0 ] . getType () . cast < TensorType > () . getElementType ();
elementType = UnrankedTensorType :: get ( elementType );
}
std :: vector < mlir :: Type > outputTypes ;
outputTypes . emplace_back ( elementType );
build ( builder , state , outputTypes , operands , attributes );
}] >
];
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 2 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 0 };
}
}];
}
def ONNXHardSigmoidOp : ONNX_Op < " HardSigmoid " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX HardSigmoid operation " ;
let description = [{
" HardSigmoid takes one input data (Tensor<T>) and produces one output data "
" (Tensor<T>) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta)), "
" is applied to the tensor elementwise. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X ,
DefaultValuedAttr < F32Attr , " 0.2 " >: $alpha ,
DefaultValuedAttr < F32Attr , " 0.5 " >: $beta );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXHardmaxOp : ONNX_Op < " Hardmax " ,
[ NoSideEffect ] > {
let summary = " ONNX Hardmax operation " ;
let description = [{
" The operator computes the hardmax (1 for the first maximum value, and 0 for all others) values for each layer in the batch "
" of the given input. "
" "
" The input does not need to explicitly be a 2D vector; rather, it will be "
" coerced into one. For an arbitrary n-dimensional tensor "
" input \ in [a_0, a_1, ..., a_ { k-1}, a_k, ..., a_ { n-1 \ } \ ] and k is "
" the axis provided, then input will be coerced into a 2-dimensional tensor with "
" dimensions [a_0 * ... * a_ { k-1}, a_k * ... * a_ { n-1 \ } \ ]. For the default "
" case where axis=1, this means the input tensor will be coerced into a 2D tensor "
" of dimensions [a_0, a_1 * ... * a_ { n-1 \ } \ ], where a_0 is often the batch size. "
" In this situation, we must have a_0 = N and a_1 * ... * a_ { n-1} = D. "
" Each of these dimensions must be matched correctly, or else the operator "
" will throw errors. The output tensor has the same shape "
" and contains the hardmax values of the corresponding input. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $input ,
DefaultValuedAttr < I64Attr , " 1 " >: $axis );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
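// Illustrative commentary, not generated code: hardmax on an input already
// coerced to a 2-D [N, D] view as described above; one 1 per row, at the
// first maximum.
//
//   #include <algorithm>
//   #include <vector>
//   std::vector<float> hardmax2D(const std::vector<float> &x, int N, int D) {
//     std::vector<float> y(x.size(), 0.0f);
//     for (int n = 0; n < N; ++n) {
//       auto row = x.begin() + n * D;
//       // std::max_element returns the FIRST maximum, matching "1 for the
//       // first maximum value, and 0 for all others".
//       int j = (int)(std::max_element(row, row + D) - row);
//       y[n * D + j] = 1.0f;
//     }
//     return y;
//   }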
def ONNXIdentityOp : ONNX_Op < " Identity " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let hasCanonicalizer = 1 ;
let summary = " ONNX Identity operation " ;
let description = [{
" Identity operator "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $input );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXIfOp : ONNX_Op < " If " ,
[ NoSideEffect ] > {
let summary = " ONNX If operation " ;
let description = [{
" If conditional "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ I1 ] > , AnyMemRef ] >: $cond ,
AnyAttr : $else_branch ,
AnyAttr : $then_branch );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $outputs );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return - 1 ;
}
static std :: vector < int > getTypeMap () {
return { - 1 };
}
}];
}
def ONNXInstanceNormalizationOp : ONNX_Op < " InstanceNormalization " ,
[ NoSideEffect ] > {
let summary = " ONNX InstanceNormalization operation " ;
let description = [{
" Carries out instance normalization as described in the paper "
" https://arxiv.org/abs/1607.08022. "
" "
" y = scale * (x - mean) / sqrt(variance + epsilon) + B, "
" where mean and variance are computed per instance per channel. "
" "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $input ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $scale ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $B ,
DefaultValuedAttr < F32Attr , " 1e-05 " >: $epsilon );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 3 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
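// Illustrative commentary, not generated code: the normalization formula
// above, with mean and variance computed per instance per channel over the
// spatial positions of an NCHW buffer (HW = product of spatial dims).
//
//   #include <cmath>
//   #include <vector>
//   void instanceNorm(std::vector<float> &x, const std::vector<float> &scale,
//                     const std::vector<float> &B, int N, int C, int HW,
//                     float epsilon) {
//     for (int n = 0; n < N; ++n)
//       for (int c = 0; c < C; ++c) {
//         float *p = &x[(n * C + c) * HW];
//         float mean = 0.0f, var = 0.0f;
//         for (int i = 0; i < HW; ++i) mean += p[i];
//         mean /= HW;
//         for (int i = 0; i < HW; ++i) var += (p[i] - mean) * (p[i] - mean);
//         var /= HW;
//         for (int i = 0; i < HW; ++i) // y = scale*(x-mean)/sqrt(var+eps) + B
//           p[i] = scale[c] * (p[i] - mean) / std::sqrt(var + epsilon) + B[c];
//       }
//   }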
def ONNXIsInfOp : ONNX_Op < " IsInf " ,
[ NoSideEffect ] > {
let summary = " ONNX IsInf operation " ;
let description = [{
" Map infinity to true and other values to false. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X ,
DefaultValuedAttr < I64Attr , " 1 " >: $detect_negative ,
DefaultValuedAttr < I64Attr , " 1 " >: $detect_positive );
let results = ( outs AnyTypeOf < [ TensorOf < [ I1 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 0 };
}
}];
}
def ONNXIsNaNOp : ONNX_Op < " IsNaN " ,
[ NoSideEffect ] > {
let summary = " ONNX IsNaN operation " ;
let description = [{
" Returns which elements of the input are NaN. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X );
let results = ( outs AnyTypeOf < [ TensorOf < [ I1 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 0 };
}
}];
}
def ONNXLRNOp : ONNX_Op < " LRN " ,
[ NoSideEffect ] > {
let summary = " ONNX LRN operation " ;
let description = [{
" Local Response Normalization proposed in the [AlexNet paper](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf). "
" It normalizes over local input regions. "
" The local region is defined across the channels. For an element X[n, c, d1, ..., dk] in a tensor "
" of shape (N x C x D1 x D2, ..., Dk), its region is "
" { X[n, i, d1, ..., dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}. "
" "
" square_sum[n, c, d1, ..., dk] = sum(X[n, i, d1, ..., dk] ^ 2), "
" where max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2)). "
" "
" Y[n, c, d1, ..., dk] = X[n, c, d1, ..., dk] / (bias + alpha / size * square_sum[n, c, d1, ..., dk] ) ^ beta "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X ,
DefaultValuedAttr < F32Attr , " 0.0001 " >: $alpha ,
DefaultValuedAttr < F32Attr , " 0.75 " >: $beta ,
DefaultValuedAttr < F32Attr , " 1.0 " >: $bias ,
I64Attr : $size );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
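// Illustrative commentary, not generated code: the cross-channel square-sum
// window and normalization formula above, evaluated at one fixed spatial
// position (n, d1, ..., dk) of an NCHW-like buffer.
//
//   #include <algorithm>
//   #include <cmath>
//   #include <vector>
//   // x holds the C channel values at that fixed position.
//   std::vector<float> lrnAtPosition(const std::vector<float> &x, int size,
//                                    float alpha, float beta, float bias) {
//     int C = (int)x.size();
//     std::vector<float> y(C);
//     for (int c = 0; c < C; ++c) {
//       int lo = std::max(0, c - (size - 1) / 2);
//       int hi = std::min(C - 1, c + ((size - 1) + 1) / 2); // ceil((size-1)/2)
//       float squareSum = 0.0f;
//       for (int i = lo; i <= hi; ++i) squareSum += x[i] * x[i];
//       y[c] = x[c] / std::pow(bias + alpha / size * squareSum, beta);
//     }
//     return y;
//   }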
def ONNXLSTMOp : ONNX_Op < " LSTM " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX LSTM operation " ;
let description = [{
" Computes an one-layer LSTM. This operator is usually supported via some "
" custom implementation such as CuDNN. "
" "
" Notations: "
" "
" `X` - input tensor "
" "
" `i` - input gate "
" "
" `o` - output gate "
" "
" `f` - forget gate "
" "
" `c` - cell gate "
" "
" `t` - time step (t-1 means previous time step) "
" "
" `W[iofc]` - W parameter weight matrix for input, output, forget, and cell gates "
" "
" `R[iofc]` - R recurrence weight matrix for input, output, forget, and cell gates "
" "
" `Wb[iofc]` - W bias vectors for input, output, forget, and cell gates "
" "
" `Rb[iofc]` - R bias vectors for input, output, forget, and cell gates "
" "
" `P[iof]` - P peephole weight vector for input, output, and forget gates "
" "
" `WB[iofc]` - W parameter weight matrix for backward input, output, forget, and cell gates "
" "
" `RB[iofc]` - R recurrence weight matrix for backward input, output, forget, and cell gates "
" "
" `WBb[iofc]` - W bias vectors for backward input, output, forget, and cell gates "
" "
" `RBb[iofc]` - R bias vectors for backward input, output, forget, and cell gates "
" "
" `PB[iof]` - P peephole weight vector for backward input, output, and forget gates "
" "
" `H` - Hidden state "
" "
" `num_directions` - 2 if direction == bidirectional else 1 "
" "
" Activation functions: "
" "
" Relu(x) - max(0, x) "
" "
" Tanh(x) - (1 - e^ { -2x})/(1 + e^ { -2x}) "
" "
" Sigmoid(x) - 1/(1 + e^ { -x}) "
" "
" (NOTE: Below are optional) "
" "
" Affine(x) - alpha*x + beta "
" "
" LeakyRelu(x) - x if x >= 0 else alpha * x "
" "
" ThresholdedRelu(x) - x if x >= alpha else 0 "
" "
" ScaledTanh(x) - alpha*Tanh(beta*x) "
" "
" HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) "
" "
" Elu(x) - x if x >= 0 else alpha*(e^x - 1) "
" "
" Softsign(x) - x/(1 + |x|) "
" "
" Softplus(x) - log(1 + e^x) "
" "
" Equations (Default: f=Sigmoid, g=Tanh, h=Tanh): "
" "
" - it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi) "
" "
" - ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf) "
" "
" - ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) "
" "
" - Ct = ft (.) Ct-1 + it (.) ct "
" "
" - ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo) "
" "
" - Ht = ot (.) h(Ct) "
" This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $W ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $R ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef , NoneType ] >: $B ,
AnyTypeOf < [ TensorOf < [ I32 ] > , AnyMemRef , NoneType ] >: $sequence_lens ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef , NoneType ] >: $initial_h ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef , NoneType ] >: $initial_c ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef , NoneType ] >: $P ,
OptionalAttr < F32ArrayAttr >: $activation_alpha ,
OptionalAttr < F32ArrayAttr >: $activation_beta ,
OptionalAttr < StrArrayAttr >: $activations ,
OptionalAttr < F32Attr >: $clip ,
DefaultValuedAttr < StrAttr , " forward " >: $direction ,
OptionalAttr < I64Attr >: $hidden_size ,
DefaultValuedAttr < I64Attr , " 0 " >: $input_forget );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef , NoneType ] >: $Y ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef , NoneType ] >: $Y_h ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef , NoneType ] >: $Y_c );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 8 ;
}
static int getNumberOfResults () {
return 3 ;
}
static std :: vector < int > getTypeMap () {
return { 20 , 20 , 20 };
}
}];
}
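// Illustrative commentary, not generated code: the default-mode LSTM
// equations above (f=Sigmoid, g=h=Tanh, no peepholes) for one scalar lane;
// xW*/hR* stand for the precomputed Xt*(W^T) and Ht-1*(R^T) plus biases.
//
//   #include <cmath>
//   #include <utility>
//   float sig(float v) { return 1.0f / (1.0f + std::exp(-v)); }
//   std::pair<float, float> lstmCell(float xWi, float hRi, float xWf,
//                                    float hRf, float xWc, float hRc,
//                                    float xWo, float hRo, float cPrev) {
//     float i = sig(xWi + hRi);            // input gate   it
//     float f = sig(xWf + hRf);            // forget gate  ft
//     float c = std::tanh(xWc + hRc);      // cell gate    ct
//     float C = f * cPrev + i * c;         // Ct
//     float o = sig(xWo + hRo);            // output gate  ot
//     return { C , o * std::tanh(C) };     // (Ct, Ht)
//   }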
def ONNXLeakyReluOp : ONNX_Op < " LeakyRelu " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX LeakyRelu operation " ;
let description = [{
" LeakyRelu takes input data (Tensor<T>) and an argument alpha, and produces one "
" output data (Tensor<T>) where the function `f(x) = alpha * x for x < 0`, "
" `f(x) = x for x >= 0`, is applied to the data tensor elementwise. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X ,
DefaultValuedAttr < F32Attr , " 0.01 " >: $alpha );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
2019-11-19 10:08:21 +08:00
}
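
// The formula quoted in the description above, as a small standalone C++
// sketch over a flat float buffer. The helper name `leakyRelu` and the default
// alpha mirror this op's attribute but are illustrative only, not dialect API.
#include <cstddef>
#include <vector>

static std::vector<float> leakyRelu(const std::vector<float> &x, float alpha = 0.01f) {
  std::vector<float> y(x.size());
  for (std::size_t i = 0; i < x.size(); ++i)
    y[i] = x[i] < 0.0f ? alpha * x[i] : x[i]; // f(x) = alpha*x for x < 0, x otherwise
  return y;
}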

def ONNXLessOp:ONNX_Op<"Less",
  [NoSideEffect]> {
  let summary = "ONNX Less operation";
  let description = [{
  "Returns the tensor resulting from performing the `less` logical operation"
  "elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support)."
  ""
  "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$A,
    AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$B);
  let results = (outs AnyTypeOf<[TensorOf<[I1]>, AnyMemRef]>:$C);
  let builders = [
    OpBuilder<"OpBuilder &builder, OperationState &state, Value A, Value B", [{
      auto lhsTy = A.getType();
      auto rhsTy = B.getType();
      auto elementType = getBroadcastedType(lhsTy, rhsTy);
      auto shapedType = elementType.dyn_cast_or_null<ShapedType>();
      if (!shapedType || !shapedType.hasStaticShape()) {
        elementType = A.getType().cast<TensorType>().getElementType();
        elementType = UnrankedTensorType::get(elementType);
      }
      build(builder, state, elementType, A, B);
    }]>,
    OpBuilder<"OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
      auto lhsTy = operands[0].getType();
      auto rhsTy = operands[1].getType();
      auto elementType = getBroadcastedType(lhsTy, rhsTy);
      auto shapedType = elementType.dyn_cast_or_null<ShapedType>();
      if (!shapedType || !shapedType.hasStaticShape()) {
        elementType = operands[0].getType().cast<TensorType>().getElementType();
        elementType = UnrankedTensorType::get(elementType);
      }
      std::vector<mlir::Type> outputTypes;
      outputTypes.emplace_back(elementType);
      build(builder, state, outputTypes, operands, attributes);
    }]>
  ];
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 2;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {0};
    }
  }];
}
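
// A hedged sketch of the elementwise `less` semantics for same-shape inputs;
// the multidirectional broadcasting described above is omitted for brevity,
// and `lessOp` is an illustrative name rather than an API of this dialect.
#include <cstddef>
#include <vector>

static std::vector<bool> lessOp(const std::vector<float> &a,
                                const std::vector<float> &b) {
  std::vector<bool> c(a.size()); // assumes a.size() == b.size()
  for (std::size_t i = 0; i < a.size(); ++i)
    c[i] = a[i] < b[i]; // C = A < B, elementwise
  return c;
}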

def ONNXLogOp:ONNX_Op<"Log",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX Log operation";
  let description = [{
  "Calculates the natural log of the given input tensor, element-wise."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXLogSoftmaxOp:ONNX_Op<"LogSoftmax",
  [NoSideEffect]> {
  let summary = "ONNX LogSoftmax operation";
  let description = [{
  "The operator computes the logsoftmax (log of softmax) values for each layer in the batch"
  "of the given input."
  ""
  "The input does not need to explicitly be a 2D vector; rather, it will be"
  "coerced into one. For an arbitrary n-dimensional tensor"
  "input \in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is"
  "the axis provided, then input will be coerced into a 2-dimensional tensor with"
  "dimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. For the default"
  "case where axis=1, this means the input tensor will be coerced into a 2D tensor"
  "of dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size."
  "In this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D."
  "Each of these dimensions must be matched correctly, or else the operator"
  "will throw errors. The output tensor has the same shape"
  "and contains the logsoftmax values of the corresponding input."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input,
    DefaultValuedAttr<I64Attr, "1">:$axis);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}
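
// Sketch of the coercion described above: once the input is viewed as a 2-D
// [N, D] tensor (N = a_0*...*a_{k-1}, D = a_k*...*a_{n-1}), logsoftmax is
// computed per row. The max-subtraction is a standard numerical-stability
// trick assumed here, not something the spec mandates.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

static void logSoftmaxRows(std::vector<float> &x, std::size_t N, std::size_t D) {
  for (std::size_t n = 0; n < N; ++n) {
    float *row = &x[n * D];
    float mx = *std::max_element(row, row + D);
    float sum = 0.0f;
    for (std::size_t d = 0; d < D; ++d)
      sum += std::exp(row[d] - mx);
    float logSum = mx + std::log(sum); // log(sum_j exp(row[j]))
    for (std::size_t d = 0; d < D; ++d)
      row[d] -= logSum; // log(exp(row[d]) / sum) = row[d] - logSum
  }
}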

def ONNXLoopOp:ONNX_Op<"Loop",
  [NoSideEffect]> {
  let summary = "ONNX Loop operation";
  let description = [{
  "Generic Looping construct. This loop has multiple termination conditions:"
  ""
  "1) Trip count. Iteration count specified at runtime. Set by"
  "   specifying the input M. Optional. Set to empty string to omit."
  "   Note that a static trip count (specified at graph construction time) can be"
  "   specified by passing in a constant node for input M."
  "2) Loop termination condition. This is an input to the op that determines"
  "   whether to run the first iteration and also a loop-carried dependency for"
  "   the body graph. The body graph must yield a value for the condition variable,"
  "   whether this input is provided or not."
  ""
  "This table summarizes the operating modes of this operator with equivalent"
  "C-style code:"
  ""
  "Operator inputs defined as (max_trip_count, condition_var)."
  ""
  "input (\"\", \"\"):"
  "    for (int i=0; ; ++i) {"
  "      cond = ... // Note this value is ignored, but is required in the body"
  "    }"
  ""
  "input (\"\", cond) // Note this is analogous to a while loop"
  "    bool cond = ...;"
  "    for (int i=0; cond; ++i) {"
  "      cond = ...;"
  "    }"
  ""
  "input (\"\", 1) // Note this is analogous to a do-while loop"
  "    bool cond = true"
  "    for (int i=0; cond; ++i) {"
  "      cond = ...;"
  "    }"
  ""
  "input (trip_count, \"\") // Note this is analogous to a for loop"
  "    int trip_count = ..."
  "    for (int i=0; i < trip_count; ++i) {"
  "      cond = ...; // ignored"
  "    }"
  ""
  "input (trip_count, cond)"
  "    int trip_count = ...;"
  "    bool cond = ...;"
  "    for (int i=0; i < trip_count && cond; ++i) {"
  "      cond = ...;"
  "    }"
  ""
  ""
  "*Sample usage - cond as well as trip count*"
  ""
  "    graph predict-net {"
  "      %a = Constant[value = <Scalar Tensor [3]>]()"
  "      %b = Constant[value = <Scalar Tensor [6]>]()"
  "      %keepgoing = Constant[value = <Scalar Tensor [1]>]()"
  "      %max_trip_count = Constant[value = <Scalar Tensor [10]>]()"
  "      %keepgoing_out, %b_out, %user_defined_vals = Loop[body = <graph body-net>](%max_trip_count, %keepgoing, %b)"
  "      return"
  "    }"
  ""
  "    graph body-net ("
  "      %i[INT32, scalar]"
  "      %keepgoing[BOOL, scalar]"
  "      %b[INT32, scalar]"
  "    ) {"
  "      %my_local = Add(%a, %b)"
  "      %b_out = Sub(%a, %b)"
  "      %keepgoing_out = Greater(%my_local, %b_out)"
  "      %user_defined_vals = Add(%b, %b)"
  "      return %keepgoing_out, %b_out, %user_defined_vals"
  "    }"
  ""
  "*Sample equivalent C code*"
  ""
  "    {"
  "      /* User-defined code (enclosing scope) */"
  "      int a = 3, b = 6;"
  "      bool keepgoing = true; // Analogous to input cond"
  "      /* End user-defined code */"
  ""
  "      /* Implicitly-defined code */"
  "      const int max_trip_count = 10; // Analogous to input M"
  "      int user_defined_vals[]; // Imagine this is resizable"
  "      /* End implicitly-defined code */"
  "      for (int i=0; i < max_trip_count && keepgoing; ++i) {"
  "        /* User-defined code (loop body) */"
  "        int my_local = a + b; // Reading values in the enclosing scope is fine"
  "        b = a - b; // writes fine if we specify b as a loop-carried dependency"
  "        keepgoing = my_local > b; // keepgoing is a loop-carried dependency"
  "        user_defined_vals[i] = b + b;"
  "        /* End user-defined code */"
  "      }"
  "      // my_local = 123; // Can't do this. my_local was defined in the body"
  ""
  "      // These below values are live-out from the loop and therefore accessible"
  "      b_out; user_defined_vals; keepgoing_out;"
  "    }"
  ""
  "There are several things of note in this code snippet:"
  ""
  "1) Values from the enclosing scope (i.e. variable a here) are in scope and can"
  "   be referenced in the inputs of the loop."
  "2) Any variables which you wish to make available in the enclosing scope (i.e."
  "   the variables b and keepgoing) must be declared as either loop-carried"
  "   dependencies (both at the op inputs and output and at the body net input and"
  "   output) or scan_outputs."
  "3) Values created in the body cannot be accessed in the enclosing scope."
  ""
  "Note that the semantics of this op support \"diagonal\" or \"wavefront\" execution."
  "(See Step 3 here for an example:"
  "https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/)."
  "Frontends should emit multi-layer RNNs as a series of While operators (with"
  "time being the inner looping dimension), with each successive layer consuming"
  "the scan_outputs from the previous layer, possibly going through several"
  "point-wise operators (e.g. dropout, residual connections, linear layer)."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[I64]>, AnyMemRef, NoneType]>:$M,
    AnyTypeOf<[TensorOf<[I1]>, AnyMemRef, NoneType]>:$cond,
    AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$v_initial,
    AnyAttr:$body);
  let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$v_final_and_scan_outputs);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return -1;
    }
    static int getNumberOfResults() {
      return -1;
    }
    static std::vector<int> getTypeMap() {
      return {22};
    }
  }];
}

def ONNXLpNormalizationOp:ONNX_Op<"LpNormalization",
  [NoSideEffect]> {
  let summary = "ONNX LpNormalization operation";
  let description = [{
  "Given a matrix, apply Lp-normalization along the provided axis."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input,
    DefaultValuedAttr<I64Attr, "-1">:$axis,
    DefaultValuedAttr<I64Attr, "2">:$p);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}
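
// Minimal sketch of Lp-normalization for the default p = 2, applied along the
// innermost axis of a row-major [N, D] matrix; general axis/p handling is
// omitted and the names are illustrative.
#include <cmath>
#include <cstddef>
#include <vector>

static void l2NormalizeRows(std::vector<float> &x, std::size_t N, std::size_t D) {
  for (std::size_t n = 0; n < N; ++n) {
    float sumSq = 0.0f;
    for (std::size_t d = 0; d < D; ++d)
      sumSq += x[n * D + d] * x[n * D + d];
    float norm = std::sqrt(sumSq); // ||row||_2
    if (norm != 0.0f)
      for (std::size_t d = 0; d < D; ++d)
        x[n * D + d] /= norm;
  }
}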

def ONNXLpPoolOp:ONNX_Op<"LpPool",
  [NoSideEffect]> {
  let summary = "ONNX LpPool operation";
  let description = [{
  "LpPool consumes an input tensor X and applies Lp pooling across"
  "the tensor according to kernel sizes, stride sizes, and pad lengths."
  "Lp pooling consists of computing the Lp norm on all values of a subset"
  "of the input tensor according to the kernel size and downsampling the"
  "data into the output tensor Y for further processing."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X,
    DefaultValuedAttr<StrAttr, "NOTSET">:$auto_pad,
    I64ArrayAttr:$kernel_shape,
    DefaultValuedAttr<I64Attr, "2">:$p,
    OptionalAttr<I64ArrayAttr>:$pads,
    OptionalAttr<I64ArrayAttr>:$strides);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$Y);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXMatMulOp:ONNX_Op<"MatMul",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX MatMul operation";
  let description = [{
  "Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html"
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, AnyMemRef]>:$A,
    AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, AnyMemRef]>:$B);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, AnyMemRef]>:$Y);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 2;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXMatMulIntegerOp:ONNX_Op<"MatMulInteger",
  [NoSideEffect]> {
  let summary = "ONNX MatMulInteger operation";
  let description = [{
  "Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html."
  "The production MUST never overflow. The accumulation may overflow only if it is performed in 32 bits."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, AnyMemRef]>:$A,
    AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, AnyMemRef]>:$B,
    AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, AnyMemRef, NoneType]>:$a_zero_point,
    AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, AnyMemRef, NoneType]>:$b_zero_point);
  let results = (outs AnyTypeOf<[TensorOf<[I32]>, AnyMemRef]>:$Y);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 4;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {3};
    }
  }];
}

def ONNXMaxOp:ONNX_Op<"Max",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX Max operation";
  let description = [{
  "Element-wise max of each of the input tensors (with Numpy-style broadcasting support)."
  "All inputs and outputs must have the same data type."
  "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)."
  }];
  let arguments = (ins Variadic<AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>>:$data_0);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$max);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return -1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXMaxPoolOp:ONNX_Op<"MaxPool",
  [NoSideEffect]> {
  let summary = "ONNX MaxPool operation";
  let description = [{
  "MaxPool consumes an input tensor X and applies max pooling across"
  "the tensor according to kernel sizes, stride sizes, and pad lengths."
  "Max pooling consists of computing the max on all values of a"
  "subset of the input tensor according to the kernel size and downsampling the"
  "data into the output tensor Y for further processing. The output spatial shape will be as follows:"
  "```"
  "output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)"
  "```"
  "or"
  "```"
  "output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)"
  "```"
  "if ceil_mode is enabled."
  ""
  "```"
  "* pad_shape[i] is the sum of pads along axis i"
  "```"
  ""
  "`auto_pad` is a DEPRECATED attribute. If you are still using it, the output spatial shape will be as follows:"
  "```"
  "VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])"
  "SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])"
  "```"
  "And the pad shape will be as follows if `SAME_UPPER` or `SAME_LOWER`:"
  "```"
  "pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]"
  "```"
  "The output of each pooling window is the maximum of the window's elements, excluding padding."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X,
    DefaultValuedAttr<StrAttr, "NOTSET">:$auto_pad,
    DefaultValuedAttr<I64Attr, "0">:$ceil_mode,
    OptionalAttr<I64ArrayAttr>:$dilations,
    I64ArrayAttr:$kernel_shape,
    OptionalAttr<I64ArrayAttr>:$pads,
    DefaultValuedAttr<I64Attr, "0">:$storage_order,
    OptionalAttr<I64ArrayAttr>:$strides);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$Y,
    AnyTypeOf<[TensorOf<[I64]>, AnyMemRef, NoneType]>:$Indices);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 2;
    }
    static std::vector<int> getTypeMap() {
      return {20, 4};
    }
  }];
}
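
// The output-shape arithmetic quoted in the description above, for explicit
// pads (auto_pad = NOTSET); `padSums[i]` is the sum of the start and end pads
// along spatial axis i. A sketch only; the names are illustrative.
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

static std::vector<int64_t> maxPoolOutputShape(
    const std::vector<int64_t> &inShape, const std::vector<int64_t> &kernel,
    const std::vector<int64_t> &strides, const std::vector<int64_t> &dilations,
    const std::vector<int64_t> &padSums, bool ceilMode) {
  std::vector<int64_t> out(inShape.size());
  for (std::size_t i = 0; i < inShape.size(); ++i) {
    double effectiveKernel = (kernel[i] - 1) * dilations[i] + 1;
    double v = (inShape[i] + padSums[i] - effectiveKernel) / strides[i] + 1;
    out[i] = static_cast<int64_t>(ceilMode ? std::ceil(v) : std::floor(v));
  }
  return out;
}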

def ONNXMaxRoiPoolOp:ONNX_Op<"MaxRoiPool",
  [NoSideEffect]> {
  let summary = "ONNX MaxRoiPool operation";
  let description = [{
  "ROI max pool consumes an input tensor X and regions of interest (RoIs) to"
  "apply max pooling across each RoI, to produce an output 4-D tensor of shape"
  "(num_rois, channels, pooled_shape[0], pooled_shape[1])."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X,
    AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$rois,
    I64ArrayAttr:$pooled_shape,
    DefaultValuedAttr<F32Attr, "1.0">:$spatial_scale);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$Y);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 2;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXMaxUnpoolOp:ONNX_Op<"MaxUnpool",
  [NoSideEffect]> {
  let summary = "ONNX MaxUnpool operation";
  let description = [{
  "MaxUnpool essentially computes the partial inverse of the MaxPool op."
  "The input information to this op is typically the output information from a MaxPool op. The first"
  "input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output)"
  "from MaxPool. The second input tensor, I, contains the indices to the (locally maximal) elements corresponding"
  "to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op."
  "The third (optional) input is a tensor that specifies the output size of the unpooling operation."
  ""
  "MaxUnpool is intended to do a 'partial' inverse of the MaxPool op. 'Partial' because all the non-maximal"
  "values from the original input to MaxPool are set to zero in the output of the MaxUnpool op. Pooling"
  "the result of an unpooling operation should give back the original input to the unpooling op."
  ""
  "MaxUnpool can produce the same output size for several input sizes, which makes the unpooling op ambiguous."
  "The third input argument, output_size, is meant to disambiguate the op and produce an output tensor of"
  "known/predictable size."
  ""
  "In addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape, strides, and pads,"
  "which define the exact unpooling op. The attributes typically have the same values as the corresponding"
  "pooling op that the unpooling op is trying to invert."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X,
    AnyTypeOf<[TensorOf<[I64]>, AnyMemRef]>:$I,
    AnyTypeOf<[TensorOf<[I64]>, AnyMemRef, NoneType]>:$output_shape,
    I64ArrayAttr:$kernel_shape,
    OptionalAttr<I64ArrayAttr>:$pads,
    OptionalAttr<I64ArrayAttr>:$strides);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 3;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXMeanOp:ONNX_Op<"Mean",
  [NoSideEffect]> {
  let summary = "ONNX Mean operation";
  let description = [{
  "Element-wise mean of each of the input tensors (with Numpy-style broadcasting support)."
  "All inputs and outputs must have the same data type."
  "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)."
  }];
  let arguments = (ins Variadic<AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>>:$data_0);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$mean);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return -1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXMeanVarianceNormalizationOp:ONNX_Op<"MeanVarianceNormalization",
  [NoSideEffect]> {
  let summary = "ONNX MeanVarianceNormalization operation";
  let description = [{
  "A MeanVarianceNormalization Function: Perform mean variance normalization"
  "on the input tensor X using formula: <br/> ``` (X-EX)/sqrt(E(X-EX)^2) ```"
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X,
    DefaultValuedAttr<I64ArrayAttr, "{0, 2, 3}">:$axes);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$Y);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}
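
// A tiny sketch of the formula above, (X - E[X]) / sqrt(E[(X - E[X])^2]),
// reduced over the whole buffer rather than over the `axes` attribute; the
// per-axes version differs only in which elements share a mean and variance.
#include <cmath>
#include <vector>

static void meanVarNormalize(std::vector<float> &x) {
  float mean = 0.0f;
  for (float v : x)
    mean += v;
  mean /= x.size(); // E[X]
  float var = 0.0f;
  for (float v : x)
    var += (v - mean) * (v - mean);
  var /= x.size(); // E[(X - E[X])^2]
  float stddev = std::sqrt(var);
  for (float &v : x)
    v = (v - mean) / stddev; // assumes nonzero variance
}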

def ONNXMinOp:ONNX_Op<"Min",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX Min operation";
  let description = [{
  "Element-wise min of each of the input tensors (with Numpy-style broadcasting support)."
  "All inputs and outputs must have the same data type."
  "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)."
  }];
  let arguments = (ins Variadic<AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>>:$data_0);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$min);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return -1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXModOp:ONNX_Op<"Mod",
  [NoSideEffect]> {
  let summary = "ONNX Mod operation";
  let description = [{
  "Performs element-wise binary modulus (with Numpy-style broadcasting support)."
  "The sign of the remainder is the same as that of the Divisor."
  ""
  "The Mod operator can also behave like C fmod() or numpy.fmod. In this case, the sign of the remainder will instead be the same as the Dividend"
  "(in contrast to integer mod). To force a behavior like numpy.fmod(), an 'fmod' Attribute is provided."
  "This attribute is set to 0 by default, causing the behavior to be like integer mod."
  "Setting this attribute to 1 causes the remainder to be calculated similar to that of numpy.fmod()."
  ""
  "If the input type is floating point, then the `fmod` attribute must be set to 1."
  ""
  "In case of the divisor being zero, the results will be platform dependent."
  ""
  "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$A,
    AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$B,
    DefaultValuedAttr<I64Attr, "0">:$fmod);
  let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$C);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 2;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}
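
// Sketch contrasting the two integer behaviors described above: with fmod=0
// the remainder takes the sign of the divisor (integer mod); with fmod=1 it
// takes the sign of the dividend, like C's % operator and numpy.fmod.
#include <cstdint>

static int64_t modOp(int64_t a, int64_t b, bool fmodMode) {
  int64_t r = a % b;              // C remainder: sign follows the dividend
  if (fmodMode)
    return r;
  if (r != 0 && ((r < 0) != (b < 0)))
    r += b;                       // integer mod: sign follows the divisor
  return r;
}
// e.g. modOp(-7, 3, /*fmodMode=*/true) == -1, while modOp(-7, 3, false) == 2.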

def ONNXMulOp:ONNX_Op<"Mul",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX Mul operation";
  let description = [{
  "Performs element-wise binary multiplication (with Numpy-style broadcasting support)."
  ""
  "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$A,
    AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$B);
  let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$C);
  let builders = [
    OpBuilder<"OpBuilder &builder, OperationState &state, Value A, Value B", [{
      auto lhsTy = A.getType();
      auto rhsTy = B.getType();
      auto elementType = getBroadcastedType(lhsTy, rhsTy);
      auto shapedType = elementType.dyn_cast_or_null<ShapedType>();
      if (!shapedType || !shapedType.hasStaticShape()) {
        elementType = A.getType().cast<TensorType>().getElementType();
        elementType = UnrankedTensorType::get(elementType);
      }
      build(builder, state, elementType, A, B);
    }]>,
    OpBuilder<"OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
      auto lhsTy = operands[0].getType();
      auto rhsTy = operands[1].getType();
      auto elementType = getBroadcastedType(lhsTy, rhsTy);
      auto shapedType = elementType.dyn_cast_or_null<ShapedType>();
      if (!shapedType || !shapedType.hasStaticShape()) {
        elementType = operands[0].getType().cast<TensorType>().getElementType();
        elementType = UnrankedTensorType::get(elementType);
      }
      std::vector<mlir::Type> outputTypes;
      outputTypes.emplace_back(elementType);
      build(builder, state, outputTypes, operands, attributes);
    }]>
  ];
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 2;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXMultinomialOp:ONNX_Op<"Multinomial",
  [NoSideEffect]> {
  let summary = "ONNX Multinomial operation";
  let description = [{
  "Generate a tensor of samples from a multinomial distribution according to the probabilities"
  "of each of the possible outcomes."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$input,
    DefaultValuedAttr<I64Attr, "6">:$dtype,
    DefaultValuedAttr<I64Attr, "1">:$sample_size,
    OptionalAttr<F32Attr>:$seed);
  let results = (outs AnyTypeOf<[TensorOf<[I32]>, TensorOf<[I64]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {-1};
    }
  }];
}

def ONNXNegOp:ONNX_Op<"Neg",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX Neg operation";
  let description = [{
  "Neg takes one input data (Tensor<T>) and produces one output data"
  "(Tensor<T>) where each element has its sign flipped: y = -x is applied to"
  "the tensor elementwise."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F32]>, TensorOf<[I32]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F64]>, AnyMemRef]>:$X);
  let results = (outs AnyTypeOf<[TensorOf<[F32]>, TensorOf<[I32]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F64]>, AnyMemRef]>:$Y);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXNonMaxSuppressionOp:ONNX_Op<"NonMaxSuppression",
  [NoSideEffect]> {
  let summary = "ONNX NonMaxSuppression operation";
  let description = [{
  "Filter out boxes that have high intersection-over-union (IOU) overlap with previously selected boxes."
  "Bounding boxes with score less than score_threshold are removed. Bounding box format is indicated by attribute center_point_box."
  "Note that this algorithm is agnostic to where the origin is in the coordinate system and more generally is invariant to"
  "orthogonal transformations and translations of the coordinate system; thus translations or reflections of the coordinate system"
  "result in the same boxes being selected by the algorithm."
  "The selected_indices output is a set of integers indexing into the input collection of bounding boxes representing the selected boxes."
  "The bounding box coordinates corresponding to the selected indices can then be obtained using the Gather or GatherND operation."
  }];
  let arguments = (ins TensorOf<[F32]>:$boxes,
    TensorOf<[F32]>:$scores,
    AnyTypeOf<[TensorOf<[I64]>, NoneType]>:$max_output_boxes_per_class,
    AnyTypeOf<[TensorOf<[F32]>, NoneType]>:$iou_threshold,
    AnyTypeOf<[TensorOf<[F32]>, NoneType]>:$score_threshold,
    DefaultValuedAttr<I64Attr, "0">:$center_point_box);
  let results = (outs TensorOf<[I64]>:$selected_indices);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 5;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {4};
    }
  }];
}

def ONNXNonZeroOp:ONNX_Op<"NonZero",
  [NoSideEffect]> {
  let summary = "ONNX NonZero operation";
  let description = [{
  "Returns the indices of the elements that are non-zero"
  "(in row-major order - by dimension)."
  "NonZero behaves similar to numpy.nonzero:"
  "https://docs.scipy.org/doc/numpy/reference/generated/numpy.nonzero.html"
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$X);
  let results = (outs TensorOf<[I64]>:$Y);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {4};
    }
  }];
}
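
// A hedged sketch of NonZero for a 2-D input, mirroring numpy.nonzero: the
// result holds one coordinate list per dimension, visited in row-major order.
// `nonZero2D` is an illustrative name, not part of this dialect.
#include <cstdint>
#include <vector>

static std::vector<std::vector<int64_t>> nonZero2D(
    const std::vector<float> &x, int64_t rows, int64_t cols) {
  std::vector<std::vector<int64_t>> idx(2); // idx[0]: row coords, idx[1]: col coords
  for (int64_t r = 0; r < rows; ++r)
    for (int64_t c = 0; c < cols; ++c)
      if (x[r * cols + c] != 0.0f) {
        idx[0].push_back(r);
        idx[1].push_back(c);
      }
  return idx;
}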

def ONNXNotOp:ONNX_Op<"Not",
  [NoSideEffect]> {
  let summary = "ONNX Not operation";
  let description = [{
  "Returns the negation of the input tensor element-wise."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[I1]>, AnyMemRef]>:$X);
  let results = (outs AnyTypeOf<[TensorOf<[I1]>, AnyMemRef]>:$Y);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 1;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {0};
    }
  }];
}

def ONNXOneHotOp:ONNX_Op<"OneHot",
  [NoSideEffect]> {
  let summary = "ONNX OneHot operation";
  let description = [{
  "Produces a one-hot tensor based on inputs."
  "The locations represented by the index values in the 'indices' input tensor will have 'on_value'"
  "and the other locations will have 'off_value' in the output tensor, where 'on_value' and 'off_value'"
  "are specified as part of required input argument 'values', which is a two-element tensor of format"
  "[off_value, on_value]. The rank of the output tensor will be one greater than the rank of the"
  "input tensor. The additional dimension is for one-hot representation. The additional dimension will"
  "be inserted at the position specified by 'axis'. If 'axis' is not specified then the additional"
  "dimension will be inserted as the innermost dimension, i.e. axis=-1. The size of the additional"
  "dimension is specified by required scalar input 'depth'. The type of the output tensor is the same"
  "as the type of the 'values' input. Any entries in the 'indices' input tensor with values outside"
  "the range [-depth, depth-1] will result in one-hot representation with all 'off_value' values in the"
  "output tensor."
  ""
  "when axis = 0:"
  "output[input[i, j, k], i, j, k] = 1 for all i, j, k and 0 otherwise."
  ""
  "when axis = -1:"
  "output[i, j, k, input[i, j, k]] = 1 for all i, j, k and 0 otherwise."
  ""
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$indices,
    AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$depth,
    AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$values,
    DefaultValuedAttr<I64Attr, "-1">:$axis);
  let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>, AnyMemRef]>:$output);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 3;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {22};
    }
  }];
}
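
// Minimal sketch of the default axis=-1 case described above: row i of the
// output gets on_value at column indices[i] and off_value elsewhere; indices
// outside [-depth, depth-1] leave the row all-off. Names are illustrative.
#include <cstddef>
#include <cstdint>
#include <vector>

static std::vector<float> oneHot(const std::vector<int64_t> &indices,
                                 int64_t depth, float offVal, float onVal) {
  std::vector<float> out(indices.size() * depth, offVal);
  for (std::size_t i = 0; i < indices.size(); ++i) {
    int64_t idx = indices[i];
    if (idx < 0)
      idx += depth; // negative indices count back from depth
    if (idx >= 0 && idx < depth)
      out[i * depth + idx] = onVal;
  }
  return out;
}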

def ONNXOrOp:ONNX_Op<"Or",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX Or operation";
  let description = [{
  "Returns the tensor resulting from performing the `or` logical operation"
  "elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support)."
  ""
  "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[I1]>, AnyMemRef]>:$A,
    AnyTypeOf<[TensorOf<[I1]>, AnyMemRef]>:$B);
  let results = (outs AnyTypeOf<[TensorOf<[I1]>, AnyMemRef]>:$C);
  let builders = [
    OpBuilder<"OpBuilder &builder, OperationState &state, Value A, Value B", [{
      auto lhsTy = A.getType();
      auto rhsTy = B.getType();
      auto elementType = getBroadcastedType(lhsTy, rhsTy);
      auto shapedType = elementType.dyn_cast_or_null<ShapedType>();
      if (!shapedType || !shapedType.hasStaticShape()) {
        elementType = A.getType().cast<TensorType>().getElementType();
        elementType = UnrankedTensorType::get(elementType);
      }
      build(builder, state, elementType, A, B);
    }]>,
    OpBuilder<"OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
      auto lhsTy = operands[0].getType();
      auto rhsTy = operands[1].getType();
      auto elementType = getBroadcastedType(lhsTy, rhsTy);
      auto shapedType = elementType.dyn_cast_or_null<ShapedType>();
      if (!shapedType || !shapedType.hasStaticShape()) {
        elementType = operands[0].getType().cast<TensorType>().getElementType();
        elementType = UnrankedTensorType::get(elementType);
      }
      std::vector<mlir::Type> outputTypes;
      outputTypes.emplace_back(elementType);
      build(builder, state, outputTypes, operands, attributes);
    }]>
  ];
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 2;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {0};
    }
  }];
}

def ONNXPReluOp:ONNX_Op<"PRelu",
  [NoSideEffect]> {
  let summary = "ONNX PRelu operation";
  let description = [{
  "PRelu takes input data (Tensor<T>) and slope tensor as input, and produces one"
  "output data (Tensor<T>) where the function `f(x) = slope * x for x < 0`,"
  "`f(x) = x for x >= 0`, is applied to the data tensor elementwise."
  "This operator supports **unidirectional broadcasting** (tensor slope should be unidirectional broadcastable to input tensor X); for more details please check [the doc](Broadcasting.md)."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, AnyMemRef]>:$X,
    AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, AnyMemRef]>:$slope);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, AnyMemRef]>:$Y);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 2;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXPadOp:ONNX_Op<"Pad",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>, OpInterface<"PromotableConstOperandsOpInterface">]> {
  let summary = "ONNX Pad operation";
  let description = [{
  "Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for each axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`,"
  "a padded tensor (`output`) is generated."
  ""
  "The three supported `modes` are (similar to corresponding modes supported by `numpy.pad`):"
  ""
  "1) `constant` (default) - pads with a given constant value as specified by `constant_value` (which defaults to 0)"
  ""
  "2) `reflect` - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis"
  ""
  "3) `edge` - pads with the edge values of the array"
  ""
  ""
  "Example 1 (`constant` mode):"
  "  Insert 0 pads to the beginning of the second dimension."
  ""
  "  data ="
  "  ["
  "      [1.0, 1.2],"
  "      [2.3, 3.4],"
  "      [4.5, 5.7],"
  "  ]"
  ""
  "  pads = [0, 2, 0, 0]"
  ""
  "  mode = 'constant'"
  ""
  "  constant_value = 0.0"
  ""
  "  output ="
  "  ["
  "      [0.0, 0.0, 1.0, 1.2],"
  "      [0.0, 0.0, 2.3, 3.4],"
  "      [0.0, 0.0, 4.5, 5.7],"
  "  ]"
  ""
  ""
  "Example 2 (`reflect` mode):"
  "  data ="
  "  ["
  "      [1.0, 1.2],"
  "      [2.3, 3.4],"
  "      [4.5, 5.7],"
  "  ]"
  ""
  "  pads = [0, 2, 0, 0]"
  ""
  "  mode = 'reflect'"
  ""
  "  output ="
  "  ["
  "      [1.0, 1.2, 1.0, 1.2],"
  "      [2.3, 3.4, 2.3, 3.4],"
  "      [4.5, 5.7, 4.5, 5.7],"
  "  ]"
  ""
  ""
  "Example 3 (`edge` mode):"
  "  data ="
  "  ["
  "      [1.0, 1.2],"
  "      [2.3, 3.4],"
  "      [4.5, 5.7],"
  "  ]"
  ""
  "  pads = [0, 2, 0, 0]"
  ""
  "  mode = 'edge'"
  ""
  "  output ="
  "  ["
  "      [1.0, 1.0, 1.0, 1.2],"
  "      [2.3, 2.3, 2.3, 3.4],"
  "      [4.5, 4.5, 4.5, 5.7],"
  "  ]"
  ""
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$data,
    AnyTypeOf<[TensorOf<[I64]>, NoneType]>:$pads,
    AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$constant_value,
    DefaultValuedAttr<StrAttr, "constant">:$mode);
  let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef, NoneType]>:$output);
  let builders = [
    OpBuilder<"OpBuilder &builder, OperationState &state, Value data, Value pads, Value constant_value, StringAttr mode", [{
      auto elementType = data.getType().cast<TensorType>().getElementType();
      build(builder, state, UnrankedTensorType::get(elementType), data, pads, constant_value, mode);
    }]>,
    OpBuilder<"OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
      auto elementType = operands[0].getType().cast<TensorType>().getElementType();
      std::vector<mlir::Type> outputTypes;
      outputTypes.emplace_back(UnrankedTensorType::get(elementType));
      build(builder, state, outputTypes, operands, attributes);
    }]>
  ];
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 3;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
    std::map<std::string, size_t> promotableConstOperands() {
      return {{"pads", 1}, {"constant_value", 2}};
    }
  }];
}

def ONNXPowOp:ONNX_Op<"Pow",
  [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
  let summary = "ONNX Pow operation";
  let description = [{
  "Pow takes input data (Tensor<T>) and exponent Tensor, and"
  "produces one output data (Tensor<T>) where the function `f(x) = x^exponent`,"
  "is applied to the data tensor elementwise."
  "This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md)."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$X,
    AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$Y);
  let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, AnyMemRef]>:$Z);
  let builders = [
    OpBuilder<"OpBuilder &builder, OperationState &state, Value X, Value Y", [{
      auto lhsTy = X.getType();
      auto rhsTy = Y.getType();
      auto elementType = getBroadcastedType(lhsTy, rhsTy);
      auto shapedType = elementType.dyn_cast_or_null<ShapedType>();
      if (!shapedType || !shapedType.hasStaticShape()) {
        elementType = X.getType().cast<TensorType>().getElementType();
        elementType = UnrankedTensorType::get(elementType);
      }
      build(builder, state, elementType, X, Y);
    }]>,
    OpBuilder<"OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes", [{
      auto lhsTy = operands[0].getType();
      auto rhsTy = operands[1].getType();
      auto elementType = getBroadcastedType(lhsTy, rhsTy);
      auto shapedType = elementType.dyn_cast_or_null<ShapedType>();
      if (!shapedType || !shapedType.hasStaticShape()) {
        elementType = operands[0].getType().cast<TensorType>().getElementType();
        elementType = UnrankedTensorType::get(elementType);
      }
      std::vector<mlir::Type> outputTypes;
      outputTypes.emplace_back(elementType);
      build(builder, state, outputTypes, operands, attributes);
    }]>
  ];
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 2;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {20};
    }
  }];
}

def ONNXQLinearConvOp:ONNX_Op<"QLinearConv",
  [NoSideEffect]> {
  let summary = "ONNX QLinearConv operation";
  let description = [{
  "The convolution operator consumes a quantized input tensor, its scale and zero point,"
  "a quantized filter, its scale and zero point, and the output's scale and zero point,"
  "and computes the quantized output. Each scale and zero-point pair must have the same shape,"
  "meaning they must be either scalars (per tensor) or 1-D tensors (per output channel)."
  "Each input or output and its related zero point must have the same type."
  }];
  let arguments = (ins AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, AnyMemRef]>:$x,
    TensorOf<[F32]>:$x_scale,
    AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, AnyMemRef]>:$x_zero_point,
    AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, AnyMemRef]>:$w,
    TensorOf<[F32]>:$w_scale,
    AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, AnyMemRef]>:$w_zero_point,
    TensorOf<[F32]>:$y_scale,
    AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, AnyMemRef]>:$y_zero_point,
    AnyTypeOf<[TensorOf<[I32]>, AnyMemRef, NoneType]>:$B,
    DefaultValuedAttr<StrAttr, "NOTSET">:$auto_pad,
    OptionalAttr<I64ArrayAttr>:$dilations,
    DefaultValuedAttr<I64Attr, "1">:$group,
    OptionalAttr<I64ArrayAttr>:$kernel_shape,
    OptionalAttr<I64ArrayAttr>:$pads,
    OptionalAttr<I64ArrayAttr>:$strides);
  let results = (outs AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, AnyMemRef]>:$y);
  let extraClassDeclaration = [{
    static int getNumberOfOperands() {
      return 9;
    }
    static int getNumberOfResults() {
      return 1;
    }
    static std::vector<int> getTypeMap() {
      return {1};
    }
  }];
}
2020-02-24 23:46:48 +08:00
def ONNXQLinearMatMulOp : ONNX_Op < " QLinearMatMul " ,
[ NoSideEffect ] > {
2019-11-19 10:08:21 +08:00
let summary = " ONNX QLinearMatMul operation " ;
let description = [{
" Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. "
" It consumes two quantized input tensors, their scales and zero points, the scale and zero point of the output, and computes the quantized output. "
" The quantization formula is y = saturate((x / y_scale) + y_zero_point). For (x / y_scale), it rounds to the nearest, ties to even. "
" Refer to https://en.wikipedia.org/wiki/Rounding for details. Scale and zero point must have the same shape. "
" They must be either scalar (per tensor) or 1-D tensors (per row for 'a' and per column for 'b'). If scale and zero point are 1-D tensors, "
" the number of elements of the scale and zero point tensors of input 'a' and output 'y' should be equal to the number of rows of input 'a', "
" and the number of elements of the scale and zero point tensor of input 'b' should be equal to the number of columns of input 'b'. "
" The product must never overflow, and the accumulation may overflow if it is performed in 32 bits. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ I8 ] > , TensorOf < [ UI8 ] > , AnyMemRef ] >: $a ,
TensorOf < [ F32 ] >: $a_scale ,
AnyTypeOf < [ TensorOf < [ I8 ] > , TensorOf < [ UI8 ] > , AnyMemRef ] >: $a_zero_point ,
AnyTypeOf < [ TensorOf < [ I8 ] > , TensorOf < [ UI8 ] > , AnyMemRef ] >: $b ,
TensorOf < [ F32 ] >: $b_scale ,
AnyTypeOf < [ TensorOf < [ I8 ] > , TensorOf < [ UI8 ] > , AnyMemRef ] >: $b_zero_point ,
TensorOf < [ F32 ] >: $y_scale ,
AnyTypeOf < [ TensorOf < [ I8 ] > , TensorOf < [ UI8 ] > , AnyMemRef ] >: $y_zero_point );
let results = ( outs AnyTypeOf < [ TensorOf < [ I8 ] > , TensorOf < [ UI8 ] > , AnyMemRef ] >: $y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 8 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 1 };
}
}];
}
def ONNXQuantizeLinearOp : ONNX_Op < " QuantizeLinear " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX QuantizeLinear operation " ;
let description = [{
" The linear per-tensor/layer quantization operator. It consumes a high-precision tensor, a scale, and a zero point to compute the low-precision / quantized tensor. "
" The quantization formula is y = saturate ((x / y_scale) + y_zero_point). For saturation, it saturates to [0, 255] if it's uint8, or [-128, 127] if it's int8. "
" For (x / y_scale), it rounds to the nearest, ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. 'y_zero_point' and 'y' must have the same type. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ I32 ] > , AnyMemRef ] >: $x ,
TensorOf < [ F32 ] >: $y_scale ,
AnyTypeOf < [ TensorOf < [ I8 ] > , TensorOf < [ UI8 ] > , AnyMemRef , NoneType ] >: $y_zero_point );
let results = ( outs AnyTypeOf < [ TensorOf < [ I8 ] > , TensorOf < [ UI8 ] > , AnyMemRef , NoneType ] >: $y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 3 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 1 };
}
}];
}
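// A minimal C++ sketch of the element-wise QuantizeLinear formula above,
// assuming uint8 output; `quantize_linear_u8` is an illustrative name, not
// dialect code.
#include <algorithm>
#include <cmath>
#include <cstdint>

// y = saturate(round(x / y_scale) + y_zero_point); std::nearbyint rounds to
// nearest, ties to even, under the default rounding mode; uint8 saturates
// to [0, 255].
inline uint8_t quantize_linear_u8(float x, float y_scale, uint8_t y_zero_point) {
  long y = static_cast<long>(std::nearbyint(x / y_scale)) + y_zero_point;
  return static_cast<uint8_t>(std::min(255L, std::max(0L, y)));
}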
def ONNXRNNOp : ONNX_Op < " RNN " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX RNN operation " ;
let description = [{
" Computes a one-layer simple RNN. This operator is usually supported "
" via some custom implementation such as CuDNN. "
" "
" Notations: "
" "
" `X` - input tensor "
" "
" `i` - input gate "
" "
" `t` - time step (t-1 means previous time step) "
" "
" `Wi` - W parameter weight matrix for input gate "
" "
" `Ri` - R recurrence weight matrix for input gate "
" "
" `Wbi` - W parameter bias vector for input gate "
" "
" `Rbi` - R parameter bias vector for input gate "
" "
" `WBi` - W parameter weight matrix for backward input gate "
" "
" `RBi` - R recurrence weight matrix for backward input gate "
" "
" `WBbi` - WR bias vectors for backward input gate "
" "
" `RBbi` - RR bias vectors for backward input gate "
" "
" `H` - Hidden state "
" "
" `num_directions` - 2 if direction == bidirectional else 1 "
" "
" Activation functions: "
" "
" Relu(x) - max(0, x) "
" "
" Tanh(x) - (1 - e^ { -2x})/(1 + e^ { -2x}) "
" "
" Sigmoid(x) - 1/(1 + e^ { -x}) "
" "
" (NOTE: Below are optional) "
" "
" Affine(x) - alpha*x + beta "
" "
" LeakyRelu(x) - x if x >= 0 else alpha * x "
" "
" ThresholdedRelu(x) - x if x >= alpha else 0 "
" "
" ScaledTanh(x) - alpha*Tanh(beta*x) "
" "
" HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) "
" "
" Elu(x) - x if x >= 0 else alpha*(e^x - 1) "
" "
" Softsign(x) - x/(1 + |x|) "
" "
" Softplus(x) - log(1 + e^x) "
" "
" Equations (Default: f=Tanh): "
" "
" - Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) "
" This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $W ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $R ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef , NoneType ] >: $B ,
AnyTypeOf < [ TensorOf < [ I32 ] > , AnyMemRef , NoneType ] >: $sequence_lens ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef , NoneType ] >: $initial_h ,
OptionalAttr < F32ArrayAttr >: $activation_alpha ,
OptionalAttr < F32ArrayAttr >: $activation_beta ,
DefaultValuedAttr < StrArrayAttr , " { \" Tanh \" , \" Tanh \" } " >: $activations ,
OptionalAttr < F32Attr >: $clip ,
DefaultValuedAttr < StrAttr , " forward " >: $direction ,
OptionalAttr < I64Attr >: $hidden_size );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef , NoneType ] >: $Y ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef , NoneType ] >: $Y_h );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 6 ;
}
static int getNumberOfResults () {
return 2 ;
}
static std :: vector < int > getTypeMap () {
return { 20 , 20 };
}
}];
}
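// A minimal C++ sketch (illustrative, not dialect code) of one forward time
// step of the default RNN equation above,
// Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi), with f = Tanh. Shapes:
// x_t is [input_size], h_prev is [hidden_size], Wi is
// [hidden_size][input_size], Ri is [hidden_size][hidden_size].
#include <cmath>
#include <vector>

std::vector<float> rnn_step(const std::vector<float> &x_t,
                            const std::vector<float> &h_prev,
                            const std::vector<std::vector<float>> &Wi,
                            const std::vector<std::vector<float>> &Ri,
                            const std::vector<float> &wbi,
                            const std::vector<float> &rbi) {
  std::vector<float> h_t(h_prev.size());
  for (size_t i = 0; i < h_prev.size(); ++i) {
    float acc = wbi[i] + rbi[i];
    for (size_t j = 0; j < x_t.size(); ++j)
      acc += Wi[i][j] * x_t[j]; // Xt * (Wi^T)
    for (size_t j = 0; j < h_prev.size(); ++j)
      acc += Ri[i][j] * h_prev[j]; // Ht-1 * (Ri^T)
    h_t[i] = std::tanh(acc);
  }
  return h_t;
}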
def ONNXRandomNormalOp : ONNX_Op < " RandomNormal " ,
[ NoSideEffect ] > {
let summary = " ONNX RandomNormal operation " ;
let description = [{
" Generate a tensor with random values drawn from a normal distribution. The shape "
" of the tensor is specified by the `shape` argument and the parameters of the normal distribution "
" by `mean` and `scale`. "
" "
" The data type is specified by the 'dtype' argument. The 'dtype' argument must "
" be one of the data types specified in the 'DataType' enum field in the "
" TensorProto message. "
}];
let arguments = ( ins DefaultValuedAttr < I64Attr , " 1 " >: $dtype ,
DefaultValuedAttr < F32Attr , " 0.0 " >: $mean ,
DefaultValuedAttr < F32Attr , " 1.0 " >: $scale ,
OptionalAttr < F32Attr >: $seed ,
I64ArrayAttr : $shape );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 0 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { - 1 };
}
}];
}
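// A C++ sketch of the sampling semantics described above (illustrative; the
// op itself carries no implementation). `scale` is the standard deviation;
// when the optional seed attribute is absent, the choice of seed is
// implementation-defined (a fixed default is used here).
#include <random>
#include <vector>

std::vector<float> random_normal(size_t count, float mean, float scale,
                                 unsigned seed = 0) {
  std::mt19937 gen(seed);
  std::normal_distribution<float> dist(mean, scale);
  std::vector<float> out(count);
  for (float &v : out) v = dist(gen);
  return out;
}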
def ONNXRandomNormalLikeOp : ONNX_Op < " RandomNormalLike " ,
[ NoSideEffect ] > {
let summary = " ONNX RandomNormalLike operation " ;
let description = [{
" Generate a tensor with random values drawn from a normal distribution. "
" The shape of the output tensor is copied from the shape of the input tensor, "
" and the parameters of the normal distribution are specified by `mean` and `scale`. "
" "
" The data type is specified by the 'dtype' argument, or copied from the input tensor if not provided. "
" The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the "
" TensorProto message, and be valid as an output type. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $input ,
OptionalAttr < I64Attr >: $dtype ,
DefaultValuedAttr < F32Attr , " 0.0 " >: $mean ,
DefaultValuedAttr < F32Attr , " 1.0 " >: $scale ,
OptionalAttr < F32Attr >: $seed );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { - 1 };
}
}];
}
def ONNXRandomUniformOp : ONNX_Op < " RandomUniform " ,
[ NoSideEffect ] > {
let summary = " ONNX RandomUniform operation " ;
let description = [{
" Generate a tensor with random values drawn from a uniform distribution. The shape "
" of the tensor is specified by the `shape` argument and the range by `low` and `high`. "
" "
" The data type is specified by the 'dtype' argument. The 'dtype' argument must "
" be one of the data types specified in the 'DataType' enum field in the "
" TensorProto message. "
}];
let arguments = ( ins DefaultValuedAttr < I64Attr , " 1 " >: $dtype ,
DefaultValuedAttr < F32Attr , " 1.0 " >: $high ,
DefaultValuedAttr < F32Attr , " 0.0 " >: $low ,
OptionalAttr < F32Attr >: $seed ,
I64ArrayAttr : $shape );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 0 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { - 1 };
}
}];
}
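// The uniform counterpart of the sketch after RandomNormal above: values are
// drawn from [low, high). Illustrative only.
#include <random>
#include <vector>

std::vector<float> random_uniform(size_t count, float low, float high,
                                  unsigned seed = 0) {
  std::mt19937 gen(seed);
  std::uniform_real_distribution<float> dist(low, high);
  std::vector<float> out(count);
  for (float &v : out) v = dist(gen);
  return out;
}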
def ONNXRandomUniformLikeOp : ONNX_Op < " RandomUniformLike " ,
[ NoSideEffect ] > {
let summary = " ONNX RandomUniformLike operation " ;
let description = [{
" Generate a tensor with random values drawn from a uniform distribution. "
" The shape of the output tensor is copied from the shape of the input tensor, "
" and the parameters of the uniform distribution are specified by `low` and `high`. "
" "
" The data type is specified by the 'dtype' argument, or copied from the input tensor if not provided. "
" The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the "
" TensorProto message and be valid as an output type. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $input ,
OptionalAttr < I64Attr >: $dtype ,
DefaultValuedAttr < F32Attr , " 1.0 " >: $high ,
DefaultValuedAttr < F32Attr , " 0.0 " >: $low ,
OptionalAttr < F32Attr >: $seed );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { - 1 };
}
}];
}
def ONNXRangeOp : ONNX_Op < " Range " ,
[ NoSideEffect ] > {
let summary = " ONNX Range operation " ;
let description = [{
" Generate a tensor containing a sequence of numbers that begins at `start` and extends by increments of `delta` "
" up to `limit` (exclusive). "
" "
" The number of elements in the output of range is computed as below: "
" "
" `number_of_elements = max( ceil( (limit - start) / delta ) , 0 )` "
" "
" The pseudocode determining the contents of the output is shown below: "
" "
" `for(int i=0; i<number_of_elements; ++i)` "
" "
" ` { ` "
" "
" ` output[i] = start + (i * delta); ` "
" "
" `}` "
" "
" `Example 1` "
" Inputs: start = 3, limit = 9, delta = 3 "
" Output: [3, 6] "
" "
" `Example 2` "
" Inputs: start = 10, limit = 4, delta = -2 "
" Output: [10, 8, 6] "
" "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $start ,
AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $limit ,
AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $delta );
let results = ( outs AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 3 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
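// A C++ sketch of the Range pseudocode above; with start = 10, limit = 4,
// delta = -2 it yields [10, 8, 6] as in Example 2. Illustrative only.
#include <algorithm>
#include <cmath>
#include <vector>

std::vector<float> make_range(float start, float limit, float delta) {
  // number_of_elements = max(ceil((limit - start) / delta), 0)
  long n = std::max(0L, static_cast<long>(std::ceil((limit - start) / delta)));
  std::vector<float> out(static_cast<size_t>(n));
  for (long i = 0; i < n; ++i)
    out[static_cast<size_t>(i)] = start + static_cast<float>(i) * delta;
  return out;
}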
def ONNXReciprocalOp : ONNX_Op < " Reciprocal " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Reciprocal operation " ;
let description = [{
" Reciprocal takes one input data (Tensor<T>) and produces one output data "
" (Tensor<T>) where the reciprocal, y = 1/x, is applied to "
" the tensor elementwise. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXReduceL1Op : ONNX_Op < " ReduceL1 " ,
[ NoSideEffect ] > {
let summary = " ONNX ReduceL1 operation " ;
let description = [{
" Computes the L1 norm of the input tensor's elements along the provided axes. The resulting "
" tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then "
" the resulting tensor has the reduced dimensions pruned. "
" "
" The above behavior is similar to numpy, with the exception that numpy defaults keepdims to "
" False instead of True. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $data ,
OptionalAttr < I64ArrayAttr >: $axes ,
DefaultValuedAttr < I64Attr , " 1 " >: $keepdims );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $reduced );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
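// The keepdims rule above is shared by all Reduce* ops in this file; a small
// C++ sketch of the resulting shape (assumes axes are already non-negative;
// `reducedShape` is an illustrative helper, not dialect code).
#include <algorithm>
#include <cstdint>
#include <vector>

std::vector<int64_t> reducedShape(const std::vector<int64_t> &inShape,
                                  const std::vector<int64_t> &axes,
                                  bool keepdims) {
  std::vector<int64_t> outShape;
  for (int64_t d = 0; d < static_cast<int64_t>(inShape.size()); ++d) {
    bool reduced = std::find(axes.begin(), axes.end(), d) != axes.end();
    if (!reduced)
      outShape.push_back(inShape[d]); // untouched dimension
    else if (keepdims)
      outShape.push_back(1); // reduced dimension kept with extent 1
    // keepdims == 0: reduced dimension is pruned
  }
  return outShape;
}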
def ONNXReduceL2Op : ONNX_Op < " ReduceL2 " ,
[ NoSideEffect ] > {
let summary = " ONNX ReduceL2 operation " ;
let description = [{
" Computes the L2 norm of the input tensor's elements along the provided axes. The resulting "
" tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then "
" the resulting tensor has the reduced dimensions pruned. "
" "
" The above behavior is similar to numpy, with the exception that numpy defaults keepdims to "
" False instead of True. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $data ,
OptionalAttr < I64ArrayAttr >: $axes ,
DefaultValuedAttr < I64Attr , " 1 " >: $keepdims );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $reduced );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXReduceLogSumOp : ONNX_Op < " ReduceLogSum " ,
[ NoSideEffect ] > {
let summary = " ONNX ReduceLogSum operation " ;
let description = [{
" Computes the log sum of the input tensor's elements along the provided axes. The resulting "
" tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then "
" the resulting tensor has the reduced dimensions pruned. "
" "
" The above behavior is similar to numpy, with the exception that numpy defaults keepdims to "
" False instead of True. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $data ,
OptionalAttr < I64ArrayAttr >: $axes ,
DefaultValuedAttr < I64Attr , " 1 " >: $keepdims );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $reduced );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXReduceLogSumExpOp : ONNX_Op < " ReduceLogSumExp " ,
[ NoSideEffect ] > {
let summary = " ONNX ReduceLogSumExp operation " ;
let description = [{
" Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting "
" tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then "
" the resulting tensor has the reduced dimensions pruned. "
" "
" The above behavior is similar to numpy, with the exception that numpy defaults keepdims to "
" False instead of True. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $data ,
OptionalAttr < I64ArrayAttr >: $axes ,
DefaultValuedAttr < I64Attr , " 1 " >: $keepdims );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $reduced );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXReduceMaxOp : ONNX_Op < " ReduceMax " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX ReduceMax operation " ;
let description = [{
" Computes the max of the input tensor's elements along the provided axes. The resulting "
" tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then "
" the resulting tensor has the reduced dimensions pruned. "
" "
" The above behavior is similar to numpy, with the exception that numpy defaults keepdims to "
" False instead of True. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $data ,
OptionalAttr < I64ArrayAttr >: $axes ,
DefaultValuedAttr < I64Attr , " 1 " >: $keepdims );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $reduced );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXReduceMeanOp : ONNX_Op < " ReduceMean " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX ReduceMean operation " ;
let description = [{
" Computes the mean of the input tensor's elements along the provided axes. The resulting "
" tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then "
" the resulting tensor has the reduced dimensions pruned. "
" "
" The above behavior is similar to numpy, with the exception that numpy defaults keepdims to "
" False instead of True. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $data ,
OptionalAttr < I64ArrayAttr >: $axes ,
DefaultValuedAttr < I64Attr , " 1 " >: $keepdims );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $reduced );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXReduceMinOp : ONNX_Op < " ReduceMin " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX ReduceMin operation " ;
let description = [{
" Computes the min of the input tensor's elements along the provided axes. The resulting "
" tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then "
" the resulting tensor has the reduced dimensions pruned. "
" "
" The above behavior is similar to numpy, with the exception that numpy defaults keepdims to "
" False instead of True. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $data ,
OptionalAttr < I64ArrayAttr >: $axes ,
DefaultValuedAttr < I64Attr , " 1 " >: $keepdims );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $reduced );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXReduceProdOp : ONNX_Op < " ReduceProd " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX ReduceProd operation " ;
let description = [{
" Computes the product of the input tensor's elements along the provided axes. The resulting "
" tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then "
" the resulting tensor has the reduced dimensions pruned. "
" "
" The above behavior is similar to numpy, with the exception that numpy defaults keepdims to "
" False instead of True. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $data ,
OptionalAttr < I64ArrayAttr >: $axes ,
DefaultValuedAttr < I64Attr , " 1 " >: $keepdims );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $reduced );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXReduceSumOp : ONNX_Op < " ReduceSum " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX ReduceSum operation " ;
let description = [{
" Computes the sum of the input tensor's elements along the provided axes. The resulting "
" tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then "
" the resulting tensor has the reduced dimensions pruned. "
" "
" The above behavior is similar to numpy, with the exception that numpy defaults keepdims to "
" False instead of True. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $data ,
OptionalAttr < I64ArrayAttr >: $axes ,
DefaultValuedAttr < I64Attr , " 1 " >: $keepdims );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $reduced );
let builders = [
OpBuilder < " OpBuilder &builder, OperationState &state, Value data, ArrayAttr axes, IntegerAttr keepdims " , [{
auto elementType = data . getType () . cast < TensorType > () . getElementType ();
build ( builder , state , UnrankedTensorType :: get ( elementType ), data , axes , keepdims );
}] > ,
OpBuilder < " OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes " , [{
auto elementType = operands [ 0 ] . getType () . cast < TensorType > () . getElementType ();
std :: vector < mlir :: Type > outputTypes ;
outputTypes . emplace_back ( UnrankedTensorType :: get ( elementType ));
build ( builder , state , outputTypes , operands , attributes );
}] >
];
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXReduceSumSquareOp : ONNX_Op < " ReduceSumSquare " ,
[ NoSideEffect ] > {
let summary = " ONNX ReduceSumSquare operation " ;
let description = [{
" Computes the sum square of the input tensor's elements along the provided axes. The resulting "
" tensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then "
" the resulting tensor has the reduced dimensions pruned. "
" "
" The above behavior is similar to numpy, with the exception that numpy defaults keepdims to "
" False instead of True. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $data ,
OptionalAttr < I64ArrayAttr >: $axes ,
DefaultValuedAttr < I64Attr , " 1 " >: $keepdims );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $reduced );
let builders = [
OpBuilder < " OpBuilder &builder, OperationState &state, Value data, ArrayAttr axes, IntegerAttr keepdims " , [{
auto elementType = data . getType () . cast < TensorType > () . getElementType ();
build ( builder , state , UnrankedTensorType :: get ( elementType ), data , axes , keepdims );
}] > ,
OpBuilder < " OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes " , [{
auto elementType = operands [ 0 ] . getType () . cast < TensorType > () . getElementType ();
std :: vector < mlir :: Type > outputTypes ;
outputTypes . emplace_back ( UnrankedTensorType :: get ( elementType ));
build ( builder , state , outputTypes , operands , attributes );
}] >
];
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXReluOp : ONNX_Op < " Relu " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Relu operation " ;
let description = [{
" Relu takes one input data (Tensor<T>) and produces one output data "
" (Tensor<T>) where the rectified linear function, y = max(0, x), is applied to "
" the tensor elementwise. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXReshapeOp : ONNX_Op < " Reshape " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > , OpInterface < " PromotableConstOperandsOpInterface " > ] > {
let summary = " ONNX Reshape operation " ;
let description = [{
" Reshape the input tensor similar to numpy.reshape. "
" First input is the data tensor, second input is a shape tensor which specifies the output shape. It outputs the reshaped tensor. "
" At most one dimension of the new shape can be -1. In this case, the value is "
" inferred from the size of the tensor and the remaining dimensions. A dimension "
" could also be 0, in which case the actual dimension value is unchanged (i.e. taken "
" from the input tensor). "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $data ,
AnyTypeOf < [ TensorOf < [ I64 ] > , NoneType ] >: $shape );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $reshaped );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 2 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
std :: map < std :: string , size_t > promotableConstOperands () {
return {{ " shape " , 1 }};
}
}];
}
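// A C++ sketch of Reshape's -1 / 0 rules above (no error handling; assumes at
// most one -1 and a well-formed request). Illustrative only.
#include <cstdint>
#include <vector>

std::vector<int64_t> resolveReshape(const std::vector<int64_t> &inShape,
                                    std::vector<int64_t> shape) {
  int64_t total = 1;
  for (int64_t d : inShape) total *= d;
  int64_t known = 1, inferredIdx = -1;
  for (size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] == 0)
      shape[i] = inShape[i]; // 0 copies the input dimension
    if (shape[i] == -1)
      inferredIdx = static_cast<int64_t>(i); // to be inferred below
    else
      known *= shape[i];
  }
  if (inferredIdx >= 0)
    shape[static_cast<size_t>(inferredIdx)] = total / known; // infer the -1
  return shape;
}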
def ONNXResizeOp : ONNX_Op < " Resize " ,
[ NoSideEffect ] > {
let summary = " ONNX Resize operation " ;
let description = [{
" Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of a neighborhood (a.k.a. sampling locations) in the input tensor. "
" Each dimension value of the output tensor is: "
" output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) if input \"sizes\" is not specified. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $X ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $roi ,
TensorOf < [ F32 ] >: $scales ,
AnyTypeOf < [ TensorOf < [ I64 ] > , NoneType ] >: $sizes ,
DefaultValuedAttr < StrAttr , " half_pixel " >: $coordinate_transformation_mode ,
DefaultValuedAttr < F32Attr , " -0.75 " >: $cubic_coeff_a ,
DefaultValuedAttr < I64Attr , " 0 " >: $exclude_outside ,
DefaultValuedAttr < F32Attr , " 0.0 " >: $extrapolation_value ,
DefaultValuedAttr < StrAttr , " nearest " >: $mode ,
DefaultValuedAttr < StrAttr , " round_prefer_floor " >: $nearest_mode );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 4 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
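// The Resize dimension formula above as a one-line C++ sketch; for the
// default roi of [0, 1] per axis it reduces to floor(input_dimension * scale).
// Illustrative only.
#include <cmath>
#include <cstdint>

inline int64_t resizedDim(int64_t inputDim, float roiStart, float roiEnd,
                          float scale) {
  return static_cast<int64_t>(
      std::floor(static_cast<float>(inputDim) * (roiEnd - roiStart) * scale));
}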
def ONNXReverseSequenceOp : ONNX_Op < " ReverseSequence " ,
[ NoSideEffect ] > {
let summary = " ONNX ReverseSequence operation " ;
let description = [{
" Reverse batch of sequences having different lengths specified by `sequence_lens`. "
" "
" For each slice i iterating on the batch axis, the operator reverses the first sequence_lens[i] elements on the time axis, "
" and copies elements whose indices are beyond sequence_lens[i] to the output. So the output slice i contains reversed "
" sequences for the first sequence_lens[i] elements, and the original values copied for the other elements. "
" "
" Example 1: "
" input = [[0.0, 4.0, 8.0, 12.0], "
" [1.0, 5.0, 9.0, 13.0], "
" [2.0, 6.0, 10.0, 14.0], "
" [3.0, 7.0, 11.0, 15.0]] "
" sequence_lens = [4, 3, 2, 1] "
" time_axis = 0 "
" batch_axis = 1 "
" "
" output = [[3.0, 6.0, 9.0, 12.0], "
" [2.0, 5.0, 8.0, 13.0], "
" [1.0, 4.0, 10.0, 14.0], "
" [0.0, 7.0, 11.0, 15.0]] "
" "
" Example 2: "
" input = [[0.0, 1.0, 2.0, 3.0 ], "
" [4.0, 5.0, 6.0, 7.0 ], "
" [8.0, 9.0, 10.0, 11.0], "
" [12.0, 13.0, 14.0, 15.0]] "
" sequence_lens = [1, 2, 3, 4] "
" time_axis = 1 "
" batch_axis = 0 "
" "
" output = [[0.0, 1.0, 2.0, 3.0 ], "
" [5.0, 4.0, 6.0, 7.0 ], "
" [10.0, 9.0, 8.0, 11.0], "
" [15.0, 14.0, 13.0, 12.0]] "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $input ,
TensorOf < [ I64 ] >: $sequence_lens ,
DefaultValuedAttr < I64Attr , " 1 " >: $batch_axis ,
DefaultValuedAttr < I64Attr , " 0 " >: $time_axis );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 2 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
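// A C++ sketch matching Example 1 above (time_axis = 0, batch_axis = 1):
// rows index time, columns index batch, and only the first sequence_lens[b]
// entries of each column are reversed. Illustrative only.
#include <algorithm>
#include <vector>

void reverseSequence(std::vector<std::vector<float>> &x,
                     const std::vector<int> &sequence_lens) {
  for (size_t b = 0; b < sequence_lens.size(); ++b) {
    int len = sequence_lens[b];
    for (int t = 0; t < len / 2; ++t)
      std::swap(x[static_cast<size_t>(t)][b],
                x[static_cast<size_t>(len - 1 - t)][b]);
  }
}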
def ONNXRoiAlignOp : ONNX_Op < " RoiAlign " ,
[ NoSideEffect ] > {
let summary = " ONNX RoiAlign operation " ;
let description = [{
" Region of Interest (RoI) align operation described in the "
" [Mask R-CNN paper](https://arxiv.org/abs/1703.06870). "
" RoiAlign consumes an input tensor X and region of interests (rois) "
" to apply pooling across each RoI; it produces a 4-D tensor of shape "
" (num_rois, C, output_height, output_width). "
" "
" RoiAlign is proposed to avoid the misalignment by removing "
" quantizations while converting from original image into feature "
" map and from feature map into RoI feature; in each ROI bin, "
" the value of the sampled locations are computed directly "
" through bilinear interpolation. "
2019-11-19 10:08:21 +08:00
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X ,
AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $rois ,
AnyTypeOf < [ TensorOf < [ I64 ] > , AnyMemRef ] >: $batch_indices ,
DefaultValuedAttr < StrAttr , " avg " >: $mode ,
DefaultValuedAttr < I64Attr , " 1 " >: $output_height ,
DefaultValuedAttr < I64Attr , " 1 " >: $output_width ,
DefaultValuedAttr < I64Attr , " 0 " >: $sampling_ratio ,
DefaultValuedAttr < F32Attr , " 1.0 " >: $spatial_scale );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 3 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXRoundOp : ONNX_Op < " Round " ,
[ NoSideEffect ] > {
let summary = " ONNX Round operation " ;
let description = [{
" Round takes one input Tensor and rounds the values, element-wise, meaning "
" it finds the nearest integer for each value. "
" In case of halves, the rule is to round them to the nearest even integer. "
" The output tensor has the same shape and type as the input. "
" "
" Examples: "
" ``` "
" round([0.9]) = [1.0] "
" round([2.5]) = [2.0] "
" round([2.3]) = [2.0] "
" round([1.5]) = [2.0] "
" round([-4.5]) = [-4.0] "
" ``` "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
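// The examples above are round-to-nearest with ties to even; in C++ this is
// std::nearbyint under the default FE_TONEAREST mode: 2.5 -> 2.0, 1.5 -> 2.0,
// -4.5 -> -4.0. Illustrative only.
#include <cmath>

inline float onnxRound(float x) { return std::nearbyint(x); }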
def ONNXScanOp : ONNX_Op < " Scan " ,
[ NoSideEffect ] > {
let summary = " ONNX Scan operation " ;
let description = [{
" Scan can be used to iterate over one or more scan_input tensors, "
" constructing zero or more scan_output tensors. It combines ideas from general recurrences, "
" functional programming constructs such as scan, fold, map, and zip, and is intended to enable "
" generalizations of RNN-like constructs for sequence-to-sequence processing. "
" Other tensors (referred to as state_variables here) can be used to carry a state "
" when iterating from one element to another (similar to hidden-state in RNNs, also referred "
" to as loop-carried dependences in the context of loops). "
" Many common usages involve a single scan_input tensor (where functionality "
" similar to scan, fold and map can be obtained). When more than one scan_input is used, "
" a behavior similar to zip is obtained. "
" "
" The attribute body must be a graph, specifying the computation to be performed in "
" every iteration. It takes as input the current values of the state_variables and "
" the current iterated element of the scan_inputs. It must return the (updated) values "
" of the state_variables and zero or more scan_output_element tensors. The values of the "
" scan_output_element tensors are concatenated over all the iterations to produce the "
" scan_output values of the scan construct (similar to the concatenated intermediate "
" hidden-state values of RNN-like constructs). All the output tensors (state_variables as "
" well as scan_output_element tensors) are required to have the same shape in each iteration "
" of the loop (a restriction imposed to enable efficient memory allocation). "
" "
" Note that the iterated element passed to the body subgraph does not have a sequence "
" axis. It will have a rank one less than the rank of the corresponding scan_input. "
" "
" The scan operation returns the final values of the state_variables as well as the "
" scan_outputs. "
" "
" The optional attribute scan_input_directions specifies the direction (forward or backward) "
" for each scan input. If this attribute is omitted, all sequences are scanned in the forward "
" direction. A bidirectional scan may be performed by specifying the same tensor input twice "
" in the scan_inputs, once with a forward direction, and once with a backward direction. "
" "
" The scan_output of the operation is produced by concatenating the scan_output_element "
" values produced by the body in each iteration. The optional attribute scan_output_directions "
" specifies the direction in which scan_output is constructed (by appending or prepending the "
" scan_output_element to scan_output in each iteration) for each scan_output. If this attribute "
" is omitted, the scan_output_element is appended to the scan_output in each iteration. "
" "
" The optional attribute scan_input_axes specifies the axis to be scanned for each scan_input. "
" If omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the "
" batch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1. "
" Note that scanning a non-zero axis may be less efficient than scanning axis zero. "
" "
" The optional attribute scan_output_axes specifies the axis along which the scan_outputs "
" are accumulated for each scan_output. For example, if axis 1 is the time axis (to be "
" scanned) for both inputs and outputs, specify a scan_input axis and scan_output axis "
" value of 1. "
" "
" Note that because of the ONNX restriction that only the last parameter of an operator can "
" be variadic, the initial-states and scan-inputs are listed together as one input parameter. "
" Similarly, the final-states and scan-outputs are listed together as one output parameter. "
" The attribute num_scan_inputs indicates the number M of scan-inputs. "
" "
" The behavior of "
" "
" Scan < "
" num_scan_inputs = m, "
" body = loop-body, "
" scan_input_axes = [axis_1, ..., axis_m] "
" > (init_1, ..., init_n, scan_1, ..., scan_m) "
" "
" is equivalent to the following pseudo-code: "
" "
" // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i "
" // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j. "
" sequence_length = scan_1.shape[axis_1]; "
" "
" // initialize state-variables "
" st_1 = init_1; ... st_n = init_n; "
" // initialize scan-output variables: [] denotes an empty tensor "
" scan_out_1 = []; ...; scan_out_k = []; "
" // identify number of iterations: "
" "
" // execute loop "
" for (int t = 0; t < sequence_length; ++t) { "
" // generate the scan-input elements: the notation T<axis=k>[t] indicates the sub-tensor "
" // of rank one less than T obtained by indexing T at position t along axis k. "
" si_1 = scan_1<axis=axis_1>[t]; "
" ... ; "
" si_m = scan_m<axis=axis_m>[t]; "
" // execute loop-body "
" st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m) "
" // accumulate the scan-output elements "
" scan_out_1 = Concat<axis=0>(scan_out_1, so_1); ... ; scan_out_k = Concat<axis=0>(scan_out_k, so_k); "
" } "
" "
" return st_1, ..., st_n, scan_out_1, ..., scan_out_k; "
" "
" *Sample usage: Encoding RNN using a Scan* "
" "
" The following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi, "
" recurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can "
" be encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes "
" %Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these "
" values are computed in the outer graph, they need to be passed in as extra state_variables. "
" "
" graph rnn-encoding { "
" %H_0 = ... "
" %X = ... "
" %Y_h, %Y = Scan[body = <graph rnn-cell-1>, num_scan_inputs=1](%H_0, %X) "
" return %Y, %Y_h "
" } "
" "
" graph rnn-cell-1 ( "
" %H_tminus1[FLOAT, tensor] "
" %X_t[FLOAT, tensor] "
" ) { "
" %Wi = ... "
" %Ri = ... "
" %Wbi = ... "
" %Rbi = ... "
" %t1 = X_t * (Wi^T) "
" %t2 = H_tminus1*(Ri^T) "
" %t3 = Add(%t1, %t2) "
" %t4 = Add(%t3, %Wbi) "
" %t5 = Add(%t4, %Rbi) "
" %Ht = Tanh(%t5) "
" %Accumulate = Identity(%Ht) "
" return %Ht, %Accumulate "
" } "
" "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $initial_state_and_scan_inputs ,
AnyAttr : $body ,
I64Attr : $num_scan_inputs ,
OptionalAttr < I64ArrayAttr >: $scan_input_axes ,
OptionalAttr < I64ArrayAttr >: $scan_input_directions ,
OptionalAttr < I64ArrayAttr >: $scan_output_axes ,
OptionalAttr < I64ArrayAttr >: $scan_output_directions );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $final_state_and_scan_outputs );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return - 1 ;
}
static int getNumberOfResults () {
return - 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXScatterOp : ONNX_Op < " Scatter " ,
[ NoSideEffect ] > {
let summary = " ONNX Scatter operation " ;
let description = [{
" This operator is deprecated. Please use ScatterElements, which provides the same functionality. "
" "
" Scatter takes three inputs `data`, `updates`, and `indices` of the same "
" rank r >= 1 and an optional attribute axis that identifies an axis of `data` "
" (by default, the outer-most axis, that is axis 0). The output of the operation "
" is produced by creating a copy of the input `data`, and then updating its value "
" to values specified by `updates` at specific index positions specified by "
" `indices`. Its output shape is the same as the shape of `data`. "
" "
" For each entry in `updates`, the target index in `data` is obtained by combining "
" the corresponding entry in `indices` with the index of the entry itself: the "
" index-value for dimension = axis is obtained from the value of the corresponding "
" entry in `indices` and the index-value for dimension != axis is obtained from the "
" index of the entry itself. "
" "
" For instance, in a 2-D tensor case, the update corresponding to the [i][j] entry "
" is performed as below: "
" ``` "
" output[indices[i][j]][j] = updates[i][j] if axis = 0, "
" output[i][indices[i][j]] = updates[i][j] if axis = 1, "
" ``` "
" "
" This operator is the inverse of GatherElements. It is similar to Torch's Scatter operation. "
" "
" Example 1: "
" ``` "
" data = [ "
" [0.0, 0.0, 0.0], "
" [0.0, 0.0, 0.0], "
" [0.0, 0.0, 0.0], "
" ] "
" indices = [ "
" [1, 0, 2], "
" [0, 2, 1], "
" ] "
" updates = [ "
" [1.0, 1.1, 1.2], "
" [2.0, 2.1, 2.2], "
" ] "
" output = [ "
" [2.0, 1.1, 0.0] "
" [1.0, 0.0, 2.2] "
" [0.0, 2.1, 1.2] "
" ] "
" ``` "
" Example 2: "
" ``` "
" data = [[1.0, 2.0, 3.0, 4.0, 5.0]] "
" indices = [[1, 3]] "
" updates = [[1.1, 2.1]] "
" axis = 1 "
" output = [[1.0, 1.1, 3.0, 2.1, 5.0]] "
" ``` "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $data ,
AnyTypeOf < [ TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $indices ,
AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $updates ,
DefaultValuedAttr < I64Attr , " 0 " >: $axis );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 3 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
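// A 2-D, axis = 0 C++ sketch of the Scatter update rule above,
// output[indices[i][j]][j] = updates[i][j]; running it on Example 1's inputs
// reproduces the listed output. Illustrative only.
#include <vector>

std::vector<std::vector<float>>
scatterAxis0(std::vector<std::vector<float>> data,
             const std::vector<std::vector<int>> &indices,
             const std::vector<std::vector<float>> &updates) {
  for (size_t i = 0; i < indices.size(); ++i)
    for (size_t j = 0; j < indices[i].size(); ++j)
      data[static_cast<size_t>(indices[i][j])][j] = updates[i][j];
  return data;
}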
def ONNXScatterElementsOp : ONNX_Op < " ScatterElements " ,
[ NoSideEffect ] > {
let summary = " ONNX ScatterElements operation " ;
let description = [{
" ScatterElements takes three inputs `data`, `updates`, and `indices` of the same "
" rank r >= 1 and an optional attribute axis that identifies an axis of `data` "
" (by default, the outer-most axis, that is axis 0). The output of the operation "
" is produced by creating a copy of the input `data`, and then updating its value "
" to values specified by `updates` at specific index positions specified by "
" `indices`. Its output shape is the same as the shape of `data`. "
" "
" For each entry in `updates`, the target index in `data` is obtained by combining "
" the corresponding entry in `indices` with the index of the entry itself: the "
" index-value for dimension = axis is obtained from the value of the corresponding "
" entry in `indices` and the index-value for dimension != axis is obtained from the "
" index of the entry itself. "
" "
" For instance, in a 2-D tensor case, the update corresponding to the [i][j] entry "
" is performed as below: "
" ``` "
" output[indices[i][j]][j] = updates[i][j] if axis = 0, "
" output[i][indices[i][j]] = updates[i][j] if axis = 1, "
" ``` "
" "
" This operator is the inverse of GatherElements. It is similar to Torch's Scatter operation. "
" "
" Example 1: "
" ``` "
" data = [ "
" [0.0, 0.0, 0.0], "
" [0.0, 0.0, 0.0], "
" [0.0, 0.0, 0.0], "
" ] "
" indices = [ "
" [1, 0, 2], "
" [0, 2, 1], "
" ] "
" updates = [ "
" [1.0, 1.1, 1.2], "
" [2.0, 2.1, 2.2], "
" ] "
" output = [ "
" [2.0, 1.1, 0.0] "
" [1.0, 0.0, 2.2] "
" [0.0, 2.1, 1.2] "
" ] "
" ``` "
" Example 2: "
" ``` "
" data = [[1.0, 2.0, 3.0, 4.0, 5.0]] "
" indices = [[1, 3]] "
" updates = [[1.1, 2.1]] "
" axis = 1 "
" output = [[1.0, 1.1, 3.0, 2.1, 5.0]] "
" ``` "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $data ,
AnyTypeOf < [ TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $indices ,
AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $updates ,
DefaultValuedAttr < I64Attr , " 0 " >: $axis );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 3 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
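// The 2-D update rule quoted above generalizes to any rank; the following is a
// minimal NumPy sketch of the rank-2 case that reproduces Example 1. The helper
// name is illustrative only and is not part of the generated dialect.
```
import numpy as np

def scatter_elements_2d(data, indices, updates, axis=0):
    # Copy the input; only positions named by `indices` change.
    output = np.copy(data)
    for i in range(indices.shape[0]):
        for j in range(indices.shape[1]):
            if axis == 0:
                output[indices[i][j]][j] = updates[i][j]
            else:
                output[i][indices[i][j]] = updates[i][j]
    return output

data = np.zeros((3, 3), dtype=np.float32)
indices = np.array([[1, 0, 2], [0, 2, 1]])
updates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)
print(scatter_elements_2d(data, indices, updates, axis=0))
# [[2.  1.1 0. ]
#  [1.  0.  2.2]
#  [0.  2.1 1.2]]
```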
def ONNXScatterNDOp : ONNX_Op < " ScatterND " ,
[ NoSideEffect ] > {
let summary = " ONNX ScatterND operation " ;
let description = [{
" ScatterND takes three inputs `data` tensor of rank r >= 1, `indices` tensor of rank q >= 1, "
" and `updates` tensor of rank q + r - indices.shape[-1] - 1. The output of the operation "
" is produced by creating a copy of the input `data`, and then updating its value to values "
" specified by `updates` at specific index positions specified by `indices`. Its output shape "
" is the same as the shape of `data`. Note that `indices` should not have duplicate entries. "
" That is, two or more `updates` for the same index-location is not supported. "
" "
" `indices` is an integer tensor. Let k denote indices.shape[-1], the last dimension in the shape of `indices`. "
" `indices` is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into `data`. "
" Hence, k can be a value at most the rank of `data`. When k equals rank(data), each update entry specifies an "
" update to a single element of the tensor. When k is less than rank(data) each update entry specifies an "
" update to a slice of the tensor. "
" "
" `updates` is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the "
" first (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape. "
" The remaining dimensions of `updates` correspond to the dimensions of the "
" replacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor, "
" corresponding to the trailing (r-k) dimensions of `data`. Thus, the shape of `updates` "
" must equal indices.shape[0:q-1] ++ data.shape[k:r-1], where ++ denotes the concatenation "
" of shapes. "
" "
" The `output` is calculated via the following equation: "
" "
" output = np.copy(data) "
" update_indices = indices.shape[:-1] "
" for idx in np.ndindex(update_indices): "
" output[indices[idx]] = updates[idx] "
" "
" The order of iteration in the above loop is not specified. "
" In particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2]. "
" This ensures that the output value does not depend on the iteration order. "
" "
" This operator is the inverse of GatherND. "
" "
" Example 1: "
" ``` "
" data = [1, 2, 3, 4, 5, 6, 7, 8] "
" indices = [[4], [3], [1], [7]] "
" updates = [9, 10, 11, 12] "
" output = [1, 11, 3, 10, 9, 6, 7, 12] "
" ``` "
" "
" Example 2: "
" ``` "
" data = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], "
" [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], "
" [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]], "
" [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] "
" indices = [[0], [2]] "
" updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], "
" [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]] "
" output = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], "
" [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], "
" [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]], "
" [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] "
" ``` "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $data ,
TensorOf < [ I64 ] >: $indices ,
AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $updates );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 3 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
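// The update equation in the description translates almost directly to NumPy;
// a hedged sketch reproducing Example 1 (function name is illustrative).
```
import numpy as np

def scatter_nd(data, indices, updates):
    # Each k-tuple in `indices` is a partial index into `data`; the
    # matching entry of `updates` replaces that element or slice.
    output = np.copy(data)
    update_indices = indices.shape[:-1]
    for idx in np.ndindex(update_indices):
        output[tuple(indices[idx])] = updates[idx]
    return output

data = np.array([1, 2, 3, 4, 5, 6, 7, 8])
indices = np.array([[4], [3], [1], [7]])
updates = np.array([9, 10, 11, 12])
print(scatter_nd(data, indices, updates))  # [ 1 11  3 10  9  6  7 12]
```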
def ONNXSeluOp : ONNX_Op < " Selu " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Selu operation " ;
let description = [{
" Selu takes one input data (Tensor<T>) and produces one output data "
" (Tensor<T>) where the scaled exponential linear unit function, "
" `y = gamma * (alpha * e^x - alpha) for x <= 0`, `y = gamma * x for x > 0`, "
" is applied to the tensor elementwise. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X ,
DefaultValuedAttr < F32Attr , " 1.67326 " >: $alpha ,
DefaultValuedAttr < F32Attr , " 1.0507 " >: $gamma );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
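// The Selu formula above, written out as a short NumPy sketch using the
// default attribute values shown in the op definition.
```
import numpy as np

def selu(x, alpha=1.67326, gamma=1.0507):
    # y = gamma * (alpha * e^x - alpha) for x <= 0, y = gamma * x for x > 0
    return np.where(x > 0, gamma * x, gamma * (alpha * np.exp(x) - alpha))

print(selu(np.array([-1.0, 0.0, 2.0])))
```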
def ONNXSequenceAtOp : ONNX_Op < " SequenceAt " ,
[ NoSideEffect ] > {
let summary = " ONNX SequenceAt operation " ;
let description = [{
" Outputs a tensor copy from the tensor at 'position' in 'input_sequence'. "
" Accepted range for 'position' is in `[-n, n - 1]`, where `n` is the number of tensors in 'input_sequence'. "
" Negative value means counting positions from the back. "
}];
let arguments = ( ins AnyTypeOf < [ SeqOf < [ TensorOf < [ UI8 ] > ] > , SeqOf < [ TensorOf < [ UI16 ] > ] > , SeqOf < [ TensorOf < [ UI32 ] > ] > , SeqOf < [ TensorOf < [ UI64 ] > ] > , SeqOf < [ TensorOf < [ I8 ] > ] > , SeqOf < [ TensorOf < [ I16 ] > ] > , SeqOf < [ TensorOf < [ I32 ] > ] > , SeqOf < [ TensorOf < [ I64 ] > ] > , SeqOf < [ TensorOf < [ F16 ] > ] > , SeqOf < [ TensorOf < [ F32 ] > ] > , SeqOf < [ TensorOf < [ F64 ] > ] > , SeqOf < [ TensorOf < [ StringType ] > ] > , SeqOf < [ TensorOf < [ I1 ] > ] > , SeqOf < [ TensorOf < [ Complex < F32 > ] > ] > , SeqOf < [ TensorOf < [ Complex < F64 > ] > ] > , AnyMemRef ] >: $input_sequence ,
AnyTypeOf < [ TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $position );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $tensor );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 2 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { - 1 };
}
}];
}
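// The accepted range for 'position' matches ordinary Python indexing; a small
// sketch of the lookup, under the assumption that a plain Python list stands
// in for the tensor sequence.
```
def sequence_at(input_sequence, position):
    n = len(input_sequence)
    # Accepted range is [-n, n - 1]; a negative value counts from the back.
    assert -n <= position <= n - 1, "position out of range"
    return input_sequence[position]

seq = ["t0", "t1", "t2"]
print(sequence_at(seq, -1))  # "t2"
```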
def ONNXSequenceConstructOp : ONNX_Op < " SequenceConstruct " ,
[ NoSideEffect ] > {
let summary = " ONNX SequenceConstruct operation " ;
let description = [{
" Construct a tensor sequence containing 'inputs' tensors. "
" All tensors in 'inputs' must have the same data type. "
}];
let arguments = ( ins Variadic < AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >>: $inputs );
let results = ( outs AnyTypeOf < [ SeqOf < [ TensorOf < [ UI8 ] > ] > , SeqOf < [ TensorOf < [ UI16 ] > ] > , SeqOf < [ TensorOf < [ UI32 ] > ] > , SeqOf < [ TensorOf < [ UI64 ] > ] > , SeqOf < [ TensorOf < [ I8 ] > ] > , SeqOf < [ TensorOf < [ I16 ] > ] > , SeqOf < [ TensorOf < [ I32 ] > ] > , SeqOf < [ TensorOf < [ I64 ] > ] > , SeqOf < [ TensorOf < [ F16 ] > ] > , SeqOf < [ TensorOf < [ F32 ] > ] > , SeqOf < [ TensorOf < [ F64 ] > ] > , SeqOf < [ TensorOf < [ StringType ] > ] > , SeqOf < [ TensorOf < [ I1 ] > ] > , SeqOf < [ TensorOf < [ Complex < F32 > ] > ] > , SeqOf < [ TensorOf < [ Complex < F64 > ] > ] > , AnyMemRef ] >: $output_sequence );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return - 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { - 1 };
}
}];
}

def ONNXSequenceEmptyOp : ONNX_Op < " SequenceEmpty " ,
[ NoSideEffect ] > {
let summary = " ONNX SequenceEmpty operation " ;
let description = [{
" Construct an empty tensor sequence, with given data type. "
}];
let arguments = ( ins OptionalAttr < I64Attr >: $dtype );
let results = ( outs AnyTypeOf < [ SeqOf < [ TensorOf < [ UI8 ] > ] > , SeqOf < [ TensorOf < [ UI16 ] > ] > , SeqOf < [ TensorOf < [ UI32 ] > ] > , SeqOf < [ TensorOf < [ UI64 ] > ] > , SeqOf < [ TensorOf < [ I8 ] > ] > , SeqOf < [ TensorOf < [ I16 ] > ] > , SeqOf < [ TensorOf < [ I32 ] > ] > , SeqOf < [ TensorOf < [ I64 ] > ] > , SeqOf < [ TensorOf < [ F16 ] > ] > , SeqOf < [ TensorOf < [ F32 ] > ] > , SeqOf < [ TensorOf < [ F64 ] > ] > , SeqOf < [ TensorOf < [ StringType ] > ] > , SeqOf < [ TensorOf < [ I1 ] > ] > , SeqOf < [ TensorOf < [ Complex < F32 > ] > ] > , SeqOf < [ TensorOf < [ Complex < F64 > ] > ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 0 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { - 1 };
}
}];
}

def ONNXSequenceEraseOp : ONNX_Op < " SequenceErase " ,
[ NoSideEffect ] > {
let summary = " ONNX SequenceErase operation " ;
let description = [{
" Outputs a tensor sequence that removes the tensor at 'position' from 'input_sequence'. "
" Accepted range for 'position' is in `[-n, n - 1]`, where `n` is the number of tensors in 'input_sequence'. "
" Negative value means counting positions from the back. "
" 'position' is optional, by default it erases the last tensor from 'input_sequence'. "
}];
let arguments = ( ins AnyTypeOf < [ SeqOf < [ TensorOf < [ UI8 ] > ] > , SeqOf < [ TensorOf < [ UI16 ] > ] > , SeqOf < [ TensorOf < [ UI32 ] > ] > , SeqOf < [ TensorOf < [ UI64 ] > ] > , SeqOf < [ TensorOf < [ I8 ] > ] > , SeqOf < [ TensorOf < [ I16 ] > ] > , SeqOf < [ TensorOf < [ I32 ] > ] > , SeqOf < [ TensorOf < [ I64 ] > ] > , SeqOf < [ TensorOf < [ F16 ] > ] > , SeqOf < [ TensorOf < [ F32 ] > ] > , SeqOf < [ TensorOf < [ F64 ] > ] > , SeqOf < [ TensorOf < [ StringType ] > ] > , SeqOf < [ TensorOf < [ I1 ] > ] > , SeqOf < [ TensorOf < [ Complex < F32 > ] > ] > , SeqOf < [ TensorOf < [ Complex < F64 > ] > ] > , AnyMemRef ] >: $input_sequence ,
AnyTypeOf < [ TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef , NoneType ] >: $position );
let results = ( outs AnyTypeOf < [ SeqOf < [ TensorOf < [ UI8 ] > ] > , SeqOf < [ TensorOf < [ UI16 ] > ] > , SeqOf < [ TensorOf < [ UI32 ] > ] > , SeqOf < [ TensorOf < [ UI64 ] > ] > , SeqOf < [ TensorOf < [ I8 ] > ] > , SeqOf < [ TensorOf < [ I16 ] > ] > , SeqOf < [ TensorOf < [ I32 ] > ] > , SeqOf < [ TensorOf < [ I64 ] > ] > , SeqOf < [ TensorOf < [ F16 ] > ] > , SeqOf < [ TensorOf < [ F32 ] > ] > , SeqOf < [ TensorOf < [ F64 ] > ] > , SeqOf < [ TensorOf < [ StringType ] > ] > , SeqOf < [ TensorOf < [ I1 ] > ] > , SeqOf < [ TensorOf < [ Complex < F32 > ] > ] > , SeqOf < [ TensorOf < [ Complex < F64 > ] > ] > , AnyMemRef ] >: $output_sequence );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 2 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}

def ONNXSequenceInsertOp : ONNX_Op < " SequenceInsert " ,
[ NoSideEffect ] > {
let summary = " ONNX SequenceInsert operation " ;
let description = [{
" Outputs a tensor sequence that inserts 'tensor' into 'input_sequence' at 'position'. "
" 'tensor' must have the same data type as 'input_sequence'. "
" Accepted range for 'position' is in `[-n, n]`, where `n` is the number of tensors in 'input_sequence'. "
" Negative value means counting positions from the back. "
" 'position' is optional, by default it inserts 'tensor' to the back of 'input_sequence'. "
}];
let arguments = ( ins AnyTypeOf < [ SeqOf < [ TensorOf < [ UI8 ] > ] > , SeqOf < [ TensorOf < [ UI16 ] > ] > , SeqOf < [ TensorOf < [ UI32 ] > ] > , SeqOf < [ TensorOf < [ UI64 ] > ] > , SeqOf < [ TensorOf < [ I8 ] > ] > , SeqOf < [ TensorOf < [ I16 ] > ] > , SeqOf < [ TensorOf < [ I32 ] > ] > , SeqOf < [ TensorOf < [ I64 ] > ] > , SeqOf < [ TensorOf < [ F16 ] > ] > , SeqOf < [ TensorOf < [ F32 ] > ] > , SeqOf < [ TensorOf < [ F64 ] > ] > , SeqOf < [ TensorOf < [ StringType ] > ] > , SeqOf < [ TensorOf < [ I1 ] > ] > , SeqOf < [ TensorOf < [ Complex < F32 > ] > ] > , SeqOf < [ TensorOf < [ Complex < F64 > ] > ] > , AnyMemRef ] >: $input_sequence ,
AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $tensor ,
AnyTypeOf < [ TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef , NoneType ] >: $position );
let results = ( outs AnyTypeOf < [ SeqOf < [ TensorOf < [ UI8 ] > ] > , SeqOf < [ TensorOf < [ UI16 ] > ] > , SeqOf < [ TensorOf < [ UI32 ] > ] > , SeqOf < [ TensorOf < [ UI64 ] > ] > , SeqOf < [ TensorOf < [ I8 ] > ] > , SeqOf < [ TensorOf < [ I16 ] > ] > , SeqOf < [ TensorOf < [ I32 ] > ] > , SeqOf < [ TensorOf < [ I64 ] > ] > , SeqOf < [ TensorOf < [ F16 ] > ] > , SeqOf < [ TensorOf < [ F32 ] > ] > , SeqOf < [ TensorOf < [ F64 ] > ] > , SeqOf < [ TensorOf < [ StringType ] > ] > , SeqOf < [ TensorOf < [ I1 ] > ] > , SeqOf < [ TensorOf < [ Complex < F32 > ] > ] > , SeqOf < [ TensorOf < [ Complex < F64 > ] > ] > , AnyMemRef ] >: $output_sequence );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 3 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}

def ONNXSequenceLengthOp : ONNX_Op < " SequenceLength " ,
[ NoSideEffect ] > {
let summary = " ONNX SequenceLength operation " ;
let description = [{
" Produces a scalar(tensor of empty shape) containing the number of tensors in 'input_sequence'. "
}];
let arguments = ( ins AnyTypeOf < [ SeqOf < [ TensorOf < [ UI8 ] > ] > , SeqOf < [ TensorOf < [ UI16 ] > ] > , SeqOf < [ TensorOf < [ UI32 ] > ] > , SeqOf < [ TensorOf < [ UI64 ] > ] > , SeqOf < [ TensorOf < [ I8 ] > ] > , SeqOf < [ TensorOf < [ I16 ] > ] > , SeqOf < [ TensorOf < [ I32 ] > ] > , SeqOf < [ TensorOf < [ I64 ] > ] > , SeqOf < [ TensorOf < [ F16 ] > ] > , SeqOf < [ TensorOf < [ F32 ] > ] > , SeqOf < [ TensorOf < [ F64 ] > ] > , SeqOf < [ TensorOf < [ StringType ] > ] > , SeqOf < [ TensorOf < [ I1 ] > ] > , SeqOf < [ TensorOf < [ Complex < F32 > ] > ] > , SeqOf < [ TensorOf < [ Complex < F64 > ] > ] > , AnyMemRef ] >: $input_sequence );
let results = ( outs AnyTypeOf < [ TensorOf < [ I64 ] > , AnyMemRef ] >: $length );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 4 };
}
}];
}

def ONNXShapeOp : ONNX_Op < " Shape " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Shape operation " ;
let description = [{
" Takes a tensor as input and outputs an 1D int64 tensor containing the shape of the input tensor. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $data );
let results = ( outs AnyTypeOf < [ TensorOf < [ I64 ] > , AnyMemRef ] >: $shape );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 4 };
}
}];
}

def ONNXShrinkOp : ONNX_Op < " Shrink " ,
[ NoSideEffect ] > {
let summary = " ONNX Shrink operation " ;
let description = [{
" Shrink takes one input data (Tensor<numeric>) and produces one Tensor output, "
" having same datatype and shape with input. It has two attributes, lambd and "
" bias. The formula of this operator is: If x < -lambd, y = x + bias; "
" If x > lambd, y = x - bias; Otherwise, y = 0. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $input ,
DefaultValuedAttr < F32Attr , " 0.0 " >: $bias ,
DefaultValuedAttr < F32Attr , " 0.5 " >: $lambd );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
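// The piecewise formula in the description, as a short NumPy sketch using the
// default attribute values lambd=0.5 and bias=0.0.
```
import numpy as np

def shrink(x, bias=0.0, lambd=0.5):
    # If x < -lambd: y = x + bias; if x > lambd: y = x - bias; otherwise 0.
    return np.where(x < -lambd, x + bias, np.where(x > lambd, x - bias, 0.0))

print(shrink(np.array([-2.0, -0.25, 0.25, 2.0])))  # [-2.  0.  0.  2.]
```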
def ONNXSigmoidOp : ONNX_Op < " Sigmoid " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Sigmoid operation " ;
let description = [{
" Sigmoid takes one input data (Tensor<T>) and produces one output data "
" (Tensor<T>) where the sigmoid function, y = 1 / (1 + exp(-x)), is applied to the "
" tensor elementwise. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}

def ONNXSignOp : ONNX_Op < " Sign " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Sign operation " ;
let description = [{
" Calculate the sign of the given input tensor element-wise. "
" If input > 0, output 1. if input < 0, output -1. if input == 0, output 0. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $input );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}

def ONNXSinOp : ONNX_Op < " Sin " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Sin operation " ;
let description = [{
" Calculates the sine of the given input tensor, element-wise. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $input );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}

def ONNXSinhOp : ONNX_Op < " Sinh " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Sinh operation " ;
let description = [{
" Calculates the hyperbolic sine of the given input tensor element-wise. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $input );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}

def ONNXSizeOp : ONNX_Op < " Size " ,
[ NoSideEffect ] > {
let summary = " ONNX Size operation " ;
let description = [{
" Takes a tensor as input and outputs a int64 scalar that equals to the total number of elements of the input tensor. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $data );
let results = ( outs AnyTypeOf < [ TensorOf < [ I64 ] > , AnyMemRef ] >: $size );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 4 };
}
}];
}

def ONNXSliceOp : ONNX_Op < " Slice " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Slice operation " ;
let description = [{
" Produces a slice of the input tensor along multiple axes. Similar to numpy: "
" https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html "
" Slices uses `starts`, `ends`, `axes` and `steps` inputs to specify the start and end "
" dimension and step for each axis in the list of axes, it uses this information to "
" slice the input `data` tensor. If a negative value is passed for any of the "
" start or end indices, it represent number of elements before the end of that "
" dimension. If the value passed to start or end is larger than the `n` (the "
" number of elements in this dimension), it represents `n`. For slicing to the "
" end of a dimension with unknown size, it is recommended to pass in `INT_MAX`. "
" If a negative value is passed for step, it represents slicing backward. "
" If `axes` are omitted, they are set to `[0, ..., ndim-1]`. "
" If `steps` are omitted, they are set to `[1, ..., 1]` of length `len(starts)` "
" Example 1: "
" data = [ "
" [1, 2, 3, 4], "
" [5, 6, 7, 8], "
" ] "
" axes = [0, 1] "
" starts = [1, 0] "
" ends = [2, 3] "
" steps = [1, 2] "
" result = [ "
" [5, 7], "
" ] "
" Example 2: "
" data = [ "
" [1, 2, 3, 4], "
" [5, 6, 7, 8], "
" ] "
" starts = [0, 1] "
" ends = [-1, 1000] "
" result = [ "
" [2, 3, 4], "
" ] "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $data ,
AnyTypeOf < [ TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $starts ,
AnyTypeOf < [ TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $ends ,
AnyTypeOf < [ TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef , NoneType ] >: $axes ,
AnyTypeOf < [ TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef , NoneType ] >: $steps );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 5 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
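// The starts/ends/axes/steps inputs map onto ordinary NumPy slice objects; a
// minimal sketch reproducing Example 1 above (the helper name is illustrative).
```
import numpy as np

def onnx_slice(data, starts, ends, axes=None, steps=None):
    # Build one slice object per axis; unlisted axes take the full range.
    if axes is None:
        axes = list(range(data.ndim))
    if steps is None:
        steps = [1] * len(starts)
    slices = [slice(None)] * data.ndim
    for start, end, axis, step in zip(starts, ends, axes, steps):
        slices[axis] = slice(start, end, step)
    return data[tuple(slices)]

data = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
print(onnx_slice(data, starts=[1, 0], ends=[2, 3], axes=[0, 1], steps=[1, 2]))
# [[5 7]]
```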
def ONNXSoftmaxOp : ONNX_Op < " Softmax " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Softmax operation " ;
let description = [{
" The operator computes the softmax (normalized exponential) values for each layer in the batch "
" of the given input. "
" "
" The input does not need to explicitly be a 2D vector; rather, it will be "
" coerced into one. For an arbitrary n-dimensional tensor "
" input \ in [a_0, a_1, ..., a_ { k-1}, a_k, ..., a_ { n-1 \ } \ ] and k is "
" the axis provided, then input will be coerced into a 2-dimensional tensor with "
" dimensions [a_0 * ... * a_ { k-1}, a_k * ... * a_ { n-1 \ } \ ]. For the default "
" case where axis=1, this means the input tensor will be coerced into a 2D tensor "
" of dimensions [a_0, a_1 * ... * a_ { n-1 \ } \ ], where a_0 is often the batch size. "
" In this situation, we must have a_0 = N and a_1 * ... * a_ { n-1} = D. "
" Each of these dimensions must be matched correctly, or else the operator "
" will throw errors. The output tensor has the same shape "
" and contains the softmax values of the corresponding input. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $input ,
DefaultValuedAttr < I64Attr , " 1 " >: $axis );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
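// The 2-D coercion described above can be sketched in NumPy; this mirrors the
// description only, not any particular runtime implementation.
```
import numpy as np

def onnx_softmax(x, axis=1):
    # Coerce to 2-D: [a_0 * ... * a_{axis-1}, a_axis * ... * a_{n-1}].
    shape = x.shape
    n = int(np.prod(shape[:axis]))
    coerced = x.reshape(n, -1)
    e = np.exp(coerced - coerced.max(axis=1, keepdims=True))  # numerically stable exp
    return (e / e.sum(axis=1, keepdims=True)).reshape(shape)

print(onnx_softmax(np.array([[1.0, 2.0, 3.0]])))
```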
def ONNXSoftplusOp : ONNX_Op < " Softplus " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Softplus operation " ;
let description = [{
" Softplus takes one input data (Tensor<T>) and produces one output data "
" (Tensor<T>) where the softplus function, y = ln(exp(x) + 1), is applied to "
" the tensor elementwise. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}

def ONNXSoftsignOp : ONNX_Op < " Softsign " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Softsign operation " ;
let description = [{
" Calculates the softsign (x/(1+|x|)) of the given input tensor element-wise. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $input );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}

def ONNXSpaceToDepthOp : ONNX_Op < " SpaceToDepth " ,
[ NoSideEffect ] > {
let summary = " ONNX SpaceToDepth operation " ;
let description = [{
" SpaceToDepth rearranges blocks of spatial data into depth. More specifically, "
" this op outputs a copy of the input tensor where values from the height and width dimensions "
" are moved to the depth dimension. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $input ,
I64Attr : $blocksize );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}

def ONNXSplitOp : ONNX_Op < " Split " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Split operation " ;
let description = [{
" Split a tensor into a list of tensors, along the specified "
" 'axis'. Lengths of the parts can be specified using argument 'split'. "
" Otherwise, the tensor is split to equal sized parts. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $input ,
DefaultValuedAttr < I64Attr , " 0 " >: $axis ,
OptionalAttr < I64ArrayAttr >: $split );
let results = ( outs Variadic < AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >>: $outputs );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return - 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
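// A hedged NumPy sketch of the split rule: equal parts when 'split' is
// omitted, otherwise the given lengths along 'axis'. The num_outputs
// parameter is an assumption standing in for the op's result count.
```
import numpy as np

def onnx_split(x, axis=0, split=None, num_outputs=2):
    if split is None:
        # Equal-sized parts when lengths are not given.
        return np.split(x, num_outputs, axis=axis)
    # Otherwise cut at the running sums of the requested lengths.
    return np.split(x, np.cumsum(split)[:-1], axis=axis)

x = np.arange(6)
print(onnx_split(x, split=[2, 4]))  # [array([0, 1]), array([2, 3, 4, 5])]
```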
def ONNXSplitToSequenceOp : ONNX_Op < " SplitToSequence " ,
[ NoSideEffect ] > {
let summary = " ONNX SplitToSequence operation " ;
let description = [{
" Split a tensor into a sequence of tensors, along the specified "
" 'axis'. Lengths of the parts can be specified using argument 'split'. "
" 'split' must contain only positive numbers. "
" 'split' is either a scalar (tensor of empty shape), or a 1-D tensor. "
" If 'split' is a scalar, then 'input' will be split into equally sized chunks(if possible). "
" Last chunk will be smaller if the 'input' size along the given axis 'axis' is not divisible "
" by 'split'. "
" Otherwise, the tensor is split into 'size(split)' chunks, with lengths of the parts on 'axis' "
" specified in 'split'. In this scenario, the sum of entries in 'split' must be equal to the "
" dimension size of input tensor on 'axis'. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $input ,
AnyTypeOf < [ TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef , NoneType ] >: $split ,
DefaultValuedAttr < I64Attr , " 0 " >: $axis ,
DefaultValuedAttr < I64Attr , " 1 " >: $keepdims );
let results = ( outs AnyTypeOf < [ SeqOf < [ TensorOf < [ UI8 ] > ] > , SeqOf < [ TensorOf < [ UI16 ] > ] > , SeqOf < [ TensorOf < [ UI32 ] > ] > , SeqOf < [ TensorOf < [ UI64 ] > ] > , SeqOf < [ TensorOf < [ I8 ] > ] > , SeqOf < [ TensorOf < [ I16 ] > ] > , SeqOf < [ TensorOf < [ I32 ] > ] > , SeqOf < [ TensorOf < [ I64 ] > ] > , SeqOf < [ TensorOf < [ F16 ] > ] > , SeqOf < [ TensorOf < [ F32 ] > ] > , SeqOf < [ TensorOf < [ F64 ] > ] > , SeqOf < [ TensorOf < [ StringType ] > ] > , SeqOf < [ TensorOf < [ I1 ] > ] > , SeqOf < [ TensorOf < [ Complex < F32 > ] > ] > , SeqOf < [ TensorOf < [ Complex < F64 > ] > ] > , AnyMemRef ] >: $output_sequence );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 2 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { - 1 };
}
}];
}
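// SplitToSequence follows the same cutting rule but yields a sequence, and a
// scalar 'split' allows a smaller trailing chunk. Sketch under the assumption
// that a Python list models the output sequence; the keepdims behavior for
// the default case is omitted.
```
import numpy as np

def split_to_sequence(x, axis=0, split=None):
    size = x.shape[axis]
    if split is None:
        # Default: split into `size` chunks of length 1 along `axis`.
        split = [1] * size
    if np.isscalar(split):
        # Scalar split: equal chunks, the last one may be smaller.
        cuts = list(range(split, size, split))
    else:
        cuts = list(np.cumsum(split)[:-1])
    return np.split(x, cuts, axis=axis)

print(split_to_sequence(np.arange(5), split=2))
# [array([0, 1]), array([2, 3]), array([4])]
```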
def ONNXSqrtOp : ONNX_Op < " Sqrt " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Sqrt operation " ;
let description = [{
" Square root takes one input data (Tensor<T>) and produces one output data "
" (Tensor<T>) where the square root is, y = x^0.5, is applied to "
" the tensor elementwise. If x is negative, then it will return NaN. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}

def ONNXSqueezeOp : ONNX_Op < " Squeeze " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Squeeze operation " ;
let description = [{
" Remove single-dimensional entries from the shape of a tensor. "
" Takes a parameter `axes` with a list of axes to squeeze. "
" If `axes` is not provided, all the single dimensions will be removed from "
" the shape. If an axis is selected with shape entry not equal to one, an error is raised. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $data ,
OptionalAttr < I64ArrayAttr >: $axes );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $squeezed );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}

def ONNXStringNormalizerOp : ONNX_Op < " StringNormalizer " ,
[ NoSideEffect ] > {
let summary = " ONNX StringNormalizer operation " ;
let description = [{
" StringNormalization performs string operations for basic cleaning. "
" This operator has only one input (denoted by X) and only one output "
" (denoted by Y). This operator first examines the elements in the X, "
" and removes elements specified in \" stopwords \" attribute. "
" After removing stop words, the intermediate result can be further lowercased, "
" uppercased, or just returned depending the \" case_change_action \" attribute. "
" This operator only accepts [C]- and [1, C]-tensor. "
" If all elements in X are dropped, the output will be the empty value of string tensor with shape [1] "
" if input shape is [C] and shape [1, 1] if input shape is [1, C]. "
}];
let arguments = ( ins TensorOf < [ StringType ] >: $X ,
DefaultValuedAttr < StrAttr , " NONE " >: $case_change_action ,
DefaultValuedAttr < I64Attr , " 0 " >: $is_case_sensitive ,
OptionalAttr < StrAttr >: $locale ,
OptionalAttr < StrArrayAttr >: $stopwords );
let results = ( outs TensorOf < [ StringType ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 11 };
}
}];
}
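// A rough Python sketch of the cleaning steps described above (stopword
// removal, then optional case change); the handling of an all-dropped result
// follows the note in the description. All names here are illustrative.
```
def string_normalize(x, stopwords=(), case_change_action="NONE",
                     is_case_sensitive=False):
    # Drop stopwords, honoring case sensitivity.
    if is_case_sensitive:
        kept = [s for s in x if s not in stopwords]
    else:
        lowered = {w.lower() for w in stopwords}
        kept = [s for s in x if s.lower() not in lowered]
    if case_change_action == "LOWER":
        kept = [s.lower() for s in kept]
    elif case_change_action == "UPPER":
        kept = [s.upper() for s in kept]
    # If everything was dropped, return a single empty string ([1]-shaped).
    return kept if kept else [""]

print(string_normalize(["The", "cat"], stopwords=["the"]))  # ['cat']
```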
def ONNXSubOp : ONNX_Op < " Sub " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Sub operation " ;
let description = [{
" Performs element-wise binary subtraction (with Numpy-style broadcasting support). "
" "
" This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $A ,
AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $B );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $C );
let builders = [
OpBuilder < " OpBuilder &builder, OperationState &state, Value A, Value B " , [{
auto lhsTy = A . getType ();
auto rhsTy = B . getType ();
auto elementType = getBroadcastedType ( lhsTy , rhsTy );
auto shapedType = elementType . dyn_cast_or_null < ShapedType > ();
if ( ! shapedType || ! shapedType . hasStaticShape ()) {
elementType = A . getType () . cast < TensorType > () . getElementType ();
elementType = UnrankedTensorType :: get ( elementType );
}
build ( builder , state , elementType , A , B );
}] > ,
OpBuilder < " OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes " , [{
auto lhsTy = operands [ 0 ] . getType ();
auto rhsTy = operands [ 1 ] . getType ();
auto elementType = getBroadcastedType ( lhsTy , rhsTy );
auto shapedType = elementType . dyn_cast_or_null < ShapedType > ();
if ( ! shapedType || ! shapedType . hasStaticShape ()) {
elementType = operands [ 0 ] . getType () . cast < TensorType > () . getElementType ();
elementType = UnrankedTensorType :: get ( elementType );
}
std :: vector < mlir :: Type > outputTypes ;
outputTypes . emplace_back ( elementType );
build ( builder , state , outputTypes , operands , attributes );
}] >
];
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 2 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}

def ONNXSumOp : ONNX_Op < " Sum " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Sum operation " ;
let description = [{
" Element-wise sum of each of the input tensors (with Numpy-style broadcasting support). "
" All inputs and outputs must have the same data type. "
" This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). "
}];
let arguments = ( ins Variadic < AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >>: $data_0 );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $sum );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return - 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}

def ONNXTanOp : ONNX_Op < " Tan " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Tan operation " ;
let description = [{
" Calculates the tangent of the given input tensor, element-wise. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $input );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}

def ONNXTanhOp : ONNX_Op < " Tanh " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Tanh operation " ;
let description = [{
" Calculates the hyperbolic tangent of the given input tensor element-wise. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $input );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}

def ONNXTfIdfVectorizerOp : ONNX_Op < " TfIdfVectorizer " ,
[ NoSideEffect ] > {
let summary = " ONNX TfIdfVectorizer operation " ;
let description = [{
" This transform extracts n-grams from the input sequence and save them as a vector. Input can "
" be either a 1-D or 2-D tensor. For 1-D input, output is the n-gram representation of that input. "
" For 2-D input, the output is also a 2-D tensor whose i-th row is the n-gram representation of the i-th input row. "
" More specifically, if input shape is [C], the corresponding output shape would be [max(ngram_indexes) + 1]. "
" If input shape is [N, C], this operator produces a [N, max(ngram_indexes) + 1]-tensor. "
" "
" In contrast to standard n-gram extraction, here, the indexes of extracting an n-gram from the original "
" sequence are not necessarily consecutive numbers. The discontinuity between indexes are controlled by the number of skips. "
" If the number of skips is 2, we should skip two tokens when scanning through the original sequence. "
" Let's consider an example. Assume that input sequence is [94, 17, 36, 12, 28] and the number of skips is 2. "
" The associated 2-grams are [94, 12] and [17, 28] respectively indexed by [0, 3] and [1, 4]. "
" If the number of skips becomes 0, the 2-grams generated are [94, 17], [17, 36], [36, 12], [12, 28] "
" indexed by [0, 1], [1, 2], [2, 3], [3, 4], respectively. "
" "
" The output vector (denoted by Y) stores the count of each n-gram; "
" Y[ngram_indexes[i]] indicates the times that the i-th n-gram is found. The attribute ngram_indexes is used to determine the mapping "
" between index i and the corresponding n-gram's output coordinate. If pool_int64s is [94, 17, 17, 36], ngram_indexes is [1, 0], "
" ngram_counts=[0, 0], then the Y[0] (first element in Y) and Y[1] (second element in Y) are the counts of [17, 36] and [94, 17], "
" respectively. An n-gram which cannot be found in pool_strings/pool_int64s should be ignored and has no effect on the output. "
" Note that we may consider all skips up to S when generating the n-grams. "
" "
" The examples used above are true if mode is \" TF \" . If mode is \" IDF \" , all the counts larger than 1 would be truncated to 1 and "
" the i-th element in weights would be used to scale (by multiplication) the count of the i-th n-gram in pool. If mode is \" TFIDF \" , "
" this operator first computes the counts of all n-grams and then scale them by the associated values in the weights attribute. "
" "
" Only one of pool_strings and pool_int64s can be set. If pool_int64s is set, the input should be an integer tensor. "
" If pool_strings is set, the input must be a string tensor. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ StringType ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $X ,
I64Attr : $max_gram_length ,
I64Attr : $max_skip_count ,
I64Attr : $min_gram_length ,
StrAttr : $mode ,
I64ArrayAttr : $ngram_counts ,
I64ArrayAttr : $ngram_indexes ,
OptionalAttr < I64ArrayAttr >: $pool_int64s ,
OptionalAttr < StrArrayAttr >: $pool_strings ,
OptionalAttr < F32ArrayAttr >: $weights );
let results = ( outs AnyTypeOf < [ TensorOf < [ F32 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 7 };
}
}];
}
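// The skip-gram extraction in the description can be sketched in a few lines;
// this shows only how skips select the token indices, not the full
// TF/IDF/TFIDF weighting. It reproduces the worked example above.
```
def ngrams_with_skip(seq, n, skip):
    # A skip of s leaves s tokens between consecutive picks.
    step = skip + 1
    out = []
    for start in range(len(seq)):
        idx = [start + k * step for k in range(n)]
        if idx[-1] < len(seq):
            out.append([seq[i] for i in idx])
    return out

print(ngrams_with_skip([94, 17, 36, 12, 28], n=2, skip=2))
# [[94, 12], [17, 28]]
print(ngrams_with_skip([94, 17, 36, 12, 28], n=2, skip=0))
# [[94, 17], [17, 36], [36, 12], [12, 28]]
```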
def ONNXThresholdedReluOp : ONNX_Op < " ThresholdedRelu " ,
[ NoSideEffect ] > {
let summary = " ONNX ThresholdedRelu operation " ;
let description = [{
" ThresholdedRelu takes one input data (Tensor<T>) and produces one output data "
" (Tensor<T>) where the rectified linear function, y = x for x > alpha, y = 0 otherwise, "
" is applied to the tensor elementwise. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X ,
DefaultValuedAttr < F32Attr , " 1.0 " >: $alpha );
let results = ( outs AnyTypeOf < [ TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}

def ONNXTileOp : ONNX_Op < " Tile " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > , OpInterface < " PromotableConstOperandsOpInterface " > ] > {
let summary = " ONNX Tile operation " ;
let description = [{
" Constructs a tensor by tiling a given tensor. "
" This is the same as function `tile` in Numpy, but no broadcast. "
" For example A = [[1, 2], [3, 4]], B = [1, 2], tile(A, B) = [[1, 2, 1, 2], [3, 4, 3, 4]] "
}];
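// The documented example, reproduced with NumPy (illustration only):
//
//   import numpy as np
//   A = np.array([[1, 2], [3, 4]])
//   np.tile(A, [1, 2])   # -> [[1, 2, 1, 2], [3, 4, 3, 4]]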
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $input ,
AnyTypeOf < [ TensorOf < [ I64 ] > , AnyMemRef , NoneType ] >: $repeats );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 2 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
std :: map < std :: string , size_t > promotableConstOperands () {
return {{ " repeats " , 1 }};
}
}];
}
def ONNXTopKOp : ONNX_Op < " TopK " ,
[ NoSideEffect ] > {
let summary = " ONNX TopK operation " ;
let description = [{
" Retrieve the top-K largest or smallest elements along a specified axis. Given an input tensor of "
" shape [a_1, a_2, ..., a_n, r] and integer argument k, return two outputs: "
" -Value tensor of shape [a_1, a_2, ..., a_ { axis-1}, k, a_ { axis+1}, ... a_n] "
" which contains the values of the top k elements along the specified axis "
" -Index tensor of shape [a_1, a_2, ..., a_ { axis-1}, k, a_ { axis+1}, ... a_n] which "
" contains the indices of the top k elements (original indices from the input "
" tensor). "
" "
" If \" largest \" is 1 (the default value) then the k largest elements are returned. "
" If \" sorted \" is 1 (the default value) then the resulting k elements will be sorted. "
" If \" sorted \" is 0, order of returned 'Values' and 'Indices' are undefined. "
" "
" Given two equivalent values, this operator uses the indices along the axis as "
" a tiebreaker. That is, the element with the lower index will appear first. "
}];
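// A rough NumPy sketch of the semantics above, including the lower-index
// tiebreak (illustration only; a stable argsort stands in for the real kernel):
//
//   import numpy as np
//   def topk(x, k, axis=-1, largest=True):
//       order = np.argsort(-x if largest else x, axis=axis, kind="stable")
//       idx = np.take(order, np.arange(k), axis=axis)
//       return np.take_along_axis(x, idx, axis=axis), idx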
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X ,
TensorOf < [ I64 ] >: $K ,
DefaultValuedAttr < I64Attr , " -1 " >: $axis ,
DefaultValuedAttr < I64Attr , " 1 " >: $largest ,
DefaultValuedAttr < I64Attr , " 1 " >: $sorted );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $Values ,
AnyTypeOf < [ TensorOf < [ I64 ] > , AnyMemRef ] >: $Indices );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 2 ;
}
static int getNumberOfResults () {
return 2 ;
}
static std :: vector < int > getTypeMap () {
return { 20 , 4 };
}
}];
}
def ONNXTransposeOp : ONNX_Op < " Transpose " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Transpose operation " ;
let description = [{
" Transpose the input tensor similar to numpy.transpose. For example, when "
" perm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output shape "
" will be (2, 1, 3). "
}];
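// The documented example, reproduced with NumPy (illustration only):
//
//   import numpy as np
//   np.transpose(np.ones((1, 2, 3)), (1, 0, 2)).shape   # -> (2, 1, 3)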
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $data ,
OptionalAttr < I64ArrayAttr >: $perm );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $transposed );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXUniqueOp : ONNX_Op < " Unique " ,
[ NoSideEffect ] > {
let summary = " ONNX Unique operation " ;
let description = [{
" Find the unique elements of a tensor. When an optional attribute 'axis' is provided, unique subtensors sliced along the 'axis' are returned. "
" Otherwise the input tensor is flattened and unique values of the flattened tensor are returned. "
" "
" This operator returns the unique values or sliced unique subtensors of the input tensor and three optional outputs. "
" The first output tensor 'Y' contains all unique values or subtensors of the input. "
" The second optional output tensor 'indices' contains indices of 'Y' elements' first occurance in 'X'.. "
" The third optional output tensor 'inverse_indices' contains, for elements of 'X', its corresponding indices in 'Y'. \" . "
" The fourth optional output tensor 'counts' contains the count of each element of 'Y' in the input. "
" "
" Outputs are either sorted in ascending order or optionally in the order of the first occurrence of the values in the input. "
" "
" https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html "
" "
" Example 1: "
" input_X = [2, 1, 1, 3, 4, 3] "
" attribute_sorted = 0 "
" attribute_axis = None "
" output_Y = [2, 1, 3, 4] "
" output_indices = [0, 1, 3, 4] "
" output_inverse_indices = [0, 1, 1, 2, 3, 2] "
" output_counts = [1, 2, 2, 1] "
" "
" Example 2: "
" input_X = [[1, 3], [2, 3]] "
" attribute_sorted = 1 "
" attribute_axis = None "
" output_Y = [1, 2, 3] "
" output_indices = [0, 2, 1] "
" output_inverse_indices = [0, 2, 1, 2] "
" output_counts = [1, 1, 2] "
" "
" Example 3: "
" input_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]] "
" attribute_sorted = 1 "
" attribute_axis = 0 "
" output_Y = [[1, 0, 0], [2, 3, 4]] "
" output_indices = [0, 2] "
" output_inverse_indices = [0, 0, 1] "
" output_counts = [2, 1] "
" "
" Example 4: "
" input_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]], "
" [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]] "
" attribute_sorted = 1 "
" attribute_axis = 1 "
" "
" intermediate data are presented below for better understanding: "
" "
" there are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)): "
" A: [[1, 1], [1, 1]], "
" [[0, 1], [0, 1]], "
" [[2, 1], [2, 1]], "
" [[0, 1], [0, 1]]. "
" "
" there are 3 unique subtensors: "
" [[1, 1], [1, 1]], "
" [[0, 1], [0, 1]], "
" [[2, 1], [2, 1]]. "
" "
" sorted unique subtensors: "
" B: [[0, 1], [0, 1]], "
" [[1, 1], [1, 1]], "
" [[2, 1], [2, 1]]. "
" "
" output_Y is constructed from B: "
" [[[0. 1.], [1. 1.], [2. 1.]], "
" [[0. 1.], [1. 1.], [2. 1.]]] "
" "
" output_indices is to map from B to A: "
" [1, 0, 2] "
" "
" output_inverse_indices is to map from A to B: "
" [1, 0, 2, 0] "
" "
" output_counts = [2 1 1] "
}];
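// Example 2 above, reproduced with NumPy (illustration only; np.unique
// flattens when no axis is given, matching attribute_axis = None):
//
//   import numpy as np
//   Y, indices, inverse_indices, counts = np.unique(
//       np.array([[1, 3], [2, 3]]),
//       return_index=True, return_inverse=True, return_counts=True)
//   # Y = [1 2 3], indices = [0 2 1], inverse_indices = [0 2 1 2], counts = [1 1 2]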
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $X ,
OptionalAttr < I64Attr >: $axis ,
DefaultValuedAttr < I64Attr , " 1 " >: $sorted );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $Y ,
AnyTypeOf < [ TensorOf < [ I64 ] > , NoneType ] >: $indices ,
AnyTypeOf < [ TensorOf < [ I64 ] > , NoneType ] >: $inverse_indices ,
AnyTypeOf < [ TensorOf < [ I64 ] > , NoneType ] >: $counts );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 4 ;
}
static std :: vector < int > getTypeMap () {
return { 20 , 4 , 4 , 4 };
}
}];
}
def ONNXUnsqueezeOp : ONNX_Op < " Unsqueeze " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Unsqueeze operation " ;
let description = [{
" Insert single-dimensional entries to the shape of an input tensor (`data`). "
" Takes one required argument `axes` - which contains a list of dimension indices and this operator will insert a dimension of value `1` into the corresponding index of the output tensor (`expanded`). "
" "
" For example: "
" Given an input tensor (`data`) of shape [3, 4, 5], then "
" Unsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing same data as `data` but with shape [1, 3, 4, 5, 1]. "
" "
" The attribute `axes` should not contain any duplicate entries. It is an error if it contains duplicates. "
" The rank of the output tensor (`output_rank`) is the rank of the input tensor (`data`) plus the number of values in `axes`. "
" Each value in `axes` should be within the (inclusive) range [-output_rank , output_rank - 1]. "
" The order of values in `axes` does not matter and can come in any order. "
" "
2019-11-19 10:08:21 +08:00
}];
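// The documented example, reproduced with NumPy (illustration only; passing a
// tuple of axes to np.expand_dims assumes NumPy >= 1.18):
//
//   import numpy as np
//   np.expand_dims(np.zeros((3, 4, 5)), axis=(0, 4)).shape   # -> (1, 3, 4, 5, 1)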
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $data ,
I64ArrayAttr : $axes );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $expanded );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXUpsampleOp : ONNX_Op < " Upsample " ,
[ NoSideEffect ] > {
let summary = " ONNX Upsample operation " ;
let description = [{
" Upsample the input tensor. "
" Each dimension value of the output tensor is: "
" output_dimension = floor(input_dimension * scale). "
}];
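// A rough 1-D nearest-neighbor sketch of the scaling rule above (illustration
// only; exact coordinate rounding conventions vary by mode):
//
//   import numpy as np
//   def upsample_nearest_1d(x, scale):
//       out_len = int(np.floor(len(x) * scale))
//       src = (np.arange(out_len) / scale).astype(np.int64)
//       return x[src]
//
//   upsample_nearest_1d(np.array([1, 2]), 2.0)   # -> [1, 1, 2, 2]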
let arguments = ( ins AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $X ,
TensorOf < [ F32 ] >: $scales ,
DefaultValuedAttr < StrAttr , " nearest " >: $mode );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 2 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXWhereOp : ONNX_Op < " Where " ,
[ NoSideEffect ] > {
let summary = " ONNX Where operation " ;
let description = [{
" Return elements, either from X or Y, depending on condition "
" (with Numpy-style broadcasting support). "
" Where behaves like numpy.where with three parameters: "
" https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html "
}];
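// A minimal NumPy illustration of the selection rule (illustration only):
//
//   import numpy as np
//   cond = np.array([True, False, True])
//   np.where(cond, np.array([1, 2, 3]), np.array([9, 8, 7]))   # -> [1, 8, 3]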
let arguments = ( ins AnyTypeOf < [ TensorOf < [ I1 ] > , AnyMemRef ] >: $condition ,
AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $X ,
AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $Y );
let results = ( outs AnyTypeOf < [ TensorOf < [ UI8 ] > , TensorOf < [ UI16 ] > , TensorOf < [ UI32 ] > , TensorOf < [ UI64 ] > , TensorOf < [ I8 ] > , TensorOf < [ I16 ] > , TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F16 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , TensorOf < [ I1 ] > , TensorOf < [ Complex < F32 > ] > , TensorOf < [ Complex < F64 > ] > , AnyMemRef ] >: $output );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 3 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 21 };
}
}];
}
def ONNXXorOp : ONNX_Op < " Xor " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Xor operation " ;
let description = [{
" Returns the tensor resulted from performing the `xor` logical operation "
" elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). "
" "
" This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). "
}];
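// A minimal NumPy illustration of the elementwise rule (illustration only):
//
//   import numpy as np
//   np.logical_xor(np.array([True, True, False]),
//                  np.array([True, False, False]))   # -> [False, True, False]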
let arguments = ( ins AnyTypeOf < [ TensorOf < [ I1 ] > , AnyMemRef ] >: $A ,
AnyTypeOf < [ TensorOf < [ I1 ] > , AnyMemRef ] >: $B );
let results = ( outs AnyTypeOf < [ TensorOf < [ I1 ] > , AnyMemRef ] >: $C );
let builders = [
OpBuilder < " OpBuilder &builder, OperationState &state, Value A, Value B " , [{
auto lhsTy = A . getType ();
auto rhsTy = B . getType ();
auto elementType = getBroadcastedType ( lhsTy , rhsTy );
auto shapedType = elementType . dyn_cast_or_null < ShapedType > ();
if ( ! shapedType || ! shapedType . hasStaticShape ()) {
elementType = A . getType () . cast < TensorType > () . getElementType ();
elementType = UnrankedTensorType :: get ( elementType );
}
build ( builder , state , elementType , A , B );
}] > ,
OpBuilder < " OpBuilder &builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes " , [{
auto lhsTy = operands [ 0 ] . getType ();
auto rhsTy = operands [ 1 ] . getType ();
auto elementType = getBroadcastedType ( lhsTy , rhsTy );
auto shapedType = elementType . dyn_cast_or_null < ShapedType > ();
if ( ! shapedType || ! shapedType . hasStaticShape ()) {
elementType = operands [ 0 ] . getType () . cast < TensorType > () . getElementType ();
elementType = UnrankedTensorType :: get ( elementType );
}
std :: vector < mlir :: Type > outputTypes ;
outputTypes . emplace_back ( elementType );
build ( builder , state , outputTypes , operands , attributes );
}] >
];
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 2 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 0 };
}
}];
}
def ONNXArrayFeatureExtractorOp : ONNX_Op < " ArrayFeatureExtractor " ,
[ NoSideEffect ] > {
let summary = " ONNX ArrayFeatureExtractor operation " ;
let description = [{
" Select elements of the input tensor based on the indices passed.<br> "
" The indices are applied to the last axes of the tensor. "
}];
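// A minimal NumPy sketch: indexing along the last axis (illustration only):
//
//   import numpy as np
//   X = np.array([[1, 2, 3], [4, 5, 6]])
//   np.take(X, np.array([0, 2]), axis=-1)   # -> [[1, 3], [4, 6]]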
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I64 ] > , TensorOf < [ I32 ] > , TensorOf < [ StringType ] > , AnyMemRef ] >: $X ,
TensorOf < [ I64 ] >: $Y );
let results = ( outs AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I64 ] > , TensorOf < [ I32 ] > , TensorOf < [ StringType ] > , AnyMemRef ] >: $Z );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 2 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXBinarizerOp : ONNX_Op < " Binarizer " ,
[ NoSideEffect ] > {
let summary = " ONNX Binarizer operation " ;
let description = [{
" Maps the values of the input tensor to either 0 or 1, element-wise, based on the outcome of a comparison against a threshold value. "
}];
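// A minimal NumPy sketch of the thresholding rule (illustration only; values
// strictly above the threshold map to 1, a common reading of the comparison):
//
//   import numpy as np
//   x = np.array([-1.0, 0.5, 2.0])
//   (x > 0.0).astype(x.dtype)   # threshold = 0.0 -> [0., 1., 1.]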
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I64 ] > , TensorOf < [ I32 ] > , AnyMemRef ] >: $X ,
DefaultValuedAttr < F32Attr , " 0.0 " >: $threshold );
let results = ( outs AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I64 ] > , TensorOf < [ I32 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXCastMapOp : ONNX_Op < " CastMap " ,
[ NoSideEffect ] > {
let summary = " ONNX CastMap operation " ;
let description = [{
" Converts a map to a tensor.<br>The map key must be an int64 and the values will be ordered "
" in ascending order based on this key.<br>The operator supports dense packing or sparse packing. "
" If using sparse packing, the key cannot exceed the max_map-1 value. "
}];
let arguments = ( ins AnyTypeOf < [ TupleOf < [ I64 , StringType ] > , TupleOf < [ I64 , F32 ] > , AnyMemRef ] >: $X ,
DefaultValuedAttr < StrAttr , " TO_FLOAT " >: $cast_to ,
DefaultValuedAttr < StrAttr , " DENSE " >: $map_form ,
DefaultValuedAttr < I64Attr , " 1 " >: $max_map );
let results = ( outs AnyTypeOf < [ TensorOf < [ StringType ] > , TensorOf < [ F32 ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { - 1 };
}
}];
}
def ONNXCategoryMapperOp : ONNX_Op < " CategoryMapper " ,
[ NoSideEffect ] > {
let summary = " ONNX CategoryMapper operation " ;
let description = [{
" Converts strings to integers and vice versa.<br> "
" Two sequences of equal length are used to map between integers and strings, "
" with strings and integers at the same index detailing the mapping.<br> "
" Each operator converts either integers to strings or strings to integers, depending "
" on which default value attribute is provided. Only one default value attribute "
" should be defined.<br> "
" If the string default value is set, it will convert integers to strings. "
" If the int default value is set, it will convert strings to integers. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ StringType ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $X ,
OptionalAttr < I64ArrayAttr >: $cats_int64s ,
OptionalAttr < StrArrayAttr >: $cats_strings ,
DefaultValuedAttr < I64Attr , " -1 " >: $default_int64 ,
DefaultValuedAttr < StrAttr , " _Unused " >: $default_string );
let results = ( outs AnyTypeOf < [ TensorOf < [ StringType ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { - 1 };
}
}];
}
def ONNXDictVectorizerOp : ONNX_Op < " DictVectorizer " ,
[ NoSideEffect ] > {
let summary = " ONNX DictVectorizer operation " ;
let description = [{
" Uses an index mapping to convert a dictionary to an array.<br> "
" Given a dictionary, each key is looked up in the vocabulary attribute corresponding to "
" the key type. The index into the vocabulary array at which the key is found is then "
" used to index the output 1-D tensor 'Y' and insert into it the value found in the dictionary 'X'.<br> "
" The key type of the input map must correspond to the element type of the defined vocabulary attribute. "
" Therefore, the output array will be equal in length to the index mapping vector parameter. "
" All keys in the input dictionary must be present in the index mapping vector. "
" For each item in the input dictionary, insert its value in the output array. "
" Any keys not present in the input dictionary, will be zero in the output array.<br> "
" For example: if the ``string_vocabulary`` parameter is set to ``[ \" a \" , \" c \" , \" b \" , \" z \" ]``, "
" then an input of `` { \" a \" : 4, \" c \" : 8}`` will produce an output of ``[4, 8, 0, 0]``. "
" "
}];
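// The documented example, reproduced in plain Python (illustration only):
//
//   vocab = ["a", "c", "b", "z"]           # string_vocabulary
//   x = {"a": 4, "c": 8}
//   [x.get(k, 0) for k in vocab]           # -> [4, 8, 0, 0]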
let arguments = ( ins AnyTypeOf < [ TupleOf < [ StringType , I64 ] > , TupleOf < [ I64 , StringType ] > , TupleOf < [ I64 , F32 ] > , TupleOf < [ I64 , F64 ] > , TupleOf < [ StringType , F32 ] > , TupleOf < [ StringType , F64 ] > , AnyMemRef ] >: $X ,
OptionalAttr < I64ArrayAttr >: $int64_vocabulary ,
OptionalAttr < StrArrayAttr >: $string_vocabulary );
let results = ( outs AnyTypeOf < [ TensorOf < [ I64 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ StringType ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { - 1 };
}
}];
}
def ONNXFeatureVectorizerOp : ONNX_Op < " FeatureVectorizer " ,
[ NoSideEffect ] > {
let summary = " ONNX FeatureVectorizer operation " ;
let description = [{
" Concatenates input tensors into one continuous output.<br> "
" All input shapes are 2-D and are concatenated along the second dimention. 1-D tensors are treated as [1,C]. "
" Inputs are copied to the output maintaining the order of the input arguments.<br> "
" All inputs must be integers or floats, while the output will be all floating point values. "
}];
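// A minimal NumPy sketch: 2-D inputs concatenated along the second dimension
// (illustration only):
//
//   import numpy as np
//   a = np.array([[1., 2.]])                # shape [1, 2]
//   b = np.array([[3.]])                    # shape [1, 1]
//   np.concatenate([a, b], axis=1)          # -> [[1., 2., 3.]]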
let arguments = ( ins Variadic < AnyTypeOf < [ TensorOf < [ I32 ] > , TensorOf < [ I64 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >>: $X ,
OptionalAttr < I64ArrayAttr >: $inputdimensions );
let results = ( outs TensorOf < [ F32 ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return - 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 7 };
}
}];
}
def ONNXImputerOp : ONNX_Op < " Imputer " ,
[ NoSideEffect ] > {
let summary = " ONNX Imputer operation " ;
let description = [{
" Replaces inputs that equal one value with another, leaving all other elements alone.<br> "
" This operator is typically used to replace missing values in situations where they have a canonical "
" representation, such as -1, 0, NaN, or some extreme value.<br> "
" One and only one of imputed_value_floats or imputed_value_int64s should be defined -- floats if the input tensor "
" holds floats, integers if the input tensor holds integers. The imputed values must all fit within the "
" width of the tensor element type. One and only one of the replaced_value_float or replaced_value_int64 should be defined, "
" which one depends on whether floats or integers are being processed.<br> "
" The imputed_value attribute length can be 1 element, or it can have one element per input feature.<br>In other words, if the input tensor has the shape [*,F], then the length of the attribute array may be 1 or F. If it is 1, then it is broadcast along the last dimension and applied to each feature. "
}];
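// A rough NumPy sketch of the replacement rule with a broadcast imputed value
// (illustration only; a NaN marker would need np.isnan instead of ==):
//
//   import numpy as np
//   def impute(x, imputed_value_floats, replaced_value_float):
//       imputed = np.broadcast_to(imputed_value_floats, x.shape)
//       return np.where(x == replaced_value_float, imputed, x)
//
//   impute(np.array([[1., -1.], [-1., 4.]]), np.array([9., 8.]), -1.0)
//   # -> [[1., 8.], [9., 4.]]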
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I64 ] > , TensorOf < [ I32 ] > , AnyMemRef ] >: $X ,
OptionalAttr < F32ArrayAttr >: $imputed_value_floats ,
OptionalAttr < I64ArrayAttr >: $imputed_value_int64s ,
DefaultValuedAttr < F32Attr , " 0.0 " >: $replaced_value_float ,
DefaultValuedAttr < I64Attr , " 0 " >: $replaced_value_int64 );
let results = ( outs AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I64 ] > , TensorOf < [ I32 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 20 };
}
}];
}
def ONNXLabelEncoderOp : ONNX_Op < " LabelEncoder " ,
[ NoSideEffect ] > {
let summary = " ONNX LabelEncoder operation " ;
let description = [{
" Maps each element in the input tensor to another value.<br> "
" The mapping is determined by the two parallel attributes, 'keys_*' and "
" 'values_*' attribute. The i-th value in the specified 'keys_*' attribute "
" would be mapped to the i-th value in the specified 'values_*' attribute. It "
" implies that input's element type and the element type of the specified "
" 'keys_*' should be identical while the output type is identical to the "
" specified 'values_*' attribute. If an input element can not be found in the "
" specified 'keys_*' attribute, the 'default_*' that matches the specified "
" 'values_*' attribute may be used as its output value.<br> "
" Let's consider an example which maps a string tensor to an integer tensor. "
" Assume and 'keys_strings' is [ \" Amy \" , \" Sally \" ], 'values_int64s' is [5, 6], "
" and 'default_int64' is '-1'. The input [ \" Dori \" , \" Amy \" , \" Amy \" , \" Sally \" , "
" \" Sally \" ] would be mapped to [-1, 5, 5, 6, 6].<br> "
" Since this operator is an one-to-one mapping, its input and output shapes "
" are the same. Notice that only one of 'keys_*'/'values_*' can be set.<br> "
" For key look-up, bit-wise comparison is used so even a float NaN can be "
" mapped to a value in 'values_*' attribute.<br> "
}];
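// The documented example, reproduced in plain Python (illustration only):
//
//   table = dict(zip(["Amy", "Sally"], [5, 6]))    # keys_strings -> values_int64s
//   [table.get(s, -1) for s in ["Dori", "Amy", "Amy", "Sally", "Sally"]]
//   # -> [-1, 5, 5, 6, 6]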
let arguments = ( ins AnyTypeOf < [ TensorOf < [ StringType ] > , TensorOf < [ I64 ] > , TensorOf < [ F32 ] > , AnyMemRef ] >: $X ,
DefaultValuedAttr < F32Attr , " -0.0 " >: $default_float ,
DefaultValuedAttr < I64Attr , " -1 " >: $default_int64 ,
DefaultValuedAttr < StrAttr , " _Unused " >: $default_string ,
OptionalAttr < F32ArrayAttr >: $keys_floats ,
OptionalAttr < I64ArrayAttr >: $keys_int64s ,
OptionalAttr < StrArrayAttr >: $keys_strings ,
OptionalAttr < F32ArrayAttr >: $values_floats ,
OptionalAttr < I64ArrayAttr >: $values_int64s ,
OptionalAttr < StrArrayAttr >: $values_strings );
let results = ( outs AnyTypeOf < [ TensorOf < [ StringType ] > , TensorOf < [ I64 ] > , TensorOf < [ F32 ] > , AnyMemRef ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { - 1 };
}
}];
}
def ONNXLinearClassifierOp : ONNX_Op < " LinearClassifier " ,
[ NoSideEffect ] > {
let summary = " ONNX LinearClassifier operation " ;
let description = [{
" Linear classifier "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I64 ] > , TensorOf < [ I32 ] > , AnyMemRef ] >: $X ,
OptionalAttr < I64ArrayAttr >: $classlabels_ints ,
OptionalAttr < StrArrayAttr >: $classlabels_strings ,
F32ArrayAttr : $coefficients ,
OptionalAttr < F32ArrayAttr >: $intercepts ,
DefaultValuedAttr < I64Attr , " 0 " >: $multi_class ,
DefaultValuedAttr < StrAttr , " NONE " >: $post_transform );
let results = ( outs AnyTypeOf < [ TensorOf < [ StringType ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $Y ,
TensorOf < [ F32 ] >: $Z );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 2 ;
}
static std :: vector < int > getTypeMap () {
return { - 1 , 7 };
}
}];
}
def ONNXLinearRegressorOp : ONNX_Op < " LinearRegressor " ,
[ NoSideEffect ] > {
let summary = " ONNX LinearRegressor operation " ;
let description = [{
" Generalized linear regression evaluation.<br> "
" If targets is set to 1 (default) then univariate regression is performed.<br> "
" If targets is set to M then M sets of coefficients must be passed in as a sequence "
" and M results will be output for each input n in N.<br> "
" The coefficients array is of length n, and the coefficients for each target are contiguous. "
" Intercepts are optional but if provided must match the number of targets. "
}];
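// A rough NumPy sketch of the multi-target layout described above
// (illustration only; post_transform is assumed to be NONE):
//
//   import numpy as np
//   def linear_regressor(X, coefficients, intercepts, targets=1):
//       W = np.asarray(coefficients).reshape(targets, -1)   # contiguous per target
//       return X @ W.T + np.asarray(intercepts)             # [N, targets]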
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I64 ] > , TensorOf < [ I32 ] > , AnyMemRef ] >: $X ,
OptionalAttr < F32ArrayAttr >: $coefficients ,
OptionalAttr < F32ArrayAttr >: $intercepts ,
DefaultValuedAttr < StrAttr , " NONE " >: $post_transform ,
DefaultValuedAttr < I64Attr , " 1 " >: $targets );
let results = ( outs TensorOf < [ F32 ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 7 };
}
}];
}
def ONNXNormalizerOp : ONNX_Op < " Normalizer " ,
[ NoSideEffect ] > {
let summary = " ONNX Normalizer operation " ;
let description = [{
" Normalize the input. There are three normalization modes, which have the corresponding formulas, "
" defined using element-wise infix operators '/' and '^' and tensor-wide functions 'max' and 'sum':<br> "
" <br> "
" Max: Y = X / max(X)<br> "
" L1: Y = X / sum(X)<br> "
" L2: Y = sqrt(X^2 / sum(X^2)}<br> "
" In all modes, if the divisor is zero, Y == X. "
" <br> "
" For batches, that is, [N,C] tensors, normalization is done along the C axis. In other words, each row "
" of the batch is normalized independently. "
}];
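// A rough NumPy sketch of row-wise normalization on [N, C] inputs
// (illustration only; L2 is read here as x / ||x||_2, and a zero divisor
// leaves the row unchanged, as specified above):
//
//   import numpy as np
//   def normalize(x, norm="MAX"):
//       if norm == "MAX":  d = np.max(x, axis=1, keepdims=True)
//       elif norm == "L1": d = np.sum(x, axis=1, keepdims=True)
//       else:              d = np.sqrt(np.sum(x * x, axis=1, keepdims=True))
//       return np.where(d == 0, x, x / np.where(d == 0, 1, d))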
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I64 ] > , TensorOf < [ I32 ] > , AnyMemRef ] >: $X ,
DefaultValuedAttr < StrAttr , " MAX " >: $norm );
let results = ( outs TensorOf < [ F32 ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 7 };
}
}];
}
def ONNXOneHotEncoderOp : ONNX_Op < " OneHotEncoder " ,
[ NoSideEffect ] > {
let summary = " ONNX OneHotEncoder operation " ;
let description = [{
" Replace each input element with an array of ones and zeros, where a single "
" one is placed at the index of the category that was passed in. The total category count "
" will determine the size of the extra dimension of the output array Y.<br> "
" For example, if we pass a tensor with a single value of 4, and a category count of 8, "
" the output will be a tensor with ``[0,0,0,0,1,0,0,0]``.<br> "
" This operator assumes every input feature is from the same set of categories.<br> "
" If the input is a tensor of float, int32, or double, the data will be cast "
" to integers and the cats_int64s category list will be used for the lookups. "
}];
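// The documented example, reproduced with NumPy (illustration only):
//
//   import numpy as np
//   cats = np.arange(8)                            # category count of 8
//   x = np.array([4])
//   (x[..., None] == cats).astype(np.float32)      # -> [[0,0,0,0,1,0,0,0]]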
let arguments = ( ins AnyTypeOf < [ TensorOf < [ StringType ] > , TensorOf < [ I64 ] > , TensorOf < [ I32 ] > , TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , AnyMemRef ] >: $X ,
OptionalAttr < I64ArrayAttr >: $cats_int64s ,
OptionalAttr < StrArrayAttr >: $cats_strings ,
DefaultValuedAttr < I64Attr , " 1 " >: $zeros );
let results = ( outs TensorOf < [ F32 ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 7 };
}
}];
}
def ONNXSVMClassifierOp : ONNX_Op < " SVMClassifier " ,
[ NoSideEffect ] > {
let summary = " ONNX SVMClassifier operation " ;
let description = [{
" Support Vector Machine classifier "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I64 ] > , TensorOf < [ I32 ] > , AnyMemRef ] >: $X ,
OptionalAttr < I64ArrayAttr >: $classlabels_ints ,
OptionalAttr < StrArrayAttr >: $classlabels_strings ,
OptionalAttr < F32ArrayAttr >: $coefficients ,
OptionalAttr < F32ArrayAttr >: $kernel_params ,
DefaultValuedAttr < StrAttr , " LINEAR " >: $kernel_type ,
DefaultValuedAttr < StrAttr , " NONE " >: $post_transform ,
OptionalAttr < F32ArrayAttr >: $prob_a ,
OptionalAttr < F32ArrayAttr >: $prob_b ,
OptionalAttr < F32ArrayAttr >: $rho ,
OptionalAttr < F32ArrayAttr >: $support_vectors ,
OptionalAttr < I64ArrayAttr >: $vectors_per_class );
let results = ( outs AnyTypeOf < [ TensorOf < [ StringType ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $Y ,
TensorOf < [ F32 ] >: $Z );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 2 ;
}
static std :: vector < int > getTypeMap () {
return { - 1 , 7 };
}
}];
}
def ONNXSVMRegressorOp : ONNX_Op < " SVMRegressor " ,
[ NoSideEffect ] > {
let summary = " ONNX SVMRegressor operation " ;
let description = [{
" Support Vector Machine regression prediction and one-class SVM anomaly detection. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I64 ] > , TensorOf < [ I32 ] > , AnyMemRef ] >: $X ,
OptionalAttr < F32ArrayAttr >: $coefficients ,
OptionalAttr < F32ArrayAttr >: $kernel_params ,
DefaultValuedAttr < StrAttr , " LINEAR " >: $kernel_type ,
DefaultValuedAttr < I64Attr , " 0 " >: $n_supports ,
DefaultValuedAttr < I64Attr , " 0 " >: $one_class ,
DefaultValuedAttr < StrAttr , " NONE " >: $post_transform ,
OptionalAttr < F32ArrayAttr >: $rho ,
OptionalAttr < F32ArrayAttr >: $support_vectors );
let results = ( outs TensorOf < [ F32 ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 7 };
}
}];
}
def ONNXScalerOp : ONNX_Op < " Scaler " ,
[ NoSideEffect , DeclareOpInterfaceMethods < ShapeInferenceOpInterface > ] > {
let summary = " ONNX Scaler operation " ;
let description = [{
" Rescale input data, for example to standardize features by removing the mean and scaling to unit variance. "
}];
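// A minimal NumPy sketch of the rescaling rule, read as Y = (X - offset) * scale
// (illustration only; offset and scale broadcast over the feature axis):
//
//   import numpy as np
//   def scaler(x, offset, scale):
//       return ((x - offset) * scale).astype(np.float32)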
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I64 ] > , TensorOf < [ I32 ] > , AnyMemRef ] >: $X ,
OptionalAttr < F32ArrayAttr >: $offset ,
OptionalAttr < F32ArrayAttr >: $scale );
let results = ( outs TensorOf < [ F32 ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 7 };
}
}];
}
def ONNXTreeEnsembleClassifierOp : ONNX_Op < " TreeEnsembleClassifier " ,
[ NoSideEffect ] > {
let summary = " ONNX TreeEnsembleClassifier operation " ;
let description = [{
" Tree Ensemble classifier. Returns the top class for each of N inputs.<br> "
" The attributes named 'nodes_X' form a sequence of tuples, associated by "
" index into the sequences, which must all be of equal length. These tuples "
" define the nodes.<br> "
" Similarly, all fields prefixed with 'class_' are tuples of votes at the leaves. "
" A leaf may have multiple votes, where each vote is weighted by "
" the associated class_weights index.<br> "
" One and only one of classlabels_strings or classlabels_int64s "
" will be defined. The class_ids are indices into this list. "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I64 ] > , TensorOf < [ I32 ] > , AnyMemRef ] >: $X ,
OptionalAttr < F32ArrayAttr >: $base_values ,
OptionalAttr < I64ArrayAttr >: $class_ids ,
OptionalAttr < I64ArrayAttr >: $class_nodeids ,
OptionalAttr < I64ArrayAttr >: $class_treeids ,
OptionalAttr < F32ArrayAttr >: $class_weights ,
OptionalAttr < I64ArrayAttr >: $classlabels_int64s ,
OptionalAttr < StrArrayAttr >: $classlabels_strings ,
OptionalAttr < I64ArrayAttr >: $nodes_falsenodeids ,
OptionalAttr < I64ArrayAttr >: $nodes_featureids ,
OptionalAttr < F32ArrayAttr >: $nodes_hitrates ,
OptionalAttr < I64ArrayAttr >: $nodes_missing_value_tracks_true ,
OptionalAttr < StrArrayAttr >: $nodes_modes ,
OptionalAttr < I64ArrayAttr >: $nodes_nodeids ,
OptionalAttr < I64ArrayAttr >: $nodes_treeids ,
OptionalAttr < I64ArrayAttr >: $nodes_truenodeids ,
OptionalAttr < F32ArrayAttr >: $nodes_values ,
DefaultValuedAttr < StrAttr , " NONE " >: $post_transform );
let results = ( outs AnyTypeOf < [ TensorOf < [ StringType ] > , TensorOf < [ I64 ] > , AnyMemRef ] >: $Y ,
TensorOf < [ F32 ] >: $Z );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 2 ;
}
static std :: vector < int > getTypeMap () {
return { - 1 , 7 };
}
}];
}
def ONNXTreeEnsembleRegressorOp : ONNX_Op < " TreeEnsembleRegressor " ,
[ NoSideEffect ] > {
let summary = " ONNX TreeEnsembleRegressor operation " ;
let description = [{
" Tree Ensemble regressor. Returns the regressed values for each input in N.<br> "
" All args with nodes_ are fields of a tuple of tree nodes, and "
" it is assumed they are the same length, and an index i will decode the "
" tuple across these inputs. Each node id can appear only once "
" for each tree id.<br> "
" All fields prefixed with target_ are tuples of votes at the leaves.<br> "
" A leaf may have multiple votes, where each vote is weighted by "
" the associated target_weights index.<br> "
" All trees must have their node ids start at 0 and increment by 1.<br> "
" Mode enum is BRANCH_LEQ, BRANCH_LT, BRANCH_GTE, BRANCH_GT, BRANCH_EQ, BRANCH_NEQ, LEAF "
}];
let arguments = ( ins AnyTypeOf < [ TensorOf < [ F32 ] > , TensorOf < [ F64 ] > , TensorOf < [ I64 ] > , TensorOf < [ I32 ] > , AnyMemRef ] >: $X ,
DefaultValuedAttr < StrAttr , " SUM " >: $aggregate_function ,
OptionalAttr < F32ArrayAttr >: $base_values ,
OptionalAttr < I64Attr >: $n_targets ,
OptionalAttr < I64ArrayAttr >: $nodes_falsenodeids ,
OptionalAttr < I64ArrayAttr >: $nodes_featureids ,
OptionalAttr < F32ArrayAttr >: $nodes_hitrates ,
OptionalAttr < I64ArrayAttr >: $nodes_missing_value_tracks_true ,
OptionalAttr < StrArrayAttr >: $nodes_modes ,
OptionalAttr < I64ArrayAttr >: $nodes_nodeids ,
OptionalAttr < I64ArrayAttr >: $nodes_treeids ,
OptionalAttr < I64ArrayAttr >: $nodes_truenodeids ,
OptionalAttr < F32ArrayAttr >: $nodes_values ,
DefaultValuedAttr < StrAttr , " NONE " >: $post_transform ,
OptionalAttr < I64ArrayAttr >: $target_ids ,
OptionalAttr < I64ArrayAttr >: $target_nodeids ,
OptionalAttr < I64ArrayAttr >: $target_treeids ,
OptionalAttr < F32ArrayAttr >: $target_weights );
let results = ( outs TensorOf < [ F32 ] >: $Y );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { 7 };
}
}];
}
def ONNXZipMapOp : ONNX_Op < " ZipMap " ,
[ NoSideEffect ] > {
let summary = " ONNX ZipMap operation " ;
let description = [{
" Creates a map from the input and the attributes.<br> "
" The values are provided by the input tensor, while the keys are specified by the attributes. "
" Must provide keys in either classlabels_strings or classlabels_int64s (but not both).<br> "
" The columns of the tensor correspond one-by-one to the keys specified by the attributes. There must be as many columns as keys.<br> "
}];
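// A minimal Python sketch: each input row zipped with the class labels
// (illustration only):
//
//   labels = ["cat", "dog"]                       # classlabels_strings
//   scores = [[0.8, 0.2], [0.1, 0.9]]             # rows of X
//   [dict(zip(labels, row)) for row in scores]
//   # -> [{'cat': 0.8, 'dog': 0.2}, {'cat': 0.1, 'dog': 0.9}]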
let arguments = ( ins TensorOf < [ F32 ] >: $X ,
OptionalAttr < I64ArrayAttr >: $classlabels_int64s ,
OptionalAttr < StrArrayAttr >: $classlabels_strings );
let results = ( outs AnyTypeOf < [ SeqOf < [ TupleOf < [ StringType , F32 ] > ] > , SeqOf < [ TupleOf < [ I64 , F32 ] > ] > , AnyMemRef ] >: $Z );
let extraClassDeclaration = [{
static int getNumberOfOperands () {
return 1 ;
}
static int getNumberOfResults () {
return 1 ;
}
static std :: vector < int > getTypeMap () {
return { - 1 };
}
}];
}