Integrate LLVM at llvm/llvm-project@e2d7d3cb0e
Updates LLVM usage to match [e2d7d3cb0ead](https://github.com/llvm/llvm-project/commit/e2d7d3cb0ead)

PiperOrigin-RevId: 351915841
parent 4d62cd4dee
commit 9a1abaa212
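The test churn below tracks an upstream MLIR syntax change picked up by this integrate: the comparison predicate of the standard-dialect `cmpi`/`cmpf` ops is now printed as a bare keyword instead of a quoted string attribute, so FileCheck patterns of the form `cmpi "slt", ...` become `cmpi slt, ...`. A minimal before/after sketch (the operand names here are illustrative, not taken from any particular test):

    %is_lt = cmpi slt, %a, %b : i32    // previously printed as: cmpi "slt", %a, %b : i32
    %is_eq = cmpf oeq, %x, %y : f32    // previously printed as: cmpf "oeq", %x, %y : f32

The Bazel pins (`LLVM_COMMIT`, `LLVM_SHA256`) and the file that records the LLVM commit hash are bumped to the new commit accordingly.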
@@ -15,9 +15,9 @@

 load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

-LLVM_COMMIT = "6abbba3fca9fdf8d31f74800a7ddb40b103ae6e3"
+LLVM_COMMIT = "e2d7d3cb0eade079690c3938f694c8f7ef2b686b"

-LLVM_SHA256 = "f5f71f1351dd124501734b2ff50631663356bd354ed781dd08231e8ee25594d8"
+LLVM_SHA256 = "0bc37998b2adabc747e7caaf85886204ebf2d0205729086b1a45bf2489a73508"

 LLVM_BAZEL_TAG = "llvm-project-{commit}".format(commit = LLVM_COMMIT)
@@ -1,2 +1,2 @@
-6abbba3fca9fdf8d31f74800a7ddb40b103ae6e3
+e2d7d3cb0eade079690c3938f694c8f7ef2b686b
@@ -154,13 +154,13 @@ func @dyn_broadcast(%operand: tensor<?x?xf32>) -> tensor<?x?x?xf32> {
 // CHECK: %[[EL1:.*]] = tensor.extract %[[SHAPE]]{{\[}}%[[C1]]] : tensor<3xi64>

 // CHECK: %[[SIZE_1:.*]] = index_cast %[[EL1]] : i64 to index
-// CHECK: %[[EXPAND_1:.*]] = cmpi "slt", %[[OPER_DIM_0]], %[[SIZE_1]] : index
+// CHECK: %[[EXPAND_1:.*]] = cmpi slt, %[[OPER_DIM_0]], %[[SIZE_1]] : index
 // CHECK: %[[STRIDE_1:.*]] = select %[[EXPAND_1]], %[[C0]], %[[OP_STRIDE_0]] : index

 // CHECK: %[[C2:.*]] = constant 2 : index
 // CHECK: %[[EL2:.*]] = tensor.extract %[[SHAPE]]{{\[}}%[[C2]]] : tensor<3xi64>
 // CHECK: %[[SIZE_2:.*]] = index_cast %[[EL2]] : i64 to index
-// CHECK: %[[EXPAND_2:.*]] = cmpi "slt", %[[OPER_DIM_1]], %[[SIZE_2]] : index
+// CHECK: %[[EXPAND_2:.*]] = cmpi slt, %[[OPER_DIM_1]], %[[SIZE_2]] : index
 // CHECK: %[[STRIDE_2:.*]] = select %[[EXPAND_2]], %[[C0]], %[[C1]] : index

 // CHECK: %[[TRANSFORMED_MEMREF:.*]] = memref_reinterpret_cast %[[OPERAND]] to offset: [0], sizes: {{\[}}%[[SIZE_0]], %[[SIZE_1]], %[[SIZE_2]]], strides: {{\[}}%[[C0]], %[[STRIDE_1]], %[[STRIDE_2]]]: memref<?x?xf32> to memref<?x?x?xf32, #map>
@@ -252,7 +252,7 @@ func @float_cmp(%lhs: tensor<2x2xf32>,
 // CHECK: linalg.init_tensor [2, 2] : tensor<2x2xi1>
 // CHECK: linalg.generic
 // CHECK-NEXT: ^bb0(%[[LHS_IN:.*]]: f32, %[[RHS_IN:.*]]: f32, %{{.*}}: i1):
-// CHECK-NEXT: %[[RESULT:.*]] = cmpf "oeq", %[[LHS_IN]], %[[RHS_IN]] : f32
+// CHECK-NEXT: %[[RESULT:.*]] = cmpf oeq, %[[LHS_IN]], %[[RHS_IN]] : f32
 // CHECK-NEXT: linalg.yield %[[RESULT]] : i1

 // -----
@@ -267,7 +267,7 @@ func @float_cmp_ne(%lhs: tensor<2x2xf32>,
 // CHECK: linalg.init_tensor [2, 2] : tensor<2x2xi1>
 // CHECK: linalg.generic
 // CHECK-NEXT: ^bb0(%[[LHS_IN:.*]]: f32, %[[RHS_IN:.*]]: f32, %{{.*}}: i1):
-// CHECK-NEXT: %[[RESULT:.*]] = cmpf "une", %[[LHS_IN]], %[[RHS_IN]] : f32
+// CHECK-NEXT: %[[RESULT:.*]] = cmpf une, %[[LHS_IN]], %[[RHS_IN]] : f32
 // CHECK-NEXT: linalg.yield %[[RESULT]] : i1

 // -----
@@ -282,7 +282,7 @@ func @int_cmp(%lhs: tensor<2x2xi32>,
 // CHECK: linalg.init_tensor [2, 2] : tensor<2x2xi1>
 // CHECK: linalg.generic
 // CHECK-NEXT: ^bb0(%[[LHS_IN:.*]]: i32, %[[RHS_IN:.*]]: i32, %{{.*}}: i1):
-// CHECK-NEXT: %[[RESULT:.*]] = cmpi "slt", %[[LHS_IN]], %[[RHS_IN]] : i32
+// CHECK-NEXT: %[[RESULT:.*]] = cmpi slt, %[[LHS_IN]], %[[RHS_IN]] : i32
 // CHECK-NEXT: linalg.yield %[[RESULT]] : i1

 // -----
@@ -326,7 +326,7 @@ func @is_finte(%input: tensor<2x2xf32>) -> tensor<2x2xi1> {
 // CHECK-NEXT: ^bb0(%[[OPERAND_IN:.*]]: f32
 // CHECK-NEXT: %[[POS_INF:.+]] = constant 0x7F800000 : f32
 // CHECK-NEXT: %[[ABS_X:.+]] = absf %[[OPERAND_IN]] : f32
-// CHECK-NEXT: %[[RESULT:.+]] = cmpf "one", %[[ABS_X]], %[[POS_INF]] : f32
+// CHECK-NEXT: %[[RESULT:.+]] = cmpf one, %[[ABS_X]], %[[POS_INF]] : f32
 // CHECK-NEXT: linalg.yield %[[RESULT]] : i1

 // -----
@@ -513,7 +513,7 @@ func @minf(%lhs: tensor<2x2xf32>, %rhs: tensor<2x2xf32>) -> tensor<2x2xf32> {
 // CHECK: linalg.init_tensor [2, 2] : tensor<2x2xf32>
 // CHECK: linalg.generic
 // CHECK-NEXT: ^bb0(%[[LHS_IN:.*]]: f32, %[[RHS_IN:.*]]: f32, %{{.*}}: f32):
-// CHECK-NEXT: %[[CMP:.*]] = cmpf "olt", %[[LHS_IN]], %[[RHS_IN]] : f32
+// CHECK-NEXT: %[[CMP:.*]] = cmpf olt, %[[LHS_IN]], %[[RHS_IN]] : f32
 // CHECK-NEXT: %[[RESULT:.*]] = select %[[CMP]], %[[LHS_IN]], %[[RHS_IN]] : f32
 // CHECK-NEXT: linalg.yield %[[RESULT]] : f32

@@ -528,7 +528,7 @@ func @maxi(%lhs: tensor<2x2xi32>, %rhs: tensor<2x2xi32>) -> tensor<2x2xi32> {
 // CHECK: linalg.init_tensor [2, 2] : tensor<2x2xi32>
 // CHECK: linalg.generic
 // CHECK-NEXT: ^bb0(%[[LHS_IN:.*]]: i32, %[[RHS_IN:.*]]: i32, %{{.*}}: i32):
-// CHECK-NEXT: %[[CMP:.*]] = cmpi "sgt", %[[LHS_IN]], %[[RHS_IN]] : i32
+// CHECK-NEXT: %[[CMP:.*]] = cmpi sgt, %[[LHS_IN]], %[[RHS_IN]] : i32
 // CHECK-NEXT: %[[RESULT:.*]] = select %[[CMP]], %[[LHS_IN]], %[[RHS_IN]] : i32
 // CHECK-NEXT: linalg.yield %[[RESULT]] : i32
@@ -160,7 +160,7 @@ func @addUnrankedUnranked(
 // CHECK-NEXT: %[[LHS_SHAPE:.*]] = shape.shape_of %[[LHS]] : tensor<*xf32> -> tensor<?xindex>
 // CHECK-NEXT: %[[LHS_RANK:.*]] = shape.rank %[[LHS_SHAPE]] : tensor<?xindex> -> index
 // CHECK-NEXT: %[[C0:.*]] = constant 0 : index
-// CHECK-NEXT: %[[LHS_IS_SCALAR:.*]] = cmpi "eq", %[[LHS_RANK]], %[[C0]] : index
+// CHECK-NEXT: %[[LHS_IS_SCALAR:.*]] = cmpi eq, %[[LHS_RANK]], %[[C0]] : index
 // Handle scalar LHS case
 // CHECK-NEXT: %[[VAL_8:.*]] = scf.if %[[LHS_IS_SCALAR]] -> (tensor<*xf32>) {
 // CHECK-NEXT: %[[SCALAR_LHS:.*]] = tensor.cast %[[LHS]] : tensor<*xf32> to tensor<f32>
@@ -174,7 +174,7 @@ func @addUnrankedUnranked(
 // CHECK-NEXT: } else {
 // CHECK-NEXT: %[[RHS_SHAPE:.*]] = shape.shape_of %[[RHS]] : tensor<*xf32> -> tensor<?xindex>
 // CHECK-NEXT: %[[RHS_RANK:.*]] = shape.rank %[[RHS_SHAPE]] : tensor<?xindex> -> index
-// CHECK-NEXT: %[[RHS_IS_SCALAR:.*]] = cmpi "eq", %[[RHS_RANK]], %[[C0]] : index
+// CHECK-NEXT: %[[RHS_IS_SCALAR:.*]] = cmpi eq, %[[RHS_RANK]], %[[C0]] : index
 // Handle scalar RHS case
 // CHECK-NEXT: %[[VAL_14:.*]] = scf.if %[[RHS_IS_SCALAR]] -> (tensor<*xf32>) {
 // CHECK-NEXT: %[[SCALAR_RHS:.*]] = tensor.cast %[[RHS]] : tensor<*xf32> to tensor<f32>
@@ -197,11 +197,11 @@ func @addUnrankedUnranked(
 // CHECK-NEXT: %[[RESHAPED_SAME_RESULT:.*]] = "mhlo.dynamic_reshape"(%[[FLATTENED_RESULT]], %[[ANY_SHAPE]]) : (tensor<?xf32>, tensor<?xindex>) -> tensor<*xf32>
 // CHECK-NEXT: scf.yield %[[RESHAPED_SAME_RESULT]] : tensor<*xf32>
 // CHECK-NEXT: } else {
-// CHECK-NEXT: %[[LHS_RANK_GREATER:.*]] = cmpi "sgt", %[[LHS_RANK]], %[[RHS_RANK]] : index
+// CHECK-NEXT: %[[LHS_RANK_GREATER:.*]] = cmpi sgt, %[[LHS_RANK]], %[[RHS_RANK]] : index
 // CHECK-NEXT: %[[GREATEST_RANK:.*]] = select %[[LHS_RANK_GREATER]], %[[LHS_RANK]], %[[RHS_RANK]] : index
 // Handle rank 1 specialization
 // CHECK-NEXT: %[[C1:.*]] = constant 1 : index
-// CHECK-NEXT: %[[GREATEST_RANK_IS_1:.*]] = cmpi "eq", %[[GREATEST_RANK]], %[[C1]] : index
+// CHECK-NEXT: %[[GREATEST_RANK_IS_1:.*]] = cmpi eq, %[[GREATEST_RANK]], %[[C1]] : index
 // CHECK-NEXT: %[[RESULT_RANK_1:.*]] = scf.if %[[GREATEST_RANK_IS_1]] -> (tensor<*xf32>) {
 // CHECK-NEXT: %[[CONST_SHAPE_1:.*]] = shape.const_shape [1]
 // CHECK-NEXT: %[[BROADCASTED_LHS_1:.*]] = shape.broadcast %[[LHS_SHAPE]], %[[CONST_SHAPE_1]] : tensor<?xindex>, tensor<1xindex> -> tensor<?xindex>
@@ -215,7 +215,7 @@ func @addUnrankedUnranked(
 // CHECK-NEXT: scf.yield %[[RESULT_1]] : tensor<*xf32>
 // CHECK-NEXT: } else {
 // CHECK-NEXT: %[[C2:.*]] = constant 2 : index
-// CHECK-NEXT: %[[GREATEST_RANK_IS_2:.*]] = cmpi "eq", %[[GREATEST_RANK]], %[[C2]] : index
+// CHECK-NEXT: %[[GREATEST_RANK_IS_2:.*]] = cmpi eq, %[[GREATEST_RANK]], %[[C2]] : index
 // Handle rank 2 specialization
 // CHECK-NEXT: %[[VAL_26:.*]] = scf.if %[[GREATEST_RANK_IS_2]] -> (tensor<*xf32>) {
 // CHECK-NEXT: %[[CONST_SHAPE_2:.*]] = shape.const_shape [1, 1]
@@ -230,7 +230,7 @@ func @addUnrankedUnranked(
 // CHECK-NEXT: scf.yield %[[RESULT_2]] : tensor<*xf32>
 // CHECK-NEXT: } else {
 // CHECK-NEXT: %[[C3:.*]] = constant 3 : index
-// CHECK-NEXT: %[[GREATEST_RANK_IS_3:.*]] = cmpi "eq", %[[GREATEST_RANK]], %[[C3]] : index
+// CHECK-NEXT: %[[GREATEST_RANK_IS_3:.*]] = cmpi eq, %[[GREATEST_RANK]], %[[C3]] : index
 // Handle rank 3 specialization
 // CHECK-NEXT: %[[VAL_34:.*]] = scf.if %[[GREATEST_RANK_IS_3]] -> (tensor<*xf32>) {
 // CHECK-NEXT: %[[CONST_SHAPE_3:.*]] = shape.const_shape [1, 1, 1]
@@ -245,7 +245,7 @@ func @addUnrankedUnranked(
 // CHECK-NEXT: scf.yield %[[RESULT_3]] : tensor<*xf32>
 // CHECK-NEXT: } else {
 // CHECK-NEXT: %[[C4:.*]] = constant 4 : index
-// CHECK-NEXT: %[[GREATEST_RANK_IS_4:.*]] = cmpi "eq", %[[GREATEST_RANK]], %[[C4]] : index
+// CHECK-NEXT: %[[GREATEST_RANK_IS_4:.*]] = cmpi eq, %[[GREATEST_RANK]], %[[C4]] : index
 // Handle rank 4 specialization
 // CHECK-NEXT: %[[VAL_42:.*]] = scf.if %[[GREATEST_RANK_IS_4]] -> (tensor<*xf32>) {
 // CHECK-NEXT: %[[CONST_SHAPE_4:.*]] = shape.const_shape [1, 1, 1, 1]
@@ -260,7 +260,7 @@ func @addUnrankedUnranked(
 // CHECK-NEXT: scf.yield %[[RESULT_4]] : tensor<*xf32>
 // CHECK-NEXT: } else {
 // CHECK-NEXT: %[[C5:.*]] = constant 5 : index
-// CHECK-NEXT: %[[GREATEST_RANK_IS_5:.*]] = cmpi "eq", %[[GREATEST_RANK]], %[[C5]] : index
+// CHECK-NEXT: %[[GREATEST_RANK_IS_5:.*]] = cmpi eq, %[[GREATEST_RANK]], %[[C5]] : index
 // Handle rank 5 specialization
 // CHECK-NEXT: %[[VAL_50:.*]] = scf.if %[[GREATEST_RANK_IS_5]] -> (tensor<*xf32>) {
 // CHECK-NEXT: %[[CONST_SHAPE_5:.*]] = shape.const_shape [1, 1, 1, 1, 1]
@@ -275,7 +275,7 @@ func @addUnrankedUnranked(
 // CHECK-NEXT: scf.yield %[[RESULT_5]] : tensor<*xf32>
 // CHECK-NEXT: } else {
 // CHECK-NEXT: %[[C6:.*]] = constant 6 : index
-// CHECK-NEXT: %[[GREATEST_RANK_IS_6:.*]] = cmpi "eq", %[[GREATEST_RANK]], %[[C6]] : index
+// CHECK-NEXT: %[[GREATEST_RANK_IS_6:.*]] = cmpi eq, %[[GREATEST_RANK]], %[[C6]] : index
 // CHECK-NEXT: assert %[[GREATEST_RANK_IS_6]]
 // Handle rank 6 specialization
 // CHECK-NEXT: %[[CONST_SHAPE_6:.*]] = shape.const_shape [1, 1, 1, 1, 1, 1]
@@ -53,17 +53,17 @@ func @unary_ops_float(%arg0: tensor<4xf32>) -> tensor<4xf32> {

 // CHECK-LABEL: func @compare_int
 func @compare_int(%arg0: tensor<4xi32>, %arg1: tensor<4xi32>) -> (tensor<4xi1>,tensor<4xi1>,tensor<4xi1>,tensor<4xi1>,tensor<4xi1>,tensor<4xi1>) {
-// CHECK-NEXT: %0 = cmpi "eq", %arg0, %arg1 : tensor<4xi32>
+// CHECK-NEXT: %0 = cmpi eq, %arg0, %arg1 : tensor<4xi32>
 %0 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = "EQ"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
-// CHECK-NEXT: %1 = cmpi "ne", %arg0, %arg1 : tensor<4xi32>
+// CHECK-NEXT: %1 = cmpi ne, %arg0, %arg1 : tensor<4xi32>
 %1 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = "NE"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
-// CHECK-NEXT: %2 = cmpi "slt", %arg0, %arg1 : tensor<4xi32>
+// CHECK-NEXT: %2 = cmpi slt, %arg0, %arg1 : tensor<4xi32>
 %2 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = "LT"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
-// CHECK-NEXT: %3 = cmpi "sle", %arg0, %arg1 : tensor<4xi32>
+// CHECK-NEXT: %3 = cmpi sle, %arg0, %arg1 : tensor<4xi32>
 %3 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = "LE"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
-// CHECK-NEXT: %4 = cmpi "sgt", %arg0, %arg1 : tensor<4xi32>
+// CHECK-NEXT: %4 = cmpi sgt, %arg0, %arg1 : tensor<4xi32>
 %4 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = "GT"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
-// CHECK-NEXT: %5 = cmpi "sge", %arg0, %arg1 : tensor<4xi32>
+// CHECK-NEXT: %5 = cmpi sge, %arg0, %arg1 : tensor<4xi32>
 %5 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = "GE"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
 // CHECK-NEXT: return %0, %1, %2, %3, %4, %5 : tensor<4xi1>, tensor<4xi1>, tensor<4xi1>, tensor<4xi1>, tensor<4xi1>, tensor<4xi1>
 return %0, %1, %2, %3, %4, %5 : tensor<4xi1>, tensor<4xi1>, tensor<4xi1>, tensor<4xi1>, tensor<4xi1>, tensor<4xi1>
@@ -71,17 +71,17 @@ func @compare_int(%arg0: tensor<4xi32>, %arg1: tensor<4xi32>) -> (tensor<4xi1>,t

 // CHECK-LABEL: func @compare_float
 func @compare_float(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> (tensor<4xi1>,tensor<4xi1>,tensor<4xi1>,tensor<4xi1>,tensor<4xi1>,tensor<4xi1>) {
-// CHECK-NEXT: %0 = cmpf "oeq", %arg0, %arg1 : tensor<4xf32>
+// CHECK-NEXT: %0 = cmpf oeq, %arg0, %arg1 : tensor<4xf32>
 %0 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = "EQ"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xi1>
-// CHECK-NEXT: %1 = cmpf "une", %arg0, %arg1 : tensor<4xf32>
+// CHECK-NEXT: %1 = cmpf une, %arg0, %arg1 : tensor<4xf32>
 %1 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = "NE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xi1>
-// CHECK-NEXT: %2 = cmpf "olt", %arg0, %arg1 : tensor<4xf32>
+// CHECK-NEXT: %2 = cmpf olt, %arg0, %arg1 : tensor<4xf32>
 %2 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = "LT"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xi1>
-// CHECK-NEXT: %3 = cmpf "ole", %arg0, %arg1 : tensor<4xf32>
+// CHECK-NEXT: %3 = cmpf ole, %arg0, %arg1 : tensor<4xf32>
 %3 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = "LE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xi1>
-// CHECK-NEXT: %4 = cmpf "ogt", %arg0, %arg1 : tensor<4xf32>
+// CHECK-NEXT: %4 = cmpf ogt, %arg0, %arg1 : tensor<4xf32>
 %4 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = "GT"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xi1>
-// CHECK-NEXT: %5 = cmpf "oge", %arg0, %arg1 : tensor<4xf32>
+// CHECK-NEXT: %5 = cmpf oge, %arg0, %arg1 : tensor<4xf32>
 %5 = "mhlo.compare"(%arg0, %arg1) {comparison_direction = "GE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xi1>
 return %0, %1, %2, %3, %4, %5: tensor<4xi1>, tensor<4xi1>, tensor<4xi1>, tensor<4xi1>, tensor<4xi1>, tensor<4xi1>
 }
@@ -34,10 +34,10 @@ func @tanh_f32(%arg0 : f32) -> f32 {
 // CHECK: %[[VAL_13:.*]] = constant 0.00226843474 : f32
 // CHECK: %[[VAL_14:.*]] = constant 0.00489352504 : f32
 // CHECK: %[[VAL_15:.*]] = absf %[[VAL_0]] : f32
-// CHECK: %[[VAL_16:.*]] = cmpf "olt", %[[VAL_15]], %[[VAL_1]] : f32
-// CHECK: %[[VAL_17:.*]] = cmpf "ule", %[[VAL_0]], %[[VAL_2]] : f32
+// CHECK: %[[VAL_16:.*]] = cmpf olt, %[[VAL_15]], %[[VAL_1]] : f32
+// CHECK: %[[VAL_17:.*]] = cmpf ule, %[[VAL_0]], %[[VAL_2]] : f32
 // CHECK: %[[VAL_18:.*]] = select %[[VAL_17]], %[[VAL_0]], %[[VAL_2]] : f32
-// CHECK: %[[VAL_19:.*]] = cmpf "uge", %[[VAL_18]], %[[VAL_3]] : f32
+// CHECK: %[[VAL_19:.*]] = cmpf uge, %[[VAL_18]], %[[VAL_3]] : f32
 // CHECK: %[[VAL_20:.*]] = select %[[VAL_19]], %[[VAL_18]], %[[VAL_3]] : f32
 // CHECK: %[[VAL_21:.*]] = mulf %[[VAL_20]], %[[VAL_20]] : f32
 // CHECK: %[[VAL_22:.*]] = mulf %[[VAL_21]], %[[VAL_4]] : f32
@@ -90,10 +90,10 @@ func @tanh_f16(%arg0 : f16) -> f16 {
 // CHECK: %[[VAL_14:.*]] = constant 0.00489352504 : f32
 // CHECK: %[[VAL_15:.*]] = fpext %[[VAL_0]] : f16 to f32
 // CHECK: %[[VAL_16:.*]] = absf %[[VAL_15]] : f32
-// CHECK: %[[VAL_17:.*]] = cmpf "olt", %[[VAL_16]], %[[VAL_1]] : f32
-// CHECK: %[[VAL_18:.*]] = cmpf "ule", %[[VAL_15]], %[[VAL_2]] : f32
+// CHECK: %[[VAL_17:.*]] = cmpf olt, %[[VAL_16]], %[[VAL_1]] : f32
+// CHECK: %[[VAL_18:.*]] = cmpf ule, %[[VAL_15]], %[[VAL_2]] : f32
 // CHECK: %[[VAL_19:.*]] = select %[[VAL_18]], %[[VAL_15]], %[[VAL_2]] : f32
-// CHECK: %[[VAL_20:.*]] = cmpf "uge", %[[VAL_19]], %[[VAL_3]] : f32
+// CHECK: %[[VAL_20:.*]] = cmpf uge, %[[VAL_19]], %[[VAL_3]] : f32
 // CHECK: %[[VAL_21:.*]] = select %[[VAL_20]], %[[VAL_19]], %[[VAL_3]] : f32
 // CHECK: %[[VAL_22:.*]] = mulf %[[VAL_21]], %[[VAL_21]] : f32
 // CHECK: %[[VAL_23:.*]] = mulf %[[VAL_22]], %[[VAL_4]] : f32
@@ -151,7 +151,7 @@ func @atan2_f32(%arg0 : f32, %arg1 : f32) -> f32 {
 // CHECK: %[[CST_13:.*]] = constant 0x7F800000 : f32
 // CHECK: %[[VAL_0:.*]] = absf %[[ARG1]] : f32
 // CHECK: %[[VAL_1:.*]] = absf %[[ARG0]] : f32
-// CHECK: %[[VAL_2:.*]] = cmpf "ole", %[[VAL_0]], %[[VAL_1]] : f32
+// CHECK: %[[VAL_2:.*]] = cmpf ole, %[[VAL_0]], %[[VAL_1]] : f32
 // CHECK: %[[VAL_3:.*]] = select %[[VAL_2]], %[[VAL_0]], %[[VAL_1]] : f32
 // CHECK: %[[VAL_4:.*]] = select %[[VAL_2]], %[[VAL_1]], %[[VAL_0]] : f32
 // CHECK: %[[VAL_5:.*]] = divf %[[VAL_3]], %[[VAL_4]] : f32
@@ -175,17 +175,17 @@ func @atan2_f32(%arg0 : f32, %arg1 : f32) -> f32 {
 // CHECK: %[[VAL_23:.*]] = addf %[[VAL_22]], %[[VAL_5]] : f32
 // CHECK: %[[VAL_24:.*]] = subf %[[CST_7]], %[[VAL_23]] : f32
 // CHECK: %[[VAL_25:.*]] = select %[[VAL_2]], %[[VAL_24]], %[[VAL_23]] : f32
-// CHECK: %[[VAL_26:.*]] = cmpf "olt", %[[ARG1]], %[[CST_8]] : f32
+// CHECK: %[[VAL_26:.*]] = cmpf olt, %[[ARG1]], %[[CST_8]] : f32
 // CHECK: %[[VAL_27:.*]] = subf %[[CST_9]], %[[VAL_25]] : f32
 // CHECK: %[[VAL_28:.*]] = select %[[VAL_26]], %[[VAL_27]], %[[VAL_25]] : f32
 // CHECK: %[[VAL_29:.*]] = select %[[VAL_26]], %[[CST_9]], %[[CST_8]] : f32
-// CHECK: %[[VAL_30:.*]] = cmpf "oeq", %[[ARG0]], %[[CST_8]] : f32
+// CHECK: %[[VAL_30:.*]] = cmpf oeq, %[[ARG0]], %[[CST_8]] : f32
 // CHECK: %[[VAL_31:.*]] = select %[[VAL_30]], %[[VAL_29]], %[[VAL_28]] : f32
-// CHECK: %[[VAL_32:.*]] = cmpf "uno", %[[ARG0]], %[[ARG1]] : f32
+// CHECK: %[[VAL_32:.*]] = cmpf uno, %[[ARG0]], %[[ARG1]] : f32
 // CHECK: %[[VAL_35:.*]] = select %[[VAL_32]], %[[CST_10]], %[[VAL_31]] : f32
 // CHECK: %[[VAL_36:.*]] = select %[[VAL_26]], %[[CST_11]], %[[CST_12]] : f32
-// CHECK: %[[VAL_37:.*]] = cmpf "oeq", %[[ARG1]], %[[CST_13]] : f32
-// CHECK: %[[VAL_38:.*]] = cmpf "oeq", %[[ARG0]], %[[CST_13]] : f32
+// CHECK: %[[VAL_37:.*]] = cmpf oeq, %[[ARG1]], %[[CST_13]] : f32
+// CHECK: %[[VAL_38:.*]] = cmpf oeq, %[[ARG0]], %[[CST_13]] : f32
 // CHECK: %[[VAL_39:.*]] = and %[[VAL_37]], %[[VAL_38]] : i1
 // CHECK: %[[VAL_40:.*]] = select %[[VAL_39]], %[[VAL_36]], %[[VAL_35]] : f32
 // CHECK: %[[VAL_41:.*]] = copysign %[[VAL_40]], %[[ARG0]] : f32
@@ -218,7 +218,7 @@ func @atan2_f16(%arg0 : f16, %arg1 : f16) -> f16 {
 // CHECK: %[[VAL_1:.*]] = fpext %[[ARG1]] : f16 to f32
 // CHECK: %[[VAL_2:.*]] = absf %[[VAL_1]] : f32
 // CHECK: %[[VAL_3:.*]] = absf %[[VAL_0]] : f32
-// CHECK: %[[VAL_4:.*]] = cmpf "ole", %[[VAL_2]], %[[VAL_3]] : f32
+// CHECK: %[[VAL_4:.*]] = cmpf ole, %[[VAL_2]], %[[VAL_3]] : f32
 // CHECK: %[[VAL_5:.*]] = select %[[VAL_4]], %[[VAL_2]], %[[VAL_3]] : f32
 // CHECK: %[[VAL_6:.*]] = select %[[VAL_4]], %[[VAL_3]], %[[VAL_2]] : f32
 // CHECK: %[[VAL_7:.*]] = divf %[[VAL_5]], %[[VAL_6]] : f32
@@ -242,17 +242,17 @@ func @atan2_f16(%arg0 : f16, %arg1 : f16) -> f16 {
 // CHECK: %[[VAL_25:.*]] = addf %[[VAL_24]], %[[VAL_7]] : f32
 // CHECK: %[[VAL_26:.*]] = subf %[[CST_7]], %[[VAL_25]] : f32
 // CHECK: %[[VAL_27:.*]] = select %[[VAL_4]], %[[VAL_26]], %[[VAL_25]] : f32
-// CHECK: %[[VAL_28:.*]] = cmpf "olt", %[[VAL_1]], %[[CST_8]] : f32
+// CHECK: %[[VAL_28:.*]] = cmpf olt, %[[VAL_1]], %[[CST_8]] : f32
 // CHECK: %[[VAL_29:.*]] = subf %[[CST_9]], %[[VAL_27]] : f32
 // CHECK: %[[VAL_30:.*]] = select %[[VAL_28]], %[[VAL_29]], %[[VAL_27]] : f32
 // CHECK: %[[VAL_31:.*]] = select %[[VAL_28]], %[[CST_9]], %[[CST_8]] : f32
-// CHECK: %[[VAL_32:.*]] = cmpf "oeq", %[[VAL_0]], %[[CST_8]] : f32
+// CHECK: %[[VAL_32:.*]] = cmpf oeq, %[[VAL_0]], %[[CST_8]] : f32
 // CHECK: %[[VAL_33:.*]] = select %[[VAL_32]], %[[VAL_31]], %[[VAL_30]] : f32
-// CHECK: %[[VAL_34:.*]] = cmpf "uno", %[[VAL_0]], %[[VAL_1]] : f32
+// CHECK: %[[VAL_34:.*]] = cmpf uno, %[[VAL_0]], %[[VAL_1]] : f32
 // CHECK: %[[VAL_37:.*]] = select %[[VAL_34]], %[[CST_10]], %[[VAL_33]] : f32
 // CHECK: %[[VAL_38:.*]] = select %[[VAL_28]], %[[CST_11]], %[[CST_12]] : f32
-// CHECK: %[[VAL_39:.*]] = cmpf "oeq", %[[VAL_1]], %[[CST_13]] : f32
-// CHECK: %[[VAL_40:.*]] = cmpf "oeq", %[[VAL_0]], %[[CST_13]] : f32
+// CHECK: %[[VAL_39:.*]] = cmpf oeq, %[[VAL_1]], %[[CST_13]] : f32
+// CHECK: %[[VAL_40:.*]] = cmpf oeq, %[[VAL_0]], %[[CST_13]] : f32
 // CHECK: %[[VAL_41:.*]] = and %[[VAL_39]], %[[VAL_40]] : i1
 // CHECK: %[[VAL_42:.*]] = select %[[VAL_41]], %[[VAL_38]], %[[VAL_37]] : f32
 // CHECK: %[[VAL_43:.*]] = copysign %[[VAL_42]], %[[VAL_0]] : f32
@@ -290,7 +290,7 @@ func @atan_f32(%arg : f32) -> f32 {
 // CHECK: %[[CST_10:.*]] = constant 0x7FC00000 : f32
 // CHECK: %[[VAL_0:.*]] = absf %[[CST]] : f32
 // CHECK: %[[VAL_1:.*]] = absf %arg0 : f32
-// CHECK: %[[VAL_2:.*]] = cmpf "ole", %[[VAL_0]], %[[VAL_1]] : f32
+// CHECK: %[[VAL_2:.*]] = cmpf ole, %[[VAL_0]], %[[VAL_1]] : f32
 // CHECK: %[[VAL_3:.*]] = select %[[VAL_2]], %[[VAL_0]], %[[VAL_1]] : f32
 // CHECK: %[[VAL_4:.*]] = select %[[VAL_2]], %[[VAL_1]], %[[VAL_0]] : f32
 // CHECK: %[[VAL_5:.*]] = divf %[[VAL_3]], %[[VAL_4]] : f32
@@ -314,9 +314,9 @@ func @atan_f32(%arg : f32) -> f32 {
 // CHECK: %[[VAL_23:.*]] = addf %[[VAL_22]], %[[VAL_5]] : f32
 // CHECK: %[[VAL_24:.*]] = subf %[[CST_8]], %[[VAL_23]] : f32
 // CHECK: %[[VAL_25:.*]] = select %[[VAL_2]], %[[VAL_24]], %[[VAL_23]] : f32
-// CHECK: %[[VAL_26:.*]] = cmpf "oeq", %arg0, %[[CST_9]] : f32
+// CHECK: %[[VAL_26:.*]] = cmpf oeq, %arg0, %[[CST_9]] : f32
 // CHECK: %[[VAL_27:.*]] = select %[[VAL_26]], %[[CST_9]], %[[VAL_25]] : f32
-// CHECK: %[[VAL_28:.*]] = cmpf "uno", %arg0, %[[CST]] : f32
+// CHECK: %[[VAL_28:.*]] = cmpf uno, %arg0, %[[CST]] : f32
 // CHECK: %[[VAL_29:.*]] = select %[[VAL_28]], %[[CST_10]], %[[VAL_27]] : f32
 // CHECK: %[[VAL_30:.*]] = copysign %[[VAL_29]], %arg0 : f32
 // CHECK: return %[[VAL_30]] : f32
@@ -344,7 +344,7 @@ func @atan_f16(%arg : f16) -> f16 {
 // CHECK: %[[VAL_0:.*]] = fpext %arg0 : f16 to f32
 // CHECK: %[[VAL_1:.*]] = absf %[[CST]] : f32
 // CHECK: %[[VAL_2:.*]] = absf %[[VAL_0]] : f32
-// CHECK: %[[VAL_3:.*]] = cmpf "ole", %[[VAL_1]], %[[VAL_2]] : f32
+// CHECK: %[[VAL_3:.*]] = cmpf ole, %[[VAL_1]], %[[VAL_2]] : f32
 // CHECK: %[[VAL_4:.*]] = select %[[VAL_3]], %[[VAL_1]], %[[VAL_2]] : f32
 // CHECK: %[[VAL_5:.*]] = select %[[VAL_3]], %[[VAL_2]], %[[VAL_1]] : f32
 // CHECK: %[[VAL_6:.*]] = divf %[[VAL_4]], %[[VAL_5]] : f32
@@ -368,9 +368,9 @@ func @atan_f16(%arg : f16) -> f16 {
 // CHECK: %[[VAL_24:.*]] = addf %[[VAL_23]], %[[VAL_6]] : f32
 // CHECK: %[[VAL_25:.*]] = subf %[[CST_8]], %[[VAL_24]] : f32
 // CHECK: %[[VAL_26:.*]] = select %[[VAL_3]], %[[VAL_25]], %[[VAL_24]] : f32
-// CHECK: %[[VAL_27:.*]] = cmpf "oeq", %[[VAL_0]], %[[CST_9]] : f32
+// CHECK: %[[VAL_27:.*]] = cmpf oeq, %[[VAL_0]], %[[CST_9]] : f32
 // CHECK: %[[VAL_28:.*]] = select %[[VAL_27]], %[[CST_9]], %[[VAL_26]] : f32
-// CHECK: %[[VAL_29:.*]] = cmpf "uno", %[[VAL_0]], %[[CST]] : f32
+// CHECK: %[[VAL_29:.*]] = cmpf uno, %[[VAL_0]], %[[CST]] : f32
 // CHECK: %[[VAL_30:.*]] = select %[[VAL_29]], %[[CST_10]], %[[VAL_28]] : f32
 // CHECK: %[[VAL_31:.*]] = copysign %[[VAL_30]], %[[VAL_0]] : f32
 // CHECK: %[[VAL_32:.*]] = fptrunc %[[VAL_31]] : f32 to f16
@@ -81,12 +81,12 @@ func @select_and_scatter(%arg: memref<112x112xf32>,
 // Compute index I of the ARG buffer and check whether it is in padding area.
 // CHECK: [[START_I:%.*]] = muli [[II]], [[C2]] : index
 // CHECK: [[ARG_I:%.*]] = addi [[START_I]], [[WIN_I]] : index
-// CHECK: [[ARG_I_FITS:%.*]] = cmpi "ult", [[ARG_I]], [[C112]] : index
+// CHECK: [[ARG_I_FITS:%.*]] = cmpi ult, [[ARG_I]], [[C112]] : index

 // Compute index J of the ARG buffer and check whether it is in padding area.
 // CHECK: [[START_J:%.*]] = muli [[JJ]], [[C2]] : index
 // CHECK: [[ARG_J:%.*]] = addi [[START_J]], [[WIN_J]] : index
-// CHECK: [[ARG_J_FITS:%.*]] = cmpi "ult", [[ARG_J]], [[C112]] : index
+// CHECK: [[ARG_J_FITS:%.*]] = cmpi ult, [[ARG_J]], [[C112]] : index

 // Update `INBOUNDS`, i.e. whether or not ARG indices are inside the boundaries
 // of the buffer or they are in the padding area.
@@ -10,7 +10,7 @@ func @min_op(%lhs: memref<4x3x2x1xf32>, %rhs: memref<4x3x2x1xf32>,
 // CHECK-NEXT: affine.for %[[L:.*]] = 0 to 1 {
 // CHECK-NEXT: %[[LHS:.*]] = affine.load %{{.*}}[%[[I]], %[[J]], %[[K]], %[[L]]] : memref<4x3x2x1xf32>
 // CHECK-NEXT: %[[RHS:.*]] = affine.load %{{.*}}[%[[I]], %[[J]], %[[K]], %[[L]]] : memref<4x3x2x1xf32>
-// CHECK-NEXT: %[[MIN_PREDICATE:.*]] = cmpf "olt", %[[LHS]], %[[RHS]] : f32
+// CHECK-NEXT: %[[MIN_PREDICATE:.*]] = cmpf olt, %[[LHS]], %[[RHS]] : f32
 // CHECK-NEXT: %[[MIN:.*]] = select %[[MIN_PREDICATE]], %[[LHS]], %[[RHS]] : f32
 // CHECK-NEXT: affine.store %[[MIN]], %{{.*}}[%[[I]], %[[J]], %[[K]], %[[L]]] : memref<4x3x2x1xf32>
 // CHECK: return
@@ -69,7 +69,7 @@ func @int_div_op(%lhs: memref<7xi32>, %rhs: memref<7xi32>,
 // CHECK-LABEL: func @float_max_op
 func @float_max_op(%lhs: memref<7xf32>, %rhs: memref<7xf32>,
 %result: memref<7xf32>) -> () {
-// CHECK: %[[CHECK:.*]] = cmpf "ogt", %[[ONE:.*]], %[[TWO:.*]] : f32
+// CHECK: %[[CHECK:.*]] = cmpf ogt, %[[ONE:.*]], %[[TWO:.*]] : f32
 // CHECK: select %[[CHECK]], %[[ONE]], %[[TWO]] : f32
 "lmhlo.maximum"(%lhs, %rhs, %result) {name = "max.1"}
 : (memref<7xf32>, memref<7xf32>, memref<7xf32>) -> ()
@@ -79,7 +79,7 @@ func @float_max_op(%lhs: memref<7xf32>, %rhs: memref<7xf32>,
 // CHECK-LABEL: func @int_max_op
 func @int_max_op(%lhs: memref<7xi32>, %rhs: memref<7xi32>,
 %result: memref<7xi32>) -> () {
-// CHECK: %[[CHECK:.*]] = cmpi "sgt", %[[ONE:.*]], %[[TWO:.*]] : i32
+// CHECK: %[[CHECK:.*]] = cmpi sgt, %[[ONE:.*]], %[[TWO:.*]] : i32
 // CHECK: select %[[CHECK]], %[[ONE]], %[[TWO]] : i32
 "lmhlo.maximum"(%lhs, %rhs, %result) {name = "max.1"}
 : (memref<7xi32>, memref<7xi32>, memref<7xi32>) -> ()
@@ -90,7 +90,7 @@ func @int_max_op(%lhs: memref<7xi32>, %rhs: memref<7xi32>,
 // CHECK-LABEL: func @float_min_op
 func @float_min_op(%lhs: memref<7xf32>, %rhs: memref<7xf32>,
 %result: memref<7xf32>) -> () {
-// CHECK: %[[CHECK:.*]] = cmpf "olt", %[[ONE:.*]], %[[TWO:.*]] : f32
+// CHECK: %[[CHECK:.*]] = cmpf olt, %[[ONE:.*]], %[[TWO:.*]] : f32
 // CHECK: select %[[CHECK]], %[[ONE]], %[[TWO]] : f32
 "lmhlo.minimum"(%lhs, %rhs, %result) {name = "min.1"}
 : (memref<7xf32>, memref<7xf32>, memref<7xf32>) -> ()
@@ -100,7 +100,7 @@ func @float_min_op(%lhs: memref<7xf32>, %rhs: memref<7xf32>,
 // CHECK-LABEL: func @int_min_op
 func @int_min_op(%lhs: memref<7xi32>, %rhs: memref<7xi32>,
 %result: memref<7xi32>) -> () {
-// CHECK: %[[CHECK:.*]] = cmpi "slt", %[[ONE:.*]], %[[TWO:.*]] : i32
+// CHECK: %[[CHECK:.*]] = cmpi slt, %[[ONE:.*]], %[[TWO:.*]] : i32
 // CHECK: select %[[CHECK]], %[[ONE]], %[[TWO]] : i32
 "lmhlo.minimum"(%lhs, %rhs, %result) {name = "min.1"}
 : (memref<7xi32>, memref<7xi32>, memref<7xi32>) -> ()
@@ -69,7 +69,7 @@ func @minf(%lhs: memref<2x2xf32>, %rhs: memref<2x2xf32>,
 }
 // CHECK: linalg.generic
 // CHECK-NEXT: ^bb0(%[[LHS_IN:.*]]: f32, %[[RHS_IN:.*]]: f32, %[[RESULT_OUT:.*]]: f32):
-// CHECK-NEXT: %[[CMP:.*]] = cmpf "olt", %[[LHS_IN]], %[[RHS_IN]] : f32
+// CHECK-NEXT: %[[CMP:.*]] = cmpf olt, %[[LHS_IN]], %[[RHS_IN]] : f32
 // CHECK-NEXT: %[[RESULT:.*]] = select %[[CMP]], %[[LHS_IN]], %[[RHS_IN]] : f32
 // CHECK-NEXT: linalg.yield %[[RESULT]] : f32

@@ -84,7 +84,7 @@ func @maxi(%lhs: memref<2x2xi32>, %rhs: memref<2x2xi32>,
 }
 // CHECK: linalg.generic
 // CHECK-NEXT: ^bb0(%[[LHS_IN:.*]]: i32, %[[RHS_IN:.*]]: i32, %[[RESULT_OUT:.*]]: i32):
-// CHECK-NEXT: %[[CMP:.*]] = cmpi "sgt", %[[LHS_IN]], %[[RHS_IN]] : i32
+// CHECK-NEXT: %[[CMP:.*]] = cmpi sgt, %[[LHS_IN]], %[[RHS_IN]] : i32
 // CHECK-NEXT: %[[RESULT:.*]] = select %[[CMP]], %[[LHS_IN]], %[[RHS_IN]] : i32
 // CHECK-NEXT: linalg.yield %[[RESULT]] : i32

@@ -149,7 +149,7 @@ func @is_finte(%input: memref<2x2xf32>, %result: memref<2x2xi1>) {
 // CHECK-NEXT: ^bb0(%[[OPERAND_IN:.*]]: f32, %[[RESULT_OUT:.*]]):
 // CHECK-NEXT: %[[POS_INF:.+]] = constant 0x7F800000 : f32
 // CHECK-NEXT: %[[ABS_X:.+]] = absf %[[OPERAND_IN]] : f32
-// CHECK-NEXT: %[[RESULT:.+]] = cmpf "one", %[[ABS_X]], %[[POS_INF]] : f32
+// CHECK-NEXT: %[[RESULT:.+]] = cmpf one, %[[ABS_X]], %[[POS_INF]] : f32
 // CHECK-NEXT: linalg.yield %[[RESULT]] : i1

 // -----
@@ -163,7 +163,7 @@ func @float_cmp(%lhs: memref<2x2xf32>, %rhs: memref<2x2xf32>,
 }
 // CHECK: linalg.generic
 // CHECK-NEXT: ^bb0(%[[LHS_IN:.*]]: f32, %[[RHS_IN:.*]]: f32, %[[RESULT_OUT:.*]]: i1):
-// CHECK-NEXT: %[[RESULT:.*]] = cmpf "oeq", %[[LHS_IN]], %[[RHS_IN]] : f32
+// CHECK-NEXT: %[[RESULT:.*]] = cmpf oeq, %[[LHS_IN]], %[[RHS_IN]] : f32
 // CHECK-NEXT: linalg.yield %[[RESULT]] : i1

 // -----
@@ -177,7 +177,7 @@ func @int_cmp(%lhs: memref<2x2xi32>, %rhs: memref<2x2xi32>,
 }
 // CHECK: linalg.generic
 // CHECK-NEXT: ^bb0(%[[LHS_IN:.*]]: i32, %[[RHS_IN:.*]]: i32, %[[RESULT_OUT:.*]]: i1):
-// CHECK-NEXT: %[[RESULT:.*]] = cmpi "slt", %[[LHS_IN]], %[[RHS_IN]] : i32
+// CHECK-NEXT: %[[RESULT:.*]] = cmpi slt, %[[LHS_IN]], %[[RHS_IN]] : i32
 // CHECK-NEXT: linalg.yield %[[RESULT]] : i1

 // -----
@@ -385,7 +385,7 @@ func @absi(%input: memref<2x2xi32>,
 // CHECK: linalg.generic
 // CHECK-NEXT: ^bb0(%[[OPERAND_IN:.*]]: i32, %[[RESULT_OUT:.*]]):
 // CHECK-NEXT: %[[L0:.*]] = constant 0 : i32
-// CHECK-NEXT: %[[L1:.*]] = cmpi "sge", %[[OPERAND_IN]], %[[L0]] : i32
+// CHECK-NEXT: %[[L1:.*]] = cmpi sge, %[[OPERAND_IN]], %[[L0]] : i32
 // CHECK-NEXT: %[[L2:.*]] = subi %[[L0]], %[[OPERAND_IN]] : i32
 // CHECK-NEXT: %[[RESULT:.*]] = select %[[L1]], %[[OPERAND_IN]], %[[L2]] : i32
 // CHECK-NEXT: linalg.yield %[[RESULT]] : i32
@@ -610,10 +610,10 @@ func @sign(%input: memref<2x2xf32>, %result: memref<2x2xf32>) {
 // CHECK: linalg.generic
 // CHECK-NEXT: ^bb0(%[[OPERAND_IN:.*]]: f32, %[[RESULT_OUT:.*]]):
 // CHECK-NEXT: %[[CST_0:.*]] = constant 0.000000e+00 : f32
-// CHECK-NEXT: %[[NE_0:.*]] = cmpf "one", %[[OPERAND_IN]], %[[CST_0]] : f32
+// CHECK-NEXT: %[[NE_0:.*]] = cmpf one, %[[OPERAND_IN]], %[[CST_0]] : f32
 // CHECK-NEXT: %[[NE_0_FLOAT:.*]] = uitofp %[[NE_0]] : i1 to f32
 // CHECK-NEXT: %[[SIGN:.*]] = copysign %[[NE_0_FLOAT]], %[[OPERAND_IN]] : f32
-// CHECK-NEXT: %[[CMP:.*]] = cmpf "uno", %[[OPERAND_IN]], %[[OPERAND_IN]] : f32
+// CHECK-NEXT: %[[CMP:.*]] = cmpf uno, %[[OPERAND_IN]], %[[OPERAND_IN]] : f32
 // CHECK-NEXT: %[[RESULT:.*]] = select %[[CMP]], %[[OPERAND_IN]], %[[SIGN]] : f32
 // CHECK-NEXT: linalg.yield %[[RESULT]] : f32

@@ -627,10 +627,10 @@ func @sign_bf16(%input: memref<2x2xbf16>, %result: memref<2x2xbf16>) {
 // CHECK: linalg.generic
 // CHECK-NEXT: ^bb0(%[[OPERAND_IN:.*]]: bf16, %[[RESULT_OUT:.*]]):
 // CHECK-NEXT: %[[CST_0:.*]] = constant 0.000000e+00 : bf16
-// CHECK-NEXT: %[[NE_0:.*]] = cmpf "one", %[[OPERAND_IN]], %[[CST_0]] : bf16
+// CHECK-NEXT: %[[NE_0:.*]] = cmpf one, %[[OPERAND_IN]], %[[CST_0]] : bf16
 // CHECK-NEXT: %[[NE_0_FLOAT:.*]] = uitofp %[[NE_0]] : i1 to bf16
 // CHECK-NEXT: %[[SIGN:.*]] = copysign %[[NE_0_FLOAT]], %[[OPERAND_IN]] : bf16
-// CHECK-NEXT: %[[CMP:.*]] = cmpf "uno", %[[OPERAND_IN]], %[[OPERAND_IN]] : bf16
+// CHECK-NEXT: %[[CMP:.*]] = cmpf uno, %[[OPERAND_IN]], %[[OPERAND_IN]] : bf16
 // CHECK-NEXT: %[[RESULT:.*]] = select %[[CMP]], %[[OPERAND_IN]], %[[SIGN]] : bf16
 // CHECK-NEXT: linalg.yield %[[RESULT]] : bf16

@@ -646,7 +646,7 @@ func @sign_i16(%input: memref<2x2xi16>, %result: memref<2x2xi16>) {
 // CHECK-NEXT: %[[C0:.*]] = constant 0 : i16
 // CHECK-NEXT: %[[C15:.*]] = constant 15 : i16
 // CHECK-NEXT: %[[C1:.*]] = constant 1 : i16
-// CHECK-NEXT: %[[CMP:.*]] = cmpi "eq", %[[OPERAND_IN]], %[[C0]] : i16
+// CHECK-NEXT: %[[CMP:.*]] = cmpi eq, %[[OPERAND_IN]], %[[C0]] : i16
 // CHECK-NEXT: %[[ASHR:.*]] = shift_right_signed %[[OPERAND_IN]], %[[C15]] : i16
 // CHECK-NEXT: %[[OR:.*]] = or %[[ASHR]], %[[C1]] : i16
 // CHECK-NEXT: %[[RESULT:.*]] = select %[[CMP]], %[[C0]], %[[OR]] : i16
@@ -167,11 +167,11 @@ func @reduce_window(%arg: memref<112x112xf32>,

 // CHECK: [[START_I:%.*]] = muli [[I]], [[C2]] : index
 // CHECK: [[INDEX_I:%.*]] = addi [[START_I]], [[IW]] : index
-// CHECK: [[INDEX_I_FITS:%.*]] = cmpi "ult", [[INDEX_I]], [[C112]]
+// CHECK: [[INDEX_I_FITS:%.*]] = cmpi ult, [[INDEX_I]], [[C112]]

 // CHECK: [[START_J:%.*]] = muli [[J]], [[C2]] : index
 // CHECK: [[INDEX_J:%.*]] = addi [[START_J]], [[JW]] : index
-// CHECK: [[INDEX_J_FITS:%.*]] = cmpi "ult", [[INDEX_J]], [[C112]]
+// CHECK: [[INDEX_J_FITS:%.*]] = cmpi ult, [[INDEX_J]], [[C112]]
 // CHECK: [[IN_BOUNDS_1:%.*]] = and [[INDEX_I_FITS]], [[INDEX_J_FITS]]

 // CHECK: [[ELEM_TO_REDUCE:%.*]] = scf.if [[IN_BOUNDS_1]] -> (f32) {