Add support for lowering and/or within mhlo.reduce op body.

PiperOrigin-RevId: 367627034
This commit is contained in:
Hanhan Wang 2021-04-09 07:07:41 -07:00 committed by TensorFlow MLIR Team
parent 2ef77c1c0c
commit fdb653788c
2 changed files with 52 additions and 0 deletions

View File

@ -2107,6 +2107,8 @@ void populateHLOToLinalgConversionPattern(MLIRContext* context,
patterns->insert<ReduceRegionXLAOpConversion<mhlo::AddOp>,
                 ReduceRegionXLAOpConversion<mhlo::MinOp>,
                 ReduceRegionXLAOpConversion<mhlo::MaxOp>,
                 ReduceRegionXLAOpConversion<mhlo::AndOp>,
                 ReduceRegionXLAOpConversion<mhlo::OrOp>,
                 ReduceRegionReturnOpConversion>(context);
}

View File

@ -1352,6 +1352,56 @@ func @reduce_maximum(%arg0: tensor<5x4xi32>, %arg1: tensor<i32>) -> tensor<5xi32
// -----
// Verifies that an mhlo.reduce over dimension 1 whose region body is
// mhlo.and lowers to a linalg.generic with ["parallel", "reduction"]
// iterators and a scalar `and` in the generic's region.
func @reduce_and(%arg0: tensor<5x4xi1>, %arg1: tensor<i1>) -> tensor<5xi1> {
%0 = "mhlo.reduce"(%arg0, %arg1) ({
^bb0(%arg3: tensor<i1>, %arg4 : tensor<i1>):
%1 = mhlo.and %arg3, %arg4 : tensor<i1>
"mhlo.return"(%1) : (tensor<i1>) -> ()
}) {dimensions = dense<1> : tensor<1xi64>} : (tensor<5x4xi1>, tensor<i1>) -> tensor<5xi1>
return %0 : tensor<5xi1>
}
// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1) -> (d0)>
// CHECK-LABEL: @reduce_and
// CHECK-DAG: %[[INIT:.*]] = tensor.extract %{{.*}} : tensor<i1>
// CHECK-DAG: %[[INIT_TENSOR:.*]] = linalg.init_tensor [5]
// CHECK-DAG: %[[FILL_TENSOR:.*]] = linalg.fill(%[[INIT_TENSOR]], %[[INIT]])
// CHECK: linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]]]
// CHECK-SAME: iterator_types = ["parallel", "reduction"]
// CHECK-SAME: ins(%{{.*}}tensor<5x4xi1>)
// CHECK-SAME: outs(%[[FILL_TENSOR]] : tensor<5xi1>)
// CHECK-NEXT: ^bb0(%[[LHS_IN:.*]]: i1, %[[RHS_IN:.*]]: i1):
// CHECK-NEXT: %[[RESULT:.*]] = and %[[LHS_IN]], %[[RHS_IN]] : i1
// CHECK-NEXT: linalg.yield %[[RESULT]] : i1
// -----
// Verifies that an mhlo.reduce over dimension 1 whose region body is
// mhlo.or lowers to a linalg.generic with ["parallel", "reduction"]
// iterators and a scalar `or` in the generic's region.
func @reduce_or(%arg0: tensor<5x4xi1>, %arg1: tensor<i1>) -> tensor<5xi1> {
%0 = "mhlo.reduce"(%arg0, %arg1) ({
^bb0(%arg3: tensor<i1>, %arg4 : tensor<i1>):
%1 = mhlo.or %arg3, %arg4 : tensor<i1>
"mhlo.return"(%1) : (tensor<i1>) -> ()
}) {dimensions = dense<1> : tensor<1xi64>} : (tensor<5x4xi1>, tensor<i1>) -> tensor<5xi1>
return %0 : tensor<5xi1>
}
// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1) -> (d0)>
// CHECK-LABEL: @reduce_or
// CHECK-DAG: %[[INIT:.*]] = tensor.extract %{{.*}} : tensor<i1>
// CHECK-DAG: %[[INIT_TENSOR:.*]] = linalg.init_tensor [5]
// CHECK-DAG: %[[FILL_TENSOR:.*]] = linalg.fill(%[[INIT_TENSOR]], %[[INIT]])
// CHECK: linalg.generic
// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]]]
// CHECK-SAME: iterator_types = ["parallel", "reduction"]
// CHECK-SAME: ins(%{{.*}}tensor<5x4xi1>)
// CHECK-SAME: outs(%[[FILL_TENSOR]] : tensor<5xi1>)
// CHECK-NEXT: ^bb0(%[[LHS_IN:.*]]: i1, %[[RHS_IN:.*]]: i1):
// CHECK-NEXT: %[[RESULT:.*]] = or %[[LHS_IN]], %[[RHS_IN]] : i1
// CHECK-NEXT: linalg.yield %[[RESULT]] : i1
// -----
func @reduce_dim0(%arg0: tensor<5x4xi32>, %arg1: tensor<i32>) -> tensor<4xi32> {
%0 = "mhlo.reduce"(%arg0, %arg1) ({
^bb0(%arg3: tensor<i32>, %arg4 : tensor<i32>):