Roll-forward with fix:

[XLA/GPU] Migrate nested reduce emitter to take LMHLO.

PiperOrigin-RevId: 343582798
Author: Tim Shen, 2020-11-20 16:06:11 -08:00 (committed by TensorFlow MLIR Team)
parent f849c45b74
commit 5a6edaa588
1 changed file with 5 additions and 4 deletions


@@ -197,10 +197,11 @@ def LHLO_XorOp : LHLO_BinaryElementwiseOp<"xor", LHLO_PredOrIntBuffer>, BASE_HLO
 //===----------------------------------------------------------------------===//
 // TODO(b/139813999): specify required function signature in a type-safe way.
-def LHLO_ReduceOp: LHLO_Op<"reduce", [
-    SameVariadicOperandSize,
-    SingleBlockImplicitTerminator<"TerminatorOp">
-  ]>, BASE_HLO_ReduceOp {
+//
+// The region `body` may return lmhlo.TerminatorOp or mhlo.ReturnOp. We are
+// moving towards mhlo.ReturnOp, but some code that needs cleanup still assumes lmhlo.TerminatorOp.
+// TODO(timshen): cleanup lmhlo.TerminatorOp.
+def LHLO_ReduceOp: LHLO_Op<"reduce", [SameVariadicOperandSize]>, BASE_HLO_ReduceOp {
   let arguments = (ins
     Arg<Variadic<LHLO_Buffer>, "", [MemRead]>:$operands,
     Arg<Variadic<LHLO_Buffer>, "", [MemRead]>:$init_values,
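
For context (this note and the sketch below are not part of the commit; they describe standard MLIR ODS behavior): the SingleBlockImplicitTerminator<"TerminatorOp"> trait removed here makes ODS-generated builders and parsers implicitly append the named terminator to the op's single-block region and makes the verifier require exactly that terminator, so keeping it would reject a `body` that ends in mhlo.return. A minimal, hypothetical TableGen sketch of how the trait is used (Example_Dialect and Example_RegionOp are invented names, not from this change):

// Hypothetical op definition, for illustration only; not from this change.
def Example_RegionOp : Op<Example_Dialect, "region_op", [
    // ODS builders/parsers implicitly append the dialect's TerminatorOp to the
    // single block, and the verifier rejects any other terminator. Dropping
    // this trait is what allows a region to end in a different return op,
    // which is how LHLO_ReduceOp can now also accept mhlo.return in `body`.
    SingleBlockImplicitTerminator<"TerminatorOp">
  ]> {
  let regions = (region SizedRegion<1>:$body);
}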