[MLIR][NFC] Rename ReduceOp operands() => inputs().

- Rename to avoid confusion, since "operands" generally refers to all operands of an operation, whereas this accessor covers only the tensors being reduced (not the init values).

PiperOrigin-RevId: 368479524
Author: Rahul Joshi 2021-04-14 12:05:33 -07:00 (committed by TensorFlow MLIR Team)
parent 236e7db5c0
commit c75cbf4ac7
6 changed files with 19 additions and 19 deletions
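
As context for the rename: reduce has two variadic operand groups, and the generic notion of "operands" covers both, while the ODS-generated accessor for $operands returned only the first group. A minimal sketch of the distinction (the function name and setup are hypothetical, not part of this commit):

    #include <cassert>

    #include "mlir-hlo/Dialect/mhlo/IR/hlo_ops.h"

    // For %r = "mhlo.reduce"(%input0, ..., %init0, ...), the Operation's
    // operand list contains both the reduced tensors and the init values;
    // the renamed accessor exposes only the former.
    void InspectReduce(mlir::mhlo::ReduceOp op) {
      // Every SSA operand of the underlying Operation, across all groups.
      unsigned num_all = op.getOperation()->getNumOperands();
      // Only the tensors being reduced (this accessor was operands() before).
      unsigned num_inputs = op.inputs().size();
      assert(num_all == num_inputs + op.init_values().size());
    }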


@@ -625,7 +625,7 @@ def HLO_ReduceOp: HLO_Op<"reduce", [
       InferFusibilityOpInterface
     ]>, BASE_HLO_ReduceOp {
   let arguments = (ins
-    Variadic<HLO_TensorOrTuple>:$operands,
+    Variadic<HLO_TensorOrTuple>:$inputs,
     Variadic<HLO_TensorOrTuple>:$init_values,
     I64ElementsAttr:$dimensions
   );
@@ -633,7 +633,7 @@ def HLO_ReduceOp: HLO_Op<"reduce", [
   let results = (outs Variadic<HLO_TensorOrTuple>);
   let builders = [
-    OpBuilder<(ins "ValueRange":$operands, "ValueRange":$init_values,
+    OpBuilder<(ins "ValueRange":$inputs, "ValueRange":$init_values,
                    "DenseIntElementsAttr":$dimensions)>];
   let extraClassDeclaration = [{


@@ -205,7 +205,7 @@ def LHLO_XorOp : LHLO_BinaryElementwiseOp<"xor", LHLO_PredOrIntBuffer>, BASE_HLO
 // TODO(timshen): cleanup lmhlo.TerminatorOp.
 def LHLO_ReduceOp: LHLO_Op<"reduce", [SameVariadicOperandSize]>, BASE_HLO_ReduceOp {
   let arguments = (ins
-    Arg<Variadic<LHLO_Buffer>, "", [MemRead]>:$operands,
+    Arg<Variadic<LHLO_Buffer>, "", [MemRead]>:$inputs,
     Arg<Variadic<LHLO_Buffer>, "", [MemRead]>:$init_values,
     Arg<Variadic<LHLO_Buffer>, "", [MemWrite]>:$out,
     I64ElementsAttr:$dimensions


@@ -1882,23 +1882,23 @@ static TensorType GetReduceResultType(Type operand_ty,
 }
 void ReduceOp::build(OpBuilder& builder, OperationState& state,
-                     ValueRange operands, ValueRange init_values,
+                     ValueRange inputs, ValueRange init_values,
                      DenseIntElementsAttr dimensions) {
   SmallVector<Type, 1> result_ty;
-  result_ty.reserve(operands.size());
+  result_ty.reserve(inputs.size());
-  for (Value operand : operands) {
+  for (Value input : inputs) {
     result_ty.push_back(
-        GetReduceResultType(operand.getType(), dimensions, &builder));
+        GetReduceResultType(input.getType(), dimensions, &builder));
   }
-  build(builder, state, result_ty, operands, init_values, dimensions);
+  build(builder, state, result_ty, inputs, init_values, dimensions);
 }
 LogicalResult ReduceOp::fold(ArrayRef<Attribute> operands,
                              SmallVectorImpl<OpFoldResult>& results) {
   // No dimensions to reduce.
   if (dimensions().getNumElements() == 0) {
-    for (Value input : this->operands()) {
+    for (Value input : this->inputs()) {
       results.push_back(input);
     }
     return success();


@@ -984,7 +984,7 @@ class ReduceConverter : public OpConversionPattern<lmhlo::ReduceOp> {
     auto loc = reduce_op.getLoc();
     lmhlo::ReduceOp::Adaptor adaptor(args);
     auto operand_shape =
-        adaptor.operands()[0].getType().template dyn_cast<ShapedType>();
+        adaptor.inputs()[0].getType().template dyn_cast<ShapedType>();
     if (!operand_shape || !operand_shape.hasRank()) {
       emitError(loc, "lhlo to linalg conversion expects known-rank args");
       return failure();
@@ -1019,7 +1019,7 @@ class ReduceConverter : public OpConversionPattern<lmhlo::ReduceOp> {
     auto linalg_op = rewriter.create<linalg::GenericOp>(
         loc, /*resultTensorTypes=*/ArrayRef<Type>{},
-        /*inputs=*/adaptor.operands(), /*outputBuffers=*/adaptor.out(), maps,
+        /*inputs=*/adaptor.inputs(), /*outputBuffers=*/adaptor.out(), maps,
         types);
     rewriter.inlineRegionBefore(reduce_op.body(), linalg_op.region(),
                                 linalg_op.region().end());
@@ -1423,7 +1423,7 @@ class ReduceOnTensorsConversion : public OpConversionPattern<mhlo::ReduceOp> {
     if (op.getNumOperands() != 2) {
       return op.emitError("expects exactly two operands");
     }
-    Value src = adaptor.operands()[0];
+    Value src = adaptor.inputs()[0];
     auto src_type = src.getType().cast<ShapedType>();
     int src_rank = src_type.getRank();
     if (!src_rank) {
@@ -1458,11 +1458,11 @@ class ReduceOnTensorsConversion : public OpConversionPattern<mhlo::ReduceOp> {
     indexing_maps.emplace_back(AffineMap::get(src_rank, /*symbolCount=*/0,
                                               exprs, rewriter.getContext()));
-    SmallVector<Value, 2> inputs = {adaptor.operands()[0]};
+    SmallVector<Value, 2> inputs = {adaptor.inputs()[0]};
     Type result_type = op.getResult(0).getType();
     auto shaped_type = result_type.cast<ShapedType>();
     SmallVector<Value, 8> dyn_shape = GetReduceOpInitTensorDynSizes(
-        rewriter, loc, adaptor.operands()[0], result_type.cast<ShapedType>(),
+        rewriter, loc, adaptor.inputs()[0], result_type.cast<ShapedType>(),
         reduction_dims);
     auto init_tensor = GetInitTensor(rewriter, loc, shaped_type, dyn_shape);
     Value filled_tensor =


@@ -72,7 +72,7 @@ class LhloReduceToGPULaunchConverter : public OpConversionPattern<ReduceOp> {
     // Require all inputs to have the same shape.
     int64_t reduce_dim_size = 0;
-    for (auto input : reduce_op.operands()) {
+    for (auto input : reduce_op.inputs()) {
      auto shaped_type = input.getType().dyn_cast<ShapedType>();
      if (!shaped_type || !shaped_type.hasStaticShape()) {
        return failure();
@@ -133,7 +133,7 @@ class LhloReduceToGPULaunchConverter : public OpConversionPattern<ReduceOp> {
     auto accumulator = rewriter.create<memref::SubViewOp>(
         loc, resType, output, offset, size, stride);
     llvm::SmallVector<Value, 4> indexings;
-    auto input_buffer = *reduce_op.operands().begin();
+    Value input_buffer = reduce_op.inputs().front();
     auto input_type_rank =
         input_buffer.getType().cast<MemRefType>().getRank();


@@ -234,8 +234,8 @@ class ReduceOpConverter : public OpConversionPattern<lmhlo::ReduceOp> {
       reducing_dims.insert(rdim.getSExtValue());
     }
-    Value operand = *reduce_op.operands().begin();
-    Value out = *reduce_op.out().begin();
+    Value operand = reduce_op.inputs().front();
+    Value out = reduce_op.out().front();
     SmallVector<Value, 2> parallel_lower, parallel_upper, parallel_step;
     SmallVector<Value, 2> reduce_lower, reduce_upper, reduce_step;
     auto operand_shape = operand.getType().cast<MemRefType>().getShape();
@@ -293,7 +293,7 @@ class ReduceOpConverter : public OpConversionPattern<lmhlo::ReduceOp> {
     rewriter->setInsertionPointToStart(inner.getBody());
     Value elem = rewriter->create<mlir::memref::LoadOp>(
-        loc, *reduce_op.operands().begin(), indices);
+        loc, reduce_op.inputs().front(), indices);
     return rewriter->create<scf::ReduceOp>(loc, elem);
   }
 };
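
The rename also flows through to the generated C++ builder, whose first ValueRange parameter is now named inputs. A minimal usage sketch under the new names (the helper and its value arguments are hypothetical, not part of this commit):

    #include "mlir-hlo/Dialect/mhlo/IR/hlo_ops.h"
    #include "mlir/IR/Builders.h"

    // Builds an mhlo.reduce over dimension 1; `input` and `init` are assumed
    // to be suitably typed Values created elsewhere.
    mlir::Value BuildReduce(mlir::OpBuilder& b, mlir::Location loc,
                            mlir::Value input, mlir::Value init) {
      auto reduce = b.create<mlir::mhlo::ReduceOp>(
          loc, /*inputs=*/mlir::ValueRange{input},
          /*init_values=*/mlir::ValueRange{init},
          /*dimensions=*/b.getI64TensorAttr({1}));
      // The reduction body region still has to be populated by the caller
      // before the op verifies.
      return reduce->getResult(0);
    }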