[MLIR][HLO] Eliminate duplicate broadcastable constraints

PiperOrigin-RevId: 376718433
Authored by A. Unique TensorFlower on 2021-05-31 13:49:21 -07:00; committed by TensorFlow MLIR Team
parent 0f341012c6
commit 31536431e0
2 changed files with 49 additions and 34 deletions


@@ -306,25 +306,58 @@ struct MergeAssumingOpsPattern : public OpRewritePattern<shape::AssumingOp> {
   }
 };

-struct EliminateDuplicateCstrBroadcastableOps
-    : public OpRewritePattern<shape::CstrBroadcastableOp> {
-  using OpRewritePattern<shape::CstrBroadcastableOp>::OpRewritePattern;
-
-  LogicalResult matchAndRewrite(shape::CstrBroadcastableOp op,
-                                PatternRewriter &rewriter) const override {
-    // Search for previous occurence of the same constraint.
-    Operation *it = op->getPrevNode();
-    while (it != nullptr) {
-      if (auto candidate = llvm::dyn_cast<shape::CstrBroadcastableOp>(it)) {
-        if (candidate.shapes() == op.shapes()) {
-          rewriter.replaceOp(op, candidate.result());
-          return success();
-        }
-      }
-      it = it->getPrevNode();
-    }
-    return failure();
+// Eliminate casted extent tensors. Instead, produce the concrete extent tensor
+// type where possible.
+struct CanonicalizeCastedShapeOfOpPattern
+    : public OpRewritePattern<tensor::CastOp> {
+  using OpRewritePattern<tensor::CastOp>::OpRewritePattern;
+
+  LogicalResult matchAndRewrite(tensor::CastOp op,
+                                PatternRewriter &rewriter) const override {
+    // Only merge tensor cast into `shape_of` ops.
+    auto shape_of_op = op.source().getDefiningOp<shape::ShapeOfOp>();
+    if (!shape_of_op) return failure();
+
+    // Desired type must be an extent tensor type.
+    auto result_ty = op.getType().dyn_cast<RankedTensorType>();
+    if (!result_ty || result_ty.getRank() != 1 ||
+        !result_ty.getElementType().isIndex())
+      return failure();
+
+    rewriter.replaceOpWithNewOp<shape::ShapeOfOp>(op, result_ty,
+                                                  shape_of_op.arg());
+    if (shape_of_op->getUses().empty()) rewriter.eraseOp(shape_of_op);
+    return success();
+  }
+};
+
+// TODO(frgossen): Remove this once it has landed upstream.
+struct CanonicalizeBroadcastPattern
+    : public OpRewritePattern<shape::BroadcastOp> {
+  using OpRewritePattern<shape::BroadcastOp>::OpRewritePattern;
+
+  LogicalResult matchAndRewrite(shape::BroadcastOp op,
+                                PatternRewriter &rewriter) const override {
+    // Only concretize dynamic extent tensor result types.
+    auto resultTy = op.getType().dyn_cast<RankedTensorType>();
+    if (!resultTy || !resultTy.isDynamicDim(0)) return failure();
+
+    // Infer resulting shape rank if possible.
+    int64_t maxRank = 0;
+    for (Value shape : op.shapes()) {
+      if (auto extentTensorTy = shape.getType().dyn_cast<RankedTensorType>()) {
+        // Cannot infer resulting shape rank if any operand is dynamically
+        // ranked.
+        if (extentTensorTy.isDynamicDim(0)) return failure();
+        maxRank = std::max(maxRank, extentTensorTy.getDimSize(0));
+      }
+    }
+
+    auto newOp = rewriter.create<shape::BroadcastOp>(
+        op.getLoc(), RankedTensorType::get({maxRank}, rewriter.getIndexType()),
+        op.shapes());
+    rewriter.replaceOpWithNewOp<tensor::CastOp>(op, op.getType(), newOp);
+    return success();
   }
 };
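For illustration only (not part of the diff): CanonicalizeCastedShapeOfOpPattern folds a tensor.cast of a shape.shape_of result into a shape.shape_of that yields the concrete extent tensor type directly. The value names and element types below are hypothetical:

  // Before: the extent tensor is produced with a dynamic type, then cast.
  %shape = shape.shape_of %arg : tensor<?x32xf16> -> tensor<?xindex>
  %cast = tensor.cast %shape : tensor<?xindex> to tensor<2xindex>

  // After: shape_of produces the concrete extent tensor type itself.
  %cast = shape.shape_of %arg : tensor<?x32xf16> -> tensor<2xindex>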
@@ -399,7 +432,8 @@ void PopulateMoveUpDynamicBroadcastsForFusionPatterns(
     MLIRContext *context, OwningRewritePatternList *patterns) {
   // clang-format off
   patterns->insert<
-      EliminateDuplicateCstrBroadcastableOps,
+      CanonicalizeBroadcastPattern,
+      CanonicalizeCastedShapeOfOpPattern,
       InlineBroadcastedShapeOperandsPattern<shape::CstrBroadcastableOp>,
       MergeAssumingOpsPattern,
       MoveIntoAssumingOpPattern<shape::ShapeOfOp>,
@@ -409,7 +443,6 @@ void PopulateMoveUpDynamicBroadcastsForFusionPatterns(
       MoveUpBroadcastInDimOpPattern,
       ShapeReificationPattern>(context);
   // clang-format on
-  shape::BroadcastOp::getCanonicalizationPatterns(*patterns, context);
   tensor::CastOp::getCanonicalizationPatterns(*patterns, context);
 }
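Likewise for illustration (hypothetical operands %a and %b): the locally added CanonicalizeBroadcastPattern concretizes a dynamically typed shape.broadcast result when every operand has a statically ranked extent tensor type, and restores the original type with a cast that other patterns may fold away:

  // Before: the broadcasted extent tensor has a dynamic result type.
  %bcast = shape.broadcast %a, %b : tensor<2xindex>, tensor<3xindex> -> tensor<?xindex>

  // After: the result rank is inferred as max(2, 3) = 3, and the cast
  // preserves the original type for existing users.
  %concrete = shape.broadcast %a, %b : tensor<2xindex>, tensor<3xindex> -> tensor<3xindex>
  %bcast = tensor.cast %concrete : tensor<3xindex> to tensor<?xindex>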


@@ -374,21 +374,3 @@ func @sub_sub(%arg0: tensor<?x32xf16>, %arg1 : tensor<?x32xf16>,
   }
   return %7 : tensor<?x?x32xf16>
 }
-
-// -----
-
-// CHECK-LABEL: @redundant_cstr_broadcastable
-// CHECK-SAME:  (%[[ARG0:.*]]: tensor<?xindex>, %[[ARG1:.*]]: tensor<?xindex>)
-func @redundant_cstr_broadcastable(%arg0: tensor<?xindex>,
-    %arg1 : tensor<?xindex>) {
-  // CHECK-DAG: %[[WITNESS:.*]] = shape.cstr_broadcastable %[[ARG0]], %[[ARG1]]
-  // CHECK:     shape.assuming %[[WITNESS]]
-  %0 = shape.cstr_broadcastable %arg0, %arg1 : tensor<?xindex>, tensor<?xindex>
-  %1 = shape.cstr_broadcastable %arg0, %arg1 : tensor<?xindex>, tensor<?xindex>
-  %2 = shape.assuming_all %0, %1
-  shape.assuming %2 -> () {
-    "some.op"() : () -> ()
-    shape.assuming_yield
-  }
-  return
-}