/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// This file implements logic for lowering HLO dialect to LHLO dialect.

#include "mlir-hlo/Dialect/mhlo/IR/hlo_ops.h"
#include "mlir-hlo/Dialect/mhlo/IR/lhlo_ops.h"
#include "mlir-hlo/Dialect/mhlo/transforms/PassDetail.h"
#include "mlir-hlo/Dialect/mhlo/transforms/map_hlo_to_lhlo_op.h"
#include "mlir-hlo/Dialect/mhlo/transforms/passes.h"
#include "mlir-hlo/Dialect/mhlo/transforms/rewriters.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/Shape/Transforms/Passes.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/StandardOps/Transforms/FuncConversions.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/Bufferize.h"
#include "mlir/Transforms/DialectConversion.h"

namespace mlir {
namespace mhlo {
namespace {

template <typename T>
using BaseOpConversion = OpConversionPattern<T>;

Value InsertDynamicAllocAndDealloc(Location loc, Value result,
                                   Value shape_operand,
                                   ConversionPatternRewriter* rewriter) {
  auto result_type = result.getType().dyn_cast<RankedTensorType>();
  if (!result_type) {
    result.getDefiningOp()->emitOpError()
        << "tensor to buffer conversion expects ranked results";
  }
  auto memref_type =
      MemRefType::get(result_type.getShape(), result_type.getElementType());

  // Extract the required element out of the vector.
  SmallVector<Value, 4> dynamic_operands;
  for (auto shape_element : llvm::enumerate(result_type.getShape())) {
    if (shape_element.value() != ShapedType::kDynamicSize) continue;
    Value index = rewriter->create<ConstantIndexOp>(loc, shape_element.index());
    Value alloc_operand =
        rewriter->create<tensor::ExtractOp>(loc, shape_operand, index);
    if (!alloc_operand.getType().isIndex()) {
      alloc_operand = rewriter->create<IndexCastOp>(loc, alloc_operand,
                                                    rewriter->getIndexType());
    }
    dynamic_operands.push_back(alloc_operand);
  }

  return rewriter->create<memref::AllocOp>(loc, memref_type, dynamic_operands);
}
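
// For illustration (a sketch, not part of the original file; the printed op
// spellings assume this MLIR version): for a result of type tensor<?x4xf32>
// with shape operand %shape : tensor<2xindex>, the helper above emits roughly
//   %c0 = constant 0 : index
//   %d0 = tensor.extract %shape[%c0] : tensor<2xindex>
//   %buf = memref.alloc(%d0) : memref<?x4xf32>
// i.e. one alloc operand per dynamic dimension, read out of the shape operand.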

Value InsertAlloc(Location loc, OpResult result,
                  ConversionPatternRewriter* rewriter) {
  auto result_type = result.getType().dyn_cast<RankedTensorType>();
  if (!result_type || !result_type.hasStaticShape()) {
    result.getDefiningOp()->emitOpError()
        << "tensor to buffer conversion expects statically shaped results";
  }
  auto memref_type =
      MemRefType::get(result_type.getShape(), result_type.getElementType());
  OpBuilder::InsertionGuard guard(*rewriter);
  rewriter->setInsertionPoint(result.getDefiningOp());
  auto alloc = rewriter->create<memref::AllocOp>(loc, memref_type);
  return alloc;
}

/// Converts the results of the operation `op` to memref types and appends them
/// to the `results` vector.
LogicalResult ConvertResults(Operation* op, SmallVectorImpl<Value>& results,
                             ConversionPatternRewriter& rewriter) {
  size_t num_operands = results.size();
  SmallVector<Value, 2> tensor_operands;
  for (auto result : llvm::enumerate(op->getResults())) {
    RankedTensorType resultType =
        result.value().getType().dyn_cast<RankedTensorType>();
    if (!resultType) return failure();

    if (resultType.hasStaticShape()) {
      results.push_back(InsertAlloc(op->getLoc(), result.value(), &rewriter));
      continue;
    }
    auto shape_type_op = dyn_cast<InferShapedTypeOpInterface>(op);
    if (!shape_type_op) return failure();

    if (tensor_operands.empty()) {
      for (auto operand : ArrayRef<Value>(results).take_front(num_operands)) {
        auto tp = operand.getType().cast<ShapedType>();
        tensor_operands.push_back(rewriter.create<memref::TensorLoadOp>(
            op->getLoc(),
            RankedTensorType::get(tp.getShape(), tp.getElementType()),
            operand));
      }
    }

    SmallVector<Value, 1> results_shape;
    auto status = shape_type_op.reifyReturnTypeShapes(rewriter, tensor_operands,
                                                      results_shape);
    if (failed(status)) return failure();
    results.push_back(
        InsertDynamicAllocAndDealloc(op->getLoc(), result.value(),
                                     results_shape[result.index()], &rewriter));
  }
  return success();
}

template <typename HloOpTy>
class HloToLhloOpConverter : public BaseOpConversion<HloOpTy> {
 public:
  using BaseOpConversion<HloOpTy>::BaseOpConversion;
  LogicalResult matchAndRewrite(
      HloOpTy hloOp, ArrayRef<Value> operands,
      ConversionPatternRewriter& rewriter) const final {
    Operation* op = hloOp.getOperation();
    SmallVector<Value, 4> buffer_args(operands.begin(), operands.end());
    if (failed(ConvertResults(op, buffer_args, rewriter))) return failure();
    rewriter.create<mhlo::HloToLhloOp<HloOpTy>>(op->getLoc(), llvm::None,
                                                buffer_args, op->getAttrs());
    rewriter.replaceOp(
        op, llvm::makeArrayRef(buffer_args).drop_front(operands.size()));
    return success();
  }
};
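
// For illustration (a sketch, not part of the original file; the exact
// textual IR may differ in this MLIR version): the generic pattern rewrites
//   %0 = "mhlo.add"(%arg0, %arg1)
//       : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
// into an allocation followed by the destination-passing-style lmhlo op:
//   %buf = memref.alloc() : memref<2x2xf32>
//   "lmhlo.add"(%arg0, %arg1, %buf)
//       : (memref<2x2xf32>, memref<2x2xf32>, memref<2x2xf32>) -> ()
// and replaces all uses of %0 with %buf.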

// This specialization exists so that LMHLO's Dot can be given a specific set
// of dimension numbers, when lowering from MHLO's Dot, which does not have
// dimension numbers (it uses DotGeneral for this generalized notion of dot
// products). When these two dialects are in sync with respect to the
// Dot/DotGeneral issue, this specialization should be deleted.
template <>
class HloToLhloOpConverter<mhlo::DotOp> : public BaseOpConversion<mhlo::DotOp> {
 public:
  using BaseOpConversion<mhlo::DotOp>::BaseOpConversion;
  LogicalResult matchAndRewrite(
      mhlo::DotOp hloOp, ArrayRef<Value> operands,
      ConversionPatternRewriter& rewriter) const final {
    Operation* op = hloOp.getOperation();
    SmallVector<Value, 2> buffer_args(operands.begin(), operands.end());
    if (failed(ConvertResults(op, buffer_args, rewriter))) return failure();

    // TODO(silvasean): Move this helper to MLIR core.
    auto make_elements_attr = [&rewriter](ArrayRef<int64_t> integers) {
      auto type = RankedTensorType::get({static_cast<int64_t>(integers.size())},
                                        rewriter.getIntegerType(64));
      return DenseIntElementsAttr::get(type, integers);
    };
    auto dotOp = rewriter.create<lmhlo::DotOp>(op->getLoc(), llvm::None,
                                               buffer_args, op->getAttrs());
    // MHLO's Dot uses rank-2 operands, of the form ([N, M], [M, O]) -> [N, O].
    auto dimension_numbers = mhlo::DotDimensionNumbers::get(
        make_elements_attr({}), make_elements_attr({}), make_elements_attr({1}),
        make_elements_attr({0}), rewriter.getContext());
    dotOp.dot_dimension_numbersAttr(dimension_numbers);
    rewriter.replaceOp(op, ArrayRef<Value>(buffer_args).slice(operands.size()));
    return success();
  }
};
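
// The lmhlo.dot created above thus carries dimension numbers equivalent to
// (a sketch; the printed attribute form may differ in this dialect version):
//   lhs_batching_dimensions = [], rhs_batching_dimensions = [],
//   lhs_contracting_dimensions = [1], rhs_contracting_dimensions = [0]
// i.e. the plain rank-2 matmul contraction ([N, M], [M, O]) -> [N, O].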

struct HloToLhloCustomCallOpConverter
    : public BaseOpConversion<mhlo::CustomCallOp> {
 public:
  using BaseOpConversion<mhlo::CustomCallOp>::BaseOpConversion;

  LogicalResult matchAndRewrite(
      mhlo::CustomCallOp hloOp, ArrayRef<Value> operands,
      ConversionPatternRewriter& rewriter) const final {
    Operation* op = hloOp.getOperation();
    SmallVector<Value, 2> buffer_args(operands.begin(), operands.end());
    if (failed(ConvertResults(op, buffer_args, rewriter))) return failure();

    auto lhloOp = rewriter.create<lmhlo::CustomCallOp>(
        op->getLoc(), llvm::None, buffer_args, op->getAttrs());
    // Set up the AttrSizedOperandSegments attribute to indicate the number of
    // operands for args and outputs.
    const int32_t segments[2] = {static_cast<int32_t>(operands.size()),
                                 static_cast<int32_t>(op->getNumResults())};
    lhloOp->setAttr(lhloOp.getOperandSegmentSizeAttr(),
                    rewriter.getI32VectorAttr(segments));

    rewriter.replaceOp(op, ArrayRef<Value>(buffer_args).slice(operands.size()));
    return success();
  }
};
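
// As a sketch (the attribute spelling is assumed from the ODS-generated
// accessor): a custom call with two args and one result ends up carrying
//   operand_segment_sizes = dense<[2, 1]> : vector<2xi32>
// so that consumers can split the flat buffer operand list back into args
// and outputs.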

class HloToLhloReshapeUnrankedConverter
    : public BaseOpConversion<mhlo::ReshapeOp> {
 public:
  using BaseOpConversion<mhlo::ReshapeOp>::BaseOpConversion;

  LogicalResult matchAndRewrite(
      mhlo::ReshapeOp op, ArrayRef<Value> operands,
      ConversionPatternRewriter& rewriter) const final {
    mhlo::ReshapeOp::Adaptor adaptor(operands);
    auto unranked_operand_type =
        adaptor.operand().getType().dyn_cast<UnrankedMemRefType>();
    if (unranked_operand_type == nullptr) return failure();

    auto result_type = op.getType().cast<RankedTensorType>();
    rewriter.replaceOpWithNewOp<memref::CastOp>(
        op, adaptor.operand(),
        MemRefType::get(result_type.getShape(), result_type.getElementType()));
    return success();
  }
};

// TODO(pifon): Consider inserting lhlo.copy as in
// HloToLhloDynamicBroadcastInDimOpConverter.
class HloToLhloDynamicReshapeConverter
    : public BaseOpConversion<mhlo::DynamicReshapeOp> {
 public:
  using BaseOpConversion<mhlo::DynamicReshapeOp>::BaseOpConversion;

  LogicalResult matchAndRewrite(
      mhlo::DynamicReshapeOp op, ArrayRef<Value> operands,
      ConversionPatternRewriter& rewriter) const final {
    Type result_type;
    if (auto ranked_type = op.getType().dyn_cast<RankedTensorType>()) {
      result_type =
          MemRefType::get(ranked_type.getShape(), ranked_type.getElementType());
    } else if (auto unranked_type =
                   op.getType().dyn_cast<UnrankedTensorType>()) {
      result_type = UnrankedMemRefType::get(unranked_type.getElementType(), 0);
    } else {
      return failure();
    }
    mhlo::DynamicReshapeOp::Adaptor adaptor(operands);
    rewriter.replaceOpWithNewOp<memref::ReshapeOp>(
        op, result_type, adaptor.operand(), adaptor.output_shape());
    return success();
  }
};

// TODO(b/175670649) Fix this to no longer access original tensor operands.
class HloToLhloDynamicBroadcastInDimOpConverter
    : public BaseOpConversion<mhlo::DynamicBroadcastInDimOp> {
 public:
  HloToLhloDynamicBroadcastInDimOpConverter(TypeConverter& converter,
                                            MLIRContext* ctx,
                                            bool insert_copy = true)
      : BaseOpConversion<mhlo::DynamicBroadcastInDimOp>(converter, ctx),
        insert_copy_(insert_copy) {}

  LogicalResult matchAndRewrite(
      mhlo::DynamicBroadcastInDimOp op, ArrayRef<Value> operands,
      ConversionPatternRewriter& rewriter) const final {
    if (!op.getType().isa<RankedTensorType>()) return failure();
    Value result = InsertDynamicMemrefCastOp(op, operands.front(), &rewriter);

    if (insert_copy_) {
      auto loc = op.getLoc();
      Value result_buffer = InsertDynamicAllocAndDealloc(
          loc, op.getResult(), op.output_dimensions(), &rewriter);

      rewriter.create<lmhlo::CopyOp>(loc, result, result_buffer);
      result = result_buffer;
    }
    rewriter.replaceOp(op, {result});
    return success();
  }

 private:
  // Inserts a dynamic memref cast that changes the layout of the memref,
  // using a 0-stride (and the size of the target dimension) wherever size-1
  // dimension expansion is necessary.
  memref::ReinterpretCastOp InsertDynamicMemrefCastOp(
      mhlo::DynamicBroadcastInDimOp op, Value operand, OpBuilder* b) const {
    auto loc = op.getLoc();
    auto operand_type = operand.getType().cast<MemRefType>();
    auto operand_shape = operand_type.getShape();
    auto operand_rank = operand_type.getRank();

    auto result_type = op.getType().cast<RankedTensorType>();
    auto result_rank = result_type.getRank();

    Value zero = b->create<ConstantIndexOp>(loc, 0);
    Value one = b->create<ConstantIndexOp>(loc, 1);

    // Compute a reversed scan product. Compute the stride for the dimensions
    // so far, working from minor to major dimensions. Additionally, save the
    // operand shape Values to use in the next loop.
    SmallVector<Value, 2> operand_strides(operand_rank, one);
    SmallVector<Value, 2> operand_sizes(operand_rank, one);
    Value stride_so_far = one;
    for (int i = operand_rank - 1; i >= 0; --i) {
      Value operand_dim_size =
          ShapedType::isDynamic(operand_shape[i])
              ? b->create<memref::DimOp>(loc, operand, i).getResult()
              : b->create<ConstantIndexOp>(loc, operand_shape[i]).getResult();
      operand_sizes[i] = operand_dim_size;

      operand_strides[i] = stride_so_far;
      if (i > 0) {
        stride_so_far = b->create<MulIOp>(loc, stride_so_far, operand_dim_size);
      }
    }

    SmallVector<OpFoldResult, 2> sizes, strides;
    sizes.reserve(result_rank);
    strides.reserve(result_rank);

    DenseMap<int, int> output_to_input_dim;
    for (auto dim : llvm::enumerate(op.broadcast_dimensions())) {
      output_to_input_dim[dim.value().getSExtValue()] = dim.index();
    }
    for (int i = 0; i < result_rank; ++i) {
      Value i_val = b->create<ConstantIndexOp>(loc, i);
      Value result_dim_size =
          b->create<tensor::ExtractOp>(loc, op.output_dimensions(), i_val);
      if (!result_dim_size.getType().isIndex()) {
        result_dim_size =
            b->create<IndexCastOp>(loc, result_dim_size, b->getIndexType());
      }
      sizes.push_back(result_dim_size);

      auto it = output_to_input_dim.find(i);
      // If the rank of the output is greater than the rank of the input, i.e.
      // there is no entry for this output dimension in the inverse
      // broadcast_dimensions map, we also set the stride to 0 to emulate
      // padding of the shape with 1s and the corresponding expansion.
      if (it == output_to_input_dim.end()) {
        strides.push_back(zero);
        continue;
      }

      // There can be two cases:
      // 1) Operand dim == result dim => expansion is not needed
      //    => stride := flattened buffer stride
      // 2) Operand dim < result dim => expansion is needed => stride := 0.
      int dim = it->second;
      Value is_expansion = b->create<CmpIOp>(
          loc, CmpIPredicate::slt, operand_sizes[dim], result_dim_size);
      Value select = b->create<mlir::SelectOp>(loc, is_expansion, zero,
                                               operand_strides[dim]);
      strides.push_back(select);
    }

    // Type-erased memref type with static rank, dynamic sizes and strides.
    SmallVector<int64_t, 2> dynamic_layout(result_rank,
                                           MemRefType::kDynamicStrideOrOffset);
    SmallVector<int64_t, 2> dynamic_shape(result_rank,
                                          MemRefType::kDynamicSize);
    auto type_erased_memref_type = MemRefType::get(
        dynamic_shape, operand_type.getElementType(),
        makeStridedLinearLayoutMap(dynamic_layout,
                                   /*offset=*/0, b->getContext()));

    auto transformed_operand = b->create<memref::ReinterpretCastOp>(
        loc, type_erased_memref_type, operand,
        /*offset=*/b->getI64IntegerAttr(0), sizes, strides);
    return transformed_operand;
  }
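
  // For intuition (a hedged sketch, not verified IR from this version):
  // broadcasting %operand : memref<?xf32> to a two-dimensional result along
  // dimension 1 produces roughly
  //   %cast = memref.reinterpret_cast %operand to
  //       offset: [0], sizes: [%d0, %d1], strides: [%s0, %s1]
  //       : memref<?xf32> to memref<?x?xf32, #strided_map>
  // where the stride of an expanded dimension is the constant 0, so every
  // index in that dimension reads the same underlying element and the
  // broadcast requires no data movement.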

  // Keep the copy semantics and allocate a buffer for the result of the
  // memref cast.
  bool insert_copy_;
};

struct HloToLhloDotGeneralOpConverter
    : public BaseOpConversion<mhlo::DotGeneralOp> {
  using BaseOpConversion<mhlo::DotGeneralOp>::BaseOpConversion;
  LogicalResult matchAndRewrite(
      mhlo::DotGeneralOp dotGeneralOp, ArrayRef<Value> operands,
      ConversionPatternRewriter& rewriter) const final {
    Operation* op = dotGeneralOp.getOperation();

    if (op->getResults().empty()) return failure();
    OpResult result = op->getResults()[0];
    RankedTensorType resultType = result.getType().dyn_cast<RankedTensorType>();
    if (!resultType) return failure();

    // The third buffer argument will be filled with what used to be the return
    // value of the DotGeneral.
    if (operands.size() != 2) return failure();
    std::array<Value, 3> bufferArgs = {operands[0], operands[1], {}};

    if (resultType.hasStaticShape()) {
      bufferArgs[2] = InsertAlloc(op->getLoc(), result, &rewriter);
    } else {
      SmallVector<Value, 1> results_shape;
      auto shape_type_op = dyn_cast<InferShapedTypeOpInterface>(op);
      if (failed(shape_type_op.reifyReturnTypeShapes(rewriter, operands,
                                                     results_shape)))
        return failure();

      bufferArgs[2] = InsertDynamicAllocAndDealloc(
          op->getLoc(), result, results_shape.front(), &rewriter);
    }

    rewriter.create<lmhlo::DotOp>(op->getLoc(), llvm::None, bufferArgs,
                                  op->getAttrs());
    rewriter.replaceOp(op, bufferArgs[2]);
    return success();
  }
};

struct HloToLhloReduceOpConverter : public BaseOpConversion<mhlo::ReduceOp> {
 public:
  using BaseOpConversion<mhlo::ReduceOp>::BaseOpConversion;

  LogicalResult matchAndRewrite(
      mhlo::ReduceOp op, ArrayRef<Value> operands,
      ConversionPatternRewriter& rewriter) const final {
    auto loc = op.getLoc();
    // TODO(b/137624192) Implement variadic reduce.
    if (op.getNumResults() != 1) return failure();
    if (!llvm::hasSingleElement(op.body())) {
      return op.emitOpError()
             << "tensor to buffer conversion expects a single block "
                "in the region containing the operation";
    }
    SmallVector<Value, 4> buffer_args(operands.begin(), operands.end());
    if (failed(ConvertResults(op, buffer_args, rewriter))) return failure();
    auto new_op = rewriter.create<lmhlo::ReduceOp>(loc, llvm::None, buffer_args,
                                                   op->getAttrs());

    // Copy over the operations inside the region.
    rewriter.inlineRegionBefore(op.body(), new_op.body(), new_op.body().end());

    // Convert the region signature to memref and add an extra result.
    auto& entry_block = new_op.body().front();
    TypeConverter::SignatureConversion sig_conversion(
        entry_block.getNumArguments() + 1);
    for (auto arg : entry_block.getArguments()) {
      auto old_type = arg.getType().cast<TensorType>();
      auto new_type =
          MemRefType::get(old_type.getShape(), old_type.getElementType());
      sig_conversion.addInputs(arg.getArgNumber(), new_type);
    }
    auto return_op = cast<mhlo::ReturnOp>(entry_block.getTerminator());
    auto result_type = return_op.results().front().getType().cast<TensorType>();
    sig_conversion.addInputs({MemRefType::get(result_type.getShape(),
                                              result_type.getElementType())});
    rewriter.applySignatureConversion(&new_op.body(), sig_conversion);

    rewriter.replaceOp(op, ArrayRef<Value>(buffer_args).slice(operands.size()));

    return success();
  }
};
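
// Sketch of the signature conversion above (illustrative, not verified IR):
// a reduction body that entered the pattern as
//   ^bb0(%lhs: tensor<f32>, %rhs: tensor<f32>):
// leaves it with memref arguments plus one extra buffer for the body result:
//   ^bb0(%lhs: memref<f32>, %rhs: memref<f32>, %out: memref<f32>):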

// Legalize mhlo.return to an lmhlo.copy and an lmhlo.terminator.
struct HloToLhloReturnOpConverter : public BaseOpConversion<mhlo::ReturnOp> {
 public:
  using BaseOpConversion<mhlo::ReturnOp>::BaseOpConversion;

  LogicalResult matchAndRewrite(
      mhlo::ReturnOp op, ArrayRef<Value> operands,
      ConversionPatternRewriter& rewriter) const final {
    auto loc = op.getLoc();
    auto& entry_block = op->getParentRegion()->front();
    auto num_arguments = entry_block.getNumArguments();
    if (operands.size() > num_arguments) {
      return op.emitError(
          "The number of operands that need Copy operations is more "
          "than the number of target function arguments.");
    }

    // The index of the first output block argument.
    auto dest_arg_idx = num_arguments - operands.size();

    // Create an lmhlo.copy for each operand of mhlo.return.
    for (Value operand : operands) {
      rewriter.create<lmhlo::CopyOp>(loc, operand,
                                     entry_block.getArgument(dest_arg_idx));
      ++dest_arg_idx;
    }
    rewriter.replaceOpWithNewOp<lmhlo::TerminatorOp>(op);
    return success();
  }
};

// TODO(b/175789537) Remove this pattern.
class HloToLhloTensorStoreOpLegacyConverter
    : public BaseOpConversion<mlir::memref::TensorStoreOp> {
 public:
  using BaseOpConversion<mlir::memref::TensorStoreOp>::BaseOpConversion;

  LogicalResult matchAndRewrite(
      mlir::memref::TensorStoreOp op, ArrayRef<Value> operands,
      ConversionPatternRewriter& rewriter) const final {
    rewriter.replaceOpWithNewOp<lmhlo::CopyOp>(op, llvm::None, operands.front(),
                                               operands.back());
    return success();
  }
};

// Lowers from HLO dialect to LHLO dialect allocating/deallocating temporary
// buffers if necessary.
//
// Example fusion with HLO ops.
//
// func @fusion(%arg0: memref<2x2xf32>,
//              %arg1: memref<2x2xf32>,
//              %arg2: memref<2x2xf32>,
//              %arg3: memref<2x2xf32>) {
//   "lmhlo.fusion"() ({
//     %0 = tensor_load %arg1 : memref<2x2xf32>
//     %1 = tensor_load %arg2 : memref<2x2xf32>
//     %2 = "mhlo.add"(%0, %1) :
//         (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
//     %3 = tensor_load %arg0 : memref<2x2xf32>
//     %4 = "mhlo.multiply"(%2, %3) :
//         (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
//     tensor_store %4, %arg3 : memref<2x2xf32>
//     "lmhlo.terminator"() : () -> ()
//   }) : () -> ()
//   return
// }
//
// Transformed fusion with LHLO ops.
// func @fusion(%arg0: memref<2x2xf32>,
//              %arg1: memref<2x2xf32>,
//              %arg2: memref<2x2xf32>,
//              %arg3: memref<2x2xf32>) {
//   "lmhlo.fusion"() ( {
//     %0 = alloc() : memref<2x2xf32>
//     "lmhlo.add"(%arg1, %arg2, %0) :
//         (memref<2x2xf32>, memref<2x2xf32>, memref<2x2xf32>) -> ()
//     "lmhlo.multiply"(%0, %arg0, %arg3) :
//         (memref<2x2xf32>, memref<2x2xf32>, memref<2x2xf32>) -> ()
//     "lmhlo.terminator"() : () -> ()
//   }) : () -> ()
//   return
// }
//
// FuncOp signature conversion example:
//
// func @func_op(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32> {
//   %0 = "mhlo.maximum"(%arg0, %arg1)
//       : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
//   %1 = "mhlo.add"(%arg0, %0)
//       : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
//   return %1 : tensor<4xf32>
// }
//
// Transformed function with an extra argument for the result. The types have
// been converted from tensor to memref.
//
// func @func_op(%arg0: memref<4xf32>,
//               %arg1: memref<4xf32>,
//               %arg2: memref<4xf32>) {
//   %0 = alloc() : memref<4xf32>
//   "lmhlo.maximum"(%arg0, %arg1, %0) :
//       (memref<4xf32>, memref<4xf32>, memref<4xf32>) -> ()
//   %1 = alloc() : memref<4xf32>
//   "lmhlo.add"(%arg0, %0, %1) :
//       (memref<4xf32>, memref<4xf32>, memref<4xf32>) -> ()
//   "lmhlo.copy"(%1, %arg2) : (memref<4xf32>, memref<4xf32>) -> ()
//   "lmhlo.terminator"() : () -> ()
// }

struct HloLegalizeToLhlo : public HloLegalizeToLhloPassBase<HloLegalizeToLhlo> {
  using HloLegalizeToLhloPassBase<HloLegalizeToLhlo>::HloLegalizeToLhloPassBase;
  explicit HloLegalizeToLhlo(bool convert_to_lmhlo_only)
      : HloLegalizeToLhloPassBase<
            HloLegalizeToLhlo>::HloLegalizeToLhloPassBase() {
    this->convert_to_lmhlo_only_ = convert_to_lmhlo_only;
  }

  void getDependentDialects(DialectRegistry& registry) const override {
    registry.insert<lmhlo::LmhloDialect, memref::MemRefDialect,
                    shape::ShapeDialect>();
  }

 public:
  HloLegalizeToLhlo() = default;
  HloLegalizeToLhlo(const HloLegalizeToLhlo& o) {}

  void runOnOperation() override {
    auto& context = getContext();
    OwningRewritePatternList patterns(&context);
    ConversionTarget target(context);
    target.addLegalDialect<lmhlo::LmhloDialect>();
    target.addLegalDialect<StandardOpsDialect>();
    target.addLegalDialect<memref::MemRefDialect>();
    target.addLegalDialect<shape::ShapeDialect>();
    target.addLegalDialect<tensor::TensorDialect>();
    target.addIllegalDialect<mhlo::MhloDialect>();
    // Declare tensor_store illegal. tensor_load may be used to reify output
    // shape computation during dialect conversion and will be handled later.
    target.addIllegalOp<mlir::memref::TensorStoreOp>();
    // buffer_cast is illegal if it has uses.
    // TODO(b/175670649) Make buffer_cast illegal.
    target.addDynamicallyLegalOp<mlir::memref::BufferCastOp>(
        [](auto op) { return op->use_empty(); });

    BufferizeTypeConverter converter;
    auto isMemRefType = [](Type type) { return type.isa<BaseMemRefType>(); };
    target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
      auto inputs = op.getType().getInputs();
      return llvm::all_of(inputs, isMemRefType) &&
             converter.isLegal(&op.getBody());
    });
    target.addDynamicallyLegalOp<CallOp>([&](CallOp op) {
      return std::all_of(op.operand_type_begin(), op.operand_type_end(),
                         isMemRefType) &&
             std::all_of(op.result_type_begin(), op.result_type_end(),
                         isMemRefType);
    });
    target.addDynamicallyLegalOp<mlir::ReturnOp>([&](mlir::ReturnOp op) {
      return std::all_of(op.operand_type_begin(), op.operand_type_end(),
                         isMemRefType);
    });

    populateHLOToLHLOConversionPattern(&context, &converter, &patterns,
                                       convert_to_lmhlo_only_);
    populateFuncOpTypeConversionPattern(patterns, converter);
    populateCallOpTypeConversionPattern(patterns, converter);
    populateBranchOpInterfaceTypeConversionPattern(patterns, converter);
    populateReturnOpTypeConversionPattern(patterns, converter);
    populateEliminateBufferizeMaterializationsPatterns(converter, patterns);

    populateShapeStructuralTypeConversionsAndLegality(converter, patterns,
                                                      target);

    // TODO(b/175789537) Remove this pattern.
    patterns.insert<HloToLhloTensorStoreOpLegacyConverter>(&context);

    if (failed(applyPartialConversion(getOperation(), target,
                                      std::move(patterns))))
      signalPassFailure();
  }
};
}  // namespace

// Lowers some metadata-only mhlo ops (e.g. reshape) to the memref dialect
// directly and lowers others to their lmhlo counterparts.
void populateDynamicHLOToLHLOOrMemRefConversionPattern(
    MLIRContext* context, BufferizeTypeConverter* converter,
    OwningRewritePatternList* patterns, bool insert_copy) {
  patterns->insert<HloToLhloDynamicBroadcastInDimOpConverter>(
      *converter, context, insert_copy);
  patterns->insert<HloToLhloDynamicReshapeConverter,
                   HloToLhloReshapeUnrankedConverter>(*converter, context);
}

// Lowers all mhlo ops to their lmhlo counterparts without applying any
// optimization (e.g. it does not elide buffer copies).
void populateDynamicHLOToLHLOOnlyConversionPattern(
    MLIRContext* context, BufferizeTypeConverter* converter,
    OwningRewritePatternList* patterns) {
  // clang-format off
  patterns->insert<HloToLhloOpConverter<mhlo::DynamicBroadcastInDimOp>,
                   HloToLhloOpConverter<mhlo::DynamicGatherOp>,
                   HloToLhloOpConverter<mhlo::DynamicIotaOp>,
                   HloToLhloOpConverter<mhlo::DynamicPadOp>,
                   HloToLhloOpConverter<mhlo::DynamicReshapeOp>,
                   HloToLhloOpConverter<mhlo::RealDynamicSliceOp>
  >(*converter, context);
  // clang-format on
}

void populateHLOToLHLOConversionPattern(MLIRContext* context,
                                        BufferizeTypeConverter* converter,
                                        OwningRewritePatternList* patterns,
                                        bool convert_to_lmhlo_only) {
  if (convert_to_lmhlo_only) {
    populateDynamicHLOToLHLOOnlyConversionPattern(context, converter, patterns);
  } else {
    populateDynamicHLOToLHLOOrMemRefConversionPattern(context, converter,
                                                      patterns);
  }
  // clang-format off
  patterns->insert<
      HloToLhloCustomCallOpConverter,
      HloToLhloDotGeneralOpConverter,
      HloToLhloOpConverter<mhlo::AbsOp>,
      HloToLhloOpConverter<mhlo::AddOp>,
      HloToLhloOpConverter<mhlo::AndOp>,
      HloToLhloOpConverter<mhlo::Atan2Op>,
      HloToLhloOpConverter<mhlo::BroadcastInDimOp>,
      HloToLhloOpConverter<mhlo::CeilOp>,
      HloToLhloOpConverter<mhlo::CompareOp>,
      HloToLhloOpConverter<mhlo::ComplexOp>,
      HloToLhloOpConverter<mhlo::ConcatenateOp>,
      HloToLhloOpConverter<mhlo::ConstOp>,
      HloToLhloOpConverter<mhlo::ConvOp>,
      HloToLhloOpConverter<mhlo::ConvertOp>,
      HloToLhloOpConverter<mhlo::CopyOp>,
      HloToLhloOpConverter<mhlo::CosOp>,
      HloToLhloOpConverter<mhlo::DivOp>,
      HloToLhloOpConverter<mhlo::DotOp>,
      HloToLhloOpConverter<mhlo::ExpOp>,
      HloToLhloOpConverter<mhlo::Expm1Op>,
      HloToLhloOpConverter<mhlo::FloorOp>,
      HloToLhloOpConverter<mhlo::GatherOp>,
      HloToLhloOpConverter<mhlo::ImagOp>,
      HloToLhloOpConverter<mhlo::IotaOp>,
      HloToLhloOpConverter<mhlo::IsFiniteOp>,
      HloToLhloOpConverter<mhlo::LogOp>,
      HloToLhloOpConverter<mhlo::MaxOp>,
      HloToLhloOpConverter<mhlo::MinOp>,
      HloToLhloOpConverter<mhlo::MulOp>,
      HloToLhloOpConverter<mhlo::NegOp>,
      HloToLhloOpConverter<mhlo::NotOp>,
      HloToLhloOpConverter<mhlo::OrOp>,
      HloToLhloOpConverter<mhlo::PowOp>,
      HloToLhloOpConverter<mhlo::RealOp>,
      HloToLhloOpConverter<mhlo::RemOp>,
      HloToLhloOpConverter<mhlo::RsqrtOp>,
      HloToLhloOpConverter<mhlo::ReshapeOp>,
      HloToLhloOpConverter<mhlo::SelectOp>,
      HloToLhloOpConverter<mhlo::ShiftLeftOp>,
      HloToLhloOpConverter<mhlo::ShiftRightArithmeticOp>,
      HloToLhloOpConverter<mhlo::ShiftRightLogicalOp>,
      HloToLhloOpConverter<mhlo::SignOp>,
      HloToLhloOpConverter<mhlo::SinOp>,
      HloToLhloOpConverter<mhlo::SliceOp>,
      HloToLhloOpConverter<mhlo::SqrtOp>,
      HloToLhloOpConverter<mhlo::SubOp>,
      HloToLhloOpConverter<mhlo::TanhOp>,
      HloToLhloOpConverter<mhlo::TransposeOp>,
      HloToLhloOpConverter<mhlo::XorOp>,
      HloToLhloReduceOpConverter,
      HloToLhloReturnOpConverter
  >(*converter, context);
  // clang-format on
}

std::unique_ptr<OperationPass<ModuleOp>> createLegalizeToLhloPass(
    bool convert_to_lmhlo_only) {
  return std::make_unique<HloLegalizeToLhlo>(convert_to_lmhlo_only);
}
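
// Typical usage (a sketch, assuming the standard mlir::PassManager setup; the
// surrounding pipeline configuration is not part of this file):
//   mlir::PassManager pm(&context);
//   pm.addPass(mlir::mhlo::createLegalizeToLhloPass());
//   if (failed(pm.run(module))) { /* handle the error */ }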

}  // namespace mhlo
}  // namespace mlir
|