2020-07-02 03:18:52 +08:00
|
|
|
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
|
|
|
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
you may not use this file except in compliance with the License.
|
|
|
|
You may obtain a copy of the License at
|
|
|
|
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
See the License for the specific language governing permissions and
|
|
|
|
limitations under the License.
|
|
|
|
==============================================================================*/
|
|
|
|
|
2020-07-09 11:32:16 +08:00
|
|
|
// This file defines the operations used in the LMHLO dialect.
|
2020-07-02 03:18:52 +08:00
|
|
|
|
2020-07-29 07:12:08 +08:00
|
|
|
#include "mlir-hlo/Dialect/mhlo/IR/lhlo_ops.h"
|
2020-07-02 03:18:52 +08:00
|
|
|
|
|
|
|
#include <assert.h>
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <stdint.h>
|
|
|
|
|
2021-02-23 00:41:59 +08:00
|
|
|
#include <unordered_set>
|
|
|
|
|
2020-07-29 07:12:08 +08:00
|
|
|
#include "llvm/ADT/APFloat.h"
|
|
|
|
#include "llvm/ADT/APInt.h"
|
|
|
|
#include "llvm/ADT/ArrayRef.h"
|
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2021-02-02 02:22:48 +08:00
|
|
|
#include "llvm/ADT/SmallSet.h"
|
2020-07-29 07:12:08 +08:00
|
|
|
#include "llvm/ADT/SmallVector.h"
|
|
|
|
#include "llvm/ADT/StringRef.h"
|
|
|
|
#include "llvm/Support/FormatVariadic.h"
|
2021-03-11 07:36:22 +08:00
|
|
|
#include "mlir-hlo/Dialect/mhlo/IR/hlo_ops_common.h"
|
2020-07-29 07:12:08 +08:00
|
|
|
#include "mlir-hlo/Dialect/mhlo/IR/lhlo_ops.h.inc"
|
2021-03-17 04:31:59 +08:00
|
|
|
#include "mlir/Dialect/MemRef/IR/MemRef.h"
|
2020-08-24 03:27:48 +08:00
|
|
|
#include "mlir/Dialect/StandardOps/IR/Ops.h"
|
2020-07-29 07:12:08 +08:00
|
|
|
#include "mlir/IR/Attributes.h"
|
2021-03-31 23:00:52 +08:00
|
|
|
#include "mlir/IR/BlockAndValueMapping.h"
|
2020-07-29 07:12:08 +08:00
|
|
|
#include "mlir/IR/Builders.h"
|
2020-12-12 11:00:36 +08:00
|
|
|
#include "mlir/IR/BuiltinTypes.h"
|
2020-07-29 07:12:08 +08:00
|
|
|
#include "mlir/IR/Dialect.h"
|
|
|
|
#include "mlir/IR/Location.h"
|
|
|
|
#include "mlir/IR/MLIRContext.h"
|
|
|
|
#include "mlir/IR/OpDefinition.h"
|
|
|
|
#include "mlir/IR/OpImplementation.h"
|
|
|
|
#include "mlir/IR/Operation.h"
|
|
|
|
#include "mlir/IR/OperationSupport.h"
|
|
|
|
#include "mlir/IR/PatternMatch.h"
|
|
|
|
#include "mlir/IR/TypeUtilities.h"
|
|
|
|
#include "mlir/IR/Types.h"
|
|
|
|
#include "mlir/IR/Value.h"
|
2020-07-02 03:18:52 +08:00
|
|
|
|
|
|
|
namespace mlir {
|
2020-07-09 01:05:32 +08:00
|
|
|
namespace lmhlo {
|
2020-07-02 03:18:52 +08:00
|
|
|
|
2021-01-27 09:23:49 +08:00
|
|
|
// Constructs the LMHLO dialect and registers all of its operations.
// The operation list is generated by ODS (TableGen) into lhlo_ops.cc.inc;
// GET_OP_LIST selects the comma-separated op class list from that file.
LmhloDialect::LmhloDialect(MLIRContext* context)
    : Dialect(getDialectNamespace(), context, TypeID::get<LmhloDialect>()) {
  addOperations<
#define GET_OP_LIST
#include "mlir-hlo/Dialect/mhlo/IR/lhlo_ops.cc.inc"
      >();
}
|
|
|
|
|
2021-04-26 20:42:39 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AbsOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// Verifies lmhlo.abs: for a complex input the output element type must equal
// the complex element type; otherwise input and output element types must be
// identical.
static LogicalResult Verify(AbsOp op) {
  auto input_elem_ty = getElementTypeOrSelf(op.input().getType());
  auto output_elem_ty = getElementTypeOrSelf(op.output().getType());

  auto complex_ty = input_elem_ty.dyn_cast<ComplexType>();
  if (!complex_ty) {
    // Non-complex abs is element-type preserving.
    if (input_elem_ty != output_elem_ty)
      return op.emitOpError("requires all operands to have the same type");
    return success();
  }

  // Complex abs produces the underlying real element type.
  if (complex_ty.getElementType() != output_elem_ty)
    return op.emitOpError(
        "requires output type to be the same as the element type of the "
        "input");
  return success();
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AllToAllOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2021-02-02 02:22:48 +08:00
|
|
|
// Verifies replica groups attached to collective communication operations.
|
|
|
|
// If the attribute is not empty, it must be a rank 2 tensor, and each replica
|
|
|
|
// should appear exactly once. If `is_uniform_sized` is true, then we also check
|
|
|
|
// that each group is of the same size. If the operation has
|
|
|
|
// `use_global_device_id` set, then replica group cannot be empty.
|
|
|
|
// Verifies the `replica_groups` attribute of a collective op (see the comment
// above for the full contract).
//
// Checks performed, in order:
//   1. The attribute is a rank-2 tensor of i64.
//   2. A 0x0 tensor (empty groups) is trivially valid.
//   3. No replica id appears twice; -1 (padding) is rejected when the op
//      requires uniformly sized groups.
//   4. Replica ids form a dense range [0, max_id] with no gaps.
template <typename OpT>
LogicalResult VerifyReplicaGroups(OpT op, bool is_uniform_sized) {
  DenseIntElementsAttr attr = op.replica_groups();
  auto replica_group_type = attr.getType().dyn_cast<RankedTensorType>();
  if (!replica_group_type || replica_group_type.getRank() != 2 ||
      !replica_group_type.getElementType().isInteger(/*width=*/64))
    return op.emitOpError(
        "replica groups should be a rank 2 tensor of 64 bit integers");

  // Empty replica groups are allowed (the op applies to all replicas).
  if (replica_group_type.getShape().equals(ArrayRef<int64_t>{0, 0}))
    return success();

  int64_t max_replica_id_seen = 0;
  llvm::SmallSet<int64_t, 8> replica_seen;
  for (int64_t id : attr.getValues<int64_t>()) {
    // -1 is a padding value for ragged groups; it is only legal when groups
    // need not be uniformly sized.
    if (is_uniform_sized && id == -1) {
      return op.emitOpError("Invalid replica id -1");
    }
    if (id != -1) {
      if (!replica_seen.insert(id).second) {
        return op.emitOpError("replica id #") << id << " seen more than once";
      }
      max_replica_id_seen = std::max(max_replica_id_seen, id);
    }
  }

  // Every id in [0, max_replica_id_seen] must occur exactly once.
  for (int64_t id = 0; id <= max_replica_id_seen; id++) {
    if (!replica_seen.contains(id)) {
      return op.emitOpError("replica id #")
             << id << " not seen in replica groups";
    }
  }
  return success();
}
|
|
|
|
|
|
|
|
// TODO(jurahul): Add verification for output shape.
|
|
|
|
// all-gather requires uniformly sized replica groups.
static LogicalResult Verify(AllGatherOp op) {
  constexpr bool kUniformSized = true;
  return VerifyReplicaGroups(op, kUniformSized);
}
|
|
|
|
|
|
|
|
// TODO(jurahul): Add verification for output shape.
|
|
|
|
// all-to-all requires uniformly sized replica groups.
static LogicalResult Verify(AllToAllOp op) {
  constexpr bool kUniformSized = true;
  return VerifyReplicaGroups(op, kUniformSized);
}
|
|
|
|
|
2021-01-27 09:23:49 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AllReduceOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// Verifies lmhlo.all_reduce: replica groups must be valid (ragged groups are
// allowed), and each variadic operand must have exactly the type of the
// corresponding result.
static LogicalResult Verify(AllReduceOp op) {
  if (failed(VerifyReplicaGroups(op, /*is_uniform_sized=*/false)))
    return failure();

  // Operands and results are parallel lists; walk them pairwise.
  size_t pair_index = 0;
  for (auto type_pair :
       llvm::zip(op.operands().getTypes(), op.results().getTypes())) {
    Type operand_ty = std::get<0>(type_pair);
    Type result_ty = std::get<1>(type_pair);
    if (operand_ty != result_ty)
      return op.emitOpError("requires operand #")
             << pair_index << " (type: " << operand_ty << ") and result #"
             << pair_index << " (type: " << result_ty << ") to have same type";
    ++pair_index;
  }
  return success();
}
|
|
|
|
|
2021-05-05 15:26:46 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// CaseOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// RegionBranchOpInterface: reports which regions control can transfer to.
void CaseOp::getSuccessorRegions(Optional<unsigned> index,
                                 ArrayRef<Attribute> operands,
                                 SmallVectorImpl<RegionSuccessor>& regions) {
  // Entering the CaseOp itself: any branch region may be selected.
  if (!index.hasValue()) {
    for (Region& branch_region : branches())
      regions.emplace_back(&branch_region, branch_region.getArguments());
  }
  // Any branch region (and, conservatively, the op entry) can transfer
  // control back to the parent operation.
  regions.emplace_back();
}
|
|
|
|
|
2021-03-11 07:36:22 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// CollectivePermuteOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// Verifies lmhlo.collective_permute by delegating to the checker shared with
// the MHLO dialect.
static LogicalResult Verify(CollectivePermuteOp op) {
  auto pairs = op.source_target_pairs();
  return mlir::hlo::VerifyCollectivePermuteSourceTargetPairs(op, pairs);
}
|
|
|
|
|
2020-08-24 03:27:48 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// ConstOp.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// An lho.constant on an memref that is locally allocated and with no other
|
|
|
|
/// users (other than dealloc's) can be erased.
|
|
|
|
// TODO: This can be generalized to an arbitrary op by making use of memory
|
|
|
|
// effects (write memory effect).
|
|
|
|
struct EraseConstOp : public OpRewritePattern<ConstOp> {
  using OpRewritePattern<ConstOp>::OpRewritePattern;

  // Matches a ConstOp writing into a locally allocated memref whose only
  // other users are deallocations, and erases the (dead) store.
  LogicalResult matchAndRewrite(ConstOp op,
                                PatternRewriter& rewriter) const override {
    Value memref = op.output();
    // The buffer must come from a local memref.alloc; otherwise another
    // producer/consumer may observe the constant.
    if (!memref.getDefiningOp<memref::AllocOp>()) {
      return failure();
    }

    // Any user other than this op or a dealloc makes the store live.
    for (Operation* user : memref.getUsers()) {
      if (user == op) continue;
      if (isa<memref::DeallocOp>(user)) continue;
      return failure();
    }

    rewriter.eraseOp(op);
    return success();
  }
};
|
|
|
|
|
|
|
|
void ConstOp::getCanonicalizationPatterns(OwningRewritePatternList& results,
|
|
|
|
MLIRContext* context) {
|
|
|
|
results.insert<EraseConstOp>(context);
|
|
|
|
}
|
|
|
|
|
2021-02-23 00:41:59 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// CustomCallOp.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// Verifies lmhlo.custom_call's optional `target_arg_mapping`:
//   - the target's declared arg/result count must be >= the op's count,
//   - the mapping must contain exactly one entry per op arg/result,
//   - every entry must be unique and lie in [0, target_num).
// The same checks are applied independently to args and results.
static LogicalResult Verify(CustomCallOp op) {
  if (op.target_arg_mapping()) {
    CustomCallTargetArgMapping mapping = *op.target_arg_mapping();
    auto verify_mapping = [&](int64_t target_num, size_t op_num,
                              ArrayAttr mapping,
                              StringRef kind) -> LogicalResult {
      if (target_num < op_num)
        return op.emitOpError("number of target " + kind + " (")
               << target_num << ") cannot be less than the number of " << kind
               << "(" << op_num << ") for the operation";

      if (mapping.size() != op_num)
        return op.emitOpError("number of entries in the mapping for " + kind +
                              " (")
               << mapping.size() << ") should match the number of " << kind
               << " for the operation (" << op_num << ")";

      // Each entry in the mapping should be < target_num and an entry cannot
      // appear more than once. llvm::SmallSet matches the container used by
      // VerifyReplicaGroups above and avoids heap allocation for small maps.
      llvm::SmallSet<int64_t, 8> entries;
      for (Attribute entry : mapping) {
        int64_t int_entry = entry.cast<IntegerAttr>().getInt();
        // ODS verification will ensure that these entries are integers.
        if (!entries.insert(int_entry).second)
          return op.emitOpError("entry ")
                 << int_entry
                 << " cannot appear more than once in the mapping for " << kind;
        if (int_entry < 0 || int_entry >= target_num)
          return op.emitOpError(
                     "entries in mapping for " + kind +
                     " must be >= 0 and less than target's number of " + kind +
                     " (")
                 << target_num << ")";
      }
      return success();
    };
    if (failed(verify_mapping(mapping.num_args().getInt(), op.args().size(),
                              mapping.args_to_target_args(), "args")) ||
        failed(verify_mapping(mapping.num_results().getInt(),
                              op.output().size(),
                              mapping.results_to_target_results(), "results")))
      return failure();
  }
  return success();
}
|
|
|
|
|
2021-03-31 23:00:52 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// ReduceOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// Removes `lmhlo.copy` inside ReduceOp body.
|
|
|
|
//
|
|
|
|
// TODO(b/183920887): Remove this pattern as soon as bufferization is fixed.
|
|
|
|
struct RemoveCopyInReduceBody : public OpRewritePattern<ReduceOp> {
  using OpRewritePattern<ReduceOp>::OpRewritePattern;

  // Rebuilds `reduce` with an identical body except that the single
  // `lmhlo.copy` (and any alloc/dealloc scaffolding) is removed; ops that
  // read the copy's source are rewired to the copy's destination.
  LogicalResult matchAndRewrite(ReduceOp reduce,
                                PatternRewriter& rewriter) const override {
    // Find the only `lmhlo.copy` in the body of `reduce`.
    CopyOp the_only_copy;
    for (auto& op : reduce.body().front()) {
      if (auto copy = dyn_cast<lmhlo::CopyOp>(op)) {
        if (the_only_copy == nullptr) {
          the_only_copy = copy;
        } else {
          // More than one copy: give up, the pattern only handles one.
          the_only_copy = nullptr;
          break;
        }
      }
    }
    if (!the_only_copy) return failure();

    // Clone the reduce without its region, then build a fresh body block
    // with the same argument types as the original.
    auto new_reduce = rewriter.cloneWithoutRegions(reduce);
    Block* new_block =
        rewriter.createBlock(&new_reduce.body(), new_reduce.body().end(),
                             reduce.body().front().getArgumentTypes());

    // Map old block arguments to the new block's arguments.
    mlir::BlockAndValueMapping bvm;
    for (auto item : llvm::zip(reduce.body().front().getArguments(),
                               new_block->getArguments())) {
      bvm.map(std::get<0>(item), std::get<1>(item));
    }
    // Redirect readers of the copy's source to the copy's destination.
    // NOTE(review): `lookup` asserts if the copy's output is not already
    // mapped, i.e. this presumably expects the output to be a block
    // argument — confirm against how bufferization emits these copies.
    bvm.map(the_only_copy.operand(), bvm.lookup(the_only_copy.output()));

    // Clone every op except the copy and its alloc/dealloc scaffolding.
    rewriter.setInsertionPointToStart(new_block);
    for (auto& op : reduce.body().front()) {
      if (llvm::isa<lmhlo::CopyOp>(op) || llvm::isa<memref::DeallocOp>(op) ||
          llvm::isa<memref::AllocOp>(op))
        continue;
      rewriter.clone(op, bvm);
    }
    rewriter.eraseOp(reduce);
    return success();
  }
};
|
|
|
|
|
|
|
|
void ReduceOp::getCanonicalizationPatterns(OwningRewritePatternList& results,
|
|
|
|
MLIRContext* context) {
|
|
|
|
results.insert<RemoveCopyInReduceBody>(context);
|
|
|
|
}
|
|
|
|
|
2021-04-02 01:23:35 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// ReduceWindowOp.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// For reduce-window, all `inputs` need to have compatible shapes.
|
|
|
|
static LogicalResult Verify(ReduceWindowOp op) {
  // All variadic inputs must have compatible (shape-wise) types.
  auto input_types = op.inputs().getTypes();
  if (succeeded(verifyCompatibleShapes(input_types))) return success();
  return op.emitOpError() << "requires same shape for all operands";
}
|
|
|
|
|
2021-05-05 15:26:46 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// WhileOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// RegionBranchOpInterface: while loops alternate between the cond region
// (index 0) and the body region (index 1).
void WhileOp::getSuccessorRegions(Optional<unsigned> index,
                                  ArrayRef<Attribute> operands,
                                  SmallVectorImpl<RegionSuccessor>& regions) {
  // Both the op entry (no index) and the body region feed the cond region.
  const bool entering = !index.hasValue();
  if (entering || index.getValue() == 1) {
    regions.emplace_back(&cond(), cond().getArguments());
    return;
  }
  // From the cond region, control either enters the body or exits the op.
  regions.emplace_back(&body(), body().getArguments());
  regions.emplace_back();
}
|
|
|
|
|
|
|
|
// LoopLikeOpInterface: the repeated region of the loop is the body region.
Region& WhileOp::getLoopBody() { return body(); }
|
|
|
|
|
|
|
|
// LoopLikeOpInterface: a value is loop-invariant w.r.t. definition when its
// enclosing region is not nested inside the body region.
bool WhileOp::isDefinedOutsideOfLoop(Value value) {
  Region* defining_region = value.getParentRegion();
  return !body().isAncestor(defining_region);
}
|
|
|
|
|
|
|
|
// LoopLikeOpInterface: hoists each op immediately before this while op.
LogicalResult WhileOp::moveOutOfLoop(ArrayRef<Operation*> ops) {
  for (Operation* op : ops) {
    op->moveBefore(*this);
  }
  return success();
}
|
|
|
|
|
2020-09-16 20:56:43 +08:00
|
|
|
} // namespace lmhlo
|
|
|
|
} // namespace mlir
|
|
|
|
|
2020-07-02 03:18:52 +08:00
|
|
|
#define GET_OP_CLASSES
|
2020-07-29 07:12:08 +08:00
|
|
|
#include "mlir-hlo/Dialect/mhlo/IR/lhlo_ops.cc.inc"
|
2020-07-02 03:18:52 +08:00
|
|
|
|
2020-09-16 20:56:43 +08:00
|
|
|
namespace mlir {
|
|
|
|
namespace lmhlo {
|
|
|
|
|
2020-07-02 03:18:52 +08:00
|
|
|
// TODO(cheshire): Support folding, reuse code from hlo_ops.cc.
|
|
|
|
|
2021-01-27 09:23:49 +08:00
|
|
|
void FusionOp::build(OpBuilder& builder, OperationState& result,
|
2020-07-02 03:18:52 +08:00
|
|
|
ArrayRef<NamedAttribute> attributes) {
|
|
|
|
result.addAttributes(attributes);
|
2021-01-27 09:23:49 +08:00
|
|
|
Region* bodyRegion = result.addRegion();
|
2020-07-02 03:18:52 +08:00
|
|
|
FusionOp::ensureTerminator(*bodyRegion, builder, result.location);
|
|
|
|
}
|
|
|
|
|
2021-05-05 15:26:46 +08:00
|
|
|
// RegionBranchOpInterface: control enters the single fusion region and then
// returns to the parent operation.
void FusionOp::getSuccessorRegions(Optional<unsigned> index,
                                   ArrayRef<Attribute> operands,
                                   SmallVectorImpl<RegionSuccessor>& regions) {
  if (!index.hasValue()) {
    // Entering the FusionOp: branch into its (only) region.
    regions.emplace_back(&region(), region().getArguments());
    return;
  }
  // Leaving the fusion region: branch back to the parent op.
  assert(index.getValue() == 0 && "expected fusion region");
  regions.emplace_back();
}
|
|
|
|
|
2020-07-09 01:05:32 +08:00
|
|
|
} // namespace lmhlo
|
2020-07-02 03:18:52 +08:00
|
|
|
} // namespace mlir
|