[NFC] Make function names follow style guide.

Functions should start with a capital letter and have a capital letter for each
new word. See https://google.github.io/styleguide/cppguide.html#Function_Names

PiperOrigin-RevId: 347420402
This commit is contained in:
Hanhan Wang 2020-12-14 10:46:04 -08:00 committed by TensorFlow MLIR Team
parent 8b35a75d4a
commit 1a58f19664
1 changed file with 13 additions and 13 deletions

View File

@@ -49,17 +49,17 @@ SmallVector<StringRef, 3> GetNParallelLoopsAttrs(unsigned nParallelLoops) {
 }
 template <bool isLHLO = true>
-Value getResultValue(Operation* op) {
+Value GetResultValue(Operation* op) {
   return isLHLO ? op->getOperand(op->getNumOperands() - 1) : op->getResult(0);
 }
 template <bool isLHLO = true>
-ShapedType getHloOpResultType(Operation* op) {
-  return getResultValue<isLHLO>(op).getType().template cast<ShapedType>();
+ShapedType GetHloOpResultType(Operation* op) {
+  return GetResultValue<isLHLO>(op).getType().template cast<ShapedType>();
 }
 template <bool isLHLO = true>
-bool verifyHloOpBufferOrTensorSemantics(Operation* op) {
+bool VerifyHloOpBufferOrTensorSemantics(Operation* op) {
   auto verify_type = [&](Value val) -> bool {
     return (isLHLO && val.getType().isa<MemRefType>()) ||
            (!isLHLO && val.getType().isa<RankedTensorType>());
@@ -293,8 +293,8 @@ class DataMovementOpConverter : public OpConversionPattern<OpTy> {
   LogicalResult matchAndRewrite(
       OpTy op, ArrayRef<Value> args,
       ConversionPatternRewriter& rewriter) const final {
-    if (!verifyHloOpBufferOrTensorSemantics<isLHLO>(op)) return failure();
-    auto result_type = getHloOpResultType<isLHLO>(op);
+    if (!VerifyHloOpBufferOrTensorSemantics<isLHLO>(op)) return failure();
+    auto result_type = GetHloOpResultType<isLHLO>(op);
     SmallVector<AffineMap, 2> indexing_maps =
         Derived::getIndexingMaps(op, &rewriter);
@@ -331,7 +331,7 @@ class BroadcastConverter
     ShapedType input_type =
         broadcast_op.operand().getType().template cast<ShapedType>();
     unsigned input_rank = input_type.getRank();
-    unsigned nloops = getHloOpResultType<isLHLO>(broadcast_op).getRank();
+    unsigned nloops = GetHloOpResultType<isLHLO>(broadcast_op).getRank();
     // BroadcastOp prepends the dimensions in the `broadcast_sizes` attribute to
     // the input's dimensions.
@@ -365,7 +365,7 @@ class HloBroadcastInDimConverter
   static SmallVector<AffineMap, 2> getIndexingMaps(
       mhlo::BroadcastInDimOp broadcast_op, Builder* b) {
-    auto result_type = getHloOpResultType<false>(broadcast_op);
+    auto result_type = GetHloOpResultType<false>(broadcast_op);
     auto operand_type =
         broadcast_op.operand().getType().template cast<ShapedType>();
     unsigned nloops = result_type.getRank();
@@ -563,7 +563,7 @@ class TransposeConverter
                   isLHLO>::DataMovementOpConverter;
   static SmallVector<AffineMap, 2> getIndexingMaps(OpTy op, Builder* b) {
     auto result_type =
-        getHloOpResultType<isLHLO>(op).template cast<ShapedType>();
+        GetHloOpResultType<isLHLO>(op).template cast<ShapedType>();
     auto nloops = result_type.getRank();
     SmallVector<AffineExpr, 2> input_exprs;
     input_exprs.resize(result_type.getRank());
@@ -587,11 +587,11 @@ class ReshapeOpConverter : public OpConversionPattern<OpTy> {
   LogicalResult matchAndRewrite(
       OpTy reshape_op, ArrayRef<Value> args,
      ConversionPatternRewriter& rewriter) const final {
-    if (!verifyHloOpBufferOrTensorSemantics<isLHLO>(reshape_op))
+    if (!VerifyHloOpBufferOrTensorSemantics<isLHLO>(reshape_op))
       return failure();
     ShapedType operand_type =
         reshape_op.operand().getType().template cast<ShapedType>();
-    ShapedType result_type = getHloOpResultType<isLHLO>(reshape_op);
+    ShapedType result_type = GetHloOpResultType<isLHLO>(reshape_op);
     if (!operand_type.hasStaticShape() || !result_type.hasStaticShape())
       return failure();
@@ -696,7 +696,7 @@ class IotaConverter : public OpConversionPattern<OpTy> {
   LogicalResult matchAndRewrite(
       OpTy iota_op, ArrayRef<Value> args,
       ConversionPatternRewriter& rewriter) const final {
-    ShapedType result_shaped_type = getHloOpResultType<isLHLO>(iota_op);
+    ShapedType result_shaped_type = GetHloOpResultType<isLHLO>(iota_op);
     if (!result_shaped_type) return failure();
     auto result_element_type = result_shaped_type.getElementType();
@@ -867,7 +867,7 @@ class ReverseConverter
                   isLHLO>::DataMovementOpConverter;
   static SmallVector<AffineMap, 2> getIndexingMaps(OpTy op, Builder* b) {
     auto result_type =
-        getHloOpResultType<isLHLO>(op).template cast<ShapedType>();
+        GetHloOpResultType<isLHLO>(op).template cast<ShapedType>();
     auto nloops = result_type.getRank();
     SmallVector<AffineExpr, 2> input_exprs;
     input_exprs.reserve(nloops);