[NFC] Rename passes for stylistic consistency. (#232)

* lower-frontend -> convert-onnx-to-krnl

* lower-all-llvm -> convert-krnl-to-llvm

* lower-krnl -> convert-krnl-to-affine

* Name fix.
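For downstream users only the flag names change; the passes themselves are untouched (NFC). A typical lit-test invocation (mirroring the updated RUN lines below) therefore becomes:

// Old: onnx-mlir-opt --shape-inference --lower-frontend --lower-krnl --lower-all-llvm %s -split-input-file | FileCheck %s
// New: onnx-mlir-opt --shape-inference --convert-onnx-to-krnl --convert-krnl-to-affine --convert-krnl-to-llvm %s -split-input-file | FileCheck %s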
Tian Jin committed 2020-07-31 21:37:35 +08:00 (via GitHub)
commit 58ee62fb49, parent b4228fd288
21 changed files with 63 additions and 62 deletions

View File

@@ -1 +1,3 @@
 add_subdirectory(ONNXToKrnl)
+add_subdirectory(KrnlToLLVM)
+add_subdirectory(KrnlToAffine)

View File

@@ -0,0 +1,11 @@
+add_library(OMKrnlToAffine
+  KrnlToAffine.cpp)
+target_include_directories(OMKrnlToAffine
+  PRIVATE
+  ${ONNX_MLIR_SRC_ROOT}
+  ${ONNX_MLIR_BIN_ROOT}
+  ${ONNX_MLIR_SRC_ROOT})
+# Header dependencies
+add_dependencies(OMKrnlToAffine OMKrnlOpsInc)
+# Linking dependencies
+add_dependencies(OMKrnlToAffine OMKrnlOps)

View File

@@ -106,15 +106,15 @@ void lowerIterateOp(KrnlIterateOp &iterateOp, OpBuilder &builder,
 }
 //===----------------------------------------------------------------------===//
-// KrnlToAffineLoweringPass
+// ConvertKrnlToAffinePass
 //===----------------------------------------------------------------------===//
 /// This is a partial lowering to affine loops of the krnl dialect operations.
 /// At this stage the dialect will still contain standard operations, such as
 /// add and multiply; this pass leaves those operations intact.
 namespace {
-struct KrnlToAffineLoweringPass
-    : public PassWrapper<KrnlToAffineLoweringPass, FunctionPass> {
+struct ConvertKrnlToAffinePass
+    : public PassWrapper<ConvertKrnlToAffinePass, FunctionPass> {
   void runOnFunction() final;
 };
 } // end anonymous namespace.
@@ -211,7 +211,7 @@ LogicalResult interpretOperation(Operation *op, OpBuilder &builder,
   return success();
 }
-void KrnlToAffineLoweringPass::runOnFunction() {
+void ConvertKrnlToAffinePass::runOnFunction() {
   OpBuilder builder(&getContext());
   mlir::Operation *funcOp = getFunction();
@@ -243,6 +243,6 @@ void KrnlToAffineLoweringPass::runOnFunction() {
 }
 } // namespace
-std::unique_ptr<Pass> mlir::createLowerKrnlPass() {
-  return std::make_unique<KrnlToAffineLoweringPass>();
+std::unique_ptr<Pass> mlir::createConvertKrnlToAffinePass() {
+  return std::make_unique<ConvertKrnlToAffinePass>();
 }
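To illustrate what "partial lowering" means here: the pass rewrites krnl loop constructs into affine loops and leaves standard ops where they are. A hand-written sketch (not taken from this commit; names and values are hypothetical):

// Input (schematic): a krnl loop nest around a standard op.
%ii = krnl.define_loops 1
krnl.iterate(%ii) with (%ii -> %i = 0 to 10) {
  %sum = addf %a, %b : f32  // standard op, left intact
}

// After --convert-krnl-to-affine (schematic): only the loop form changes.
affine.for %i = 0 to 10 {
  %sum = addf %a, %b : f32  // still a standard op
}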

View File

@@ -0,0 +1,15 @@
+add_library(OMKrnlToLLVM
+  KrnlToLLVM.cpp)
+target_include_directories(OMKrnlToLLVM
+  PRIVATE
+  ${ONNX_MLIR_SRC_ROOT}
+  ${ONNX_MLIR_BIN_ROOT}
+  ${ONNX_MLIR_SRC_ROOT})
+# Header dependencies
+add_dependencies(OMKrnlToLLVM OMKrnlOpsInc OMONNXOpsInc)
+# Linking dependencies
+add_dependencies(OMKrnlToLLVM OMKrnlOps OMONNXOps)
+target_link_libraries(OMKrnlToLLVM
+  onnx)

View File

@@ -865,13 +865,13 @@ private:
 //===----------------------------------------------------------------------===//
 namespace {
-struct KrnlToLLVMLoweringPass
-    : public PassWrapper<KrnlToLLVMLoweringPass, OperationPass<ModuleOp>> {
+struct ConvertKrnlToLLVMPass
+    : public PassWrapper<ConvertKrnlToLLVMPass, OperationPass<ModuleOp>> {
   void runOnOperation() final;
 };
 } // end anonymous namespace
-void KrnlToLLVMLoweringPass::runOnOperation() {
+void ConvertKrnlToLLVMPass::runOnOperation() {
   // Define the target for this lowering, i.e., the LLVM dialect.
   ConversionTarget target(getContext());
   target.addLegalDialect<LLVM::LLVMDialect>();
@@ -905,6 +905,6 @@ void KrnlToLLVMLoweringPass::runOnOperation() {
 }
 /// Create the pass for lowering the `Krnl`, `Affine` and `Std` dialects to LLVM.
-std::unique_ptr<mlir::Pass> mlir::createKrnlLowerToLLVMPass() {
-  return std::make_unique<KrnlToLLVMLoweringPass>();
+std::unique_ptr<mlir::Pass> mlir::createConvertKrnlToLLVMPass() {
+  return std::make_unique<ConvertKrnlToLLVMPass>();
 }
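Schematically, this pass leaves every remaining op in the LLVM dialect. A hand-written sketch, not from this commit, using roughly the LLVM-dialect syntax of this period:

// Before --convert-krnl-to-llvm (schematic): a standard op.
%0 = addf %a, %b : f32
// After (schematic): everything is an llvm.* op on !llvm types.
%0 = llvm.fadd %a, %b : !llvm.float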

View File

@@ -48,12 +48,13 @@ void initOMPasses() {
         return mlir::createKrnlBundleMemoryPoolsPass();
       });
-  mlir::registerPass(
-      "lower-krnl", "Lower Krnl dialect.", []() -> std::unique_ptr<mlir::Pass> {
-        return mlir::createLowerKrnlPass();
+  mlir::registerPass("convert-krnl-to-affine", "Lower Krnl dialect.",
+      []() -> std::unique_ptr<mlir::Pass> {
+        return mlir::createConvertKrnlToAffinePass();
       });
-  mlir::registerPass("lower-frontend", "Lower frontend ops to Krnl dialect.",
+  mlir::registerPass("convert-onnx-to-krnl",
+      "Lower frontend ops to Krnl dialect.",
       []() -> std::unique_ptr<mlir::Pass> {
         return mlir::createLowerToKrnlPass();
       });
@@ -64,10 +65,10 @@ void initOMPasses() {
         return mlir::createElideConstGlobalValuePass();
       });
-  mlir::registerPass("lower-all-llvm",
+  mlir::registerPass("convert-krnl-to-llvm",
       "Lower the Krnl, Affine and Std dialects to LLVM.",
       []() -> std::unique_ptr<mlir::Pass> {
-        return mlir::createKrnlLowerToLLVMPass();
+        return mlir::createConvertKrnlToLLVMPass();
       });
   mlir::registerPass("pack-krnl-constants",

View File

@@ -397,7 +397,7 @@ void addONNXToKrnlPasses(mlir::PassManager &pm) {
 }
 void addKrnlToAffinePasses(mlir::PassManager &pm) {
-  pm.addPass(mlir::createLowerKrnlPass());
+  pm.addPass(mlir::createConvertKrnlToAffinePass());
   // Fuse loops in Affine dialect.
   // pm.addPass(mlir::createLoopFusionPass());
 }
@@ -405,7 +405,7 @@ void addKrnlToAffinePasses(mlir::PassManager &pm) {
 void addKrnlToLLVMPasses(mlir::PassManager &pm) {
   pm.addPass(mlir::createLowerAffinePass());
   pm.addPass(mlir::createLowerToCFGPass());
-  pm.addPass(mlir::createKrnlLowerToLLVMPass());
+  pm.addPass(mlir::createConvertKrnlToLLVMPass());
   pm.addPass(mlir::createCanonicalizerPass());
 }

View File

@@ -38,13 +38,13 @@ std::unique_ptr<Pass> createKrnlBundleMemoryPoolsPass();
 std::unique_ptr<Pass> createLowerToKrnlPass();
 /// Pass for lowering the Krnl dialect to the affine dialect.
-std::unique_ptr<Pass> createLowerKrnlPass();
+std::unique_ptr<Pass> createConvertKrnlToAffinePass();
 /// Pass for eliding the values of global Krnl operations.
 std::unique_ptr<Pass> createElideConstGlobalValuePass();
 /// Pass for lowering Krnl dialect to LLVM dialect.
-std::unique_ptr<Pass> createKrnlLowerToLLVMPass();
+std::unique_ptr<Pass> createConvertKrnlToLLVMPass();
 /// Pass for packing Krnl global constants.
 std::unique_ptr<Pass> createPackKrnlGlobalConstantsPass();

View File

@@ -1,31 +1,3 @@
-add_library(OMKrnlToAffine
-  LowerKrnl.cpp)
-target_include_directories(OMKrnlToAffine
-  PRIVATE
-  ${ONNX_MLIR_SRC_ROOT}
-  ${ONNX_MLIR_BIN_ROOT}
-  ${ONNX_MLIR_SRC_ROOT})
-# Header dependencies
-add_dependencies(OMKrnlToAffine OMKrnlOpsInc)
-# Linking dependencies
-add_dependencies(OMKrnlToAffine OMKrnlOps)
-add_library(OMKrnlToLLVM
-  LowerToLLVM.cpp)
-target_include_directories(OMKrnlToLLVM
-  PRIVATE
-  ${ONNX_MLIR_SRC_ROOT}
-  ${ONNX_MLIR_BIN_ROOT}
-  ${ONNX_MLIR_SRC_ROOT})
-# Header dependencies
-add_dependencies(OMKrnlToLLVM OMKrnlOpsInc OMONNXOpsInc)
-# Linking dependencies
-add_dependencies(OMKrnlToLLVM OMKrnlOps OMONNXOps)
-target_link_libraries(OMKrnlToLLVM
-  onnx)
 add_library(OMElideKrnlGlobalConstants
   ElideKrnlGlobalConstants.cpp
   ElideKrnlGlobalConstants.hpp)

View File

@@ -1,4 +1,4 @@
-// RUN: onnx-mlir-opt --lower-krnl %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --convert-krnl-to-affine %s -split-input-file | FileCheck %s
 func @test_lower_degenerate_iterate(%arg0: memref<f32>) -> memref<f32> {
   %0 = alloc() : memref<f32>

View File

@@ -1,4 +1,4 @@
-// RUN: onnx-mlir-opt --lower-krnl %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --convert-krnl-to-affine %s -split-input-file | FileCheck %s
 // CHECK-DAG: #{{.*}} = affine_map<(d0) -> (d0)>
 // CHECK-DAG: #{{.*}} = affine_map<(d0) -> (d0 + 2)>

View File

@@ -1,4 +1,4 @@
-// RUN: onnx-mlir-opt --shape-inference --lower-frontend --lower-krnl --lower-all-llvm %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --shape-inference --convert-onnx-to-krnl --convert-krnl-to-affine --convert-krnl-to-llvm %s -split-input-file | FileCheck %s
 // -----

View File

@@ -1,4 +1,4 @@
-// RUN: onnx-mlir-opt --lower-krnl --lower-all-llvm %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --convert-krnl-to-affine --convert-krnl-to-llvm %s -split-input-file | FileCheck %s
 func @test_getref_lowering(%arg0: memref<2x2xf32>) -> memref<2x2xf32> {
   %c13_i64 = constant 13 : i64

View File

@@ -1,4 +1,4 @@
-// RUN: onnx-mlir-opt --shape-inference --lower-frontend --enable-memory-pool --lower-krnl --lower-all-llvm %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --shape-inference --convert-onnx-to-krnl --enable-memory-pool --convert-krnl-to-affine --convert-krnl-to-llvm %s -split-input-file | FileCheck %s
 func @test_memory_pool(%arg0: tensor<10x10xf32>) -> tensor<10x10xf32> {
   %0 = "onnx.Add"(%arg0, %arg0) : (tensor<10x10xf32>, tensor<10x10xf32>) -> tensor<10x10xf32>

View File

@@ -1,4 +1,4 @@
-// RUN: onnx-mlir-opt --lower-krnl %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --convert-krnl-to-affine %s -split-input-file | FileCheck %s
 func @simple_permute() {
   %ii, %jj = krnl.define_loops 2

View File

@@ -1,4 +1,4 @@
-// RUN: onnx-mlir-opt --shape-inference --lower-frontend --lower-krnl --lower-all-llvm %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --shape-inference --convert-onnx-to-krnl --convert-krnl-to-affine --convert-krnl-to-llvm %s -split-input-file | FileCheck %s
 // -----

View File

@@ -1,4 +1,4 @@
-// RUN: onnx-mlir-opt --lower-krnl %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --convert-krnl-to-affine %s -split-input-file | FileCheck %s
 func @simple_unroll() {
   %ii = krnl.define_loops 1

View File

@@ -1,4 +1,4 @@
-// RUN: onnx-mlir-opt --shape-inference --lower-frontend --enable-memory-pool --bundle-memory-pools --canonicalize %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --shape-inference --convert-onnx-to-krnl --enable-memory-pool --bundle-memory-pools --canonicalize %s -split-input-file | FileCheck %s
 func @test_bundle_memory_pool(%arg0: tensor<10x10xf32>, %arg1: tensor<10x20xf32>) -> tensor<10x20xf32> {
   %0 = "onnx.Add"(%arg0, %arg0) : (tensor<10x10xf32>, tensor<10x10xf32>) -> tensor<10x10xf32>

View File

@@ -1,4 +1,4 @@
-// RUN: onnx-mlir-opt --shape-inference --lower-frontend --enable-memory-pool %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --shape-inference --convert-onnx-to-krnl --enable-memory-pool %s -split-input-file | FileCheck %s
 /// One intermediate value to allocate in the memory pool.
 func @test_enable_memory_pool(%arg0: tensor<10x10xf32>) -> tensor<10x10xf32> {

View File

@@ -1,4 +1,4 @@
-// RUN: onnx-mlir-opt --shape-inference --lower-frontend %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --shape-inference --convert-onnx-to-krnl %s -split-input-file | FileCheck %s
 // -----

View File

@@ -1,4 +1,4 @@
-// RUN: onnx-mlir-opt --shape-inference --lower-frontend %s -split-input-file | FileCheck %s
+// RUN: onnx-mlir-opt --shape-inference --convert-onnx-to-krnl %s -split-input-file | FileCheck %s
 // -----