[WIP][NFC]Rename files to llvm style (#35)

* Change naming style for builder directory.

* Change naming style for conversion folder.

* Fix case sensitivity issue.

* Fix missing onnx header onnx_pb.h issue.

* Rename files in Conversion to llvm style.

* Rename files in Dialect to llvm style.

* Path fix.

* Rename files in Pass to llvm style.

* Rename files in Runtime to llvm style.

* Rename files in Tool to llvm style.

* Rename files in Transform to llvm style.

* Change comments about filenames.

* Fix case.

* Rename interface directory to use llvm file naming convention.
Tian Jin 2020-03-19 16:48:09 +08:00 committed by GitHub
parent 549af8f0b2
commit 0aafb3e72f
67 changed files with 245 additions and 200 deletions


@@ -544,7 +544,7 @@ if __name__ == '__main__':
curr_dir = os.path.dirname(os.path.realpath(__file__))
class Args(object):
-op_def_file = os.path.join(curr_dir, 'onnx_ops.td.inc')
-op_importer_file = os.path.join(curr_dir, 'op_build_table.inc')
+op_def_file = os.path.join(curr_dir, 'ONNXOps.td.inc')
+op_importer_file = os.path.join(curr_dir, 'OpBuildTable.inc')
main(Args)


@@ -1,17 +1,16 @@
add_library(builder
-frontend_dialect_helper.cpp
-frontend_dialect_helper.hpp
-frontend_dialect_transformer.cpp
-frontend_dialect_transformer.hpp
-op_build_table.inc
-)
+FrontendDialectHelper.cpp
+FrontendDialectHelper.hpp
+FrontendDialectTransformer.cpp
+FrontendDialectTransformer.hpp
+OpBuildTable.inc)
target_include_directories(builder PRIVATE ${CMAKE_SOURCE_DIR})
target_include_directories(builder PRIVATE ${CMAKE_BINARY_DIR})
target_link_libraries(builder compiler onnx ${MLIRLibs} curses mpark_variant)
target_include_directories(builder
-PRIVATE
+PUBLIC
${CMAKE_SOURCE_DIR}/third_party/onnx
${CMAKE_SOURCE_DIR}/third_party/variant
${CMAKE_SOURCE_DIR})


@@ -1,4 +1,4 @@
-//===------------------- frontend_dialect_helper.cpp ----------------------===//
+//===--------------------- FrontendDialectHelper.cpp ----------------------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -8,7 +8,7 @@
//
//===----------------------------------------------------------------------===//
-#include "src/builder/frontend_dialect_helper.hpp"
+#include "src/Builder/FrontendDialectHelper.hpp"
namespace onnx_mlir {


@@ -1,4 +1,4 @@
-//===------------------- frontend_dialect_helper.hpp ----------------------===//
+//===--------------------- FrontendDialectHelper.hpp ----------------------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -31,7 +31,7 @@
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/Support/raw_ostream.h"
-#include "src/dialect/onnx/onnx_ops.hpp"
+#include "src/Dialect/ONNX/ONNXOps.hpp"
#include "onnx/onnx_pb.h"
namespace onnx_mlir {


@@ -1,4 +1,4 @@
-//===- frontend_dialect_transformer.cpp - MLIR Operations -----------------===//
+//===--------- FrontendDialectTransformer.cpp - MLIR Operations -----------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -19,7 +19,7 @@
#include <mpark/variant.hpp>
namespace bstd = mpark;
-#include "frontend_dialect_transformer.hpp"
+#include "FrontendDialectTransformer.hpp"
namespace onnx_mlir {
namespace {
@@ -369,12 +369,12 @@ private:
llvm::StringRef opName = node.op_type();
// the following code is generated by gen_doc.py
-// refer to dialect/onnx/onnx.td for details
+// refer to Dialect/ONNX/ONNXOps.td for details
// when the input or output of then op does not match the specification,
// the generic operator is used
// one known reeason is the optional input
-#include "src/builder/op_build_table.inc"
+#include "src/Builder/OpBuildTable.inc"
}
/*!


@@ -1,4 +1,4 @@
-//===- frontend_dialect_transformer.hpp - MLIR Operations -----------------===//
+//===--------- FrontendDialectTransformer.hpp - MLIR Operations -----------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -18,7 +18,7 @@
#include "onnx/onnx_pb.h"
-#include "src/builder/frontend_dialect_helper.hpp"
+#include "src/Builder/FrontendDialectHelper.hpp"
namespace mlir {
class MLIRContext;


@@ -1,17 +1,17 @@
add_library(compiler
-dialect/krnl/krnl_ops.cpp
-dialect/krnl/krnl_ops.hpp
-dialect/krnl/krnl_types.cpp
-dialect/krnl/krnl_types.hpp
-dialect/onnx/onnx_ops.cpp
-dialect/onnx/onnx_ops.hpp
-dialect/krnl/krnl_helper.cpp
-dialect/krnl/krnl_helper.hpp
-pass/shape_inference_interface.hpp
-pass/onnx_combine.cpp
-pass/onnx_rewrite.cpp
-pass/onnx_decompose.cpp
-pass/passes.hpp)
+Dialect/Krnl/KrnlOps.cpp
+Dialect/Krnl/KrnlOps.hpp
+Dialect/Krnl/KrnlTypes.cpp
+Dialect/Krnl/KrnlTypes.hpp
+Dialect/ONNX/ONNXOps.cpp
+Dialect/ONNX/ONNXOps.hpp
+Dialect/Krnl/KrnlHelper.cpp
+Dialect/Krnl/KrnlHelper.hpp
+Pass/ShapeInferenceInterface.hpp
+Pass/ONNXCombine.cpp
+Pass/ONNXRewrite.cpp
+Pass/ONNXDecompose.cpp
+Pass/Passes.hpp)
# Include root src directory.
target_include_directories(compiler PRIVATE ${ONNX_MLIR_SRC_ROOT})
@@ -25,30 +25,30 @@ target_link_libraries(compiler
${MLIRLibs}
curses)
-set(LLVM_TARGET_DEFINITIONS pass/onnx_decompose.td)
+set(LLVM_TARGET_DEFINITIONS Pass/ONNXDecompose.td)
onnx_mlir_tablegen(onnx_decompose.inc -gen-rewriters)
add_public_tablegen_target(gen_onnx_decompose)
add_dependencies(compiler gen_onnx_decompose)
-set(LLVM_TARGET_DEFINITIONS pass/shape_inference_interface.td)
+set(LLVM_TARGET_DEFINITIONS Pass/ShapeInferenceInterface.td)
onnx_mlir_tablegen(shape_inference.hpp.inc -gen-op-interface-decls)
onnx_mlir_tablegen(shape_inference.cpp.inc -gen-op-interface-defs)
add_public_tablegen_target(gen_shape_inference)
add_dependencies(compiler gen_shape_inference)
-set(LLVM_TARGET_DEFINITIONS pass/onnx_combine.td)
+set(LLVM_TARGET_DEFINITIONS Pass/ONNXCombine.td)
onnx_mlir_tablegen(onnx_combine.inc -gen-rewriters)
add_public_tablegen_target(gen_onnx_combine)
add_dependencies(compiler gen_onnx_combine)
-set(LLVM_TARGET_DEFINITIONS pass/onnx_rewrite.td)
+set(LLVM_TARGET_DEFINITIONS Pass/ONNXRewrite.td)
onnx_mlir_tablegen(onnx_rewrite.inc -gen-rewriters)
add_public_tablegen_target(gen_onnx_rewrite)
add_dependencies(compiler gen_onnx_rewrite)
-add_subdirectory(interface)
-set(LLVM_TARGET_DEFINITIONS dialect/onnx/onnx.td)
+add_subdirectory(Interface)
+set(LLVM_TARGET_DEFINITIONS Dialect/ONNX/ONNXOps.td)
onnx_mlir_tablegen(onnx.hpp.inc -gen-op-decls "-I${CMAKE_SOURCE_DIR}/compiler/pass")
onnx_mlir_tablegen(onnx.cpp.inc -gen-op-defs "-I${CMAKE_SOURCE_DIR}/compiler/pass")
set(GEN_DOC_FILE ${CMAKE_BINARY_DIR}/docs/Dialects/onnx.md)
@@ -61,23 +61,22 @@ add_dependencies(compiler gen_onnx)
# dependency of the onnx dialect library, which is currently part of `compiler`.
add_dependencies(compiler onnx_mlir_gen_promotable_const_operands_op_interface)
-add_onnx_mlir_dialect_doc(onnx dialect/onnx/onnx.td)
-set(LLVM_TARGET_DEFINITIONS dialect/krnl/krnl_ops.td)
+add_onnx_mlir_dialect_doc(onnx Dialect/ONNX/ONNXOps.td)
+set(LLVM_TARGET_DEFINITIONS Dialect/Krnl/KrnlOps.td)
onnx_mlir_tablegen(krnl.hpp.inc -gen-op-decls)
onnx_mlir_tablegen(krnl.cpp.inc -gen-op-defs)
add_public_tablegen_target(gen_krnl_ops)
add_dependencies(compiler gen_krnl_ops)
-add_library(onnx_mlir_onnx_decompose pass/onnx_decompose.cpp)
+add_library(onnx_mlir_onnx_decompose Pass/ONNXDecompose.cpp)
target_include_directories(onnx_mlir_onnx_decompose
PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT}
${ONNX_MLIR_SRC_ROOT})
target_link_libraries(onnx_mlir_onnx_decompose ${MLIRLibs})
add_dependencies(onnx_mlir_onnx_decompose gen_onnx)
-add_library(onnx_mlir_shape_inference pass/shape_inference_pass.cpp)
+add_library(onnx_mlir_shape_inference Pass/ShapeInferencePass.cpp)
target_include_directories(onnx_mlir_shape_inference
PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT}
${ONNX_MLIR_SRC_ROOT})
@@ -85,33 +84,33 @@ target_link_libraries(onnx_mlir_shape_inference ${MLIRLibs})
add_dependencies(onnx_mlir_shape_inference gen_onnx)
add_library(onnx_mlir_lower_frontend
-conversion/onnx_to_krnl/onnx_to_krnl_common.cpp
-conversion/onnx_to_krnl/onnx_to_krnl_common.hpp
-conversion/onnx_to_krnl/math/elementwise.cpp
-conversion/onnx_to_krnl/math/gemm.cpp
-conversion/onnx_to_krnl/math/matmul.cpp
-conversion/onnx_to_krnl/math/reduction.cpp
-conversion/onnx_to_krnl/math/softmax.cpp
-conversion/onnx_to_krnl/nn/conv.cpp
-conversion/onnx_to_krnl/nn/normalization.cpp
-conversion/onnx_to_krnl/nn/pooling.cpp
-conversion/onnx_to_krnl/tensor/identity.cpp
-conversion/onnx_to_krnl/tensor/reshape.cpp
-conversion/onnx_to_krnl/tensor/padconstantvaluepad.cpp
-conversion/onnx_to_krnl/tensor/transpose.cpp
-conversion/onnx_to_krnl/tensor/unsqueeze.cpp
-conversion/onnx_to_krnl/tensor/constant.cpp
-conversion/onnx_to_krnl/convert_onnx_to_krnl.cpp)
+Conversion/ONNXToKrnl/ONNXToKrnlCommon.cpp
+Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp
+Conversion/ONNXToKrnl/Math/Elementwise.cpp
+Conversion/ONNXToKrnl/Math/Gemm.cpp
+Conversion/ONNXToKrnl/Math/MatMul.cpp
+Conversion/ONNXToKrnl/Math/Reduction.cpp
+Conversion/ONNXToKrnl/Math/Softmax.cpp
+Conversion/ONNXToKrnl/NN/Conv.cpp
+Conversion/ONNXToKrnl/NN/Normalization.cpp
+Conversion/ONNXToKrnl/NN/Pooling.cpp
+Conversion/ONNXToKrnl/Tensor/Identity.cpp
+Conversion/ONNXToKrnl/Tensor/Reshape.cpp
+Conversion/ONNXToKrnl/Tensor/PadConstantValuePad.cpp
+Conversion/ONNXToKrnl/Tensor/Transpose.cpp
+Conversion/ONNXToKrnl/Tensor/Unsqueeze.cpp
+Conversion/ONNXToKrnl/Tensor/Constant.cpp
+Conversion/ONNXToKrnl/ConvertONNXToKrnl.cpp)
target_include_directories(onnx_mlir_lower_frontend
PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT}
${ONNX_MLIR_SRC_ROOT})
target_link_libraries(onnx_mlir_lower_frontend ${MLIRLibs})
add_dependencies(onnx_mlir_lower_frontend gen_krnl_ops)
-add_subdirectory(transform)
-add_subdirectory(tool)
-add_subdirectory(builder)
-add_subdirectory(runtime)
+add_subdirectory(Transform)
+add_subdirectory(Tool)
+add_subdirectory(Builder)
+add_subdirectory(Runtime)
add_executable(onnx-mlir main.cpp)


@@ -1,4 +1,4 @@
-//====- convert_onnx_to_krnl.cpp - ONNX dialects to Krnl lowering ---------===//
+//====------ ConvertONNXToKrnl.cpp - ONNX dialects to Krnl lowering --------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -9,7 +9,7 @@
//
//===----------------------------------------------------------------------===//
-#include "src/conversion/onnx_to_krnl/onnx_to_krnl_common.hpp"
+#include "src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp"
using namespace mlir;


@@ -1,4 +1,4 @@
-//===----- elementwise.cpp - Elementwise Ops ------------------------------===//
+//===---------------- Elementwise.cpp - Elementwise Ops -------------------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -8,7 +8,7 @@
//
//===----------------------------------------------------------------------===//
-#include "src/conversion/onnx_to_krnl/onnx_to_krnl_common.hpp"
+#include "src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp"
using namespace mlir;


@@ -1,4 +1,4 @@
-//===----- gemm.cpp - Lowering Gemm Op ------------------------------------===//
+//===----------------- Gemm.cpp - Lowering Gemm Op -------------------------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -8,7 +8,7 @@
//
//===----------------------------------------------------------------------===//
-#include "src/conversion/onnx_to_krnl/onnx_to_krnl_common.hpp"
+#include "src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp"
using namespace mlir;


@@ -1,4 +1,4 @@
-//===----- matmul.cpp - Lowering Matmul Op --------------------------------===//
+//===----------------- Matmul.cpp - Lowering Matmul Op --------------------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -8,7 +8,7 @@
//
//===----------------------------------------------------------------------===//
-#include "src/conversion/onnx_to_krnl/onnx_to_krnl_common.hpp"
+#include "src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp"
using namespace mlir;


@@ -1,4 +1,4 @@
-//===----- reduction.cpp - Lowering Reduction Ops -------------------------===//
+//===-------------- Reduction.cpp - Lowering Reduction Ops ----------------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -8,7 +8,7 @@
//
//===----------------------------------------------------------------------===//
-#include "src/conversion/onnx_to_krnl/onnx_to_krnl_common.hpp"
+#include "src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp"
using namespace mlir;


@@ -1,4 +1,4 @@
-//===----- softmax.cpp - Softmax Op ---------------------------------------===//
+//===----------------- Softmax.cpp - Softmax Op ---------------------------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -8,7 +8,7 @@
//
//===----------------------------------------------------------------------===//
-#include "src/conversion/onnx_to_krnl/onnx_to_krnl_common.hpp"
+#include "src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp"
using namespace mlir;


@@ -1,4 +1,4 @@
-//===----- conv.cpp - Lowering Convolution Op -----------------------------===//
+//===--------------- Conv.cpp - Lowering Convolution Op --------------------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -8,7 +8,7 @@
//
//===----------------------------------------------------------------------===//
-#include "src/conversion/onnx_to_krnl/onnx_to_krnl_common.hpp"
+#include "src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp"
using namespace mlir;


@@ -1,4 +1,4 @@
-//===----- normalization.cpp - Lowering Normalization Ops -----------------===//
+//===----------- Normalization.cpp - Lowering Normalization Ops ------------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -8,7 +8,7 @@
//
//===----------------------------------------------------------------------===//
-#include "src/conversion/onnx_to_krnl/onnx_to_krnl_common.hpp"
+#include "src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp"
using namespace mlir;


@@ -1,4 +1,4 @@
-//===----- pooling.cpp - Lowering Pooling Ops -----------------------------===//
+//===---------------- Pooling.cpp - Lowering Pooling Ops ------------------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -8,7 +8,7 @@
//
//===----------------------------------------------------------------------===//
-#include "src/conversion/onnx_to_krnl/onnx_to_krnl_common.hpp"
+#include "src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp"
using namespace mlir;


@@ -1,4 +1,4 @@
-//====-- onnx_to_krnl_common.cpp - ONNX dialects to Krnl lowering ---------===//
+//====----- ONNXToKrnlCommon.cpp - ONNX dialects to Krnl lowering ---------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -9,7 +9,7 @@
//
//===----------------------------------------------------------------------===//
-#include "src/conversion/onnx_to_krnl/onnx_to_krnl_common.hpp"
+#include "src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp"
/// Check is all dimensions are known at compile time.
bool hasAllConstantDimensions(MemRefType type) {


@@ -1,4 +1,4 @@
-//====-- onnx_to_krnl_common.hpp - ONNX dialects to Krnl lowering ---------===//
+//====------ ONNXToKrnlCommon.hpp - ONNX dialects to Krnl lowering --------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -21,10 +21,10 @@
#include "llvm/ADT/Sequence.h"
#include "mlir/IR/PatternMatch.h"
-#include "src/dialect/krnl/krnl_helper.hpp"
-#include "src/dialect/krnl/krnl_ops.hpp"
-#include "src/dialect/onnx/onnx_ops.hpp"
-#include "src/pass/passes.hpp"
+#include "src/Dialect/Krnl/KrnlHelper.hpp"
+#include "src/Dialect/Krnl/KrnlOps.hpp"
+#include "src/Dialect/ONNX/ONNXOps.hpp"
+#include "src/Pass/Passes.hpp"
using namespace mlir;
@@ -198,7 +198,7 @@ struct TensorTypeConverter : public TypeConverter {
// Functions to add lowering patterns for frontend operations.
//===----------------------------------------------------------------------===//
-// `math` directory methods:
+// `Math` directory methods:
void populateLoweringONNXElementwiseOpPattern(
OwningRewritePatternList &patterns, MLIRContext *ctx);
@@ -215,7 +215,7 @@ void populateLoweringONNXReductionOpPattern(
void populateLoweringONNXSoftmaxOpPattern(
OwningRewritePatternList &patterns, MLIRContext *ctx);
-// `nn` directory methods:
+// `NN` directory methods:
void populateLoweringONNXConvOpPattern(
OwningRewritePatternList &patterns, MLIRContext *ctx);
@@ -226,7 +226,7 @@ void populateLoweringONNXNormalizationOpPattern(
void populateLoweringONNXPoolingOpPattern(
OwningRewritePatternList &patterns, MLIRContext *ctx);
-// `tensor` directory methods:
+// `Tensor` directory methods:
void populateLoweringONNXUnsqueezeOpPattern(
OwningRewritePatternList &patterns, MLIRContext *ctx);


@@ -1,4 +1,4 @@
-//===---- constant.cpp - Lowering Constant Op -----------------------------===//
+//===---------------- Constant.cpp - Lowering Constant Op -----------------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -8,7 +8,7 @@
//
//===----------------------------------------------------------------------===//
-#include "src/conversion/onnx_to_krnl/onnx_to_krnl_common.hpp"
+#include "src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp"
using namespace mlir;


@@ -1,4 +1,4 @@
-//===----- identity.cpp - Lowering Identity Op ----------------------------===//
+//===----------------- Identity.cpp - Lowering Identity Op ----------------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -8,7 +8,7 @@
//
//===----------------------------------------------------------------------===//
-#include "src/conversion/onnx_to_krnl/onnx_to_krnl_common.hpp"
+#include "src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp"
using namespace mlir;


@@ -1,4 +1,4 @@
-//===----padconstantvaluepad.cpp - Lowering PadConstantValuePad Op --------===//
+//===------PadConstantValuePad.cpp - Lowering PadConstantValuePad Op ------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -8,7 +8,7 @@
//
//===----------------------------------------------------------------------===//
-#include "src/conversion/onnx_to_krnl/onnx_to_krnl_common.hpp"
+#include "src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp"
using namespace mlir;


@@ -1,4 +1,4 @@
-//===----- reshape.cpp - Lowering Reshape Op ------------------------------===//
+//===---------------- Reshape.cpp - Lowering Reshape Op -------------------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -8,7 +8,7 @@
//
//===----------------------------------------------------------------------===//
-#include "src/conversion/onnx_to_krnl/onnx_to_krnl_common.hpp"
+#include "src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp"
using namespace mlir;


@@ -1,4 +1,4 @@
-//===----- transpose.cpp - Lowering Transpose Op --------------------------===//
+//===---------------- Transpose.cpp - Lowering Transpose Op ---------------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -8,7 +8,7 @@
//
//===----------------------------------------------------------------------===//
-#include "src/conversion/onnx_to_krnl/onnx_to_krnl_common.hpp"
+#include "src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp"
using namespace mlir;


@@ -1,4 +1,4 @@
-//===----- unsqueeze.cpp - Lowering Unsqueeze Op --------------------------===//
+//===--------------- Unsqueeze.cpp - Lowering Unsqueeze Op ----------------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -8,7 +8,7 @@
//
//===----------------------------------------------------------------------===//
-#include "src/conversion/onnx_to_krnl/onnx_to_krnl_common.hpp"
+#include "src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp"
using namespace mlir;


@@ -1,10 +1,20 @@
+//====---------------- KrnlHelper.cpp - Krnl Dialect Helper----------------===//
+//
+// Copyright 2019-2020 The IBM Research Authors.
+//
+// =============================================================================
+//
+// This file declares helper methods to build Krnl Dialect Ops.
+//
+//===----------------------------------------------------------------------===//
#include "mlir/Dialect/AffineOps/AffineOps.h"
#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/IR/AffineExpr.h"
-#include "src/dialect/krnl/krnl_ops.hpp"
-#include "krnl_helper.hpp"
+#include "KrnlOps.hpp"
+#include "KrnlHelper.hpp"
namespace onnx_mlir {


@@ -1,3 +1,13 @@
+//====---------------- KrnlHelper.hpp - Krnl Dialect Helper----------------===//
+//
+// Copyright 2019-2020 The IBM Research Authors.
+//
+// =============================================================================
+//
+// This file implements helper methods to build Krnl Dialect ops.
+//
+//===----------------------------------------------------------------------===//
#pragma once
#include <queue>


@@ -1,9 +1,11 @@
-//===--------------------- krnl_ops.cpp - MLIR Operations -----------------===//
+//===---------------------- KrnlOps.cpp - Krnl Operations -----------------===//
//
-// Copyright 2019 The IBM Research Authors.
+// Copyright 2019-2020 The IBM Research Authors.
//
// =============================================================================
//
+// This file contains implementations of krnl operations.
+//
//===----------------------------------------------------------------------===//
#include <iostream>
@@ -24,9 +26,9 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
-#include "krnl_helper.hpp"
-#include "krnl_ops.hpp"
+#include "KrnlHelper.hpp"
+#include "KrnlOps.hpp"
using namespace mlir;


@@ -1,9 +1,11 @@
-//===--------------------- krnl_ops.hpp - MLIR Operations -----------------===//
+//===--------------------- KrnlOps.hpp - Krnl Operations ------------------===//
//
-// Copyright 2019 The IBM Research Authors.
+// Copyright 2019-2020 The IBM Research Authors.
//
// =============================================================================
//
+// This file contains declarations of krnl operations.
+//
//===----------------------------------------------------------------------===//
#pragma once
@@ -15,8 +17,8 @@
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/StandardTypes.h"
-#include "src/dialect/krnl/krnl_helper.hpp"
-#include "src/dialect/krnl/krnl_types.hpp"
+#include "KrnlHelper.hpp"
+#include "KrnlTypes.hpp"
namespace mlir {
class KrnlOpsDialect : public Dialect {


@@ -1,9 +1,11 @@
-//===--------------------- krnl_ops.td - MLIR Operations ------------------===//
+//===------------------ KrnlOps.td - MLIR Operations ---------*- tablegen -===//
//
-// Copyright 2019 The IBM Research Authors.
+// Copyright 2019-2020 The IBM Research Authors.
//
// =============================================================================
//
+// This file contains TableGen definition of krnl operations.
+//
//===----------------------------------------------------------------------===//
include "mlir/IR/OpBase.td"


@@ -1,9 +1,11 @@
-//===--------------------- krnl_types.cpp - MLIR Operations ---------------===//
+//===------------------- KrnlTypes.hpp - Krnl Operations ------------------===//
//
-// Copyright 2019 The IBM Research Authors.
+// Copyright 2019-2020 The IBM Research Authors.
//
// =============================================================================
//
+// This file contains definition of krnl types.
+//
//===----------------------------------------------------------------------===//
-#include "krnl_types.hpp"
+#include "KrnlTypes.hpp"


@@ -1,9 +1,11 @@
-//===--------------------- krnl_types.hpp - MLIR Operations ---------------===//
+//===------------------- KrnlTypes.hpp - Krnl Operations ------------------===//
//
-// Copyright 2019 The IBM Research Authors.
+// Copyright 2019-2020 The IBM Research Authors.
//
// =============================================================================
//
+// This file contains declarations of krnl types.
+//
//===----------------------------------------------------------------------===//
#pragma once


@@ -1,12 +1,13 @@
-//===- onnx_ops.cpp - MLIR ONNX Operations --------------------------------===//
+//===------------------ ONNXOps.cpp - ONNX Operations ---------------------===//
//
-// Copyright 2019 The IBM Research Authors.
+// Copyright 2019-2020 The IBM Research Authors.
//
// =============================================================================
//
-// This file defines ONNX operations in the MLIR operation set.
+// This file provides definition of ONNX dialect operations.
//
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/Traits.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/Builders.h"
@@ -19,7 +20,7 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
-#include "onnx_ops.hpp"
+#include "ONNXOps.hpp"
using namespace mlir;
using namespace mlir::OpTrait::util;


@@ -1,4 +1,4 @@
-//===- onnx_ops.hpp - MLIR ONNX Operations --------------------------------===//
+//===-------------------- ONNXOps.hpp - ONNX Operations -------------------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -19,8 +19,8 @@
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/StandardTypes.h"
-#include "src/pass/shape_inference_interface.hpp"
-#include "src/interface/promotable_const_operands_op_interface.hpp"
+#include "src/Pass/ShapeInferenceInterface.hpp"
+#include "src/Interface/PromotableConstOperandsOpInterface.hpp"
namespace mlir {


@@ -1,10 +1,10 @@
-//===- ONNXOps.td -- ONNX operation definitions ---------*- tablegen -*----===//
+//===--- ONNXOps.td -- ONNX Dialect Operation Definitions ----*- tablegen -===//
//
-// Copyright 2019 The IBM Research Authors
+// Copyright 2019-2020 The IBM Research Authors
//
// =============================================================================
//
-// Defines MLIR ONNX operations.
+// Defines ONNX Dialect operations.
//
//===----------------------------------------------------------------------===//
@@ -19,12 +19,12 @@ include "mlir/IR/OpBase.td"
#ifdef SHAPE_INFERENCE_INTERFACE
#else
-include "pass/shape_inference_interface.td"
+include "Pass/ShapeInferenceInterface.td"
#endif // SHAPE_INFERENCE_INTERFACE
#ifdef PROMOTABLE_CONST_OPERANDS_OP_INTERFACE
#else
-include "interface/promotable_const_operands_op_interface.td"
+include "Interface/PromotableConstOperandsOpInterface.td"
#endif // PROMOTABLE_CONST_OPERANDS_OP_INTERFACE
def ONNX_Dialect : Dialect {
@@ -61,7 +61,7 @@ class ONNX_Op<string mnemonic, list<OpTrait> traits = []> :
// 4. type of string, complex64 and complex128 for input/output are ignored
// 5. unsigned int are treated as signed one
-include "dialect/onnx/onnx_ops.td.inc"
+include "Dialect/ONNX/ONNXOps.td.inc"
// Indicate entry point functions of ONNX graph.
def ONNXEntryPointOp: ONNX_Op<"EntryPoint"> {


@@ -1,11 +1,11 @@
-set(LLVM_TARGET_DEFINITIONS promotable_const_operands_op_interface.td)
+set(LLVM_TARGET_DEFINITIONS PromotableConstOperandsOpInterface.td)
onnx_mlir_tablegen(promotable_const_operands_op_interface.hpp.inc -gen-op-interface-decls)
onnx_mlir_tablegen(promotable_const_operands_op_interface.cpp.inc -gen-op-interface-defs)
add_public_tablegen_target(onnx_mlir_gen_promotable_const_operands_op_interface)
add_library(onnx_mlir_promotable_const_operands_op_interface
-promotable_const_operands_op_interface.hpp
-promotable_const_operands_op_interface.cpp)
+PromotableConstOperandsOpInterface.hpp
+PromotableConstOperandsOpInterface.cpp)
target_include_directories(onnx_mlir_promotable_const_operands_op_interface
PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT}
${ONNX_MLIR_SRC_ROOT})


@@ -11,7 +11,7 @@
//===----------------------------------------------------------------------===//
-#include "src/interface/promotable_const_operands_op_interface.hpp"
+#include "src/Interface/PromotableConstOperandsOpInterface.hpp"
using namespace mlir;
@@ -19,5 +19,5 @@ using namespace mlir;
// Promotable Const Operands Op Interface
//===----------------------------------------------------------------------===//
-#include "src/interface/promotable_const_operands_op_interface.cpp.inc"
+#include "src/Interface/promotable_const_operands_op_interface.cpp.inc"


@@ -20,6 +20,6 @@
namespace mlir {
/// Include the auto-generated declarations.
-#include "src/interface/promotable_const_operands_op_interface.hpp.inc"
+#include "src/Interface/promotable_const_operands_op_interface.hpp.inc"
} // end namespace mlir


@@ -1,4 +1,4 @@
-//===- ONNXCombine.cpp - ONNX High Level Optimizer ------------------------===//
+//===--------- ONNXCombine.cpp - ONNX High Level Optimizer ----------------===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -13,7 +13,7 @@
#include "mlir/IR/PatternMatch.h"
#include <numeric>
-#include "src/dialect/onnx/onnx_ops.hpp"
+#include "src/Dialect/ONNX/ONNXOps.hpp"
using namespace mlir;


@@ -1,6 +1,6 @@
-//=- ONNXCombine.td - Pattern Match Optimizations for ONNX -*- tablegen -*-===//
+//===--- ONNXCombine.td - Pattern Match Opt for ONNX Dialect--*- tablegen -===//
//
-// Copyright 2019 The IBM Research Authors.
+// Copyright 2019-2020 The IBM Research Authors.
//
// =============================================================================
//
@@ -13,7 +13,7 @@
#define ONNX_COMBINE
#ifndef OP_BASE
-include "dialect/onnx/onnx.td"
+include "Dialect/ONNX/ONNXOps.td"
#endif // OP_BASE
/// Note: The DRR definition used for defining patterns is shown below:


@@ -1,6 +1,6 @@
-//===- onnx_decompose.cpp - ONNX High Level Rewriting ---------------------===//
+//===----------- ONNXDecompose.cpp - ONNX High Level Rewriting ------------===//
//
-// Copyright 2019 The IBM Research Authors.
+// Copyright 2019-2020 The IBM Research Authors.
//
// =============================================================================
//
@@ -18,8 +18,8 @@
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
-#include "src/dialect/onnx/onnx_ops.hpp"
-#include "src/pass/passes.hpp"
+#include "src/Dialect/ONNX/ONNXOps.hpp"
+#include "Passes.hpp"
using namespace mlir;


@@ -1,19 +1,19 @@
-//===----------------------------------------------------------------------===//
-//=- onnx_decompose.td - Rewriting for decomposing ONNX Ops -*- tablegen -*===//
+//===- ONNXDecompose.td - Rewriting for decomposing ONNX Ops -*- tablegen -===//
//
-// Copyright 2019 The IBM Research Authors.
+// Copyright 2019-2020 The IBM Research Authors.
//
// =============================================================================
//
// Defines language-specific pattern match rewritings for ONNX using
// Declarative Rewrite Rules (DRR) specified using TableGen records.
//
+//===----------------------------------------------------------------------===//
#ifndef ONNX_DECOMPOSE
#define ONNX_DECOMPOSE
#ifndef OP_BASE
-include "dialect/onnx/onnx.td"
+include "Dialect/ONNX/ONNXOps.td"
#endif // OP_BASE
/// Note: The DRR definition used for defining patterns is shown below:


@@ -1,6 +1,6 @@
-//===- onnx_rewrite.cpp - ONNX High Level Optimizer -----------------------===//
+//===----------- ONNXRewrite.cpp - ONNX High Level Optimizer --------------===//
//
-// Copyright 2019 The IBM Research Authors.
+// Copyright 2019-2020 The IBM Research Authors.
//
// =============================================================================
//
@@ -12,7 +12,7 @@
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
-#include "src/dialect/onnx/onnx_ops.hpp"
+#include "src/Dialect/ONNX/ONNXOps.hpp"
using namespace mlir;


@@ -1,5 +1,4 @@
-//===----------------------------------------------------------------------===//
-//=- onnx_rewrite.td - Pattern Match Rewriting for ONNX -*- tablegen -*----===//
+//===---- ONNXRewrite.td - Pattern Match Rewriting for ONNX --*- tablegen -===//
//
// Copyright 2019 The IBM Research Authors.
//
@@ -8,12 +7,13 @@
// Defines language-specific pattern match optimizations for ONNX using
// Declarative Rewrite Rules (DRR) specified using TableGen records.
//
+//===----------------------------------------------------------------------===//
#ifndef ONNX_REWRITE
#define ONNX_REWRITE
#ifndef OP_BASE
-include "dialect/onnx/onnx.td"
+include "Dialect/ONNX/ONNXOps.td"
#endif // OP_BASE
/// Note: The DRR definition used for defining patterns is shown below:


@@ -1,6 +1,6 @@
-//===- passes.hpp - ONNX MLIR Passes Definition ---------------------------===//
+//===---------- Passes.hpp - ONNX MLIR Passes Definition ------------------===//
//
-// Copyright 2019 The IBM Research Authors.
+// Copyright 2019-2020 The IBM Research Authors.
//
// =============================================================================
//


@@ -1,6 +1,6 @@
-//===- shape_inference_interface.hpp - Definition for ShapeInference --------=//
+//===---- ShapeInferenceInterface.hpp - Definition for ShapeInference -----===//
//
-// Copyright 2019 The IBM Research Authors.
+// Copyright 2019-2020 The IBM Research Authors.
//
// =============================================================================
//


@@ -1,6 +1,6 @@
-//=- shape_inference_interface.td - Shape Inference Interface -*- tablegen -==//
+//===- ShapeInferenceInterface.td - ShapeInference Interface -*- tablegen -===//
//
-// Copyright 2019 The IBM Research Authors.
+// Copyright 2019-2020 The IBM Research Authors.
//
// =============================================================================
//


@@ -1,6 +1,6 @@
-//===----- shape_inference_pass.cpp - Shape Inference ---------------------===//
+//===------- ShapeInferencePass.cpp - Shape Inference ---------------------===//
//
-// Copyright 2019 The IBM Research Authors.
+// Copyright 2019-2020 The IBM Research Authors.
//
// =============================================================================
//
@@ -14,9 +14,9 @@
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/StandardTypes.h"
-#include "shape_inference_interface.hpp"
-#include "passes.hpp"
+#include "ShapeInferenceInterface.hpp"
+#include "Passes.hpp"
using namespace mlir;


@@ -1,16 +1,16 @@
add_library(cruntime
-dyn_memref.cpp
-dyn_memref.h
-data_type.h)
+DynMemRef.cpp
+DynMemRef.h
+DataType.h)
target_include_directories(cruntime
PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT}
${ONNX_MLIR_SRC_ROOT})
pybind11_add_module(pyruntime
-dyn_memref.cpp
-dyn_memref.h
-runtime.cpp
-runtime.hpp)
+DynMemRef.cpp
+DynMemRef.h
+Runtime.cpp
+Runtime.hpp)
target_link_libraries(pyruntime PRIVATE ${CMAKE_DL_LIBS})
target_include_directories(pyruntime
PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT}


@@ -3,7 +3,7 @@
#include <string>
#include <vector>
-#include "dyn_memref.h"
+#include "DynMemRef.h"
DynMemRef::DynMemRef(int _rank) {
rank = _rank;


@@ -1,4 +1,4 @@
-#include "runtime.hpp"
+#include "Runtime.hpp"
ExecutionSession::ExecutionSession(std::string sharedLibPath,
std::string entryPointName) {


@@ -8,7 +8,7 @@
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
-#include "src/runtime/dyn_memref.h"
+#include "DynMemRef.h"
namespace py = pybind11;

src/Tool/CMakeLists.txt (new file)

@@ -0,0 +1 @@
+add_subdirectory(ONNXMLIROpt)


@@ -1,4 +1,4 @@
-add_executable(onnx-mlir-opt onnx_mlir_opt.cpp)
+add_executable(onnx-mlir-opt ONNXMLIROpt.cpp)
add_dependencies(onnx-mlir-opt gen_krnl_ops)
target_include_directories(onnx-mlir-opt PRIVATE ${ONNX_MLIR_SRC_ROOT})


@@ -1,9 +1,11 @@
-//===---------------- onnx_mlir_opt.cpp - MLIR Operations -----------------===//
+//===-------------- ONNXMLIROpt.cpp - Optimization Driver -----------------===//
//
-// Copyright 2019 The IBM Research Authors.
+// Copyright 2019-2020 The IBM Research Authors.
//
// =============================================================================
//
+//
+//
//===----------------------------------------------------------------------===//
#include <llvm/Support/CommandLine.h>
@@ -15,9 +17,9 @@
#include <mlir/Support/FileUtilities.h>
#include <mlir/Support/MlirOptMain.h>
-#include "src/dialect/krnl/krnl_ops.hpp"
-#include "src/dialect/onnx/onnx_ops.hpp"
-#include "src/pass/passes.hpp"
+#include "src/Dialect/Krnl/KrnlOps.hpp"
+#include "src/Dialect/ONNX/ONNXOps.hpp"
+#include "src/Pass/Passes.hpp"
using namespace onnx_mlir;


@@ -1,6 +1,6 @@
add_library(onnx_mlir_transform
-lower_krnl.cpp
-lower_to_llvm.cpp)
+LowerKrnl.cpp
+LowerToLLVM.cpp)
target_include_directories(onnx_mlir_transform
PRIVATE ${ONNX_MLIR_SRC_ROOT} ${ONNX_MLIR_BIN_ROOT}


@@ -1,10 +1,20 @@
+//===-------------- LowerKrnl.cpp - Krnl Dialect Lowering -----------------===//
+//
+// Copyright 2019-2020 The IBM Research Authors.
+//
+// =============================================================================
+//
+//
+//
+//===----------------------------------------------------------------------===//
#include "mlir/Dialect/AffineOps/AffineOps.h"
#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
-#include "src/dialect/krnl/krnl_ops.hpp"
-#include "src/pass/passes.hpp"
+#include "src/Dialect/Krnl/KrnlOps.hpp"
+#include "src/Pass/Passes.hpp"
using namespace mlir;


@@ -1,6 +1,10 @@
-//====- LowerToLLVM.cpp - Lowering from KRNL+Affine+Std to LLVM -----------===//
+//===------ LowerToLLVM.cpp - Lowering from KRNL+Affine+Std to LLVM -------===//
+//
+// Copyright 2019-2020 The IBM Research Authors.
+//
+// =============================================================================
+//
//
-// Copyright 2019 The IBM Research Authors.
//
//===----------------------------------------------------------------------===//
@@ -16,8 +20,8 @@
#include "mlir/Transforms/DialectConversion.h"
#include "llvm/ADT/Sequence.h"
-#include "src/dialect/krnl/krnl_ops.hpp"
-#include "src/pass/passes.hpp"
+#include "src/Dialect/Krnl/KrnlOps.hpp"
+#include "src/Pass/Passes.hpp"
using namespace mlir;


@@ -15,8 +15,8 @@
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
-#include "src/interface/promotable_const_operands_op_interface.hpp"
-#include "src/pass/passes.hpp"
+#include "src/Interface/PromotableConstOperandsOpInterface.hpp"
+#include "src/Pass/Passes.hpp"
using namespace mlir;


@@ -16,10 +16,10 @@
#include "llvm/Support/Regex.h"
#include "llvm/Support/SourceMgr.h"
-#include "src/builder/frontend_dialect_transformer.hpp"
-#include "src/dialect/krnl/krnl_ops.hpp"
-#include "src/dialect/onnx/onnx_ops.hpp"
-#include "src/pass/passes.hpp"
+#include "src/Builder/FrontendDialectTransformer.hpp"
+#include "src/Dialect/Krnl/KrnlOps.hpp"
+#include "src/Dialect/ONNX/ONNXOps.hpp"
+#include "src/Pass/Passes.hpp"
#include "mlir/Conversion/LoopToStandard/ConvertLoopToStandard.h"
#include "mlir/ExecutionEngine/ExecutionEngine.h"


@@ -1 +0,0 @@
-add_subdirectory(onnx_mlir_opt)