[MLIR] Lower ONNX element-wise unary ops: Exp, Tanh, Sinh, Cosh, Sigmoid (#389)

* Lower ExpOp

* Lower ONNXTanhOp

* Lower Exp, Tanh, Sinh, and Cosh

* Lower ONNX Sigmoid op

* Merge

* Specialize template lowerScalarOp

* Unify ONNXEWUnaryOpLowering and ONNXEWBinaryOpLowering

* Support multiple types

* Reformat the code

* Add test cases

* Reformat the code

* Change names

* Apply clang-format

* Update variable names
TUNG LEDUC 2019-12-06 10:08:09 +09:00 committed by Tian Jin
parent 0048f2fd86
commit 1c3176bf9f
7 changed files with 609 additions and 39 deletions

View File

@@ -263,7 +263,9 @@ def collect_types(schema, input) :
     return allowedTypeStr

 def gen_schema(schema) :
-    ShapeInferenceList=['Add', 'Mul', 'Div', 'Sub', 'And', 'Or', 'Xor', 'MatMul', 'Gemm']
+    ShapeInferenceList=['Exp', 'Tanh', 'Sinh', 'Cosh', 'Sigmoid',
+                        'Add', 'Mul', 'Div', 'Sub', 'And', 'Or', 'Xor',
+                        'MatMul', 'Gemm']
     CanonicalList=['Add', 'Identity']
     line_indent = ' '

View File

@@ -38,6 +38,46 @@ ONNXOpsDialect::ONNXOpsDialect(mlir::MLIRContext* ctx)
 //===----------------------------------------------------------------------===//
 // ONNX Operations
 //===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Exp
+/// Infer the output shape of the ONNXExpOp. This method is required by the
+/// shape inference interface.
+void ONNXExpOp::inferShapes() {
+  getResult()->setType(getOperand()->getType());
+}
+
+//===----------------------------------------------------------------------===//
+// Tanh
+/// Infer the output shape of the ONNXTanhOp. This method is required by the
+/// shape inference interface.
+void ONNXTanhOp::inferShapes() {
+  getResult()->setType(getOperand()->getType());
+}
+
+//===----------------------------------------------------------------------===//
+// Sinh
+/// Infer the output shape of the ONNXSinhOp. This method is required by the
+/// shape inference interface.
+void ONNXSinhOp::inferShapes() {
+  getResult()->setType(getOperand()->getType());
+}
+
+//===----------------------------------------------------------------------===//
+// Cosh
+/// Infer the output shape of the ONNXCoshOp. This method is required by the
+/// shape inference interface.
+void ONNXCoshOp::inferShapes() {
+  getResult()->setType(getOperand()->getType());
+}
+
+//===----------------------------------------------------------------------===//
+// Sigmoid
+/// Infer the output shape of the ONNXSigmoidOp. This method is required by the
+/// shape inference interface.
+void ONNXSigmoidOp::inferShapes() {
+  getResult()->setType(getOperand()->getType());
+}
+
 //===----------------------------------------------------------------------===//
 // Add
 /// Infer the output shape of the ONNXAddOp. This method is required by the
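
All five inferShapes() bodies added above are the same identity propagation. A minimal sketch, assuming the same Value*-based op API, of a helper that could factor them out (hypothetical; not part of the commit):

template <typename ElementwiseUnaryOnnxOp>
static void inferIdentityShape(ElementwiseUnaryOnnxOp op) {
  // Element-wise unary ops produce a result with exactly the operand's type.
  op.getResult()->setType(op.getOperand()->getType());
}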

View File

@@ -371,7 +371,7 @@ def ONNXCosOp:ONNX_Op<"Cos",
 }
 def ONNXCoshOp:ONNX_Op<"Cosh",
-    [NoSideEffect]> {
+    [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
   let summary = "ONNX Cosh operation";
   let description = [{
     "Calculates the hyperbolic cosine of the given input tensor element-wise."

@@ -567,7 +567,7 @@ def ONNXErfOp:ONNX_Op<"Erf",
 }
 def ONNXExpOp:ONNX_Op<"Exp",
-    [NoSideEffect]> {
+    [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
   let summary = "ONNX Exp operation";
   let description = [{
     "Calculates the exponential of the given input tensor, element-wise."

@@ -2731,7 +2731,7 @@ def ONNXShrinkOp:ONNX_Op<"Shrink",
 }
 def ONNXSigmoidOp:ONNX_Op<"Sigmoid",
-    [NoSideEffect]> {
+    [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
   let summary = "ONNX Sigmoid operation";
   let description = [{
     "Sigmoid takes one input data (Tensor<T>) and produces one output data"

@@ -2764,7 +2764,7 @@ def ONNXSinOp:ONNX_Op<"Sin",
 }
 def ONNXSinhOp:ONNX_Op<"Sinh",
-    [NoSideEffect]> {
+    [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
   let summary = "ONNX Sinh operation";
   let description = [{
     "Calculates the hyperbolic sine of the given input tensor element-wise."

@@ -2994,7 +2994,7 @@ def ONNXTanOp:ONNX_Op<"Tan",
 }
 def ONNXTanhOp:ONNX_Op<"Tanh",
-    [NoSideEffect]> {
+    [NoSideEffect, DeclareOpInterfaceMethods<ShapeInferenceOpInterface>]> {
   let summary = "ONNX Tanh operation";
   let description = [{
     "Calculates the hyperbolic tangent of the given input tensor element-wise."

View File

@@ -44,9 +44,8 @@ static MemRefType convertTensorToMemRef(TensorType type) {
 }

 /// Insert an allocation and deallocation for the given MemRefType.
-static Value* insertAllocAndDealloc(
-    MemRefType type, Location loc, PatternRewriter& rewriter,
-    bool insertDealloc, Value *oldMemRef = nullptr) {
+static Value* insertAllocAndDealloc(MemRefType type, Location loc,
+    PatternRewriter& rewriter, bool insertDealloc, Value* oldMemRef = nullptr) {
   // Put together alloc operands for any dynamic dimensions of the memref.
   AllocOp alloc;
   if (oldMemRef) {
@@ -98,14 +97,166 @@ static bool checkInsertDealloc(Operation *currentOp) {
 namespace {
-//===----------------------------------------------------------------------===//
-// Element-wise binary ops lowering to Krnl dialect.
-//===----------------------------------------------------------------------===//
-template <typename BinaryOp, typename LoweredBinaryOp>
-struct ONNXEWBinaryOpLowering : public ConversionPattern {
-  ONNXEWBinaryOpLowering(MLIRContext* ctx)
-      : ConversionPattern(BinaryOp::getOperationName(), 1, ctx) {}
+template <typename ElementwiseNaryOp>
+struct ScalarOp;
template <>
struct ScalarOp<ONNXAddOp> {
using FOp = AddFOp;
using IOp = AddIOp;
};
template <>
struct ScalarOp<ONNXMulOp> {
using FOp = MulFOp;
using IOp = MulIOp;
};
template <>
struct ScalarOp<ONNXDivOp> {
using FOp = DivFOp;
using IOp = DivISOp;
};
template <>
struct ScalarOp<ONNXSubOp> {
using FOp = SubFOp;
using IOp = SubIOp;
};
template <>
struct ScalarOp<ONNXAndOp> {
using FOp = AndOp; // not used: And has no float variant
using IOp = AndOp;
};
template <>
struct ScalarOp<ONNXOrOp> {
using FOp = OrOp; // not used: Or has no float variant
using IOp = OrOp;
};
template <>
struct ScalarOp<ONNXXorOp> {
using FOp = XOrOp; // not used: Xor has no float variant
using IOp = XOrOp;
};
template <>
struct ScalarOp<ONNXExpOp> {
using FOp = ExpOp;
using IOp = ExpOp; // not used: Exp has no integer variant
};
template <typename ElementwiseNaryOp>
using ScalarFOp = typename ScalarOp<ElementwiseNaryOp>::FOp;
template <typename ElementwiseNaryOp>
using ScalarIOp = typename ScalarOp<ElementwiseNaryOp>::IOp;
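// For example, ScalarFOp<ONNXAddOp> names AddFOp and ScalarIOp<ONNXAddOp>
// names AddIOp, so the generic lowering below can pick the correct
// standard-dialect op from the element type alone.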
//===----------------------------------------------------------------------===//
// Scalar unary ops for lowering to Krnl dialect.
//===----------------------------------------------------------------------===//
template <typename UnaryOp>
Value* mapToLowerScalarOp(Location loc, ArrayRef<Type> result_types,
ArrayRef<Value*> operands, ConversionPatternRewriter& rewriter) {
// Lower UnaryOp to Ops in the Standard dialect.
Type element_type = operands.front()->getType();
if (element_type.isa<IntegerType>()) {
return rewriter.create<ScalarIOp<UnaryOp>>(
loc, result_types, operands, mlir::None);
} else if (element_type.isa<FloatType>()) {
return rewriter.create<ScalarFOp<UnaryOp>>(
loc, result_types, operands, mlir::None);
} else {
return nullptr;
}
}
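// Usage sketch (lhs/rhs are hypothetical SSA values): lowering onnx.Add over
// f32 reaches this generic template as
//   mapToLowerScalarOp<ONNXAddOp>(loc, rewriter.getF32Type(), {lhs, rhs},
//                                 rewriter);
// which takes the FloatType branch and emits a single addf.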
//===----------------------------------------------------------------------===//
// Scalar unary ops for lowering ONNXTanhOp
//===----------------------------------------------------------------------===//
template <>
Value* mapToLowerScalarOp<ONNXTanhOp>(Location loc, ArrayRef<Type> result_types,
ArrayRef<Value*> operands, ConversionPatternRewriter& rewriter) {
// ONNXTanhOp(%X) = DivFOp(SubFOp(ExpOp(%X), ExpOp(NegFOp(%X))),
// AddFOp(ExpOp(%X), ExpOp(NegFOp(%X))))
Value* operand = operands[0];
auto zero = rewriter.create<ConstantOp>(loc, rewriter.getF32FloatAttr(0.0f));
auto neg = rewriter.create<SubFOp>(loc, zero, operand);
auto exp = rewriter.create<ExpOp>(loc, operand);
auto negExp = rewriter.create<ExpOp>(loc, neg);
auto result =
rewriter.create<DivFOp>(loc, rewriter.create<SubFOp>(loc, exp, negExp),
rewriter.create<AddFOp>(loc, exp, negExp));
return result;
}
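// In conventional notation, the block above computes
//   tanh(x) = (e^x - e^(-x)) / (e^x + e^(-x)).
// Note that the constants are built with getF32FloatAttr, so these scalar
// rewrites assume f32 elements for now.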
//===----------------------------------------------------------------------===//
// Scalar unary ops for lowering ONNXSinhOp
//===----------------------------------------------------------------------===//
template <>
Value* mapToLowerScalarOp<ONNXSinhOp>(Location loc, ArrayRef<Type> result_types,
ArrayRef<Value*> operands, ConversionPatternRewriter& rewriter) {
// ONNXSinhOp(%X) = DivFOp(SubFOp(ExpOp(%X), ExpOp(NegFOp(%X))),
// ConstantOp 2)
Value* operand = operands[0];
auto zero = rewriter.create<ConstantOp>(loc, rewriter.getF32FloatAttr(0.0f));
auto two = rewriter.create<ConstantOp>(loc, rewriter.getF32FloatAttr(2.0f));
auto neg = rewriter.create<SubFOp>(loc, zero, operand);
auto exp = rewriter.create<ExpOp>(loc, operand);
auto negExp = rewriter.create<ExpOp>(loc, neg);
auto result = rewriter.create<DivFOp>(
loc, rewriter.create<SubFOp>(loc, exp, negExp), two);
return result;
}
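// In conventional notation: sinh(x) = (e^x - e^(-x)) / 2.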
//===----------------------------------------------------------------------===//
// Scalar unary ops for lowering ONNXCoshOp
//===----------------------------------------------------------------------===//
template <>
Value* mapToLowerScalarOp<ONNXCoshOp>(Location loc, ArrayRef<Type> result_types,
ArrayRef<Value*> operands, ConversionPatternRewriter& rewriter) {
// ONNXCoshOp(%X) = DivFOp(AddFOp(ExpOp(%X), ExpOp(NegFOp(%X))),
// ConstantOp 2)
Value* operand = operands[0];
auto zero = rewriter.create<ConstantOp>(loc, rewriter.getF32FloatAttr(0.0f));
auto two = rewriter.create<ConstantOp>(loc, rewriter.getF32FloatAttr(2.0f));
auto neg = rewriter.create<SubFOp>(loc, zero, operand);
auto exp = rewriter.create<ExpOp>(loc, operand);
auto negExp = rewriter.create<ExpOp>(loc, neg);
auto result = rewriter.create<DivFOp>(
loc, rewriter.create<AddFOp>(loc, exp, negExp), two);
return result;
}
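// In conventional notation: cosh(x) = (e^x + e^(-x)) / 2.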
//===----------------------------------------------------------------------===//
// Scalar unary ops for lowering ONNXSigmoidOp
//===----------------------------------------------------------------------===//
template <>
Value* mapToLowerScalarOp<ONNXSigmoidOp>(Location loc,
ArrayRef<Type> result_types, ArrayRef<Value*> operands,
ConversionPatternRewriter& rewriter) {
// ONNXSigmoidOp(%X) = DivFOp(ConstantOp 1,
// AddFOp(ConstantOp 1, ExpOp(NegFOp(%X))))
Value* operand = operands[0];
auto zero = rewriter.create<ConstantOp>(loc, rewriter.getF32FloatAttr(0.0f));
auto one = rewriter.create<ConstantOp>(loc, rewriter.getF32FloatAttr(1.0f));
auto neg = rewriter.create<SubFOp>(loc, zero, operand);
auto negExp = rewriter.create<ExpOp>(loc, neg);
auto result = rewriter.create<DivFOp>(
loc, one, rewriter.create<AddFOp>(loc, one, negExp));
return result;
}
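// In conventional notation: sigmoid(x) = 1 / (1 + e^(-x)); the ZERO constant
// exists only to express negation as subf(0, x).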
//===----------------------------------------------------------------------===//
// Element-wise n-ary ops lowering to Krnl dialect.
//===----------------------------------------------------------------------===//
template <typename ElementwiseNaryOp, unsigned numArgs>
struct ONNXElementwiseNaryOpLowering : public ConversionPattern {
ONNXElementwiseNaryOpLowering(MLIRContext* ctx)
: ConversionPattern(ElementwiseNaryOp::getOperationName(), 1, ctx) {}
  PatternMatchResult matchAndRewrite(Operation* op, ArrayRef<Value*> operands,
      ConversionPatternRewriter& rewriter) const final {
    // TODO: Check that the types are valid.
@@ -127,8 +278,7 @@ struct ONNXEWBinaryOpLowering : public ConversionPattern {
     bool insertDealloc = checkInsertDealloc(op);

     if (hasAllConstantDimensions(memRefType))
-      alloc = insertAllocAndDealloc(
-          memRefType, loc, rewriter, insertDealloc);
+      alloc = insertAllocAndDealloc(memRefType, loc, rewriter, insertDealloc);
     else
       alloc = insertAllocAndDealloc(
           memRefType, loc, rewriter, insertDealloc, operands[0]);
@@ -190,11 +340,15 @@ struct ONNXEWBinaryOpLowering : public ConversionPattern {
     SmallVector<Value*, 4> loopIVs;
     for (auto arg : iterationBlock.getArguments())
       loopIVs.push_back(arg);

-    auto loadedFirstVal = rewriter.create<LoadOp>(loc, operands[0], loopIVs);
-    auto loadedSecondVal = rewriter.create<LoadOp>(loc, operands[1], loopIVs);
-    auto loweredOpResult =
-        rewriter.create<LoweredBinaryOp>(loc, loadedFirstVal, loadedSecondVal);
+    SmallVector<Value*, numArgs> loadedVals;
+    for (unsigned i = 0; i < numArgs; i++) {
+      auto loadedVal = rewriter.create<LoadOp>(loc, operands[i], loopIVs);
+      loadedVals.push_back(loadedVal);
+    }
+    auto loweredOpResult = mapToLowerScalarOp<ElementwiseNaryOp>(
+        loc, memRefType.getElementType(), loadedVals, rewriter);

     // Store result in the resulting array.
     rewriter.create<StoreOp>(loc, loweredOpResult, alloc, loopIVs);
@@ -205,6 +359,13 @@ struct ONNXEWBinaryOpLowering : public ConversionPattern {
   }
 };

+template <typename ElementwiseNaryOp>
+using ONNXElementwiseUnaryOpLowering =
+    ONNXElementwiseNaryOpLowering<ElementwiseNaryOp, 1>;
+
+template <typename ElementwiseNaryOp>
+using ONNXElementwiseBinaryOpLowering =
+    ONNXElementwiseNaryOpLowering<ElementwiseNaryOp, 2>;

 //===----------------------------------------------------------------------===//
 // Conversion from Tensor type to the Standard dialect MemRef type.
 //===----------------------------------------------------------------------===//
@@ -285,15 +446,18 @@ void FrontendToKrnlLoweringPass::runOnModule() {
       patterns, &getContext(), tensor_to_memref_converter);

   // Frontend operation lowering.
-  // TODO: Support 1-N mapping (e.g. different types of the lowered op)
-  patterns.insert<ONNXEWBinaryOpLowering<mlir::ONNXAddOp, AddFOp>,
-      ONNXEWBinaryOpLowering<mlir::ONNXMulOp, MulFOp>,
-      ONNXEWBinaryOpLowering<mlir::ONNXDivOp, DivFOp>,
-      ONNXEWBinaryOpLowering<mlir::ONNXSubOp, SubFOp>,
-      ONNXEWBinaryOpLowering<mlir::ONNXAndOp, AndOp>,
-      ONNXEWBinaryOpLowering<mlir::ONNXOrOp, OrOp>,
-      ONNXEWBinaryOpLowering<mlir::ONNXXorOp, XOrOp>>
-      (&getContext());
+  patterns.insert<ONNXElementwiseUnaryOpLowering<mlir::ONNXExpOp>,
+      ONNXElementwiseUnaryOpLowering<mlir::ONNXTanhOp>,
+      ONNXElementwiseUnaryOpLowering<mlir::ONNXSinhOp>,
+      ONNXElementwiseUnaryOpLowering<mlir::ONNXCoshOp>,
+      ONNXElementwiseUnaryOpLowering<mlir::ONNXSigmoidOp>,
+      ONNXElementwiseBinaryOpLowering<mlir::ONNXAddOp>,
+      ONNXElementwiseBinaryOpLowering<mlir::ONNXMulOp>,
+      ONNXElementwiseBinaryOpLowering<mlir::ONNXDivOp>,
+      ONNXElementwiseBinaryOpLowering<mlir::ONNXSubOp>,
+      ONNXElementwiseBinaryOpLowering<mlir::ONNXAndOp>,
+      ONNXElementwiseBinaryOpLowering<mlir::ONNXOrOp>,
+      ONNXElementwiseBinaryOpLowering<mlir::ONNXXorOp>>(&getContext());

   // With the target and rewrite patterns defined, we can now attempt the
   // conversion. The conversion will signal failure if any of our `illegal`
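
To see how the unified scheme extends, here is a hypothetical sketch (not part of this commit) of wiring up one more unary op, onnx.Cos; CosOp stands in for a standard-dialect cosine op and is assumed purely for illustration:

template <>
struct ScalarOp<ONNXCosOp> {
  using FOp = CosOp; // assumed float cosine op
  using IOp = CosOp; // not used: Cos has no integer variant
};

// ...plus one more entry in the pattern list:
patterns.insert<ONNXElementwiseUnaryOpLowering<mlir::ONNXCosOp>>(&getContext());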

View File

@@ -88,8 +88,13 @@ class ShapeInferencePass : public mlir::FunctionPass<ShapeInferencePass> {
       // All operations which do not return a ranked tensor type have dynamic
       // shaped outputs. All those operations need to implement the
       // inferShapes() method.
-      if (op->getName().getStringRef() != "onnx.Add" &&
+      if (op->getName().getStringRef() != "onnx.Exp" &&
+          op->getName().getStringRef() != "onnx.Tanh" &&
+          op->getName().getStringRef() != "onnx.Sinh" &&
+          op->getName().getStringRef() != "onnx.Cosh" &&
+          op->getName().getStringRef() != "onnx.Sigmoid" &&
           op->getName().getStringRef() != "onnx.Mul" &&
+          op->getName().getStringRef() != "onnx.Add" &&
           op->getName().getStringRef() != "onnx.Div" &&
           op->getName().getStringRef() != "onnx.Sub" &&
           op->getName().getStringRef() != "onnx.And" &&
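
The comparison chain above grows with every newly lowered op; a possible cleanup, sketched under the assumption that LLVM's ADT helpers are available (not part of this commit):

// Ops whose results stay dynamically shaped until inferShapes() runs.
static const llvm::StringRef kShapeInferableOps[] = {"onnx.Exp", "onnx.Tanh",
    "onnx.Sinh", "onnx.Cosh", "onnx.Sigmoid", "onnx.Mul", "onnx.Add",
    "onnx.Div", "onnx.Sub", "onnx.And", /* ...remaining gated ops... */};
bool canInfer =
    llvm::is_contained(kShapeInferableOps, op->getName().getStringRef());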

View File

@@ -139,3 +139,121 @@ func @test_xor(%arg0 : tensor<?x10xi32>, %arg1 : tensor<?x10xi32>) -> tensor<*xi32> {
   // CHECK: store [[XOR]], [[RES]][%arg2, %arg3] : memref<?x10xi32>
   // CHECK: return [[RES]] : memref<?x10xi32>
 }
func @test_exp(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
%0 = "onnx.Exp"(%arg0) : (tensor<?x10xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
// CHECK-LABEL: test_exp
// CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
// CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
// CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
// CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
// CHECK: } : () -> (!krnl.loop, !krnl.loop)
// CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
// CHECK: [[LOAD:%.+]] = load %arg0[%arg1, %arg2] : memref<?x10xf32>
// CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32
// CHECK: store [[EXP]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
// CHECK: return [[RES]] : memref<?x10xf32>
}
func @test_tanh(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
%0 = "onnx.Tanh"(%arg0) : (tensor<?x10xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
// CHECK-LABEL: test_tanh
// CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
// CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
// CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
// CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
// CHECK: } : () -> (!krnl.loop, !krnl.loop)
// CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
// CHECK: [[LOAD:%.+]] = load %arg0[%arg1, %arg2] : memref<?x10xf32>
// CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
// CHECK: [[NLOAD:%.+]] = subf [[ZERO]], [[LOAD]] : f32
// CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32
// CHECK: [[NEXP:%.+]] = exp [[NLOAD]] : f32
// CHECK: [[DIVIDEND:%.+]] = subf [[EXP]], [[NEXP]] : f32
// CHECK: [[DIVISOR:%.+]] = addf [[EXP]], [[NEXP]] : f32
// CHECK: [[TANH_RES:%.+]] = divf [[DIVIDEND]], [[DIVISOR]] : f32
// CHECK: store [[TANH_RES]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
// CHECK: return [[RES]] : memref<?x10xf32>
}
func @test_sinh(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
%0 = "onnx.Sinh"(%arg0) : (tensor<?x10xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
// CHECK-LABEL: test_sinh
// CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
// CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
// CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
// CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
// CHECK: } : () -> (!krnl.loop, !krnl.loop)
// CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
// CHECK: [[LOAD:%.+]] = load %arg0[%arg1, %arg2] : memref<?x10xf32>
// CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
// CHECK: [[TWO:%.+]] = constant {{2.+}} : f32
// CHECK: [[NLOAD:%.+]] = subf [[ZERO]], [[LOAD]] : f32
// CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32
// CHECK: [[NEXP:%.+]] = exp [[NLOAD]] : f32
// CHECK: [[DIVIDEND:%.+]] = subf [[EXP]], [[NEXP]] : f32
// CHECK: [[SINH_RES:%.+]] = divf [[DIVIDEND]], [[TWO]] : f32
// CHECK: store [[SINH_RES]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
// CHECK: return [[RES]] : memref<?x10xf32>
}
func @test_cosh(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
%0 = "onnx.Cosh"(%arg0) : (tensor<?x10xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
// CHECK-LABEL: test_cosh
// CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
// CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
// CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
// CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
// CHECK: } : () -> (!krnl.loop, !krnl.loop)
// CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
// CHECK: [[LOAD:%.+]] = load %arg0[%arg1, %arg2] : memref<?x10xf32>
// CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
// CHECK: [[TWO:%.+]] = constant {{2.+}} : f32
// CHECK: [[NLOAD:%.+]] = subf [[ZERO]], [[LOAD]] : f32
// CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32
// CHECK: [[NEXP:%.+]] = exp [[NLOAD]] : f32
// CHECK: [[DIVIDEND:%.+]] = addf [[EXP]], [[NEXP]] : f32
// CHECK: [[COSH_RES:%.+]] = divf [[DIVIDEND]], [[TWO]] : f32
// CHECK: store [[COSH_RES]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
// CHECK: return [[RES]] : memref<?x10xf32>
}
func @test_sigmoid(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
%0 = "onnx.Sigmoid"(%arg0) : (tensor<?x10xf32>) -> tensor<*xf32>
"std.return"(%0) : (tensor<*xf32>) -> ()
// CHECK-LABEL: test_sigmoid
// CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
// CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
// CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
// CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
// CHECK: } : () -> (!krnl.loop, !krnl.loop)
// CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
// CHECK: [[LOAD:%.+]] = load %arg0[%arg1, %arg2] : memref<?x10xf32>
// CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
// CHECK: [[ONE:%.+]] = constant {{1.+}} : f32
// CHECK: [[NLOAD:%.+]] = subf [[ZERO]], [[LOAD]] : f32
// CHECK: [[NEXP:%.+]] = exp [[NLOAD]] : f32
// CHECK: [[DIVISOR:%.+]] = addf [[ONE]], [[NEXP]] : f32
// CHECK: [[SIGMOID_RES:%.+]] = divf [[ONE]], [[DIVISOR]] : f32
// CHECK: store [[SIGMOID_RES]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
// CHECK: return [[RES]] : memref<?x10xf32>
}

View File

@@ -287,3 +287,244 @@ func @test_xor_xor(%arg0 : tensor<?x10xi32>, %arg1 : tensor<?x10xi32>) -> tensor<*xi32> {
   // CHECK: return [[RET_RES]] : memref<?x10xi32>
 }
func @test_exp_exp(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
%0 = "onnx.Exp"(%arg0) : (tensor<?x10xf32>) -> tensor<*xf32>
%1 = "onnx.Exp"(%0) : (tensor<*xf32>) -> tensor<*xf32>
"std.return"(%1) : (tensor<*xf32>) -> ()
// CHECK-LABEL: test_exp_exp
/// First Exp
// CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
// CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
// CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
// CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
// CHECK: } : () -> (!krnl.loop, !krnl.loop)
// CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
// CHECK: [[LOAD:%.+]] = load %arg0[%arg1, %arg2] : memref<?x10xf32>
// CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32
// CHECK: store [[EXP]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
/// Second Exp
// CHECK: [[DIM_0:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
// CHECK: [[RET_RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
// CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
// CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
// CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
// CHECK: } : () -> (!krnl.loop, !krnl.loop)
// CHECK: [[DIM_2:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
// CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
// CHECK: [[LOAD:%.+]] = load [[RES]][%arg1, %arg2] : memref<?x10xf32>
// CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32
// CHECK: store [[EXP]], [[RET_RES]][%arg1, %arg2] : memref<?x10xf32>
/// Dealloc of first result.
// CHECK: dealloc [[RES]] : memref<?x10xf32>
// CHECK-NOT: dealloc [[RET_RES]] : memref<?x10xf32>
// CHECK: return [[RET_RES]] : memref<?x10xf32>
}
func @test_tanh_tanh(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
%0 = "onnx.Tanh"(%arg0) : (tensor<?x10xf32>) -> tensor<*xf32>
%1 = "onnx.Tanh"(%0) : (tensor<*xf32>) -> tensor<*xf32>
"std.return"(%1) : (tensor<*xf32>) -> ()
// CHECK-LABEL: test_tanh_tanh
/// First Tanh
// CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
// CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
// CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
// CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
// CHECK: } : () -> (!krnl.loop, !krnl.loop)
// CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
// CHECK: [[LOAD:%.+]] = load %arg0[%arg1, %arg2] : memref<?x10xf32>
// CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
// CHECK: [[NLOAD:%.+]] = subf [[ZERO]], [[LOAD]] : f32
// CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32
// CHECK: [[NEXP:%.+]] = exp [[NLOAD]] : f32
// CHECK: [[DIVIDEND:%.+]] = subf [[EXP]], [[NEXP]] : f32
// CHECK: [[DIVISOR:%.+]] = addf [[EXP]], [[NEXP]] : f32
// CHECK: [[TANH_RES:%.+]] = divf [[DIVIDEND]], [[DIVISOR]] : f32
// CHECK: store [[TANH_RES]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
/// Second Tanh
// CHECK: [[DIM_0:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
// CHECK: [[RET_RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
// CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
// CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
// CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
// CHECK: } : () -> (!krnl.loop, !krnl.loop)
// CHECK: [[DIM_2:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
// CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
// CHECK: [[LOAD:%.+]] = load [[RES]][%arg1, %arg2] : memref<?x10xf32>
// CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
// CHECK: [[NLOAD:%.+]] = subf [[ZERO]], [[LOAD]] : f32
// CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32
// CHECK: [[NEXP:%.+]] = exp [[NLOAD]] : f32
// CHECK: [[DIVIDEND:%.+]] = subf [[EXP]], [[NEXP]] : f32
// CHECK: [[DIVISOR:%.+]] = addf [[EXP]], [[NEXP]] : f32
// CHECK: [[TANH_RES:%.+]] = divf [[DIVIDEND]], [[DIVISOR]] : f32
// CHECK: store [[TANH_RES]], [[RET_RES]][%arg1, %arg2] : memref<?x10xf32>
/// Dealloc of first result.
// CHECK: dealloc [[RES]] : memref<?x10xf32>
// CHECK-NOT: dealloc [[RET_RES]] : memref<?x10xf32>
// CHECK: return [[RET_RES]] : memref<?x10xf32>
}
func @test_sinh_sinh(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
%0 = "onnx.Sinh"(%arg0) : (tensor<?x10xf32>) -> tensor<*xf32>
%1 = "onnx.Sinh"(%0) : (tensor<*xf32>) -> tensor<*xf32>
"std.return"(%1) : (tensor<*xf32>) -> ()
// CHECK-LABEL: test_sinh_sinh
/// First Sinh
// CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
// CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
// CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
// CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
// CHECK: } : () -> (!krnl.loop, !krnl.loop)
// CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
// CHECK: [[LOAD:%.+]] = load %arg0[%arg1, %arg2] : memref<?x10xf32>
// CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
// CHECK: [[TWO:%.+]] = constant {{2.+}} : f32
// CHECK: [[NLOAD:%.+]] = subf [[ZERO]], [[LOAD]] : f32
// CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32
// CHECK: [[NEXP:%.+]] = exp [[NLOAD]] : f32
// CHECK: [[DIVIDEND:%.+]] = subf [[EXP]], [[NEXP]] : f32
// CHECK: [[SINH_RES:%.+]] = divf [[DIVIDEND]], [[TWO]] : f32
// CHECK: store [[SINH_RES]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
/// Second Sinh
// CHECK: [[DIM_0:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
// CHECK: [[RET_RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
// CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
// CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
// CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
// CHECK: } : () -> (!krnl.loop, !krnl.loop)
// CHECK: [[DIM_2:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
// CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
// CHECK: [[LOAD:%.+]] = load [[RES]][%arg1, %arg2] : memref<?x10xf32>
// CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
// CHECK: [[TWO:%.+]] = constant {{2.+}} : f32
// CHECK: [[NLOAD:%.+]] = subf [[ZERO]], [[LOAD]] : f32
// CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32
// CHECK: [[NEXP:%.+]] = exp [[NLOAD]] : f32
// CHECK: [[DIVIDEND:%.+]] = subf [[EXP]], [[NEXP]] : f32
// CHECK: [[SINH_RES:%.+]] = divf [[DIVIDEND]], [[TWO]] : f32
// CHECK: store [[SINH_RES]], [[RET_RES]][%arg1, %arg2] : memref<?x10xf32>
/// Dealloc of first result.
// CHECK: dealloc [[RES]] : memref<?x10xf32>
// CHECK-NOT: dealloc [[RET_RES]] : memref<?x10xf32>
// CHECK: return [[RET_RES]] : memref<?x10xf32>
}
func @test_cosh_cosh(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
%0 = "onnx.Cosh"(%arg0) : (tensor<?x10xf32>) -> tensor<*xf32>
%1 = "onnx.Cosh"(%0) : (tensor<*xf32>) -> tensor<*xf32>
"std.return"(%1) : (tensor<*xf32>) -> ()
// CHECK-LABEL: test_cosh_cosh
/// First Cosh
// CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
// CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
// CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
// CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
// CHECK: } : () -> (!krnl.loop, !krnl.loop)
// CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
// CHECK: [[LOAD:%.+]] = load %arg0[%arg1, %arg2] : memref<?x10xf32>
// CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
// CHECK: [[TWO:%.+]] = constant {{2.+}} : f32
// CHECK: [[NLOAD:%.+]] = subf [[ZERO]], [[LOAD]] : f32
// CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32
// CHECK: [[NEXP:%.+]] = exp [[NLOAD]] : f32
// CHECK: [[DIVIDEND:%.+]] = addf [[EXP]], [[NEXP]] : f32
// CHECK: [[COSH_RES:%.+]] = divf [[DIVIDEND]], [[TWO]] : f32
// CHECK: store [[COSH_RES]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
/// Second Cosh
// CHECK: [[DIM_0:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
// CHECK: [[RET_RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
// CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
// CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
// CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
// CHECK: } : () -> (!krnl.loop, !krnl.loop)
// CHECK: [[DIM_2:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
// CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
// CHECK: [[LOAD:%.+]] = load [[RES]][%arg1, %arg2] : memref<?x10xf32>
// CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
// CHECK: [[TWO:%.+]] = constant {{2.+}} : f32
// CHECK: [[NLOAD:%.+]] = subf [[ZERO]], [[LOAD]] : f32
// CHECK: [[EXP:%.+]] = exp [[LOAD]] : f32
// CHECK: [[NEXP:%.+]] = exp [[NLOAD]] : f32
// CHECK: [[DIVIDEND:%.+]] = addf [[EXP]], [[NEXP]] : f32
// CHECK: [[COSH_RES:%.+]] = divf [[DIVIDEND]], [[TWO]] : f32
// CHECK: store [[COSH_RES]], [[RET_RES]][%arg1, %arg2] : memref<?x10xf32>
/// Dealloc of first result.
// CHECK: dealloc [[RES]] : memref<?x10xf32>
// CHECK-NOT: dealloc [[RET_RES]] : memref<?x10xf32>
// CHECK: return [[RET_RES]] : memref<?x10xf32>
}
func @test_sigmoid_sigmoid(%arg0 : tensor<?x10xf32>) -> tensor<*xf32> {
%0 = "onnx.Sigmoid"(%arg0) : (tensor<?x10xf32>) -> tensor<*xf32>
%1 = "onnx.Sigmoid"(%0) : (tensor<*xf32>) -> tensor<*xf32>
"std.return"(%1) : (tensor<*xf32>) -> ()
// CHECK-LABEL: test_sigmoid_sigmoid
/// First Sigmoid
// CHECK: [[DIM_0:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: [[RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
// CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
// CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
// CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
// CHECK: } : () -> (!krnl.loop, !krnl.loop)
// CHECK: [[DIM_2:%.+]] = dim %arg0, 0 : memref<?x10xf32>
// CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
// CHECK: [[LOAD:%.+]] = load %arg0[%arg1, %arg2] : memref<?x10xf32>
// CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
// CHECK: [[ONE:%.+]] = constant {{1.+}} : f32
// CHECK: [[NLOAD:%.+]] = subf [[ZERO]], [[LOAD]] : f32
// CHECK: [[NEXP:%.+]] = exp [[NLOAD]] : f32
// CHECK: [[DIVISOR:%.+]] = addf [[ONE]], [[NEXP]] : f32
// CHECK: [[SIGMOID_RES:%.+]] = divf [[ONE]], [[DIVISOR]] : f32
// CHECK: store [[SIGMOID_RES]], [[RES]][%arg1, %arg2] : memref<?x10xf32>
/// Second Sigmoid
// CHECK: [[DIM_0:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
// CHECK: [[RET_RES:%.+]] = alloc([[DIM_0]]) : memref<?x10xf32>
// CHECK: [[DEF_LOOPS:%.+]]:2 = krnl.define_loops 2
// CHECK: [[OPT_LOOPS:%.+]]:2 = krnl.optimize_loops {
// CHECK: krnl.return_loops [[DEF_LOOPS]]#0, [[DEF_LOOPS]]#1
// CHECK: } : () -> (!krnl.loop, !krnl.loop)
// CHECK: [[DIM_2:%.+]] = dim [[RES]], 0 : memref<?x10xf32>
// CHECK: krnl.iterate([[OPT_LOOPS]]#0, [[OPT_LOOPS]]#1) with ([[DEF_LOOPS]]#0 -> %arg1 = 0 to [[DIM_2]], [[DEF_LOOPS]]#1 -> %arg2 = 0 to 10) {
// CHECK: [[LOAD:%.+]] = load [[RES]][%arg1, %arg2] : memref<?x10xf32>
// CHECK: [[ZERO:%.+]] = constant {{0.+}} : f32
// CHECK: [[ONE:%.+]] = constant {{1.+}} : f32
// CHECK: [[NLOAD:%.+]] = subf [[ZERO]], [[LOAD]] : f32
// CHECK: [[NEXP:%.+]] = exp [[NLOAD]] : f32
// CHECK: [[DIVISOR:%.+]] = addf [[ONE]], [[NEXP]] : f32
// CHECK: [[SIGMOID_RES:%.+]] = divf [[ONE]], [[DIVISOR]] : f32
// CHECK: store [[SIGMOID_RES]], [[RET_RES]][%arg1, %arg2] : memref<?x10xf32>
/// Dealloc of first result.
// CHECK: dealloc [[RES]] : memref<?x10xf32>
// CHECK-NOT: dealloc [[RET_RES]] : memref<?x10xf32>
// CHECK: return [[RET_RES]] : memref<?x10xf32>
}
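
// Note: in every chained test above, only the intermediate buffer [[RES]] is
// deallocated; checkInsertDealloc suppresses the dealloc for the returned
// buffer [[RET_RES]], which is exactly what the CHECK-NOT lines verify.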