add tensor from host to jit runner, and get output

colin.liang 2021-07-13 20:10:52 +08:00
parent 62e7b883c7
commit e62fa9311f
2 changed files with 94 additions and 34 deletions

View File

@@ -1,39 +1,60 @@
 func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface }
-func @main() {
-  %c0 = constant 0 : index
-  %c1 = constant 1 : index
-  // Initialize input.
-  %input = memref.alloc() : memref<2x3xf32>
-  %dim_x = memref.dim %input, %c0 : memref<2x3xf32>
-  %dim_y = memref.dim %input, %c1 : memref<2x3xf32>
-  scf.parallel (%i, %j) = (%c0, %c0) to (%dim_x, %dim_y) step (%c1, %c1) {
-    %i_i64 = index_cast %i : index to i64
-    %i_f32 = sitofp %i_i64 : i64 to f32
-    memref.store %i_f32, %input[%i, %j] : memref<2x3xf32>
-  }
-  %unranked_input = memref.cast %input : memref<2x3xf32> to memref<*xf32>
-  call @print_memref_f32(%unranked_input) : (memref<*xf32>) -> ()
-  // CHECK: rank = 2 offset = 0 sizes = [2, 3] strides = [3, 1]
-  // CHECK: [0, 0, 0]
-  // CHECK: [1, 1, 1]
-  %in = memref.tensor_load %input : memref<2x3xf32>
-  %add = "mhlo.add"(%in, %in) {name = "add.3"} : (tensor<2x3xf32>, tensor<2x3xf32>) -> tensor<2x3xf32>
-  %output = memref.buffer_cast %add : memref<2x3xf32>
-  %unranked_output = memref.cast %output : memref<2x3xf32> to memref<*xf32>
-  call @print_memref_f32(%unranked_output) : (memref<*xf32>) -> ()
-  // CHECK: rank = 2 offset = 0 sizes = [2, 3] strides = [3, 1]
-  // CHECK: [0, 0, 0]
-  // CHECK: [2, 2, 2]
-  return
-}
+// func @main() -> (i32) {
+//   %c0 = constant 0 : index
+//   %c1 = constant 1 : index
+//   // Initialize input.
+//   %input = memref.alloc() : memref<2x3xf32>
+//   %dim_x = memref.dim %input, %c0 : memref<2x3xf32>
+//   %dim_y = memref.dim %input, %c1 : memref<2x3xf32>
+//   scf.parallel (%i, %j) = (%c0, %c0) to (%dim_x, %dim_y) step (%c1, %c1) {
+//     %i_i64 = index_cast %i : index to i64
+//     %i_f32 = sitofp %i_i64 : i64 to f32
+//     memref.store %i_f32, %input[%i, %j] : memref<2x3xf32>
+//   }
+//   %unranked_input = memref.cast %input : memref<2x3xf32> to memref<*xf32>
+//   call @print_memref_f32(%unranked_input) : (memref<*xf32>) -> ()
+//   // CHECK: rank = 2 offset = 0 sizes = [2, 3] strides = [3, 1]
+//   // CHECK: [0, 0, 0]
+//   // CHECK: [1, 1, 1]
+//   %in = memref.tensor_load %input : memref<2x3xf32>
+//   %add = "mhlo.add"(%in, %in) {name = "add.3"} : (tensor<2x3xf32>, tensor<2x3xf32>) -> tensor<2x3xf32>
+//   %output = memref.buffer_cast %add : memref<2x3xf32>
+//   %unranked_output = memref.cast %output : memref<2x3xf32> to memref<*xf32>
+//   call @print_memref_f32(%unranked_output) : (memref<*xf32>) -> ()
+//   // CHECK: rank = 2 offset = 0 sizes = [2, 3] strides = [3, 1]
+//   // CHECK: [0, 0, 0]
+//   // CHECK: [2, 2, 2]
+//   %c42_i32 = constant 42 : i32
+//   return %c42_i32 : i32
+// }
 // ./mlir-hlo-opt -chlo-legalize-to-hlo -hlo-legalize-to-lhlo -buffer-hoisting -buffer-deallocation -canonicalize -cse -lhlo-legalize-to-linalg -convert-linalg-to-loops -lower-affine -convert-scf-to-std -convert-std-to-llvm ../../../tests/test.mlir > a.mlir
 // /root/mlir-hlo/llvm-build/bin/mlir-cpu-runner --entry-point-result=void -shared-libs=/root/mlir-hlo/llvm-build/lib/libmlir_runner_utils.so.13git a.mlir > b.mlir
 // /root/mlir-hlo/llvm-build/bin/FileCheck --input-file b.mlir ../../../tests/test.mlir
+func @main(%1 : memref<*xf32>, %2 : memref<*xf32>) -> memref<*xf32> {
+  %ranked1 = memref.cast %1 : memref<*xf32> to memref<6xf32>
+  %ranked2 = memref.cast %2 : memref<*xf32> to memref<6xf32>
+  %in1 = memref.tensor_load %ranked1 : memref<6xf32>
+  %in2 = memref.tensor_load %ranked2 : memref<6xf32>
+  %add = "mhlo.add"(%in1, %in2) {name = "add.3"} : (tensor<6xf32>, tensor<6xf32>) -> tensor<6xf32>
+  %output = memref.buffer_cast %add : memref<6xf32>
+  %unranked_output = memref.cast %output : memref<6xf32> to memref<*xf32>
+  call @print_memref_f32(%1) : (memref<*xf32>) -> ()
+  return %unranked_output : memref<*xf32>
+}
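
For context on the host side: under the default std-to-LLVM descriptor convention, each memref<*xf32> argument and the memref<*xf32> result of the new @main are lowered to a (rank, pointer-to-ranked-descriptor) pair, and the JIT's packed entry wrapper receives one void* per scalar, with the result storage appended last. A minimal C++ sketch of that layout follows; the struct and field names are illustrative only (not taken from any MLIR header), and the exact argument order is an assumption that matches the runner change below.

#include <cstdint>

// Illustrative mirror of the ranked memref<6xf32> descriptor behind %ranked1
// and %ranked2; the field names are not from an MLIR header.
template <typename T, int N>
struct RankedDescriptor {
  T *allocated;        // pointer returned by the allocator
  T *aligned;          // pointer actually indexed (same buffer here)
  int64_t offset;      // element offset applied to `aligned`
  int64_t sizes[N];    // {6}
  int64_t strides[N];  // {1}
};

// A memref<*xf32> is passed and returned as a (rank, descriptor pointer) pair,
// so the packed wrapper for @main sees one void* per scalar:
//   args[0] -> int64_t holding the rank of %1 (1 here)
//   args[1] -> a void* variable that holds the address of %1's RankedDescriptor
//   args[2] -> int64_t holding the rank of %2
//   args[3] -> a void* variable that holds the address of %2's RankedDescriptor
//   args[4] -> storage that receives the returned (rank, descriptor pointer) pair

This is the shape the hand-built argument struct in the runner change below flattens into.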

View File

@@ -78,6 +78,18 @@ limitations under the License.
 using namespace mlir;
 using namespace llvm;
+
+namespace utils {
+template <typename T, int N>
+struct MemRefDescriptor {
+  T *allocated;
+  T *aligned;
+  int64_t offset;
+  int64_t sizes[N];
+  int64_t strides[N];
+};
+}
+
 int main(int argc, char **argv) {
   llvm::InitLLVM y(argc, argv);
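
The hand-rolled utils::MemRefDescriptor has the same field layout as StridedMemRefType<T, N> from mlir/ExecutionEngine/CRunnerUtils.h (basePtr, data, offset, sizes, strides), so an alternative, assuming that header is reachable from this tool, would be to reuse it instead. A small sketch; the makeDescriptor helper name is hypothetical:

#include "mlir/ExecutionEngine/CRunnerUtils.h"  // StridedMemRefType<T, N>

// Hypothetical helper: wrap an existing host buffer of n contiguous floats
// in a 1-D descriptor without copying.
static StridedMemRefType<float, 1> makeDescriptor(float *buf, int64_t n) {
  StridedMemRefType<float, 1> d;
  d.basePtr = buf;   // the "allocated" pointer
  d.data = buf;      // the "aligned" pointer
  d.offset = 0;
  d.sizes[0] = n;
  d.strides[0] = 1;
  return d;
}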
@@ -265,18 +277,45 @@ int main(int argc, char **argv) {
   // if (options.dumpObjectFile)
   //   engine->dumpToObjectFile("a.o");
-  int res;
-  struct {
-    void *data;
-  } data;
-  data.data = &res;
+  float rawdata[6] = {0,1,2,3,4,5};
+  int64_t dims = 1;
+  utils::MemRefDescriptor<float,1> a{rawdata,rawdata,0,{6},{1}};
+  utils::MemRefDescriptor<float,1> b{rawdata,rawdata,0,{6},{1}};
+  utils::MemRefDescriptor<float,1> result_memref;
+  struct memref_type {
+    int64_t res_size = 6;
+    utils::MemRefDescriptor<float,1> *memref;
+  } result;
+  result.memref = &result_memref;
+  struct {
+    void *data1_size;
+    void *data1;
+    void *data2_size;
+    void *data2;
+    void *res;
+  } data;
+  data.data1_size = &dims;
+  void *a_ptr = &a;
+  data.data1 = &a_ptr;
+  data.data2_size = &dims;
+  void *b_ptr = &b;
+  data.data2 = &b_ptr;
+  void *result_ptr = &result;
+  data.res = &result;
   void (*fptr)(void **) = *expectedFPtr;
   (*fptr)((void **)&data);
-  std::cout<<"result.data "<<res<<std::endl;
+  std::cout<<"result: "<<result.memref->allocated[0]<<std::endl;
+  std::cout<<"result: "<<result.memref->allocated[1]<<std::endl;
+  std::cout<<"result: "<<result.memref->allocated[2]<<std::endl;
+  std::cout<<"result: "<<result.memref->allocated[3]<<std::endl;
+  std::cout<<"result: "<<result.memref->allocated[4]<<std::endl;
+  std::cout<<"result: "<<result.memref->allocated[5]<<std::endl;
   // Run all dynamic library destroy callbacks to prepare for the shutdown.
   llvm::for_each(destroyFns, [](MlirRunnerDestroyFn destroy) { destroy(); });
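
Since a and b both alias rawdata = {0, 1, 2, 3, 4, 5}, the six prints should show 0, 2, 4, 6, 8, 10. As a possible cleanup, the same call could also go through ExecutionEngine::invokePacked instead of grabbing the raw packed function pointer and hand-writing an anonymous argument struct. A hedged sketch, assuming the surrounding JitRunner code still has engine, dims, a, b, and result in scope as above:

// Sketch only: one void* per lowered scalar argument, inputs first,
// result storage last, then let the engine call the packed wrapper.
void *a_ptr = &a;
void *b_ptr = &b;
llvm::SmallVector<void *, 8> args = {&dims, &a_ptr, &dims, &b_ptr, &result};
if (llvm::Error err = engine->invokePacked("main", args))
  llvm::errs() << "JIT invocation failed: " << llvm::toString(std::move(err)) << "\n";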