// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s

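// Tests that translating an `omp.target` region to LLVM IR correctly remaps a
// block argument that is used more than once by the same operation, without
// invalidating the iteration over that value's uses.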
llvm.func @caller_() {
  %c1 = llvm.mlir.constant(1 : i64) : i64
  %x_host = llvm.alloca %c1 x f32 {bindc_name = "x"} : (i64) -> !llvm.ptr
  %i_host = llvm.alloca %c1 x i32 {bindc_name = "i"} : (i64) -> !llvm.ptr
  %x_map = omp.map.info var_ptr(%x_host : !llvm.ptr, f32) map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !llvm.ptr {name = "x"}
  %i_map = omp.map.info var_ptr(%i_host : !llvm.ptr, i32) map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !llvm.ptr {name = "i"}
  omp.target map_entries(%x_map -> %x_arg, %i_map -> %i_arg : !llvm.ptr, !llvm.ptr) {
    %1 = llvm.load %i_arg : !llvm.ptr -> i32
    %2 = llvm.sitofp %1 : i32 to f32
    llvm.store %2, %x_arg : f32, !llvm.ptr
    // The call instruction below uses %x_arg more than once, so rewriting its
    // uses while iterating over them would invalidate the iteration. This test
    // verifies that the translation handles that case (see the sketch after
    // this function).
    llvm.call @callee_(%x_arg, %x_arg) : (!llvm.ptr, !llvm.ptr) -> ()
    omp.terminator
  }
  llvm.return
}
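
// A minimal sketch, in LLVM C++ API terms, of the hazard described above. It
// is not the actual translation code, only an illustration of the unsafe and
// safe patterns for rewriting the uses of a value `V` with a new value `NewV`:
//
//   // Unsafe: `U.set(NewV)` moves the use onto NewV's use list while we are
//   // still walking V's use list, invalidating the iterator.
//   for (llvm::Use &U : V->uses())
//     U.set(NewV);
//
//   // Safe: snapshot the users first, then rewrite them. A user that uses V
//   // several times (like the call above) appears more than once in the
//   // snapshot; the repeated replaceUsesOfWith calls are harmless no-ops.
//   llvm::SmallVector<llvm::User *> Users(V->user_begin(), V->user_end());
//   for (llvm::User *Usr : Users)
//     Usr->replaceUsesOfWith(V, NewV);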

llvm.func @callee_(%arg0: !llvm.ptr, %arg1: !llvm.ptr) {
  llvm.return
}

// CHECK: define internal void @__omp_offloading_{{.*}}_caller__{{.*}}(ptr %[[X_PARAM:.*]], ptr %[[I_PARAM:.*]]) {

// CHECK: %[[I_VAL:.*]] = load i32, ptr %[[I_PARAM]], align 4
// CHECK: %[[I_VAL_FL:.*]] = sitofp i32 %[[I_VAL]] to float
// CHECK: store float %[[I_VAL_FL]], ptr %[[X_PARAM]], align 4
// CHECK: call void @callee_(ptr %[[X_PARAM]], ptr %[[X_PARAM]])
// CHECK: br label %[[REGION_CONT:.*]]

// CHECK: [[REGION_CONT]]:
// CHECK: ret void
// CHECK: }