Skip to content

Commit e6dda9c

Browse files
authored
[flang][cuda] Only create shared memory global when needed (#132999)
1 parent 529c5b7 commit e6dda9c

File tree

3 files changed

+20
-1
lines changed

3 files changed

+20
-1
lines changed

flang/include/flang/Optimizer/Transforms/Passes.td

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -463,7 +463,7 @@ def CUFComputeSharedMemoryOffsetsAndSize
     the global and set it.
   }];

-  let dependentDialects = ["fir::FIROpsDialect"];
+  let dependentDialects = ["cuf::CUFDialect", "fir::FIROpsDialect"];
 }

 def SetRuntimeCallAttributes

flang/lib/Optimizer/Transforms/CUFComputeSharedMemoryOffsetsAndSize.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -111,6 +111,10 @@ struct CUFComputeSharedMemoryOffsetsAndSize
           llvm::alignTo(sharedMemSize, align) + llvm::alignTo(size, align);
       alignment = std::max(alignment, align);
     }
+
+    if (nbDynamicSharedVariables == 0 && nbStaticSharedVariables == 0)
+      continue;
+
     if (nbDynamicSharedVariables > 0 && nbStaticSharedVariables > 0)
       mlir::emitError(
           funcOp.getLoc(),

flang/test/Fir/CUDA/cuda-shared-offset.mlir

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -107,3 +107,18 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<!llvm.ptr, dense<
 // CHECK: cuf.shared_memory[%c0{{.*}} : i32] !fir.array<?x?xi32>, %9, %15 : index, index {bindc_name = "s1", uniq_name = "_QMmFss1Es1"} -> !fir.ref<!fir.array<?x?xi32>>
 // CHECK: %[[CONV_DYNSIZE:.*]] = fir.convert %[[DYNSIZE]] : (index) -> i32
 // CHECK: cuf.shared_memory[%[[CONV_DYNSIZE]] : i32] !fir.array<?x?xi32>, %26, %31 : index, index {bindc_name = "s2", uniq_name = "_QMmFss1Es2"} -> !fir.ref<!fir.array<?x?xi32>>
+
+// -----
+
+module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<!llvm.ptr, dense<64> : vector<4xi64>>, #dlti.dl_entry<!llvm.ptr<271>, dense<32> : vector<4xi64>>, #dlti.dl_entry<!llvm.ptr<270>, dense<32> : vector<4xi64>>, #dlti.dl_entry<f128, dense<128> : vector<2xi64>>, #dlti.dl_entry<f64, dense<64> : vector<2xi64>>, #dlti.dl_entry<f80, dense<128> : vector<2xi64>>, #dlti.dl_entry<f16, dense<16> : vector<2xi64>>, #dlti.dl_entry<i32, dense<32> : vector<2xi64>>, #dlti.dl_entry<i16, dense<16> : vector<2xi64>>, #dlti.dl_entry<i128, dense<128> : vector<2xi64>>, #dlti.dl_entry<i8, dense<8> : vector<2xi64>>, #dlti.dl_entry<!llvm.ptr<272>, dense<64> : vector<4xi64>>, #dlti.dl_entry<i64, dense<64> : vector<2xi64>>, #dlti.dl_entry<i1, dense<8> : vector<2xi64>>, #dlti.dl_entry<"dlti.endianness", "little">, #dlti.dl_entry<"dlti.stack_alignment", 128 : i64>>, fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", gpu.container_module, llvm.data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", llvm.ident = "flang version 20.0.0 (https://github.com/llvm/llvm-project.git cae351f3453a0a26ec8eb2ddaf773c24a29d929e)", llvm.target_triple = "x86_64-unknown-linux-gnu"} {
+  gpu.module @cuda_device_mod {
+    gpu.func @_QPnoshared() kernel {
+      gpu.return
+    }
+  }
+}
+
+// CHECK-LABEL: gpu.func @_QPnoshared()
+// CHECK-NOT: fir.global internal @_QPnoshared__shared_mem
+

0 commit comments

Comments
 (0)