Skip to content

Commit 43e3d25

Browse files
hjc4869 authored and ggerganov committed
CUDA/HIP: Share the same unified memory allocation logic. (llama/12934)
Replace compile-time `GGML_HIP_UMA` with environment variable `GGML_CUDA_ENABLE_UNIFIED_MEMORY`. This unifies the usage on NVIDIA and AMD GPUs, and allows a single binary to be shared between integrated and dedicated GPUs.
1 parent e1dbf9a commit 43e3d25

File tree

4 files changed

+18
-20
lines changed

4 files changed

+18
-20
lines changed

ggml/CMakeLists.txt

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -170,7 +170,6 @@ option(GGML_HIP "ggml: use HIP"
170170
option(GGML_HIP_GRAPHS "ggml: use HIP graph, experimental, slow" OFF)
171171
option(GGML_HIP_NO_VMM "ggml: do not try to use HIP VMM" ON)
172172
option(GGML_HIP_ROCWMMA_FATTN "ggml: enable rocWMMA for FlashAttention" OFF)
173-
option(GGML_HIP_UMA "ggml: use HIP unified memory architecture" OFF)
174173
option(GGML_VULKAN "ggml: use Vulkan" OFF)
175174
option(GGML_VULKAN_CHECK_RESULTS "ggml: run Vulkan op checks" OFF)
176175
option(GGML_VULKAN_DEBUG "ggml: enable Vulkan debug output" OFF)

ggml/src/ggml-cuda/ggml-cuda.cu

Lines changed: 16 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -96,31 +96,32 @@ int ggml_cuda_get_device() {
9696

9797
static cudaError_t ggml_cuda_device_malloc(void ** ptr, size_t size, int device) {
9898
ggml_cuda_set_device(device);
99-
#if defined(GGML_USE_HIP) && defined(GGML_HIP_UMA)
100-
auto res = hipMallocManaged(ptr, size);
101-
if (res == hipSuccess) {
102-
// if error we "need" to know why...
103-
CUDA_CHECK(hipMemAdvise(*ptr, size, hipMemAdviseSetCoarseGrain, device));
104-
}
105-
return res;
106-
#else
107-
108-
#if !defined(GGML_USE_HIP)
10999
cudaError_t err;
110100
if (getenv("GGML_CUDA_ENABLE_UNIFIED_MEMORY") != nullptr)
111101
{
112102
err = cudaMallocManaged(ptr, size);
103+
#if defined(GGML_USE_HIP)
104+
if (err == hipSuccess) {
105+
CUDA_CHECK(cudaMemAdvise(*ptr, size, hipMemAdviseSetCoarseGrain, device));
106+
}
107+
108+
// fall back to cudaMalloc if not supported (e.g. on Windows)
109+
if (err == hipErrorNotSupported) {
110+
static bool warned_unsupported = false;
111+
if (!warned_unsupported) {
112+
GGML_LOG_WARN("hipMallocManaged unsupported, falling back to hipMalloc.\n");
113+
warned_unsupported = true;
114+
}
115+
116+
err = cudaMalloc(ptr, size);
117+
}
118+
#endif // defined(GGML_USE_HIP)
113119
}
114120
else
115121
{
116122
err = cudaMalloc(ptr, size);
117123
}
118124
return err;
119-
#else
120-
return cudaMalloc(ptr, size);
121-
#endif // !defined(GGML_USE_HIP)
122-
123-
#endif
124125
}
125126

126127
#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)

ggml/src/ggml-cuda/vendors/hip.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,8 @@
7171
#define cudaLaunchHostFunc hipLaunchHostFunc
7272
#define cudaMalloc hipMalloc
7373
#define cudaMallocHost(ptr, size) hipHostMalloc(ptr, size, hipHostMallocDefault)
74+
#define cudaMallocManaged hipMallocManaged
75+
#define cudaMemAdvise hipMemAdvise
7476
#define cudaMemcpy hipMemcpy
7577
#define cudaMemcpyAsync hipMemcpyAsync
7678
#define cudaMemcpyPeerAsync hipMemcpyPeerAsync

ggml/src/ggml-hip/CMakeLists.txt

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -89,10 +89,6 @@ endif()
8989

9090
add_compile_definitions(GGML_USE_HIP)
9191

92-
if (GGML_HIP_UMA)
93-
add_compile_definitions(GGML_HIP_UMA)
94-
endif()
95-
9692
if (GGML_CUDA_FORCE_MMQ)
9793
add_compile_definitions(GGML_CUDA_FORCE_MMQ)
9894
endif()

0 commit comments

Comments (0)