Skip to content

Commit d792304

Browse files
Merge pull request #242 from KernelTuner/register_observer
Register observer & correct clock setting
2 parents eceb226 + dfd3da9 commit d792304

File tree

12 files changed

+154
-124
lines changed

12 files changed

+154
-124
lines changed

CONTRIBUTING.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,7 @@ Steps without :bash:`sudo` access (e.g. on a cluster):
8585
- /path/to/directory
8686
* [Optional] both Mamba and Miniconda can be automatically activated via :bash:`~/.bashrc`. Do not forget to add these (usually provided at the end of the installation).
8787
* Exit the shell and re-enter to make sure Conda is available. :bash:`cd` to the kernel tuner directory.
88-
* [Optional] if you have limited user folder space, the Pip cache can be pointed elsewhere with the environment variable :bash:`PIP_CACHE_DIR`. The cache location can be checked with :bash:`pip cache dir`.
88+
* [Optional] if you have limited user folder space, the Pip cache can be pointed elsewhere with the environment variable :bash:`PIP_CACHE_DIR`. The cache location can be checked with :bash:`pip cache dir`. On Linux, to point the entire :bash:`~/.cache` default elsewhere, use the :bash:`XDG_CACHE_HOME` environment variable.
8989
* [Optional] update Conda if available before continuing: :bash:`conda update -n base -c conda-forge conda`.
9090
#. Setup a virtual environment: :bash:`conda create --name kerneltuner python=3.11` (or whatever Python version and environment name you prefer).
9191
#. Activate the virtual environment: :bash:`conda activate kerneltuner`.

kernel_tuner/backends/cupy.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
"""This module contains all Cupy specific kernel_tuner functions."""
22
from __future__ import print_function
3+
from warnings import warn
34

45
import numpy as np
56

@@ -124,6 +125,7 @@ def compile(self, kernel_instance):
124125
compiler_options = self.compiler_options
125126
if not any(["-std=" in opt for opt in self.compiler_options]):
126127
compiler_options = ["--std=c++11"] + self.compiler_options
128+
# CuPy already sets the --gpu-architecture by itself, as per https://github.com/cupy/cupy/blob/main/cupy/cuda/compiler.py#L145
127129

128130
options = tuple(compiler_options)
129131

@@ -132,6 +134,7 @@ def compile(self, kernel_instance):
132134
)
133135

134136
self.func = self.current_module.get_function(kernel_name)
137+
self.num_regs = self.func.num_regs
135138
return self.func
136139

137140
def start_event(self):
@@ -197,6 +200,8 @@ def run_kernel(self, func, gpu_args, threads, grid, stream=None):
197200
of the grid
198201
:type grid: tuple(int, int)
199202
"""
203+
if stream is None:
204+
stream = self.stream
200205
func(grid, threads, gpu_args, stream=stream, shared_mem=self.smem_size)
201206

202207
def memset(self, allocation, value, size):

kernel_tuner/backends/nvcuda.py

Lines changed: 14 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,11 @@
11
"""This module contains all NVIDIA cuda-python specific kernel_tuner functions."""
2+
from warnings import warn
3+
24
import numpy as np
35

46
from kernel_tuner.backends.backend import GPUBackend
57
from kernel_tuner.observers.nvcuda import CudaRuntimeObserver
6-
from kernel_tuner.util import SkippableFailure, cuda_error_check
8+
from kernel_tuner.util import SkippableFailure, cuda_error_check, to_valid_nvrtc_gpu_arch_cc
79

810
# embedded in try block to be able to generate documentation
911
# and run tests without cuda-python installed
@@ -161,12 +163,12 @@ def compile(self, kernel_instance):
161163
compiler_options.append(b"--std=c++11")
162164
if not any(["--std=" in opt for opt in self.compiler_options]):
163165
self.compiler_options.append("--std=c++11")
164-
if not any([b"--gpu-architecture=" in opt for opt in compiler_options]):
166+
if not any([b"--gpu-architecture=" in opt or b"-arch" in opt for opt in compiler_options]):
165167
compiler_options.append(
166-
f"--gpu-architecture=compute_{self.cc}".encode("UTF-8")
168+
f"--gpu-architecture=compute_{to_valid_nvrtc_gpu_arch_cc(self.cc)}".encode("UTF-8")
167169
)
168-
if not any(["--gpu-architecture=" in opt for opt in self.compiler_options]):
169-
self.compiler_options.append(f"--gpu-architecture=compute_{self.cc}")
170+
if not any(["--gpu-architecture=" in opt or "-arch" in opt for opt in self.compiler_options]):
171+
self.compiler_options.append(f"--gpu-architecture=compute_{to_valid_nvrtc_gpu_arch_cc(self.cc)}")
170172

171173
err, program = nvrtc.nvrtcCreateProgram(
172174
str.encode(kernel_string), b"CUDAProgram", 0, [], []
@@ -192,6 +194,11 @@ def compile(self, kernel_instance):
192194
)
193195
cuda_error_check(err)
194196

197+
# get the number of registers per thread used in this kernel
198+
num_regs = cuda.cuFuncGetAttribute(cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_NUM_REGS, self.func)
199+
assert num_regs[0] == 0, f"Retrieving number of registers per thread unsuccesful: code {num_regs[0]}"
200+
self.num_regs = num_regs[1]
201+
195202
except RuntimeError as re:
196203
_, n = nvrtc.nvrtcGetProgramLogSize(program)
197204
log = b" " * n
@@ -273,6 +280,8 @@ def run_kernel(self, func, gpu_args, threads, grid, stream=None):
273280
of the grid
274281
:type grid: tuple(int, int)
275282
"""
283+
if stream is None:
284+
stream = self.stream
276285
arg_types = list()
277286
for arg in gpu_args:
278287
if isinstance(arg, cuda.CUdeviceptr):

kernel_tuner/backends/pycuda.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -218,6 +218,8 @@ def compile(self, kernel_instance):
218218
)
219219

220220
self.func = self.current_module.get_function(kernel_name)
221+
if not isinstance(self.func, str):
222+
self.num_regs = self.func.num_regs
221223
return self.func
222224
except drv.CompileError as e:
223225
if "uses too much shared data" in e.stderr:

kernel_tuner/core.py

Lines changed: 16 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -341,7 +341,7 @@ def __init__(
341341
print("Using: " + self.dev.name)
342342

343343
def benchmark_default(self, func, gpu_args, threads, grid, result):
344-
"""Benchmark one kernel execution at a time"""
344+
"""Benchmark one kernel execution at a time."""
345345
observers = [
346346
obs for obs in self.dev.observers if not isinstance(obs, ContinuousObserver)
347347
]
@@ -391,12 +391,8 @@ def benchmark_continuous(self, func, gpu_args, threads, grid, result, duration):
391391
for obs in self.continuous_observers:
392392
result.update(obs.get_results())
393393

394-
def benchmark(self, func, gpu_args, instance, verbose, objective):
395-
"""benchmark the kernel instance"""
396-
logging.debug("benchmark " + instance.name)
397-
logging.debug("thread block dimensions x,y,z=%d,%d,%d", *instance.threads)
398-
logging.debug("grid dimensions x,y,z=%d,%d,%d", *instance.grid)
399-
394+
def set_nvml_parameters(self, instance):
395+
"""Set the NVML parameters. Avoids setting time leaking into benchmark time."""
400396
if self.use_nvml:
401397
if "nvml_pwr_limit" in instance.params:
402398
new_limit = int(
@@ -409,6 +405,15 @@ def benchmark(self, func, gpu_args, instance, verbose, objective):
409405
if "nvml_mem_clock" in instance.params:
410406
self.nvml.mem_clock = instance.params["nvml_mem_clock"]
411407

408+
def benchmark(self, func, gpu_args, instance, verbose, objective, skip_nvml_setting=False):
409+
"""Benchmark the kernel instance."""
410+
logging.debug("benchmark " + instance.name)
411+
logging.debug("thread block dimensions x,y,z=%d,%d,%d", *instance.threads)
412+
logging.debug("grid dimensions x,y,z=%d,%d,%d", *instance.grid)
413+
414+
if self.use_nvml and not skip_nvml_setting:
415+
self.set_nvml_parameters(instance)
416+
412417
# Call the observers to register the configuration to be benchmarked
413418
for obs in self.dev.observers:
414419
obs.register_configuration(instance.params)
@@ -577,9 +582,12 @@ def compile_and_benchmark(self, kernel_source, gpu_args, params, kernel_options,
577582

578583
# benchmark
579584
if func:
585+
# setting the NVML parameters here avoids this time from leaking into the benchmark time, ends up in framework time instead
586+
if self.use_nvml:
587+
self.set_nvml_parameters(instance)
580588
start_benchmark = time.perf_counter()
581589
result.update(
582-
self.benchmark(func, gpu_args, instance, verbose, to.objective)
590+
self.benchmark(func, gpu_args, instance, verbose, to.objective, skip_nvml_setting=False)
583591
)
584592
last_benchmark_time = 1000 * (time.perf_counter() - start_benchmark)
585593

0 commit comments

Comments (0)