Commit 7e4253d

skip autotune when gpu is missing
1 parent 6cfac32 commit 7e4253d

File tree: 1 file changed (+3 -8 lines)


tests/test_autotune_add.py

Lines changed: 3 additions & 8 deletions
@@ -8,6 +8,9 @@
 from triton_viz import config as cfg


+if not torch.backends.cuda.is_built():
+    pytest.skip("This test requires a CUDA-enabled environment.")
+
 cfg.sanitizer_backend = "symexec"

 @triton.autotune(
@@ -34,10 +37,6 @@ def add_kernel_no_mask(x_ptr, y_ptr, out_ptr, n_elements, BLOCK_SIZE: tl.constexpr
     tl.store(out_ptr + offsets, x_val + y_val)


-@pytest.mark.skipif(
-    not torch.backends.cuda.is_built(),
-    reason="This test requires a CUDA-enabled environment.",
-)
 def test_autotune_add_inrange():
     """
     This test uses n_elements = 128, matching the size of the input tensors.
@@ -54,10 +53,6 @@ def test_autotune_add_inrange():
     print("test_autotune_add_inrange() passed: No out-of-bound access.")


-@pytest.mark.skipif(
-    not torch.backends.cuda.is_built(),
-    reason="This test requires a CUDA-enabled environment.",
-)
 def test_autotune_add_out_of_bound():
     """
     This test deliberately sets n_elements = 256, exceeding the actual buffer size (128).
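The net effect is to replace two per-test @pytest.mark.skipif decorators with a single module-level check that skips the whole file at collection time. A minimal standalone sketch of that pattern is below; the file name and test body are hypothetical, and note that current pytest versions require allow_module_level=True when pytest.skip is called outside a test function, a flag the diff above does not pass.

# test_gpu_required.py -- hypothetical file, sketching the module-level
# skip pattern this commit adopts.
import pytest
import torch

# Runs at import time: if PyTorch was built without CUDA support, pytest
# skips every test in this module during collection, so no per-test
# skipif decorators are needed. Outside of a test function, pytest.skip()
# must be called with allow_module_level=True.
if not torch.backends.cuda.is_built():
    pytest.skip(
        "This test requires a CUDA-enabled environment.",
        allow_module_level=True,
    )


def test_cuda_build_flag():
    # Hypothetical placeholder; only collected when the check above passes.
    assert torch.backends.cuda.is_built()

One caveat on the check itself: torch.backends.cuda.is_built() only reports whether the installed PyTorch binary was compiled with CUDA support; torch.cuda.is_available() is the runtime check for an actually usable GPU, which is closer to what the commit message ("skip autotune when gpu is missing") describes.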
