Skip to content

[FIX] Fix CI #74

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 14 commits into from
Jun 11, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 9 additions & 5 deletions .github/workflows/python-app.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,13 @@ name: Python application

on:
push:
branches-ignore:
- '**'
branches:
- main
- keren/v2.0
pull_request:
branches-ignore:
- '**'
branches:
- main
- keren/v2.0

permissions:
contents: read
Expand All @@ -21,6 +23,8 @@ concurrency:
jobs:
build:
runs-on: ubuntu-latest
env:
TRITON_INTERPRET: "1"

steps:
- uses: actions/checkout@v3
Expand All @@ -44,7 +48,7 @@ jobs:
pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu121
pip uninstall pytorch-triton -y

- name: Clone Triton and Install
- name: Install Triton
run: |
pip install triton==3.1.0

Expand Down
15 changes: 10 additions & 5 deletions tests/test_autotune_add.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import pytest
import torch
import triton
import triton.language as tl
Expand All @@ -7,8 +8,12 @@
from triton_viz import config as cfg


cfg.sanitizer_backend = "symexec"
try:
torch.cuda.current_device()
except:
pytest.skip("This test requires a CUDA-enabled environment.", allow_module_level=True)

cfg.sanitizer_backend = "symexec"

@triton.autotune(
configs=[
Expand Down Expand Up @@ -39,8 +44,8 @@ def test_autotune_add_inrange():
This test uses n_elements = 128, matching the size of the input tensors.
It should NOT cause any out-of-bound access.
"""
x = torch.randn(128, device="cuda")
y = torch.randn(128, device="cuda")
x = torch.randn(128)
y = torch.randn(128)
out = torch.empty_like(x)

# The kernel launch uses n_elements=128, aligned with the tensor size.
Expand All @@ -55,8 +60,8 @@ def test_autotune_add_out_of_bound():
This test deliberately sets n_elements = 256, exceeding the actual buffer size (128).
It will likely cause out-of-bound reads/writes, which may trigger errors or warnings.
"""
x = torch.randn(128, device="cuda")
y = torch.randn(128, device="cuda")
x = torch.randn(128)
y = torch.randn(128)
out = torch.empty_like(x)

# The kernel launch uses n_elements=256, exceeding the valid tensor size.
Expand Down
3 changes: 2 additions & 1 deletion tests/test_config.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import pytest
import pytest, os
os.environ["TRITON_SANITIZER_BACKEND"] = "off"
import triton_viz.core.config as cfg


Expand Down
2 changes: 1 addition & 1 deletion tests/test_print_traceback.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ def kernel_A(ptr, n):


def test_print_nested_functions():
x = torch.arange(4, device="cuda", dtype=torch.float32)
x = torch.arange(4, dtype=torch.float32)
print("Input:", x)

# We'll launch a grid bigger than x.numel() to force an out-of-bounds error
Expand Down
1 change: 1 addition & 0 deletions tests/test_wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ def _decorator(fn):
env = os.environ.copy()
env["PYTHONPATH"] = str(tmp_path) + os.pathsep + env.get("PYTHONPATH", "")
env["TRITON_SANITIZER_BACKEND"] = "symexec"
env["TRITON_INTERPRET"] = "1"

# run the dummy program using triton-sanitizer
cmd = ["triton-sanitizer", str(tmp_path / "dummy_program.py")]
Expand Down
10 changes: 8 additions & 2 deletions triton_viz/core/trace.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,11 @@ def add_client(self, new_client: Union[Client, str]) -> None:
new_client_instance = self._normalize_client(new_client)
self.client_manager.add_clients([new_client_instance])

def __init__(self, kernel: Union[JITFunction, InterpretedFunction], client: Union[str, Client]) -> None:
def __init__(
self,
kernel: Union[JITFunction, InterpretedFunction],
client: Union[str, Client],
) -> None:
self.fn = kernel
if isinstance(kernel, InterpretedFunction):
self.interpreter_fn = kernel
Expand Down Expand Up @@ -91,7 +95,9 @@ def decorator(kernel) -> Trace:
trace.add_client(clients)
return trace

raise TypeError(f"Expected JITFunction, InterpretedFunction or Trace, got {type(kernel)}")
raise TypeError(
f"Expected JITFunction, InterpretedFunction or Trace, got {type(kernel)}"
)

return decorator

Expand Down