Skip to content

Commit db8941a

Browse files
spcyppt authored and facebook-github-bot committed
Fix trunk health GenAI tests (#4394)
Summary: Pull Request resolved: #4394 X-link: facebookresearch/FBGEMM#1465 This diff fixes the tests which fail when running with ` fbcode//mode/opt-amd-gpu`. - Properly skip gather_scatter tests on ROCm since the ops are not available for ROCm - Skip test_fp8_kv_e4m3fn_convert_to_e4m3fnuz when there's no GPU available. - Add HIP SDK dependency Reviewed By: q10 Differential Revision: D77194089 fbshipit-source-id: ed758a4535581aa6b12efdc5759fb55a73214ad3
1 parent ba8f2b7 commit db8941a

File tree

2 files changed

+13
-5
lines changed

2 files changed

+13
-5
lines changed

fbgemm_gpu/experimental/gen_ai/test/gather_scatter/gather_scatter_test.py

Lines changed: 12 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -19,13 +19,15 @@
1919
logger.setLevel(logging.INFO)
2020

2121

22-
@unittest.skipIf(
23-
not torch.cuda.is_available() or torch.cuda.get_device_capability() < (9, 0),
24-
"Skip when no Hopper GPU is available. This test is only for Hopper GPU.",
25-
)
2622
class GatherScatterTests(unittest.TestCase):
2723
"""Test Gathers."""
2824

25+
@unittest.skipIf(
26+
torch.version.hip
27+
or not torch.cuda.is_available()
28+
or torch.cuda.get_device_capability() < (9, 0),
29+
"Skip when no Hopper GPU is available. This test is only for Hopper GPU.",
30+
)
2931
def test_gather_along_first_dim(self) -> None:
3032
def _test_gather_along_first_dim(
3133
M: int, N: int, K: int, compile: bool = False
@@ -67,6 +69,12 @@ def ref_fn():
6769
_test_gather_along_first_dim(8192, 8192, 5120)
6870
_test_gather_along_first_dim(16384, 16384, 5120)
6971

72+
@unittest.skipIf(
73+
torch.version.hip
74+
or not torch.cuda.is_available()
75+
or torch.cuda.get_device_capability() < (9, 0),
76+
"Skip when no Hopper GPU is available. This test is only for Hopper GPU.",
77+
)
7078
def test_scatter_add_along_first_dim(self) -> None:
7179
def _test_scatter_add_along_first_dim(
7280
M: int, N: int, K: int, compile: bool = False

fbgemm_gpu/experimental/gen_ai/test/kv_cache/kv_cache_test.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -363,7 +363,7 @@ def test_fp8_kv_cache(self, MAX_T: int, N_KVH_L: int) -> None:
363363
N_KVH_L=st.sampled_from([1, 2]),
364364
)
365365
@unittest.skipIf(
366-
not torch.version.hip or not HAS_XFORMERS,
366+
not torch.cuda.is_available() or not HAS_XFORMERS or not torch.version.hip,
367367
"Skip when no AMD GPU or xformers is not available",
368368
)
369369
def test_fp8_kv_e4m3fn_convert_to_e4m3fnuz(self, MAX_T: int, N_KVH_L: int) -> None:

0 commit comments

Comments (0)