
Commit 360479b

spcyppt authored and facebook-github-bot committed
Fix trunk health (#4379)
Summary:
Pull Request resolved: #4379
X-link: facebookresearch/FBGEMM#1449

Some tests have long been failing in trunk.

- Fix kv_cache test on AMD
- Fix AMD build for trt_llm {F1979505502}
- Fix AMD build for fbcode//deeplearning/fbgemm/fbgemm_gpu/experimental/example/ {F1979505460}
- Fix type checking in triton_gemm

Reviewed By: q10

Differential Revision: D76954296

fbshipit-source-id: ed0acb8ff50b1b27dc167a39a3dc613609ea2028
1 parent 783e132

2 files changed (+5, -5 lines)

fbgemm_gpu/experimental/gemm/triton_gemm/grouped_gemm.py

Lines changed: 4 additions & 4 deletions
@@ -266,7 +266,7 @@ def _fbgemm_grouped_gemm(
     c_desc_ptr = None
 
     M_end_offset = 0
-    M_end_offset = M_end_offset.to(tl.int64)
+    M_end_offset = M_end_offset.to(tl.int64)  # pyre-ignore
     iterated_tiles = 0
     for g in tl.range(G):
         # Move across groups
@@ -433,7 +433,7 @@ def _fbgemm_grouped_gemm_ws(
     c_desc_ptr = None
 
     M_end_offset = 0
-    M_end_offset = M_end_offset.to(tl.int64)
+    M_end_offset = M_end_offset.to(tl.int64)  # pyre-ignore
     iterated_tiles = 0
     for g in tl.range(G):
         # Move across groups
@@ -605,7 +605,7 @@ def _fbgemm_grouped_gemm_fp8_rowwise(
     c_desc_ptr = None
 
     M_end_offset = 0
-    M_end_offset = M_end_offset.to(tl.int64)
+    M_end_offset = M_end_offset.to(tl.int64)  # pyre-ignore
     iterated_tiles = 0
     for g in tl.range(G):
         # Move across groups
@@ -786,7 +786,7 @@ def _fbgemm_grouped_gemm_fp8_rowwise_ws(
     c_desc_ptr = None
 
     M_end_offset = 0
-    M_end_offset = M_end_offset.to(tl.int64)
+    M_end_offset = M_end_offset.to(tl.int64)  # pyre-ignore
     iterated_tiles = 0
     for g in tl.range(G):
         # Move across groups
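
Why the `# pyre-ignore` is needed in all four kernels: inside an `@triton.jit` function the Python literal `0` is promoted to a Triton device scalar at kernel compile time, so `.to(tl.int64)` is legal Triton, but Pyre type-checks the body as ordinary Python, where `int` has no `.to` method. A minimal sketch of the pattern (the `_offset_demo` kernel is hypothetical and not part of this commit; assumes Triton and a CUDA/ROCm device):

import torch
import triton
import triton.language as tl

@triton.jit
def _offset_demo(out_ptr, G: tl.constexpr):
    # The literal 0 starts as a Python int; Triton promotes it to a device
    # scalar, so .to(tl.int64) compiles, even though Pyre flags it.
    acc = 0
    acc = acc.to(tl.int64)  # pyre-ignore
    for g in tl.range(G):
        acc += g
    tl.store(out_ptr, acc)

out = torch.zeros(1, dtype=torch.int64, device="cuda")
_offset_demo[(1,)](out, G=4)
assert out.item() == 6  # 0 + 1 + 2 + 3

Widening the offset to int64 before the per-group loop presumably keeps the accumulated row offsets from overflowing int32 on large grouped GEMMs; the suppression only silences the static checker, not the Triton compiler.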

fbgemm_gpu/experimental/gen_ai/test/kv_cache/kv_cache_test.py

Lines changed: 1 addition & 1 deletion
@@ -363,7 +363,7 @@ def test_fp8_kv_cache(self, MAX_T: int, N_KVH_L: int) -> None:
         N_KVH_L=st.sampled_from([1, 2]),
     )
     @unittest.skipIf(
-        not torch.cuda.is_available() or not HAS_XFORMERS,
+        not torch.version.hip or not HAS_XFORMERS,
         "Skip when no AMD GPU or xformers is not available",
     )
     def test_fp8_kv_e4m3fn_convert_to_e4m3fnuz(self, MAX_T: int, N_KVH_L: int) -> None:
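
The test-side fix swaps the guard because `torch.cuda.is_available()` returns True on ROCm builds as well (PyTorch exposes HIP through the `torch.cuda` namespace), so it cannot restrict a test to AMD. `torch.version.hip` is a version string on ROCm builds and `None` on CUDA builds, which makes it the right gate for this AMD-only e4m3fn-to-e4m3fnuz conversion test. A minimal sketch of the distinction (the `running_on_amd` helper is illustrative, not part of the diff):

import torch

def running_on_amd() -> bool:
    # torch.version.hip is a ROCm version string under HIP builds and None
    # under CUDA builds, while torch.cuda.is_available() is True for both.
    return torch.version.hip is not None and torch.cuda.is_available()

print("AMD GPU build:", running_on_amd())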
