
Commit c9d0f4f

test cleanup
Signed-off-by: Bill Nell <bnell@redhat.com>
1 parent 654cbea

File tree: 3 files changed (+26, -11 lines)


tests/kernels/moe/test_block_fp8.py: 13 additions, 8 deletions

@@ -45,7 +45,7 @@
 GROUP_SIZE = [64, 128, 512]
 # Deepseek-V3's intermediate size 18432, so N is 18432*2/8=4608 at TP8
 # and its hidden size is 7168.
-M = [1, 2, 83, 128, 2048, 1024 * 128]
+M = [1, 2, 83, 128, 2048, 32768]
 M_dg = [128, 192, 1335, 2048]
 N = [128, 256, 1024, 4608]  # [13824]
 K = [256, 512, 7168]  # [13824]

@@ -104,11 +104,15 @@ def setup_cuda():
     "M,N,K,E,topk,block_size,dtype,seed",
     itertools.product(M, N, K, E, TOP_KS, BLOCK_SIZE, DTYPES, SEEDS))
 @torch.inference_mode()
-def test_w8a8_block_fp8_fused_moe(M, N, K, E, topk, block_size, dtype, seed):
+def test_w8a8_block_fp8_fused_moe(M, N, K, E, topk, block_size, dtype, seed,
+                                  monkeypatch):
     if topk > E:
         pytest.skip(f"Skipping test; topk={topk} > E={E}")

     torch.manual_seed(seed)
+
+    monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", "8192")
+
     factor_for_scale = 1e-2
     fp8_info = torch.finfo(torch.float8_e4m3fn)
     fp8_max, fp8_min = fp8_info.max, fp8_info.min

@@ -262,19 +266,20 @@ def deep_gemm_w8a8_block_fp8_moe(M, K, a, w1, w2, w1_s, w2_s, score, topk,
     itertools.product(M_dg, N, K, E, TOP_KS, SEEDS))
 @pytest.mark.skipif(not dg_available, reason="DeepGemm kernels not available.")
 @torch.inference_mode()
-def test_w8a8_block_fp8_deep_gemm_fused_moe(M, N, K, E, topk, seed):
-
-    block_m = deep_gemm.get_m_alignment_for_contiguous_layout()
-    block_size = [block_m, block_m]
-    dtype = torch.bfloat16
-
+def test_w8a8_block_fp8_deep_gemm_fused_moe(M, N, K, E, topk, seed,
+                                            monkeypatch):
     if topk > E:
         pytest.skip(f"Skipping test: topk={topk} > E={E}")

     if not _valid_deep_gemm_shape(M, N, K):
         pytest.skip(f"Skipping test: invalid size m={M}, n={N}, k={K}")

     torch.manual_seed(seed)
+
+    monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", "8192")
+    block_m = deep_gemm.get_m_alignment_for_contiguous_layout()
+    block_size = [block_m, block_m]
+    dtype = torch.bfloat16
     fp8_info = torch.finfo(torch.float8_e4m3fn)
     fp8_max, fp8_min = fp8_info.max, fp8_info.min
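Note on the pattern above: pytest's built-in monkeypatch fixture scopes the environment change to a single test and restores the prior value on teardown, so the pinned chunk size cannot leak into other tests in the session. A minimal sketch of that behavior (illustrative only, not part of this commit):

    import os

    def test_chunk_size_env_is_scoped(monkeypatch):
        # setenv applies only for the duration of this test; pytest
        # restores the previous environment automatically on teardown.
        monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", "8192")
        assert os.environ["VLLM_FUSED_MOE_CHUNK_SIZE"] == "8192"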
tests/kernels/moe/test_cutlass_moe.py: 7 additions, 1 deletion

@@ -29,7 +29,7 @@
     (224, 1024, 1536),
     (224, 3072, 1024),
     (224, 3072, 1536),
-    (1024 * 128, 1024, 1024),
+    (32768, 1024, 1024),
 ]

 vllm_config = VllmConfig(parallel_config=ParallelConfig(

@@ -232,8 +232,10 @@ def test_cutlass_moe_8_bit_no_graph(
     topk: int,
     per_act_token: bool,
     per_out_ch: bool,
+    monkeypatch,
 ):
     current_platform.seed_everything(7)
+    monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", "8192")
     with set_current_vllm_config(vllm_config):
         mt = MOETensors8Bit.make_moe_tensors_8bit(m, k, n, e, per_act_token,
                                                   per_out_ch)

@@ -274,8 +276,10 @@ def test_cutlass_moe_8_bit_cuda_graph(
     topk: int,
     per_act_token: bool,
     per_out_ch: bool,
+    monkeypatch,
 ):
     current_platform.seed_everything(7)
+    monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", "8192")
     with set_current_vllm_config(vllm_config):
         dtype = torch.half

@@ -329,8 +333,10 @@ def test_cutlass_moe_8_bit_EP(
     per_act_token: bool,
     per_out_channel: bool,
     ep_size: int,
+    monkeypatch,
 ):
     current_platform.seed_everything(7)
+    monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", "8192")
     with set_current_vllm_config(vllm_config):
         mt = MOETensors8Bit.make_moe_tensors_8bit(m, k, n, e, per_act_token,
                                                   per_out_channel)
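Across these files the largest batch shrinks from 1024 * 128 (131072) to 32768 tokens while VLLM_FUSED_MOE_CHUNK_SIZE is pinned to 8192, so the tests still cross the chunk boundary several times but with far less memory. A hedged sketch of the chunking idea, assuming the fused-MoE path splits the token dimension by this variable; apply_experts below is a hypothetical stand-in for the kernel, not vLLM's implementation:

    import os

    import torch

    def chunked_moe(a: torch.Tensor, apply_experts) -> torch.Tensor:
        # Read the chunk size the tests pin via monkeypatch.setenv.
        chunk = int(os.environ.get("VLLM_FUSED_MOE_CHUNK_SIZE", "8192"))
        # With m = 32768 and chunk = 8192 this loop runs four times,
        # exercising the multi-chunk path with bounded workspace memory.
        outs = [
            apply_experts(a[i:i + chunk]) for i in range(0, a.shape[0], chunk)
        ]
        return torch.cat(outs, dim=0)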

tests/kernels/moe/test_moe.py: 6 additions, 2 deletions

@@ -40,7 +40,7 @@
 vllm_config.scheduler_config.max_model_len = 8192


-@pytest.mark.parametrize("m", [1, 33, 64, 222, 1024 * 128])
+@pytest.mark.parametrize("m", [1, 33, 64, 222, 32768])
 @pytest.mark.parametrize("n", [128, 1024, 2048])
 @pytest.mark.parametrize("k", [128, 511, 1024])
 @pytest.mark.parametrize("e", NUM_EXPERTS)

@@ -57,7 +57,11 @@ def test_fused_moe(
     ep_size: int,
     dtype: torch.dtype,
     padding: bool,
+    monkeypatch,
 ):
+    current_platform.seed_everything(7)
+    monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", "8192")
+
     a = torch.randn((m, k), device="cuda", dtype=dtype) / 10
     w1 = torch.randn((e, 2 * n, k), device="cuda", dtype=dtype) / 10
     w2 = torch.randn((e, k, n), device="cuda", dtype=dtype) / 10

@@ -81,7 +85,7 @@ def test_fused_moe(
         use_int8_w8a8=False,
         use_int8_w8a16=False,
         use_int4_w4a16=False,
-        per_channel_quant=False,
+        per_act_token_quant=False,
         block_shape=None)

     with set_current_vllm_config(vllm_config):
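test_fused_moe now seeds before building its random inputs, making the kernel and reference tensors reproducible across runs. A sketch of what a seed-everything helper typically covers (an assumption about what current_platform.seed_everything does, not its verbatim source):

    import random

    import numpy as np
    import torch

    def seed_everything(seed: int) -> None:
        # Seed every RNG the test inputs might draw from so repeated
        # runs produce identical tensors.
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(seed)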
