Commit 8d17616

lint
Signed-off-by: Bill Nell <bnell@redhat.com>
1 parent d2c2790 commit 8d17616

File tree

2 files changed: +10 -7 lines

tests/kernels/moe/test_batched_moe.py

Lines changed: 2 additions & 1 deletion
@@ -9,7 +9,8 @@
 import triton.language as tl
 
 from tests.kernels.moe.utils import (batched_moe, make_test_weights,
-                                     torch_moe2, triton_moe)
+                                     torch_moe2, triton_moe,
+                                     per_block_cast_to_fp8)
 from tests.kernels.quant_utils import native_w8a8_block_matmul
 from vllm.config import VllmConfig, set_current_vllm_config
 from vllm.model_executor.layers.fused_moe.fused_batched_moe import (
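For context on the newly imported name: per_block_cast_to_fp8 appears to be a block-quantization helper used by the FP8 MoE tests alongside native_w8a8_block_matmul. The snippet below is a minimal, self-contained sketch of what a per-block FP8 cast typically does (one scale per 128x128 tile, cast to float8_e4m3fn); the function name per_block_cast_to_fp8_sketch, its signature, and its return values are illustrative assumptions, not the helper actually defined in tests/kernels/moe/utils.

# Rough sketch only: not the helper from tests/kernels/moe/utils; the real
# signature and return values may differ.
import torch

def per_block_cast_to_fp8_sketch(x: torch.Tensor, block: int = 128):
    """Quantize a 2-D tensor to float8_e4m3fn with one scale per
    (block x block) tile; returns (quantized tensor, per-tile scales)."""
    m, n = x.shape
    pm, pn = -(-m // block) * block, -(-n // block) * block  # round up to block
    padded = torch.zeros(pm, pn, dtype=x.dtype, device=x.device)
    padded[:m, :n] = x
    # View as (tiles_m, block, tiles_n, block): tile (i, j) is tiles[i, :, j, :].
    tiles = padded.view(pm // block, block, pn // block, block).float()
    amax = tiles.abs().amax(dim=(1, 3), keepdim=True).clamp(min=1e-4)
    fp8_max = torch.finfo(torch.float8_e4m3fn).max  # 448.0 for e4m3fn
    scales = amax / fp8_max
    q = (tiles / scales).to(torch.float8_e4m3fn)
    return q.view(pm, pn)[:m, :n].contiguous(), scales.view(pm // block, pn // block)

Dequantization is the inverse: cast each FP8 tile back to float and multiply by its scale, which is roughly what block-wise w8a8 reference paths such as native_w8a8_block_matmul rely on.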

tests/kernels/moe/test_pplx_moe.py

Lines changed: 8 additions & 6 deletions
@@ -23,13 +23,15 @@
                                      torch_moe2)
 from tests.pplx_utils import ProcessGroupInfo, parallel_launch
 from vllm.config import VllmConfig, set_current_vllm_config
-from vllm.model_executor.layers.fused_moe import override_config
+from vllm.model_executor.layers.fused_moe import (override_config,
+                                                  FusedMoEConfig,
+                                                  fused_topk,
+                                                  get_default_config,
+                                                  FusedMoEModularKernel,
+                                                  BatchedTritonExperts,
+                                                  FusedMoEModularKernel)
 from vllm.model_executor.layers.fused_moe.fused_batched_moe import (
-    BatchedPrepareAndFinalize, BatchedTritonExperts, NaiveBatchedExperts)
-from vllm.model_executor.layers.fused_moe.fused_moe import (fused_topk,
-                                                            get_default_config)
-from vllm.model_executor.layers.fused_moe.modular_kernel import (
-    FusedMoEModularKernel)
+    BatchedPrepareAndFinalize, NaiveBatchedExperts)
 from vllm.platforms import current_platform
 from vllm.utils import round_up
 
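The consolidated import above pulls several modular-MoE pieces from the vllm.model_executor.layers.fused_moe package in one place. As a loose, toy illustration of the prepare/experts/finalize split that names like FusedMoEModularKernel, BatchedPrepareAndFinalize and BatchedTritonExperts suggest (the classes below are hypothetical, not the vLLM API), a modular kernel delegates token dispatch and result combination to one object and the per-expert compute to another:

# Toy sketch only: hypothetical classes illustrating the composition pattern,
# not vLLM's actual FusedMoEModularKernel / BatchedPrepareAndFinalize API.
import torch

class ToyPrepareAndFinalize:
    """Dispatch tokens to their (top-1) expert and restore the original order."""

    def prepare(self, x: torch.Tensor, topk_ids: torch.Tensor):
        order = torch.argsort(topk_ids)      # group token indices by expert id
        return x[order], topk_ids[order], order

    def finalize(self, y_sorted: torch.Tensor, order: torch.Tensor):
        out = torch.empty_like(y_sorted)
        out[order] = y_sorted                # scatter back to original token order
        return out

class ToyExperts:
    """Apply a per-expert linear layer to each routed token."""

    def __init__(self, num_experts: int, hidden: int):
        self.w = torch.randn(num_experts, hidden, hidden) * 0.02

    def apply(self, x_sorted: torch.Tensor, ids_sorted: torch.Tensor):
        # For token t routed to expert e: y[t] = x[t] @ w[e]
        return torch.einsum("th,thk->tk", x_sorted, self.w[ids_sorted])

class ToyModularKernel:
    """Compose the two pieces: prepare -> expert compute -> finalize."""

    def __init__(self, prepare_finalize, experts):
        self.pf, self.experts = prepare_finalize, experts

    def __call__(self, x: torch.Tensor, topk_ids: torch.Tensor):
        xs, ids, order = self.pf.prepare(x, topk_ids)
        return self.pf.finalize(self.experts.apply(xs, ids), order)

if __name__ == "__main__":
    x, topk_ids = torch.randn(8, 16), torch.randint(0, 4, (8,))
    moe = ToyModularKernel(ToyPrepareAndFinalize(), ToyExperts(4, 16))
    print(moe(x, topk_ids).shape)            # torch.Size([8, 16])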