Commit 82d0663

remove some logging

Signed-off-by: Bill Nell <bnell@redhat.com>
1 parent 639b868

2 files changed, 0 insertions(+), 6 deletions(-)

tests/kernels/utils.py

Lines changed: 0 additions & 1 deletion

@@ -1080,7 +1080,6 @@ def torch_experts(
                 or (w1_scale is not None and w2_scale is not None))
 
     M, K = a.shape
-    #N = w1.shape[1]
     topk = topk_ids.shape[1]
 
     a = a.view(M, -1, K).repeat(1, topk, 1).reshape(-1, K)

vllm/model_executor/layers/fused_moe/layer.py

Lines changed: 0 additions & 5 deletions

@@ -98,9 +98,6 @@ def init_prepare_finalize(self, moe: FusedMoEConfig,
             block_shape=moe.block_shape,
         )
 
-        logger.debug("All2All %s, %s = %s/%s", moe.quant_dtype,
-                     moe.block_shape, hidden_dim_bytes, hidden_scale_bytes)
-
         all_to_all_args = dict(
             max_num_tokens=moe.max_num_tokens,
             num_experts=moe.num_experts,
@@ -672,8 +669,6 @@ def __init__(
         # since model_config is not set in the pytest test.
         model_dtype = params_dtype
 
-        logger.debug("MODEL DTYPE %s", model_dtype)
-
         moe = FusedMoEConfig.make(
             num_experts=self.global_num_experts,
             experts_per_token=top_k,
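
For context on the deleted lines: they use the standard library's lazy %-style logging, where the arguments are only interpolated into the format string if the DEBUG level is actually enabled. A minimal, self-contained sketch of that pattern is below; the logger name and helper function are illustrative only and are not taken from this commit.

    import logging

    logger = logging.getLogger(__name__)  # illustrative; vLLM uses its own logger setup

    def log_all2all_config(quant_dtype, block_shape, hidden_dim_bytes, hidden_scale_bytes):
        # The format arguments are only interpolated when DEBUG logging is enabled,
        # so calls like the ones removed in this commit are cheap when disabled.
        logger.debug("All2All %s, %s = %s/%s", quant_dtype,
                     block_shape, hidden_dim_bytes, hidden_scale_bytes)

    if __name__ == "__main__":
        logging.basicConfig(level=logging.DEBUG)
        log_all2all_config("fp8", [128, 128], 4096, 32)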
