
Commit a10105a

remove some logging
Signed-off-by: Bill Nell <bnell@redhat.com>
1 parent 91533a3 commit a10105a

2 files changed: +0 -6 lines changed

tests/kernels/utils.py

Lines changed: 0 additions & 1 deletion
@@ -1080,7 +1080,6 @@ def torch_experts(
             or (w1_scale is not None and w2_scale is not None))
 
     M, K = a.shape
-    #N = w1.shape[1]
     topk = topk_ids.shape[1]
 
     a = a.view(M, -1, K).repeat(1, topk, 1).reshape(-1, K)

vllm/model_executor/layers/fused_moe/layer.py

Lines changed: 0 additions & 5 deletions
@@ -100,9 +100,6 @@ def init_prepare_finalize(self, moe: FusedMoEConfig,
             block_shape=moe.block_shape,
         )
 
-        logger.debug("All2All %s, %s = %s/%s", moe.quant_dtype,
-                     moe.block_shape, hidden_dim_bytes, hidden_scale_bytes)
-
         all_to_all_args = dict(
             max_num_tokens=moe.max_num_tokens,
             num_experts=moe.num_experts,
@@ -674,8 +671,6 @@ def __init__(
         # since model_config is not set in the pytest test.
         model_dtype = params_dtype
 
-        logger.debug("MODEL DTYPE %s", model_dtype)
-
         moe = FusedMoEConfig.make(
             num_experts=self.global_num_experts,
             experts_per_token=top_k,