Skip to content

Commit df52070

Browse files
author
weijinqian_v1
committed
handle conflict
Signed-off-by: weijinqian_v1 <weijinqian@huawei.com>
2 parents d5656f4 + 57664f0 commit df52070

File tree

1 file changed

+1
-2
lines changed

1 file changed

+1
-2
lines changed

vllm_ascend/ops/fused_moe.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -968,7 +968,6 @@ def apply(
968968
topk_ids = torch.randint_like(topk_ids, 0, global_num_experts)
969969

970970
fused_moe_state = get_forward_context().fused_moe_state
971-
972971
if fused_moe_state == FusedMoEState.MC2:
973972
mc2_mask = kwargs.get("mc2_mask", None)
974973
return fused_experts_with_mc2(
@@ -1209,7 +1208,7 @@ def forward(self,
12091208
shared_hidden_states = shared_experts(hidden_states)
12101209

12111210
attn_metadata = get_forward_context().attn_metadata
1212-
mc2_mask = attn_metadata.decode.mc2_mask if attn_metadata is not None and attn_metadata.decode is not None else None
1211+
mc2_mask = attn_metadata.decode.mc2_mask if attn_metadata is not None and getattr(attn_metadata, "decode", None) is not None else None
12131212

12141213
tp_size = get_tensor_model_parallel_world_size()
12151214
if tp_size > 1 and fused_moe_state != FusedMoEState.AllGather:

0 commit comments

Comments (0)