
Commit 2a308fa

fix some bugs
Signed-off-by: ningbenzhe1 <ningbenzhe@huawei.com>
1 parent 5903547 commit 2a308fa

File tree

6 files changed: +17 -8 lines changed


vllm_ascend/attention/attention_v1.py

Lines changed: 3 additions & 0 deletions
@@ -129,6 +129,9 @@ class AscendMetadata:
     attn_state: AscendAttentionState = AscendAttentionState.ChunkedPrefill
     attn_mask: Optional[torch.Tensor] = None

+    # For logging.
+    num_input_tokens: int = 0  # Number of tokens including padding.
+

 class AscendAttentionMetadataBuilder:
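
For context, a minimal sketch of how a runner could fill the new field after padding a batch. Only `num_input_tokens` comes from this diff; the helper name, arguments, and ceiling logic are hypothetical illustration:

# Hypothetical illustration: record the padded token count on the metadata
# so it can be reported later for logging. `num_input_tokens` is the field
# added above; everything else here is made up for the example.
def record_padded_token_count(attn_metadata, num_scheduled_tokens: int,
                              pad_multiple: int) -> None:
    # Round the scheduled token count up to the next multiple of pad_multiple.
    padded = -(-num_scheduled_tokens // pad_multiple) * pad_multiple
    attn_metadata.num_input_tokens = padded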

vllm_ascend/distributed/parallel_state.py

Lines changed: 6 additions & 0 deletions
@@ -21,12 +21,18 @@ def get_etp_group() -> GroupCoordinator:
     return _ETP


+def model_parallel_initialized():
+    return (_ETP is not None and _EP is not None)
+
+
 def init_ascend_model_parallel(
     tensor_model_parallel_size: int = 1,
     pipeline_model_parallel_size: int = 1,
     expert_tensor_parallel_size: int = 1,
     backend: Optional[str] = None,
 ):
+    if model_parallel_initialized():
+        return
     assert torch.distributed.is_initialized()
     world_size: int = torch.distributed.get_world_size()
     backend = backend or torch.distributed.get_backend(
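
The new guard makes `init_ascend_model_parallel` idempotent: once `_EP` and `_ETP` exist, a repeated call returns immediately instead of rebuilding the groups. A minimal sketch of the pattern, with the real GroupCoordinator construction replaced by placeholders:

# Sketch of the idempotent-initialization guard introduced above.
_EP = None   # expert-parallel group coordinator
_ETP = None  # expert-tensor-parallel group coordinator

def model_parallel_initialized():
    return (_ETP is not None and _EP is not None)

def init_ascend_model_parallel(expert_tensor_parallel_size: int = 1):
    global _EP, _ETP
    if model_parallel_initialized():
        return  # already set up, e.g. when initialization is reached a second time
    _EP = object()   # placeholder for the real expert-parallel group
    _ETP = object()  # placeholder for the real expert-tensor-parallel group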

vllm_ascend/ops/fused_moe.py

Lines changed: 1 addition & 2 deletions
@@ -66,8 +66,7 @@ def fused_experts_with_mc2(
     local_rank = torch.distributed.get_rank(group=ep_group)
     all_to_all_group_size = torch.distributed.get_world_size(ep_group)

-    world_szie = torch.distributed.get_world_size()
-    tp_size = world_szie // all_to_all_group_size
+    tp_size = get_etp_group().world_size
     tp_rank = rank % tp_size

     stage1_kwargs = {
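
The removed lines derived `tp_size` by dividing the global world size by the expert-parallel group size, which only holds when the world consists of exactly those two dimensions; reading the size from the ETP group directly avoids that assumption. A rough illustration with made-up rank counts (not from the commit):

# Illustrative numbers only: 2 DP replicas x 4 EP ranks x 2 ETP ranks.
world_size = 16
all_to_all_group_size = 4    # expert-parallel group size
etp_group_world_size = 2     # what get_etp_group().world_size would report

old_tp_size = world_size // all_to_all_group_size  # 4, folds the DP replicas in
new_tp_size = etp_group_world_size                 # 2, matches the actual ETP group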

vllm_ascend/patch/__init__.py

Lines changed: 4 additions & 3 deletions
@@ -70,13 +70,14 @@
 # on multi-node dp inference implementation
 # 4. `ParallelConfig.stateless_init_dp_group`
 #    Why:
-#       vLLM use gloo backend by default to initialize stateless dp process gourp, but we want to use hccl here to
-#       get better performance
+#       vLLM use gloo backend by default to initialize stateless dp process group, but we want to use hccl here to
+#       get better performance. Initialize the global variable of dp_group to prefill dummy_run.
 #    How:
-#       adopt nccl backend to init process group
+#       adopt nccl backend to init process group and add the global variable of dp_group.
 #    Related PR (if no, explain why): no related PR, we want add this ability into vllm
 #    Future Plan:
 #       Remove those patch when vllm merged them
+#       Add the global variable of dp_group in platform when vllm merged them.
 #
 #
 # * Worker Patch:

vllm_ascend/patch/platform/patch_common/patch_distributed.py

Lines changed: 2 additions & 2 deletions
@@ -20,6 +20,7 @@
 import torch
 import vllm
 import vllm.distributed
+import vllm.envs as envs
 from torch.distributed import ProcessGroup
 from torch.distributed.distributed_c10d import (Backend, PrefixStore,
                                                 _get_default_timeout,
@@ -164,10 +165,9 @@ def parallel_config_get_dp_port(self) -> int:
     """
     answer = self.data_parallel_master_port
     self.data_parallel_master_port += 1
-    import os

     # NOTE: Get port from envs directly when using torchrun
-    port = int(os.environ.get("MASTER_PORT", answer)) # type: ignore
+    port = envs.VLLM_DP_MASTER_PORT if envs.VLLM_DP_MASTER_PORT else answer
     return port

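
The port lookup now goes through vLLM's environment wrapper instead of reading the generic torchrun variable from `os.environ`. A hedged sketch of the resulting behaviour, relying on `envs.VLLM_DP_MASTER_PORT` being falsy (0) when the variable is unset:

import vllm.envs as envs

def resolve_dp_port(answer: int) -> int:
    # Old behaviour (removed above): any MASTER_PORT set by torchrun overrode
    # the pre-allocated data-parallel port:
    #     port = int(os.environ.get("MASTER_PORT", answer))
    # New behaviour: only the DP-specific VLLM_DP_MASTER_PORT overrides it,
    # otherwise the pre-allocated `answer` port is used.
    return envs.VLLM_DP_MASTER_PORT if envs.VLLM_DP_MASTER_PORT else answer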

vllm_ascend/worker/worker_v1.py

Lines changed: 1 addition & 1 deletion
@@ -173,7 +173,7 @@ def execute_model(
         scheduler_output: "SchedulerOutput",
     ) -> Optional[ModelRunnerOutput]:
         output = self.model_runner.execute_model(scheduler_output)
-        return output if self.rank == 0 else None
+        return output if self.is_driver_worker else None

     def load_model(self) -> None:
         self.model_runner.load_model()
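
Returning the output only from global rank 0 drops results on any other driver process, for example the driver of a second data-parallel replica, which is presumably the bug being fixed here; `is_driver_worker` is the flag vLLM workers already carry for that role. A small sketch of the distinction with hypothetical worker states:

# Hypothetical worker states to show why the check changed.
class FakeWorker:
    def __init__(self, rank: int, is_driver_worker: bool):
        self.rank = rank
        self.is_driver_worker = is_driver_worker

    def execute_model(self, output):
        # Old check: `output if self.rank == 0 else None` returns None for a
        # driver whose global rank is not 0.
        return output if self.is_driver_worker else None

workers = [FakeWorker(0, True), FakeWorker(4, True), FakeWorker(5, False)]
print([w.execute_model("out") for w in workers])  # ['out', 'out', None]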
