
Commit d12a057

Add note for deepseek related docs and remove unnecessary comments (#590)

### What this PR does / why we need it?

Add notes for the DeepSeek-related patches and remove some unnecessary comments.

Signed-off-by: ganyi <pleaplusone.gy@gmail.com>

1 parent c5850d3

6 files changed: +78 additions, -199 deletions

vllm_ascend/models/deepseek_v2.py

Lines changed: 1 addition & 47 deletions
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
-
+# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
 # Copyright 2023 The vLLM team.
 # Copyright 2023 DeepSeek-AI and the HuggingFace Inc. team. All rights reserved.
 #
@@ -19,31 +19,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# <<<<<<< HEAD
 # # Adapted from
 # # vllm-project/vllm/blob/main/vllm/model_executor/models/deepseek_v2.py
 # # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py
 # # vllm-project/vllm/vllm/model_executor/models/deepseek_v2.py
 # """Inference-only DeepseekV2/DeepseekV3 model."""
-# from typing import Optional, Union
-
-# import torch
-# from torch import nn
-# from transformers import PretrainedConfig
-# from vllm.config import CacheConfig, ModelConfig, VllmConfig
-# from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size
-# from vllm.model_executor.layers.fused_moe import FusedMoE
-# from vllm.model_executor.layers.layernorm import RMSNorm
-# from vllm.model_executor.layers.linear import ReplicatedLinear
-# from vllm.model_executor.layers.logits_processor import LogitsProcessor
-# from vllm.model_executor.layers.quantization import QuantizationConfig
-# from vllm.model_executor.layers.sampler import get_sampler
-# from vllm.model_executor.layers.vocab_parallel_embedding import (
-#     ParallelLMHead, VocabParallelEmbedding)
-# from vllm.model_executor.models.deepseek_v2 import (  # noqa
-#     DeepseekV2Attention, DeepseekV2DecoderLayer, DeepseekV2ForCausalLM,
-#     DeepseekV2MLAAttention, DeepseekV2MLP, DeepseekV2MoE)
-# =======

 import os
 from typing import Any, Dict, Optional, Union
@@ -173,9 +153,6 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:

         if (self.tp_size > 1 and self.enable_mc2
                 and attn_metadata.num_prefills == 0):
-            # hidden_states = dist._functional_collectives.reduce_scatter_tensor(
-            #     hidden_states, "sum", scatter_dim=0, group=self.tp_group
-            # )
             chunks = torch.chunk(hidden_states,
                                  get_tp_group().world_size,
                                  dim=0)
@@ -365,29 +342,6 @@ def forward_eager(self, positions: torch.Tensor,
                             k_pe,
                             output_shape=hidden_states.shape)

-    # def forward(
-    #     self,
-    #     positions: torch.Tensor,
-    #     hidden_states: torch.Tensor,
-    #     # torchair should pass below two parameters
-    #     kv_cache: torch.Tensor = None,
-    #     attn_metadata: AttentionMetadata = None,
-    # ) -> torch.Tensor:
-    #     if self.q_lora_rank is not None:
-    #         ckq = self.q_a_proj(hidden_states)[0]
-    #         hidden_states_or_q_c = self.q_a_layernorm(ckq)
-    #     else:
-    #         hidden_states_or_q_c = hidden_states
-    #     if VLLM_ENABLE_GRAPH_MODE == '1':
-    #         return self.mla_attn(hidden_states_or_q_c, hidden_states, None,
-    #                              kv_cache, attn_metadata)
-    #     else:
-    #         kv_c, k_pe = self.kv_a_proj_with_mqa(hidden_states)[0].split(
-    #             [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
-    #         kv_c_normed = self.kv_a_layernorm(kv_c.contiguous())
-    #         return self.mla_attn(hidden_states_or_q_c, kv_c_normed, k_pe, output_shape=hidden_states.shape)
-    #                              kv_cache, attn_metadata)
-

 class CustomDeepseekV2DecoderLayer(DeepseekV2DecoderLayer):
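For context on the MC2 decode branch kept above (`torch.chunk(hidden_states, get_tp_group().world_size, dim=0)`), the following is a minimal, self-contained sketch of the same idea: split the token dimension evenly across tensor-parallel ranks and keep only the local shard. `world_size` and `rank` are placeholders for values that vllm's `get_tp_group()` would normally provide; this is an illustration, not the project's implementation.

import torch

def shard_tokens_for_tp(hidden_states: torch.Tensor, world_size: int, rank: int) -> torch.Tensor:
    """Split hidden_states along the token dim and return this rank's chunk.

    Mirrors the `torch.chunk(..., dim=0)` pattern in the MC2 branch above;
    the real code then feeds the local chunk into the expert computation.
    """
    chunks = torch.chunk(hidden_states, world_size, dim=0)
    return chunks[rank]

# Toy usage: 8 tokens with hidden size 4, split across 4 "ranks".
hidden_states = torch.randn(8, 4)
local = shard_tokens_for_tp(hidden_states, world_size=4, rank=1)
assert local.shape == (2, 4)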

vllm_ascend/ops/fused_moe.py

Lines changed: 0 additions & 20 deletions
@@ -364,23 +364,6 @@ def select_experts(
     Raises:
         ValueError: If an unsupported scoring function is provided.
     """
-    # assert hidden_states.shape[0] == router_logits.shape[0], (
-    #     "Number of tokens mismatch")
-    # if os.environ.get("VLLM_ENABLE_GRAPH_MODE") == "1" and not is_prefill:
-    #     topk_weight, topk_idx, _ = torch.ops.npu_inference.npu_moe_gating_top_k(
-    #         router_logits,
-    #         k=top_k,  # topk is currently 8
-    #         bias=e_score_correction_bias,
-    #         k_group=topk_group,  # fix: 4
-    #         group_count=num_expert_group,  # fix 8
-    #         group_select_mode=1,  # 0: max within the group; 1: topk2.sum (fix)
-    #         renorm=0,  # 0: softmax->topk (fix); 1: topk->softmax
-    #         norm_type=1,  # 0: softmax; 1: sigmoid (fix)
-    #         # out_flag=False,  # todo new api; whether to output the third output
-    #         # y2_flag=False,  # old api; whether to output the third output
-    #         routed_scaling_factor=1,
-    #         eps=float(1e-20))
-    #     return topk_weight, topk_idx

     if custom_routing_function is not None:
         raise NotImplementedError(
@@ -483,8 +466,6 @@ def apply(
         is_prefill=False,
         **kwargs,
     ):
-        # assert router_logits.shape[
-        #     1] == global_num_experts, "Number of global experts mismatch"
         # set prefill as false always, should fix this
         topk_weights, topk_ids = select_experts(
             hidden_states=x,
@@ -670,7 +651,6 @@ def forward(self,
                 scatter_dim=0,
                 group=get_dp_group().device_group)

-        # if self.reduce_results and self.tp_size > 1:
        if self.reduce_results and (self.tp_size > 1 or self.ep_size > 1):
            final_hidden_states = tensor_model_parallel_all_reduce(
                final_hidden_states)
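The block removed from `select_experts` above referenced `torch.ops.npu_inference.npu_moe_gating_top_k` with sigmoid scoring (`norm_type=1`) and per-group top-2-sum group selection (`group_select_mode=1`). As a rough illustration of what that style of routing computes, here is a plain-PyTorch sketch of grouped top-k gating. It is not the NPU operator; the function name, the assumption that experts divide evenly into groups, and the lack of weight renormalization are all simplifications for illustration only.

import torch

def naive_grouped_topk(router_logits: torch.Tensor,
                       top_k: int,
                       num_expert_group: int,
                       topk_group: int,
                       e_score_correction_bias: torch.Tensor = None):
    # Sigmoid scoring, plus an optional per-expert correction bias.
    scores = torch.sigmoid(router_logits.float())
    if e_score_correction_bias is not None:
        scores = scores + e_score_correction_bias
    num_tokens, num_experts = scores.shape
    experts_per_group = num_experts // num_expert_group  # assumes even division, >= 2
    # Score each group by the sum of its top-2 expert scores.
    group_scores = scores.view(num_tokens, num_expert_group,
                               experts_per_group).topk(2, dim=-1)[0].sum(dim=-1)
    # Keep only the best `topk_group` groups per token.
    group_idx = group_scores.topk(topk_group, dim=-1)[1]
    group_mask = torch.zeros_like(group_scores).scatter_(1, group_idx, 1.0)
    score_mask = group_mask.unsqueeze(-1).expand(
        num_tokens, num_expert_group, experts_per_group).reshape(num_tokens, num_experts)
    masked = scores.masked_fill(score_mask == 0, float("-inf"))
    # Final per-token top-k over the surviving experts; real code usually renormalizes the weights.
    topk_weights, topk_ids = masked.topk(top_k, dim=-1)
    return topk_weights, topk_ids

logits = torch.randn(3, 16)  # 3 tokens, 16 experts in 4 groups of 4
weights, ids = naive_grouped_topk(logits, top_k=4, num_expert_group=4, topk_group=2)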

vllm_ascend/ops/rotary_embedding.py

Lines changed: 0 additions & 1 deletion
@@ -229,7 +229,6 @@ def _set_cos_sin_cache(self, seq_len, device, dtype):

 # TODO: Patch when aclnn ops avaiable
 RotaryEmbedding.forward_oot = rope_forward_oot
-# DeepseekScalingRotaryEmbedding.forward = rope_deepseek_forward_oot
 DeepseekScalingRotaryEmbedding.forward = native_rope_deepseek_forward
 DeepseekScalingRotaryEmbedding._set_cos_sin_cache = _set_cos_sin_cache
 DeepseekScalingRotaryEmbedding.max_seq_len_cached = None
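The assignments above patch `RotaryEmbedding` and `DeepseekScalingRotaryEmbedding` at class level. For readers unfamiliar with this pattern, here is a minimal sketch of the same technique on a toy class; `TinyRope` and `patched_forward` are hypothetical names and are not part of vllm or vllm-ascend.

import torch
from torch import nn

class TinyRope(nn.Module):
    """Toy stand-in for a rotary-embedding module, used only to show the patching pattern."""

    def forward(self, positions, query, key):
        return query, key

def patched_forward(self, positions, query, key):
    # A platform-specific override; real code would dispatch to an NPU kernel here.
    return query.contiguous(), key.contiguous()

# Patching at class level makes every existing and future instance use the override,
# which is what the assignments above do to DeepseekScalingRotaryEmbedding.
TinyRope.forward = patched_forward

rope = TinyRope()
q, k = rope(torch.arange(4), torch.randn(4, 8), torch.randn(4, 8))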

vllm_ascend/patch/platform/patch_0_8_4/patch_distributed.py

Lines changed: 53 additions & 0 deletions
@@ -1,3 +1,22 @@
+#
+# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
+# Copyright 2023 The vLLM team.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# Adapted from vllm/model_executor/models/qwen2_vl.py
+# This file is a part of the vllm-ascend project.
+
 import torch
 import vllm
 import vllm.distributed
@@ -8,6 +27,40 @@
 from torch.distributed.rendezvous import rendezvous
 from vllm.config import ParallelConfig

+# What's patched and how it works:
+# ** File: platform/patch_0_8_4/patch_distributed.py **
+#   1. `vllm.distributed.parallel_state.destroy_model_parallel()`
+#      Why:
+#         vllm does not let an out-of-tree platform maintain its own `CoordinatorGroup`. vllm-ascend maintains the EP
+#         and ETP groups inside this repo and needs a common interface to destroy them, so this patch adds a hook for
+#         destroying platform-owned `CoordinatorGroup`s and makes sure they are all destroyed properly.
+#      How:
+#         Call the platform method `destroy_platform_model_parallel` to destroy all of the `CoordinatorGroup`s.
+#      Related PR (if no, explain why): no related PR; we want to add this ability to vllm.
+#      Future Plan:
+#         Remove this patch once vllm merges it.
+#   2. `vllm.distributed.stateless_init_torch_distributed_process_group()`
+#      Why:
+#         The stateless process group can only be initialized from the gloo and nccl backends. vllm-ascend needs to
+#         initialize its own stateless process group for communication, so we add a platform-specific call to
+#         `stateless_init_torch_distributed_process_group` to support other platforms that provide their own
+#         stateless process-group initialization method.
+#      How:
+#         Call the platform method `platform_has_backend_register` to check whether a stateless process-group
+#         initialization method exists, and call the platform method `platform_register_backend` to initialize it.
+#      Related PR (if no, explain why): no related PR; we want to add this ability to vllm.
+#      Future Plan:
+#         Remove this patch once vllm merges it.
+#   3. `ParallelConfig.get_next_dp_init_port`
+#      Why:
+#         We want to get the dp port from an environment variable so that multi-node inference can be properly
+#         initialized and run.
+#      How:
+#         Get the dp port from an environment variable to enable multi-node dp inference.
+#      Related PR (if no, explain why): no related PR; we want to add this ability to vllm.
+#      Future Plan:
+#         This is a workaround in vllm-ascend to enable multi-node dp inference; it may be removed once vllm has a
+#         better plan for multi-node dp inference.
+

 def ascend_destroy_model_parallel():
     """Set the groups to none and destroy them."""
Lines changed: 24 additions & 130 deletions
@@ -1,138 +1,32 @@
-import torch
+#
+# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
+# Copyright 2023 The vLLM team.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# Adapted from vllm/model_executor/models/qwen2_vl.py
+# This file is a part of the vllm-ascend project.
+
 import vllm
 import vllm.distributed
-from torch.distributed import ProcessGroup
-from torch.distributed.distributed_c10d import (Backend, PrefixStore,
-                                                _get_default_timeout,
-                                                is_nccl_available)
-from torch.distributed.rendezvous import rendezvous
 from vllm.config import ParallelConfig

+from vllm_ascend.patch.platform.patch_0_8_4.patch_distributed import (
+    ascend_destroy_model_parallel,
+    ascend_stateless_init_torch_distributed_process_group,
+    parallel_config_get_dp_port)

-def ascend_destroy_model_parallel():
-    """Set the groups to none and destroy them."""
-    from vllm.distributed.parallel_state import _DP, _PP, _TP
-    if _TP:
-        _TP.destroy()
-    _TP = None
-
-    if _PP:
-        _PP.destroy()
-    _PP = None
-
-    if _DP:
-        _DP.destroy()
-    _DP = None
-    from vllm.platforms import current_platform
-    current_platform.destroy_platform_model_parallel()
-
-
-def ascend_stateless_init_torch_distributed_process_group(
-        host: str, port: int, rank: int, world_size: int,
-        backend: str) -> ProcessGroup:
-    """
-    A replacement for `torch.distributed.init_process_group` that does not
-    pollute the global state. The created ProcessGroup object can be used for
-    some operations such as `allreduce`, because it does not depend on the
-    global rank. However, some operations such as `broadcast` cannot be used
-    because it depends on the global rank.
-
-    # TODO: ask for help from PyTorch team if we need the `broadcast` operation.
-
-    This function is useful when we are not sure about the total number of
-    processes in the process group. For example, we may have process
-    1, 2, ..., 8 who want to communicate, and process 9 might be the same
-    process as process 1, or it might be a different process; process 10
-    might be the same process as process 5, or it might be a different process.
-    In this case, how can we reliably form a communication channel within
-    process 9 and 10, without affecting the communication channel within
-    process 1, 2, ..., 8?
-
-    One possible solution is to figure out if process 9 and 10 are the same
-    as process 1 and 5 beforehand, and then form a communication channel
-    based on the information, adjusting the ranks and world_size etc. However,
-    figuring out the information is not always easy, and it will interfere
-    with the main communication channel.
-
-    Our solution is to always form a communication channel with process 1, 2,
-    ..., 8, and then use this function to form another communication channel
-    with process 9 and 10. This way, regardless of whether process 9 and 10
-    are the same as process 1 and 5, the main communication channel is
-    always formed with process 1, 2, ..., 8, and the additional communication
-    channel is formed with process 9 and 10.
-    """
-    init_method = f"tcp://{host}:{port}"
-    backend = Backend(backend)  # it is basically string
-    timeout = _get_default_timeout(backend)
-
-    store, rank, world_size = next(
-        rendezvous(init_method, rank, world_size, timeout=timeout))
-    store.set_timeout(timeout)
-
-    group_rank = rank
-    group_size = world_size
-
-    # Use a PrefixStore to avoid accidental overrides of keys used by
-    # different systems (e.g. RPC) in case the store is multi-tenant.
-    prefix_store = PrefixStore(init_method, store)
-
-    pg: ProcessGroup = ProcessGroup(
-        prefix_store,
-        group_rank,
-        group_size,
-    )
-    from vllm.platforms import current_platform
-    if backend == "gloo":
-        from torch.distributed.distributed_c10d import ProcessGroupGloo
-        backend_class = ProcessGroupGloo(prefix_store,
-                                         group_rank,
-                                         group_size,
-                                         timeout=timeout)
-        backend_type = ProcessGroup.BackendType.GLOO
-        device = torch.device("cpu")
-    elif backend == "nccl":
-        assert is_nccl_available()
-        from torch.distributed.distributed_c10d import ProcessGroupNCCL
-
-        backend_options = ProcessGroupNCCL.Options()
-        backend_options._timeout = timeout
-
-        backend_class = ProcessGroupNCCL(prefix_store, group_rank, group_size,
-                                         backend_options)
-        backend_type = ProcessGroup.BackendType.NCCL
-        device = torch.device("cuda")
-    elif current_platform.platform_has_backend_register():
-        current_platform.platform_register_backend()
-        return pg
-    else:
-        raise RuntimeError(f"Unsupported torch distributed backend: {backend}")
-
-    pg._set_default_backend(backend_type)
-    backend_class._set_sequence_number_for_group()
-
-    pg._register_backend(device, backend_type, backend_class)
-
-    return pg
-
-
-def parallel_config_get_dp_port(self) -> int:
-    """
-    We might need to initialize process groups in multiple
-    processes that is related to data parallelism,
-    e.g. both in the worker and in the engine, which
-    can live in different processes. To avoid port conflicts, we
-    increment the port number each time we need to initialize a
-    new process group related to data parallelism.
-    """
-    answer = self.data_parallel_master_port
-    self.data_parallel_master_port += 1
-    import os
-
-    # NOTE: Get port from envs directly when using torchrun
-    port = int(os.environ.get("MASTER_PORT", answer))  # type: ignore
-    return port
-
-
+# For full details of these patches, see vllm_ascend/patch/platform/patch_0_8_4/patch_distributed.py.
 vllm.distributed.parallel_state.destroy_model_parallel = ascend_destroy_model_parallel
 vllm.distributed.stateless_init_torch_distributed_process_group = ascend_stateless_init_torch_distributed_process_group
 ParallelConfig.get_next_dp_init_port = parallel_config_get_dp_port
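The functions moved out of this file call platform hooks such as `current_platform.destroy_platform_model_parallel()`, `platform_has_backend_register()`, and `platform_register_backend()`. The sketch below only shows the shape of a platform object that would satisfy that contract; the class name and its bodies are hypothetical stand-ins, not the actual Ascend platform implementation.

class SketchPlatform:
    """Hypothetical platform object illustrating the hooks the patches call."""

    def destroy_platform_model_parallel(self) -> None:
        # A real platform would tear down its own coordinator groups (e.g. EP/ETP) here.
        pass

    def platform_has_backend_register(self) -> bool:
        # Return True if the platform can register its own stateless process-group backend.
        return False

    def platform_register_backend(self) -> None:
        # A real platform would register its communication backend (e.g. HCCL) here.
        raise NotImplementedError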

vllm_ascend/worker/model_runner_v1.py

Lines changed: 0 additions & 1 deletion
@@ -835,7 +835,6 @@ def initialize_kv_cache(self, kv_cache_config: KVCacheConfig) -> None:
         assert num_blocks >= kv_cache_config.num_blocks
         # TODO: remove this after the OOM issue is located and fixed, otherwise, some model may
         # encounter OOM issue
-        num_blocks = num_blocks // 4
         if isinstance(kv_cache_spec, FullAttentionSpec):
             kv_cache_shape = self.attn_backend.get_kv_cache_shape(
                 num_blocks, kv_cache_spec.block_size,
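The removed line divided `num_blocks` by 4 as an OOM workaround, which also quarters the KV-cache capacity the runner can allocate. A small back-of-the-envelope sketch of that effect, using made-up block counts and block sizes that are not taken from the real model runner:

def kv_capacity_tokens(num_blocks: int, block_size: int) -> int:
    # Each KV-cache block holds `block_size` token positions.
    return num_blocks * block_size

num_blocks, block_size = 8192, 128  # hypothetical values
full = kv_capacity_tokens(num_blocks, block_size)             # 1,048,576 token slots
workaround = kv_capacity_tokens(num_blocks // 4, block_size)  # 262,144 token slots
assert workaround * 4 == full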
