Commit 6061f33

[Bugfix][Model] Fix api in DeepSeek model (#545)
### What this PR does / why we need it?
Fix the API in DeepSeekV2, aligning with the latest code on the main branch of vLLM.

### Does this PR introduce _any_ user-facing change?
N/A

### How was this patch tested?
Tested locally with deepseek-v2-lite; CI will be added by @Potabk. Please update the model UT after this PR is merged, thanks! cc @Potabk

Signed-off-by: MengqingCao <cmq0113@163.com>
1 parent 9859e73 commit 6061f33
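
As noted in the commit message, the change was smoke-tested locally with deepseek-v2-lite. A minimal sketch of such a local check using vLLM's offline API is shown below; the model id, `trust_remote_code` flag, and sampling settings are illustrative assumptions and are not part of this PR or its CI.

```python
# Minimal local smoke test (a sketch, not the project's CI):
# load DeepSeek-V2-Lite through vLLM's offline API and confirm that a
# short greedy generation completes without errors.
from vllm import LLM, SamplingParams

llm = LLM(model="deepseek-ai/DeepSeek-V2-Lite",  # assumed HF model id
          trust_remote_code=True,
          max_model_len=4096)
outputs = llm.generate(["The capital of France is"],
                       SamplingParams(temperature=0.0, max_tokens=16))
print(outputs[0].outputs[0].text)
```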

File tree

1 file changed (+12, -18 lines)

vllm_ascend/models/deepseek_v2.py

Lines changed: 12 additions & 18 deletions
@@ -24,12 +24,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Inference-only DeepseekV2/DeepseekV3 model."""
-from typing import List, Optional, Union
+from typing import Optional, Union

 import torch
 from torch import nn
 from transformers import PretrainedConfig
-from vllm.attention import AttentionMetadata
 from vllm.config import CacheConfig, ModelConfig, VllmConfig
 from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size
 from vllm.model_executor.layers.fused_moe import FusedMoE
@@ -61,11 +60,6 @@ def __init__(
         self.tp_size = get_tensor_model_parallel_world_size()
         self.routed_scaling_factor = config.routed_scaling_factor
         self.n_shared_experts = config.n_shared_experts
-        self.routed_scaling_factor = config.routed_scaling_factor
-        if self.tp_size > config.n_routed_experts:
-            raise ValueError(
-                f"Tensor parallel size {self.tp_size} is greater than "
-                f"the number of experts {config.n_routed_experts}.")

         if config.hidden_act != "silu":
             raise ValueError(f"Unsupported activation: {config.hidden_act}. "
@@ -129,6 +123,7 @@ def __init__(
         # DecoderLayers are created with `make_layers` which passes the prefix
         # with the layer's index.
         layer_idx = int(prefix.split(sep='.')[-1])
+        self.layer_idx = layer_idx
         if model_config.use_mla:
             attn_cls = DeepseekV2MLAAttention
         else:
@@ -171,6 +166,7 @@ def __init__(
                                        eps=config.rms_norm_eps)
         self.post_attention_layernorm = RMSNorm(config.hidden_size,
                                                 eps=config.rms_norm_eps)
+        self.routed_scaling_factor = config.routed_scaling_factor


 class CustomDeepseekV2Model(nn.Module):
@@ -184,8 +180,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
         model_config = vllm_config.model_config
         cache_config = vllm_config.cache_config
         quant_config = vllm_config.quant_config
+        self.config = config

-        self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size

         if get_pp_group().is_first_rank:
@@ -223,8 +219,6 @@ def forward(
         self,
         input_ids: torch.Tensor,
         positions: torch.Tensor,
-        kv_caches: List[torch.Tensor],
-        attn_metadata: AttentionMetadata,
         intermediate_tensors: Optional[IntermediateTensors],
         inputs_embeds: Optional[torch.Tensor] = None,
     ) -> Union[torch.Tensor, IntermediateTensors]:
@@ -239,11 +233,8 @@
             hidden_states = intermediate_tensors["hidden_states"]
             residual = intermediate_tensors["residual"]

-        for i in range(self.start_layer, self.end_layer):
-            layer = self.layers[i]
-            hidden_states, residual = layer(positions, hidden_states,
-                                            kv_caches[i - self.start_layer],
-                                            attn_metadata, residual)
+        for layer in self.layers[self.start_layer:self.end_layer]:
+            hidden_states, residual = layer(positions, hidden_states, residual)

         if not get_pp_group().is_last_rank:
             return IntermediateTensors({
@@ -272,9 +263,12 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
         self.model = CustomDeepseekV2Model(vllm_config=vllm_config,
                                            prefix=maybe_prefix(
                                                prefix, "model"))
-        self.lm_head = ParallelLMHead(config.vocab_size,
-                                      config.hidden_size,
-                                      quant_config=quant_config)
+        if get_pp_group().is_last_rank:
+            self.lm_head = ParallelLMHead(config.vocab_size,
+                                          config.hidden_size,
+                                          quant_config=quant_config)
+        else:
+            self.lm_head = PPMissingLayer()
         self.logits_processor = LogitsProcessor(config.vocab_size)
         self.sampler = get_sampler()
         self.make_empty_intermediate_tensors = (
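
The key API change above is that `forward` no longer receives `kv_caches` or `attn_metadata`: in recent vLLM the attention layers pick that state up from the forward context, so callers only pass tokens, positions, and pipeline tensors. The stub below is a hedged sketch of that calling convention; it mirrors the shape of the upstream vLLM DeepSeek-V2 causal-LM wrapper but is an illustration, not the contents of this file.

```python
# Sketch only: the post-PR calling convention for the inner model.
# kv_caches/attn_metadata are no longer threaded through forward();
# vLLM's attention backend reads them from the forward context.
from typing import Optional, Union

import torch
from vllm.sequence import IntermediateTensors


class DeepseekV2ForCausalLMStub:  # illustrative stub, not the real class
    model: torch.nn.Module  # would be CustomDeepseekV2Model in the real code

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: Optional[IntermediateTensors] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
    ) -> Union[torch.Tensor, IntermediateTensors]:
        # Tokens, positions and (for pipeline parallelism) intermediate
        # tensors are all the model needs now.
        return self.model(input_ids, positions, intermediate_tensors,
                          inputs_embeds)
```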
