
Commit cb2ee7d

[Bugfix] Fix deepseek V0 issue and add acc ci for it
Signed-off-by: MengqingCao <cmq0113@163.com>
1 parent 5cf9ff1 commit cb2ee7d

4 files changed: +75 −1 lines changed

.github/workflows/vllm_ascend_test.yaml

Lines changed: 2 additions & 0 deletions
@@ -112,6 +112,7 @@ jobs:
         run: |
           if [[ "${{ matrix.os }}" == "linux-arm64-npu-1" ]]; then
             pytest -sv tests/singlecard/test_offline_inference.py
+            pytest -sv tests/singlecard --ignore=tests/singlecard/test_offline_inference.py
             pytest -sv tests/ops
             pytest -sv tests/compile
           else
@@ -126,6 +127,7 @@ jobs:
         run: |
           if [[ "${{ matrix.os }}" == "linux-arm64-npu-1" ]]; then
             pytest -sv tests/singlecard/test_offline_inference.py
+            pytest -sv tests/singlecard --ignore=tests/singlecard/test_offline_inference.py
             pytest -sv tests/ops
           else
             pytest -sv -k "QwQ" tests/multicard/test_offline_inference_distributed.py
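
Note: the added line runs the rest of the tests/singlecard suite in the single-card job while skipping test_offline_inference.py, which the workflow already invokes on its own line just above. As a rough local equivalent (a sketch only, not part of the commit, assuming pytest is installed and the command is issued from the repository root), the same selection can be driven from Python:

import pytest  # assumes pytest is available in the environment

# Equivalent of the added CI line: run everything under tests/singlecard
# except test_offline_inference.py, which the workflow runs separately.
exit_code = pytest.main([
    "-sv",
    "tests/singlecard",
    "--ignore=tests/singlecard/test_offline_inference.py",
])
raise SystemExit(exit_code)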

tests/multicard/test_accuracy.py

Lines changed: 71 additions & 0 deletions
@@ -0,0 +1,71 @@
+#
+# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
+# Copyright 2023 The vLLM team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# This file is a part of the vllm-ascend project.
+# Adapted from vllm-project/blob/main/tests/entrypoints/llm/test_accuracy.py
+#
+
+import gc
+import multiprocessing
+from multiprocessing import Queue
+
+import lm_eval
+import pytest
+import torch
+
+# pre-trained model path on Hugging Face.
+MODELS = ["deepseek-ai/DeepSeek-V2-Lite"]
+# Math reasoning benchmark (Grade School Math 8K).
+TASK = "gsm8k"
+# Answer validation requiring format consistency.
+FILTER = "exact_match,strict-match"
+# 3% relative tolerance for numerical accuracy.
+RTOL = 0.03
+# Baseline accuracy after VLLM optimization.
+EXPECTED_VALUE = 0.316
+
+
+def run_test(model_name, queue, more_args=None):
+    model_args = f"pretrained={model_name},max_model_len=4096,trust_remote_code=True,tensor_parallel_size=4"
+    if more_args is not None:
+        model_args = f"{model_args},{more_args}"
+    results = lm_eval.simple_evaluate(
+        model="vllm",
+        model_args=model_args,
+        tasks=TASK,
+        batch_size="auto",
+    )
+    result = results["results"][TASK][FILTER]
+    print("result:", result)
+    queue.put(result)
+    del results
+    torch.npu.empty_cache()
+    gc.collect()
+
+
+@pytest.mark.parametrize("model", MODELS)
+def test_lm_eval_accuracy(model, monkeypatch: pytest.MonkeyPatch):
+    with monkeypatch.context():
+        result_queue: Queue[float] = multiprocessing.Queue()
+        p = multiprocessing.Process(target=run_test,
+                                    args=(
+                                        model,
+                                        result_queue,
+                                    ))
+        p.start()
+        p.join()
+        result = result_queue.get()
+        assert (EXPECTED_VALUE - RTOL < result < EXPECTED_VALUE + RTOL), \
+            f"Expected: {EXPECTED_VALUE}±{RTOL} | Measured: {result}"

tests/singlecard/test_accuracy.py

Lines changed: 1 addition & 1 deletion
@@ -63,4 +63,4 @@ def test_lm_eval_accuracy(monkeypatch: pytest.MonkeyPatch):
         p.join()
         result = result_queue.get()
         assert (EXPECTED_VALUE - RTOL < result < EXPECTED_VALUE + RTOL), \
-            f"Expected: {EXPECTED_VALUE}±{RTOL} | Measured: {result}"
+            f"Expected: {EXPECTED_VALUE}±{RTOL} | Measured: {result}"

vllm_ascend/ops/fused_moe.py

Lines changed: 1 addition & 0 deletions
@@ -198,6 +198,7 @@ def fused_experts(
     num_experts = w1.shape[0]
     dtype = hidden_states.dtype
     device = hidden_states.device
+    topk_weights = topk_weights.to(dtype)
     # assert dtype in [torch.float32, torch.float16, torch.bfloat16
     #                  ], "Only float32, float16, and bfloat16 are supported"
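
Note: the one-line fix aligns topk_weights with the activation dtype before the expert outputs are weighted and combined. With DeepSeek-style routing the top-k weights typically come out of a float32 softmax while hidden_states are float16/bfloat16; mixing the two either upcasts silently or fails in kernels that require matching dtypes, which is presumably what broke the DeepSeek V0 path here. A minimal CPU-only sketch of the idea (plain PyTorch, not the Ascend kernel path; shapes and k are arbitrary):

import torch

hidden_states = torch.randn(4, 8, dtype=torch.bfloat16)  # token activations
router_logits = torch.randn(4, 2)                         # float32 by default
# Routing weights come out of a float32 softmax/top-k.
topk_weights, topk_ids = torch.topk(torch.softmax(router_logits, dim=-1), k=1)

expert_out = hidden_states                                 # stand-in for one expert's output
# The commit's one-line fix: align the weights with the activation dtype.
# Without the cast, plain PyTorch would silently promote the product to float32.
topk_weights = topk_weights.to(hidden_states.dtype)
combined = expert_out * topk_weights                       # dtypes now match
print(combined.dtype)                                      # torch.bfloat16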
