
Commit ada41e7

MengqingCao authored and wangxiaoxin (A) committed
[CI/UT][Graph] Add ut for torchair graph mode (#1103)
### What this PR does / why we need it?
Add ut for torchair graph mode on DeepSeekV3

### How was this patch tested?
CI passed with newly added test.

---------

Signed-off-by: MengqingCao <cmq0113@163.com>
Signed-off-by: Mengqing Cao <cmq0113@163.com>
1 parent cecac6d · commit ada41e7

File tree

- docs/source/user_guide/additional_config.md
- docs/source/user_guide/graph_mode.md
- tests/conftest.py
- tests/multicard/test_torchair_graph_mode.py

4 files changed: +100 −12 lines

docs/source/user_guide/additional_config.md

Lines changed: 9 additions & 9 deletions

````diff
@@ -54,7 +54,7 @@ The details of each config option are as follows:
 | ---- | ---- | ------- | ----------- |
 | `enabled` | bool | `False` | Whether to enable ascend scheduler for V1 engine|
 
-ascend_scheduler_config also support the options from [vllm scheduler config](https://docs.vllm.ai/en/stable/api/vllm/config.html#vllm.config.SchedulerConfig). For example, you can add `chunked_prefill_enabled: true` to ascend_scheduler_config as well.
+ascend_scheduler_config also support the options from [vllm scheduler config](https://docs.vllm.ai/en/stable/api/vllm/config.html#vllm.config.SchedulerConfig). For example, you can add `enable_chunked_prefill: True` to ascend_scheduler_config as well.
 
 ### Example
 
@@ -63,18 +63,18 @@ A full example of additional configuration is as follows:
 ```
 {
     "torchair_graph_config": {
-        "enabled": true,
-        "use_cached_graph": true,
+        "enabled": True,
+        "use_cached_graph": True,
         "graph_batch_sizes": [1, 2, 4, 8],
-        "graph_batch_sizes_init": false,
-        "enable_multistream_moe": false,
-        "enable_kv_nz": false
+        "graph_batch_sizes_init": False,
+        "enable_multistream_moe": False,
+        "enable_kv_nz": False
     },
     "ascend_scheduler_config": {
-        "enabled": true,
-        "chunked_prefill_enabled": true,
+        "enabled": True,
+        "enable_chunked_prefill": True,
     },
     "expert_tensor_parallel_size": 1,
-    "refresh": false,
+    "refresh": False,
 }
 ```
````
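For orientation, here is a minimal offline sketch of how the options documented above are passed to the engine; it follows the `LLM(..., additional_config=...)` pattern from `graph_mode.md` below, and both the model name and the option values are copied from the documentation examples rather than introduced by this commit.

```python
from vllm import LLM

# Sketch only: assumes a vllm-ascend install on an NPU host. The option
# names come from the table in additional_config.md; the values mirror the
# example above (TorchAir graph mode plus the ascend scheduler).
model = LLM(
    model="deepseek-ai/DeepSeek-R1-0528",
    additional_config={
        "torchair_graph_config": {
            "enabled": True,
            "use_cached_graph": True,
            "graph_batch_sizes": [1, 2, 4, 8],
        },
        "ascend_scheduler_config": {
            "enabled": True,
        },
    },
)
outputs = model.generate("Hello, how are you?")
```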

docs/source/user_guide/graph_mode.md

Lines changed: 3 additions & 2 deletions

````diff
@@ -47,14 +47,15 @@ from vllm import LLM
 
 os.environ["VLLM_USE_V1"] = 1
 
-model = LLM(model="deepseek-ai/DeepSeek-R1-0528", additional_config={"torchair_graph_config": {"enabled": True}})
+# TorchAirGraph is only work without chunked-prefill now
+model = LLM(model="deepseek-ai/DeepSeek-R1-0528", additional_config={"torchair_graph_config": {"enabled": True},"ascend_scheduler_config": {"enabled": True,}})
 outputs = model.generate("Hello, how are you?")
 ```
 
 online example:
 
 ```shell
-vllm serve Qwen/Qwen2-7B-Instruct --additional-config='{"torchair_graph_config": {"enabled": true}}'
+vllm serve Qwen/Qwen2-7B-Instruct --additional-config='{"torchair_graph_config": {"enabled": True},"ascend_scheduler_config": {"enabled": True,}}'
 ```
 
 You can find more detail about additional config [here](./additional_config.md)
````
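Once the server from the online example is up, it exposes vLLM's standard OpenAI-compatible API; below is a small sketch of querying it with `requests`, assuming the default `localhost:8000` bind address and the `/v1/completions` route (both vLLM defaults, not anything introduced by this commit).

```python
import requests

# Sketch: query the server started by the `vllm serve` example above.
# Assumes vLLM's default bind address (http://localhost:8000) and its
# OpenAI-compatible /v1/completions endpoint.
resp = requests.post(
    "http://localhost:8000/v1/completions",
    json={
        "model": "Qwen/Qwen2-7B-Instruct",
        "prompt": "Hello, how are you?",
        "max_tokens": 32,
    },
)
print(resp.json()["choices"][0]["text"])
```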

tests/conftest.py

Lines changed: 8 additions & 1 deletion

```diff
@@ -17,6 +17,7 @@
 # Adapted from vllm-project/vllm/blob/main/tests/conftest.py
 #
 
+import contextlib
 import gc
 from typing import List, Optional, Tuple, TypeVar, Union
 
@@ -53,11 +54,17 @@
 PromptVideoInput = _PromptMultiModalInput[np.ndarray]
 
 
-def cleanup_dist_env_and_memory():
+def cleanup_dist_env_and_memory(shutdown_ray: bool = False):
     destroy_model_parallel()
     destroy_distributed_environment()
+    with contextlib.suppress(AssertionError):
+        torch.distributed.destroy_process_group()
+    if shutdown_ray:
+        import ray  # Lazy import Ray
+        ray.shutdown()
     gc.collect()
     torch.npu.empty_cache()
+    torch.npu.reset_peak_memory_stats()
 
 
 class VllmRunner:
```
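As an illustration of the extended helper, here is a hypothetical fixture that calls it after each test; the fixture name and its `autouse` choice are not part of this commit, only `cleanup_dist_env_and_memory(shutdown_ray=...)` comes from the diff above.

```python
import pytest

from tests.conftest import cleanup_dist_env_and_memory


@pytest.fixture(autouse=True)
def cleanup_after_test():
    # Hypothetical fixture: run the test, then tear down model-parallel and
    # distributed state, the default process group, Ray (if requested),
    # and NPU memory caches via the helper above.
    yield
    cleanup_dist_env_and_memory(shutdown_ray=True)
```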
tests/multicard/test_torchair_graph_mode.py (new file)

Lines changed: 80 additions & 0 deletions

```python
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
"""Compare the short outputs of HF and vLLM when using greedy sampling.

Run `pytest tests/multicard/test_torchair_graph_mode.py`.
"""
import os

import pytest

from tests.conftest import VllmRunner

os.environ["PYTORCH_NPU_ALLOC_CONF"] = "max_split_size_mb:256"


@pytest.mark.skipif(os.getenv("VLLM_USE_V1") == "0",
                    reason="torchair graph is not supported on v0")
def test_e2e_deepseekv3_with_torchair(monkeypatch: pytest.MonkeyPatch):
    with monkeypatch.context() as m:
        m.setenv("VLLM_USE_MODELSCOPE", "True")
        m.setenv("VLLM_WORKER_MULTIPROC_METHOD", "spawn")

        example_prompts = [
            "Hello, my name is",
            "The president of the United States is",
            "The capital of France is",
            "The future of AI is",
        ]
        dtype = "half"
        max_tokens = 5
        # torchair is only work without chunked-prefill now
        with VllmRunner(
                "vllm-ascend/DeepSeek-V3-Pruning",
                dtype=dtype,
                tensor_parallel_size=4,
                distributed_executor_backend="mp",
                additional_config={
                    "torchair_graph_config": {
                        "enabled": True,
                    },
                    "ascend_scheduler_config": {
                        "enabled": True,
                    },
                    "refresh": True,
                },
                enforce_eager=False,
        ) as vllm_model:
            # use greedy sampler to make sure the generated results are fix
            vllm_output = vllm_model.generate_greedy(example_prompts,
                                                     max_tokens)
        # NOTE: vllm-ascend/DeepSeek-V3-Pruning is a random weight of
        # DeepSeek-V3 with 2 hidden layers, thus the golden results seems
        # inaccurate. This will only change if accuracy improves with the
        # official weights of DeepSeek-V3.
        golden_results = [
            'Hello, my name is feasibility伸 spazio debtor添',
            'The president of the United States is begg"""\n杭州风和 bestimm',
            'The capital of France is frequentlyশามalinkAllowed',
            'The future of AI is deleting俯احت怎么样了حراف',
        ]

        assert len(golden_results) == len(vllm_output)
        for i in range(len(vllm_output)):
            assert golden_results[i] == vllm_output[i][1]
            print(f"Generated text: {vllm_output[i][1]!r}")
```
