
Commit dd207cb

[CI][Benchmark] Add new model and v1 test to perf benchmarks (#1099)
### What this PR does / why we need it?

- Add qwen2.5-7b-instruct test
- Add v1 test

Signed-off-by: wangli <wangli858794774@gmail.com>
1 parent 2498d29 commit dd207cb

File tree

4 files changed: +62, -11 lines changed


.github/workflows/nightly_benchmarks.yaml

Lines changed: 17 additions & 11 deletions
@@ -45,13 +45,18 @@ jobs:
   test:
     if: ${{ contains(github.event.pull_request.labels.*.name, 'performance-test') && contains(github.event.pull_request.labels.*.name, 'ready-for-test') || github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}

-    name: Benchmarks/vLLM=${{ matrix.vllm_branch }}, vLLM-Ascend=${{ matrix.vllm_ascend_branch }}
+    name: Benchmarks/vLLM=${{ matrix.vllm_branch }}, vLLM-Ascend=${{ matrix.vllm_ascend_branch }}, use_v1=${{ matrix.vllm_use_v1 }}
     runs-on: 'linux-arm64-npu-static-8'
     strategy:
       matrix:
         include:
           - vllm_branch: v0.9.1
             vllm_ascend_branch: main
+            vllm_use_v1: 0
+          - vllm_branch: v0.9.0
+            vllm_ascend_branch: main
+            vllm_use_v1: 1
+      max-parallel: 1
     container:
       image: m.daocloud.io/quay.io/ascend/cann:8.1.rc1-910b-ubuntu22.04-py3.10
       volumes:
@@ -71,6 +76,7 @@ jobs:
       HF_TOKEN: ${{ secrets.HF_TOKEN }}
       ES_OM_DOMAIN: ${{ secrets.ES_OM_DOMAIN }}
       ES_OM_AUTHORIZATION: ${{ secrets.ES_OM_AUTHORIZATION }}
+      VLLM_USE_V1: ${{ matrix.vllm_use_v1 }}
     steps:
       - name: Check npu and CANN info
         run: |
@@ -140,7 +146,7 @@ jobs:
       - name: Install elastic_tool
         if: github.event_name != 'pull_request'
         run: |
-          pip install escli-tool==0.2.1
+          pip install escli-tool==0.2.2

       - name: Collect pr info from vllm-project/vllm-ascend
         if: github.event_name != 'pull_request'
@@ -177,17 +183,17 @@ jobs:
             echo "vllm branch: ${{ matrix.vllm_branch }}"
             echo "vllm-ascend branch: ${{ matrix.vllm_ascend_branch }}"
             echo "------------------------"
+
             cd /github/home
             bash benchmarks/scripts/run-performance-benchmarks.sh
             # send the result to es
-            if [[ "${{ github.event_name }}" != "pull request" ]]; then
-              escli add --vllm_branch ${{ matrix.vllm_branch }} \
-                --vllm_ascend_branch ${{ matrix.vllm_ascend_branch }} \
-                --commit_id $commit_id \
-                --commit_title "$commit_title" \
-                --created_at "$commit_time_no_tz" \
-                --res_dir ./benchmarks/results
-              rm -rf ./benchmarks/results
-            fi
+            escli add --vllm_branch ${{ matrix.vllm_branch }} \
+              --vllm_ascend_branch ${{ matrix.vllm_ascend_branch }} \
+              --commit_id $commit_id \
+              --commit_title "$commit_title" \
+              --created_at "$commit_time_no_tz" \
+              --res_dir ./benchmarks/results \
+              --extra_feat '{"VLLM_USE_V1": "${{ matrix.vllm_use_v1 }}"}'
+            rm -rf ./benchmarks/results
             cd -
           done < commit_log.txt
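
For reference, a single matrix leg can be reproduced outside of CI by setting the same switch and running the same script. A minimal sketch, assuming a vllm-ascend checkout containing the benchmarks/ directory, that escli-tool==0.2.2 accepts the flags used in the workflow step, and that commit_id, commit_title and commit_time_no_tz are provided the way the workflow's commit loop provides them:

# Sketch of one matrix leg (vllm v0.9.0 with the V1 engine), not the exact CI environment.
export VLLM_USE_V1=1        # same switch the workflow injects via the matrix

cd /path/to/vllm-ascend     # the workflow uses /github/home inside the container
bash benchmarks/scripts/run-performance-benchmarks.sh

# Push results to Elasticsearch, tagging the engine version via --extra_feat.
# Flags copied from the workflow step; commit_id, commit_title and
# commit_time_no_tz are assumed to be set in the environment.
escli add --vllm_branch v0.9.0 \
  --vllm_ascend_branch main \
  --commit_id "$commit_id" \
  --commit_title "$commit_title" \
  --created_at "$commit_time_no_tz" \
  --res_dir ./benchmarks/results \
  --extra_feat '{"VLLM_USE_V1": "1"}'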

benchmarks/tests/latency-tests.json

Lines changed: 10 additions & 0 deletions
@@ -9,5 +9,15 @@
             "num_iters_warmup": 5,
             "num_iters": 15
         }
+    },
+    {
+        "test_name": "latency_qwen2_5_7B_tp1",
+        "parameters": {
+            "model": "Qwen/Qwen2.5-7B-Instruct",
+            "tensor_parallel_size": 1,
+            "load_format": "dummy",
+            "num_iters_warmup": 5,
+            "num_iters": 15
+        }
     }
 ]
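
Assuming the benchmark runner forwards these JSON keys one-to-one as CLI flags to vLLM's benchmark_latency.py (an assumption about the harness, not something this diff shows), the new entry corresponds roughly to:

# Rough standalone equivalent of latency_qwen2_5_7B_tp1.
# Path is relative to a vLLM checkout; --load-format dummy uses random
# weights, so no checkpoint download is needed.
python benchmarks/benchmark_latency.py \
  --model Qwen/Qwen2.5-7B-Instruct \
  --tensor-parallel-size 1 \
  --load-format dummy \
  --num-iters-warmup 5 \
  --num-iters 15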

benchmarks/tests/serving-tests.json

Lines changed: 24 additions & 0 deletions
@@ -49,5 +49,29 @@
             "dataset_path": "/github/home/.cache/datasets/ShareGPT_V3_unfiltered_cleaned_split.json",
             "num_prompts": 200
         }
+    },
+    {
+        "test_name": "serving_qwen2_5_7B_tp1",
+        "qps_list": [
+            1,
+            4,
+            16,
+            "inf"
+        ],
+        "server_parameters": {
+            "model": "Qwen/Qwen2.5-7B-Instruct",
+            "tensor_parallel_size": 1,
+            "swap_space": 16,
+            "disable_log_stats": "",
+            "disable_log_requests": "",
+            "load_format": "dummy"
+        },
+        "client_parameters": {
+            "model": "Qwen/Qwen2.5-7B-Instruct",
+            "backend": "vllm",
+            "dataset_name": "sharegpt",
+            "dataset_path": "/github/home/.cache/datasets/ShareGPT_V3_unfiltered_cleaned_split.json",
+            "num_prompts": 200
+        }
     }
 ]
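
The serving test has a server half and a client half. Assuming the usual vLLM serving-benchmark flow, server_parameters map to a vllm serve launch and client_parameters to a benchmark_serving.py run repeated for each value in qps_list; a sketch under those assumptions:

# Server side (server_parameters); run in the background and wait until it is
# serving before starting the client.
vllm serve Qwen/Qwen2.5-7B-Instruct \
  --tensor-parallel-size 1 \
  --swap-space 16 \
  --disable-log-stats \
  --disable-log-requests \
  --load-format dummy &

# Client side (client_parameters); path is relative to a vLLM checkout, and
# --request-rate would be repeated for 1, 4, 16 and inf from qps_list.
python benchmarks/benchmark_serving.py \
  --backend vllm \
  --model Qwen/Qwen2.5-7B-Instruct \
  --dataset-name sharegpt \
  --dataset-path /github/home/.cache/datasets/ShareGPT_V3_unfiltered_cleaned_split.json \
  --num-prompts 200 \
  --request-rate 1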

benchmarks/tests/throughput-tests.json

Lines changed: 11 additions & 0 deletions
@@ -22,6 +22,17 @@
             "dataset_path": "lmarena-ai/vision-arena-bench-v0.1",
             "num_prompts": 200
         }
+    },
+    {
+        "test_name": "throughput_qwen2_5_7B_tp1",
+        "parameters": {
+            "model": "Qwen/Qwen2.5-7B-Instruct",
+            "tensor_parallel_size": 1,
+            "load_format": "dummy",
+            "dataset_path": "/github/home/.cache/datasets/ShareGPT_V3_unfiltered_cleaned_split.json",
+            "num_prompts": 200,
+            "backend": "vllm"
+        }
     }
 ]
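
Likewise, under the same key-to-flag assumption, the throughput entry corresponds roughly to a benchmark_throughput.py run over the cached ShareGPT dataset:

# Rough standalone equivalent of throughput_qwen2_5_7B_tp1 (flag mapping assumed).
# Path is relative to a vLLM checkout.
python benchmarks/benchmark_throughput.py \
  --backend vllm \
  --model Qwen/Qwen2.5-7B-Instruct \
  --tensor-parallel-size 1 \
  --load-format dummy \
  --dataset-path /github/home/.cache/datasets/ShareGPT_V3_unfiltered_cleaned_split.json \
  --num-prompts 200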
