#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

name: Accuracy Tests

on:
  workflow_dispatch:
    inputs:
      vllm-version:
        description: 'Which vLLM version to run the accuracy test against?'
        required: true
        type: string
      vllm-ascend-version:
        description: 'Which vllm-ascend version to run the accuracy test against?'
        required: true
        type: string
      models:
        description: 'Which model(s) to test (all/Qwen2.5-7B-Instruct/Llama-3.1-8B-Instruct/Qwen2.5-VL-7B-Instruct/Qwen3-8B)'
        required: true
        type: choice
        options:
          - all
          - Qwen/Qwen2.5-7B-Instruct
          - meta-llama/Llama-3.1-8B-Instruct
          - Qwen/Qwen2.5-VL-7B-Instruct
          - Qwen/Qwen3-8B
        default: 'all'

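# A minimal sketch of how this workflow can be dispatched from the GitHub CLI.
# The workflow file name and the ref values below are illustrative assumptions:
#
#   gh workflow run accuracy_test.yaml \
#     -f vllm-version=main \
#     -f vllm-ascend-version=main \
#     -f models=all
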
# Bash shells do not source ~/.profile or ~/.bashrc, so the shell must be
# explicitly declared as "shell: bash -el {0}" on steps that need the
# ascend-toolkit environment variables activated.
defaults:
  run:
    shell: bash -el {0}

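# Each selected model runs as its own matrix job on a self-hosted Ascend NPU
# runner (the "linux-arm64-npu-2" label presumably maps to a 2-card 910B host).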
jobs:
  model_tests:
    name: Model Test - ${{ matrix.model_name }}
    runs-on: 'linux-arm64-npu-2'
    strategy:
      matrix:
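        # Map the `models` input onto a JSON `include` list: 'all' expands to
        # every supported model, while any single choice yields a one-element
        # matrix for just that model.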
        include: ${{ fromJSON(
          (github.event.inputs.models == 'all' && '[{"model_name":"Qwen/Qwen2.5-7B-Instruct","output_file":"Qwen2.5-7B-Instruct"},{"model_name":"meta-llama/Llama-3.1-8B-Instruct","output_file":"Llama-3.1-8B-Instruct"},{"model_name":"Qwen/Qwen2.5-VL-7B-Instruct","output_file":"Qwen2.5-VL-7B-Instruct"},{"model_name":"Qwen/Qwen3-8B","output_file":"Qwen3-8B"}]') ||
          (github.event.inputs.models == 'Qwen/Qwen2.5-7B-Instruct' && '[{"model_name":"Qwen/Qwen2.5-7B-Instruct","output_file":"Qwen2.5-7B-Instruct"}]') ||
          (github.event.inputs.models == 'meta-llama/Llama-3.1-8B-Instruct' && '[{"model_name":"meta-llama/Llama-3.1-8B-Instruct","output_file":"Llama-3.1-8B-Instruct"}]') ||
          (github.event.inputs.models == 'Qwen/Qwen2.5-VL-7B-Instruct' && '[{"model_name":"Qwen/Qwen2.5-VL-7B-Instruct","output_file":"Qwen2.5-VL-7B-Instruct"}]') ||
          (github.event.inputs.models == 'Qwen/Qwen3-8B' && '[{"model_name":"Qwen/Qwen3-8B","output_file":"Qwen3-8B"}]')
        ) }}
      fail-fast: false

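    # HF_ENDPOINT redirects Hugging Face downloads to a mirror reachable from
    # the CI network; DATASET_SOURCE presumably switches dataset downloads to
    # ModelScope.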
    container:
      image: quay.io/ascend/cann:8.0.0-910b-ubuntu22.04-py3.10
      env:
        HF_ENDPOINT: https://hf-mirror.com
        HF_TOKEN: ${{ secrets.HF_TOKEN }}
        DATASET_SOURCE: ModelScope

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Check npu and CANN info
        run: |
          npu-smi info
          cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info

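      # Switch apt and pip to the Tsinghua mirrors and route GitHub clones
      # through a proxy so the container can reach them from the CI network.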
      - name: Config mirrors
        run: |
          sed -i 's|ports.ubuntu.com|mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list
          pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
          apt-get update -y
          apt-get install -y git
          git config --global url."https://gh-proxy.test.osinfra.cn/https://github.com/".insteadOf https://github.com/

      - name: Install system dependencies
        run: |
          apt-get install -y $(cat packages.txt)
          apt-get install -y gcc g++ cmake libnuma-dev

      - name: Checkout vllm-project/vllm repo
        uses: actions/checkout@v4
        with:
          repository: vllm-project/vllm
          path: ./vllm-empty
          ref: ${{ github.event.inputs.vllm-version }}

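      # VLLM_TARGET_DEVICE=empty installs vLLM without compiling any
      # device-specific kernels; the Ascend backend comes from the
      # vllm-ascend plugin installed below.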
      - name: Install vllm-project/vllm from source
        working-directory: ./vllm-empty
        run: VLLM_TARGET_DEVICE=empty pip install -e .

      - name: Checkout vllm-project/vllm-ascend repo
        uses: actions/checkout@v4
        with:
          repository: vllm-project/vllm-ascend
          path: ./vllm-ascend
          ref: ${{ github.event.inputs.vllm-ascend-version }}
          fetch-depth: 0

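      # torch_npu (the PyTorch Ascend adapter, "pta") is fetched once and
      # cached under /root/.cache/pta so repeat runs on the same runner can
      # skip the download.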
      - name: Install pta
        run: |
          if [ ! -d /root/.cache/pta ]; then
            mkdir -p /root/.cache/pta
          fi
          if [ ! -f /root/.cache/pta/torch_npu-2.5.1.dev20250320-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl ]; then
            cd /root/.cache/pta
            rm -rf pytorch_v2.5.1_py310*
            wget https://pytorch-package.obs.cn-north-4.myhuaweicloud.com/pta/Daily/v2.5.1/20250320.3/pytorch_v2.5.1_py310.tar.gz
            tar -zxvf pytorch_v2.5.1_py310.tar.gz
          fi
          pip install /root/.cache/pta/torch_npu-2.5.1.dev20250320-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl

      - name: Install vllm-project/vllm-ascend
        working-directory: ./vllm-ascend
        run: |
          pip install -r requirements-dev.txt
          pip install -e .

      - name: Checkout EleutherAI/lm-evaluation-harness repo
        uses: actions/checkout@v4
        with:
          repository: EleutherAI/lm-evaluation-harness
          path: ./lm-eval
          fetch-depth: 0

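      # Install the harness from source, plus ray and pinned versions of
      # datasets/transformers/huggingface-hub for a reproducible environment.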
      - name: Install EleutherAI/lm-evaluation-harness
        working-directory: ./lm-eval
        run: |
          pip install -e .
          pip install ray datasets==2.16.0 transformers==4.50.3 huggingface-hub==0.29.3

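      # Resolve the real toolkit directory behind the "latest" symlink, read
      # the CANN version from its install info, and export every component
      # version into GITHUB_ENV for the report below.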
      - name: Collect version info
        run: |
          for dir in /usr/local/Ascend/ascend-toolkit/*; do
            dname=$(basename "$dir")
            if [ "$dname" != "latest" ]; then
              TOOLKIT_DIR="$dname"
              break
            fi
          done
          INFO_FILE="/usr/local/Ascend/ascend-toolkit/${TOOLKIT_DIR}/$(uname -i)-linux/ascend_toolkit_install.info"
          CANN_VERSION=$(grep "version=" "$INFO_FILE" \
            | head -n1 \
            | cut -d'=' -f2 \
            | tr -d '"')
          {
            echo "CANN_VERSION=$CANN_VERSION"
            pip show torch | grep "Version:" | awk '{print "TORCH_VERSION="$2}'
            pip show torch_npu | grep "Version:" | awk '{print "TORCH_NPU_VERSION="$2}'
            pip show vllm | grep "Version:" | awk '{print "VLLM_VERSION="$2}' | sed 's/+.*//'
          } >> "$GITHUB_ENV"

      - name: Print versions
        run: |
          echo "CANN: ${{ env.CANN_VERSION }}"
          echo "Torch NPU: ${{ env.TORCH_NPU_VERSION }}"
          echo "Torch: ${{ env.TORCH_VERSION }}"
          echo "vLLM: ${{ env.VLLM_VERSION }}"

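      # VLLM_USE_V1=0 pins the legacy V0 engine; PYTORCH_NPU_ALLOC_CONF caps
      # the allocator's split size, which presumably reduces memory
      # fragmentation on the NPU.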
      - name: Run Accuracy Test for V0
        working-directory: ./benchmarks
        env:
          VLLM_USE_V1: 0
          PYTORCH_NPU_ALLOC_CONF: max_split_size_mb:256
        run: |
          mkdir -p ./accuracy/V0
          python ./scripts/run_accuracy.py \
            --model "${{ matrix.model_name }}" \
            --output "./accuracy/V0/${{ matrix.output_file }}.md" \
            --vllm_ascend_version "${{ github.event.inputs.vllm-ascend-version }}" \
            --cann_version "${{ env.CANN_VERSION }}" \
            --torch_npu_version "${{ env.TORCH_NPU_VERSION }}" \
            --torch_version "${{ env.TORCH_VERSION }}" \
            --vllm_version "${{ env.VLLM_VERSION }}"

      - name: Upload Report for V0
        uses: actions/upload-artifact@v4
        with:
          name: "${{ github.event.inputs.vllm-ascend-version }}-${{ matrix.output_file }}-V0-report"
          path: ./benchmarks/accuracy/V0/${{ matrix.output_file }}.md
          if-no-files-found: warn
          retention-days: 90
          overwrite: true