From d6718ce4301e8f68c4c6dd5ebe5b48a7565914a2 Mon Sep 17 00:00:00 2001 From: MengqingCao Date: Mon, 16 Jun 2025 03:40:51 +0000 Subject: [PATCH 1/3] [DP][V1] Fix rank set in DP scenario Signed-off-by: MengqingCao --- .github/workflows/accuracy_test.yaml | 2 +- .github/workflows/nightly_benchmarks.yaml | 2 +- .github/workflows/vllm_ascend_test.yaml | 4 +- .../workflows/vllm_ascend_test_long_term.yaml | 2 +- .github/workflows/vllm_ascend_test_pd.yaml | 2 +- Dockerfile | 2 +- Dockerfile.openEuler | 2 +- pyproject.toml | 2 +- requirements.txt | 3 +- tests/multicard/test_data_parallel.py | 66 +++++++++++++++++++ vllm_ascend/patch/__init__.py | 13 +--- .../patch_common/patch_distributed.py | 18 +---- vllm_ascend/worker/worker_v1.py | 8 +-- 13 files changed, 81 insertions(+), 45 deletions(-) create mode 100644 tests/multicard/test_data_parallel.py diff --git a/.github/workflows/accuracy_test.yaml b/.github/workflows/accuracy_test.yaml index 999fb6ad5..1f86bae4f 100644 --- a/.github/workflows/accuracy_test.yaml +++ b/.github/workflows/accuracy_test.yaml @@ -175,7 +175,7 @@ jobs: working-directory: ./vllm-ascend run: | pip install -r requirements-dev.txt - pip install -e . + pip install -e . --no-build-isolation - name: Install lm-eval, ray, and datasets run: | diff --git a/.github/workflows/nightly_benchmarks.yaml b/.github/workflows/nightly_benchmarks.yaml index b060076c9..17e5fa3a8 100644 --- a/.github/workflows/nightly_benchmarks.yaml +++ b/.github/workflows/nightly_benchmarks.yaml @@ -116,7 +116,7 @@ jobs: - name: Install vllm-project/vllm-ascend run: | - pip install -e . + pip install -e . --no-build-isolation pip install -r benchmarks/requirements-bench.txt - name: Run current commit benchmarks diff --git a/.github/workflows/vllm_ascend_test.yaml b/.github/workflows/vllm_ascend_test.yaml index 62c91de8c..9a47e0c91 100644 --- a/.github/workflows/vllm_ascend_test.yaml +++ b/.github/workflows/vllm_ascend_test.yaml @@ -216,7 +216,7 @@ jobs: - name: Install vllm-project/vllm-ascend run: | pip install -r requirements-dev.txt - pip install -v -e . + pip install -v -e . --no-build-isolation - name: Run e2e test for V1 Engine env: @@ -313,7 +313,7 @@ jobs: - name: Install vllm-project/vllm-ascend run: | pip install -r requirements-dev.txt - pip install -v -e . + pip install -v -e . --no-build-isolation - name: Run vllm-project/vllm-ascend test for V1 Engine env: diff --git a/.github/workflows/vllm_ascend_test_long_term.yaml b/.github/workflows/vllm_ascend_test_long_term.yaml index 46123590a..32654b9c1 100644 --- a/.github/workflows/vllm_ascend_test_long_term.yaml +++ b/.github/workflows/vllm_ascend_test_long_term.yaml @@ -90,7 +90,7 @@ jobs: - name: Install vllm-project/vllm-ascend run: | pip install -r requirements-dev.txt - pip install -v -e . + pip install -v -e . --no-build-isolation - name: Run vllm-project/vllm-ascend long term test run: | diff --git a/.github/workflows/vllm_ascend_test_pd.yaml b/.github/workflows/vllm_ascend_test_pd.yaml index c2c76c9f2..fbcd69ec8 100644 --- a/.github/workflows/vllm_ascend_test_pd.yaml +++ b/.github/workflows/vllm_ascend_test_pd.yaml @@ -99,7 +99,7 @@ jobs: - name: Install vllm-project/vllm-ascend run: | pip install -r requirements-dev.txt - pip install -v -e . + pip install -v -e . 
--no-build-isolation
 
       - name: Run vllm-project/vllm-ascend PD Disaggregation test
         run: |
diff --git a/Dockerfile b/Dockerfile
index 952e77fe9..4400ffd18 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -49,7 +49,7 @@ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /vllm-workspace/vllm
 RUN source /usr/local/Ascend/ascend-toolkit/set_env.sh && \
     source /usr/local/Ascend/nnal/atb/set_env.sh && \
     export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \
-    python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \
+    python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --no-build-isolation --extra-index https://download.pytorch.org/whl/cpu/ && \
     python3 -m pip cache purge
 
 # Install modelscope (for fast download) and ray (for multinode)
diff --git a/Dockerfile.openEuler b/Dockerfile.openEuler
index 2ff3d0b39..71fc28d87 100644
--- a/Dockerfile.openEuler
+++ b/Dockerfile.openEuler
@@ -46,7 +46,7 @@ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /vllm-workspace/vllm/ -
 RUN source /usr/local/Ascend/ascend-toolkit/set_env.sh && \
     source /usr/local/Ascend/nnal/atb/set_env.sh && \
     export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \
-    python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \
+    python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --no-build-isolation --extra-index https://download.pytorch.org/whl/cpu/ && \
     python3 -m pip cache purge
 
 # Install modelscope (for fast download) and ray (for multinode)
diff --git a/pyproject.toml b/pyproject.toml
index b44197038..514b755c3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -12,7 +12,7 @@ requires = [
     "scipy",
     "setuptools>=64",
     "setuptools-scm>=8",
-    "torch-npu==2.5.1",
+    "torch-npu==2.5.1.post1.dev20250528",
     "torch>=2.5.1",
     "torchvision<0.21.0",
     "wheel",
diff --git a/requirements.txt b/requirements.txt
index 2f844df74..6b4abd6bc 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,7 +10,8 @@ pyyaml
 scipy
 setuptools>=64
 setuptools-scm>=8
-torch-npu==2.5.1
+--extra-index-url https://mirrors.huaweicloud.com/ascend/repos/pypi
+torch-npu==2.5.1.post1.dev20250528
 torch>=2.5.1
 torchvision<0.21.0
 wheel
diff --git a/tests/multicard/test_data_parallel.py b/tests/multicard/test_data_parallel.py
new file mode 100644
index 000000000..6c0a20de9
--- /dev/null
+++ b/tests/multicard/test_data_parallel.py
@@ -0,0 +1,66 @@
+#
+# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
+# Copyright 2023 The vLLM team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Compare the outputs of vLLM with and without data parallelism.
+
+Run `pytest tests/multicard/test_data_parallel.py`.
+"""
+
+import os
+
+import pytest
+
+from tests.conftest import VllmRunner
+from tests.model_utils import check_outputs_equal
+
+MODELS = ["Qwen/Qwen2.5-0.5B-Instruct"]
+
+
+@pytest.mark.skipif(os.getenv("VLLM_USE_V1") == "0",
+                    reason="Data parallel is only supported on v1")
+@pytest.mark.parametrize("model", MODELS)
+@pytest.mark.parametrize("max_tokens", [32])
+def test_data_parallel_correctness(
+    model: str,
+    max_tokens: int,
+) -> None:
+    example_prompts = [
+        "Hello, my name is", "The president of the United States is",
+        "The capital of France is", "The future of AI is"
+    ]
+
+    with VllmRunner(model_name=model,
+                    max_model_len=1024,
+                    max_num_seqs=16,
+                    data_parallel_size=2,
+                    distributed_executor_backend="mp") as vllm_model:
+        vllm_dp_outputs = vllm_model.generate_greedy(example_prompts,
+                                                     max_tokens)
+
+    with VllmRunner(
+            model_name=model,
+            max_model_len=1024,
+            max_num_seqs=16,
+    ) as vllm_model:
+        vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens)
+
+    check_outputs_equal(
+        outputs_0_lst=vllm_outputs,
+        outputs_1_lst=vllm_dp_outputs,
+        name_0="vllm_outputs",
+        name_1="vllm_dp_outputs",
+    )
diff --git a/vllm_ascend/patch/__init__.py b/vllm_ascend/patch/__init__.py
index 3c24bfc70..d817f9063 100644
--- a/vllm_ascend/patch/__init__.py
+++ b/vllm_ascend/patch/__init__.py
@@ -47,16 +47,7 @@
 #    Related PR (if no, explain why):
 #    Future Plan:
 #      Remove those patches when vllm merges them
-# 2. `vllm.v1.engine.core.DPEngineCoreProc._init_data_parallel`
-#    Why:
-#      There is some bug for ASCEND_RT_VISIBLE_DEVICES usage.
-#    How:
-#      The ASCEND_RT_VISIBLE_DEVICES related code is dropped.
-#    Related PR (if no, explain why):
-#      No, this is a bug for vllm ascend
-#    Future Plan:
-#      Remove this patch once ASCEND_RT_VISIBLE_DEVICES bug is fixed.
-# 3. `vllm.config.ParallelConfig.get_next_dp_init_port`
+# 2. `vllm.config.ParallelConfig.get_next_dp_init_port`
 #    Why:
 #      vllm doesn't support getting the port from the environment.
 #    How:
 #
@@ -65,7 +56,7 @@
 #      Need a PR to vllm to support getting the port from the environment.
 #    Future Plan:
 #      Remove those patches when vllm merges them
-# 4. `vllm.config.ParallelConfig.ParallelConfig.stateless_init_dp_group`
+# 3. `vllm.config.ParallelConfig.ParallelConfig.stateless_init_dp_group`
 #    Why:
 #      vLLM uses the gloo backend by default to initialize the stateless dp process group, but we want to use hccl here to
 #      get better performance
diff --git a/vllm_ascend/patch/platform/patch_common/patch_distributed.py b/vllm_ascend/patch/platform/patch_common/patch_distributed.py
index 21f846d31..86515df86 100644
--- a/vllm_ascend/patch/platform/patch_common/patch_distributed.py
+++ b/vllm_ascend/patch/platform/patch_common/patch_distributed.py
@@ -21,10 +21,9 @@
 import vllm.distributed
 import vllm.envs as envs
 from torch.distributed import ProcessGroup
-from vllm.config import ParallelConfig, VllmConfig
+from vllm.config import ParallelConfig
 from vllm.distributed.utils import \
     stateless_init_torch_distributed_process_group
-from vllm.v1.engine.core import DPEngineCoreProc
 
 
 def ascend_destroy_model_parallel():
@@ -79,21 +78,6 @@ def stateless_init_dp_group(self) -> "ProcessGroup":
     return dp_group
 
 
-def _init_data_parallel(self, vllm_config: VllmConfig):
-    # Configure NPUs and stateless process group for data parallel.
- dp_rank = vllm_config.parallel_config.data_parallel_rank - dp_size = vllm_config.parallel_config.data_parallel_size - local_dp_rank = vllm_config.parallel_config.data_parallel_rank_local - - assert dp_size > 1 - assert 0 <= local_dp_rank <= dp_rank < dp_size - - self.local_dp_rank = local_dp_rank - self.dp_group = vllm_config.parallel_config.stateless_init_dp_group() - self.current_wave = 0 - - vllm.distributed.parallel_state.destroy_model_parallel = ascend_destroy_model_parallel -DPEngineCoreProc._init_data_parallel = _init_data_parallel ParallelConfig.get_next_dp_init_port = parallel_config_get_dp_port ParallelConfig.stateless_init_dp_group = stateless_init_dp_group diff --git a/vllm_ascend/worker/worker_v1.py b/vllm_ascend/worker/worker_v1.py index ebdf01e15..76844c96a 100644 --- a/vllm_ascend/worker/worker_v1.py +++ b/vllm_ascend/worker/worker_v1.py @@ -75,12 +75,6 @@ def __init__( distributed_init_method=distributed_init_method, is_driver_worker=is_driver_worker) - # NOTE(Yizhou): Since we do not set ASCEND_RT_VISIBLE_DEVICES in - # vllm_ascend, we need to set the device id manually. - local_dp_rank = self.vllm_config.parallel_config.data_parallel_rank_local - world_size = self.vllm_config.parallel_config.world_size - self.local_rank_across_dp = local_dp_rank * world_size + self.local_rank - # Try to import mindie_turbo to accelerate vLLM inference. try_register_lib( "mindie_turbo", @@ -124,7 +118,7 @@ def initialize_cache(self, num_gpu_blocks: int, def init_device(self): if self.device_config.device.type == "npu": - self.device = torch.device(f"npu:{self.local_rank_across_dp}") + self.device = torch.device(f"npu:{self.local_rank}") NPUPlatform.set_device(self.device) NPUPlatform.empty_cache() self.init_npu_memory = NPUPlatform.mem_get_info()[0] From 9ad75229e0bec1032ce24c163bf26e44ef19dc10 Mon Sep 17 00:00:00 2001 From: MengqingCao Date: Mon, 16 Jun 2025 12:20:36 +0000 Subject: [PATCH 2/3] rm no-build-isolation Signed-off-by: MengqingCao --- .github/workflows/accuracy_test.yaml | 2 +- .github/workflows/nightly_benchmarks.yaml | 2 +- .github/workflows/vllm_ascend_test.yaml | 4 ++-- .github/workflows/vllm_ascend_test_long_term.yaml | 2 +- .github/workflows/vllm_ascend_test_pd.yaml | 2 +- Dockerfile | 2 +- Dockerfile.openEuler | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/accuracy_test.yaml b/.github/workflows/accuracy_test.yaml index 1f86bae4f..999fb6ad5 100644 --- a/.github/workflows/accuracy_test.yaml +++ b/.github/workflows/accuracy_test.yaml @@ -175,7 +175,7 @@ jobs: working-directory: ./vllm-ascend run: | pip install -r requirements-dev.txt - pip install -e . --no-build-isolation + pip install -e . - name: Install lm-eval, ray, and datasets run: | diff --git a/.github/workflows/nightly_benchmarks.yaml b/.github/workflows/nightly_benchmarks.yaml index 17e5fa3a8..b060076c9 100644 --- a/.github/workflows/nightly_benchmarks.yaml +++ b/.github/workflows/nightly_benchmarks.yaml @@ -116,7 +116,7 @@ jobs: - name: Install vllm-project/vllm-ascend run: | - pip install -e . --no-build-isolation + pip install -e . pip install -r benchmarks/requirements-bench.txt - name: Run current commit benchmarks diff --git a/.github/workflows/vllm_ascend_test.yaml b/.github/workflows/vllm_ascend_test.yaml index 9a47e0c91..62c91de8c 100644 --- a/.github/workflows/vllm_ascend_test.yaml +++ b/.github/workflows/vllm_ascend_test.yaml @@ -216,7 +216,7 @@ jobs: - name: Install vllm-project/vllm-ascend run: | pip install -r requirements-dev.txt - pip install -v -e . 
--no-build-isolation + pip install -v -e . - name: Run e2e test for V1 Engine env: @@ -313,7 +313,7 @@ jobs: - name: Install vllm-project/vllm-ascend run: | pip install -r requirements-dev.txt - pip install -v -e . --no-build-isolation + pip install -v -e . - name: Run vllm-project/vllm-ascend test for V1 Engine env: diff --git a/.github/workflows/vllm_ascend_test_long_term.yaml b/.github/workflows/vllm_ascend_test_long_term.yaml index 32654b9c1..46123590a 100644 --- a/.github/workflows/vllm_ascend_test_long_term.yaml +++ b/.github/workflows/vllm_ascend_test_long_term.yaml @@ -90,7 +90,7 @@ jobs: - name: Install vllm-project/vllm-ascend run: | pip install -r requirements-dev.txt - pip install -v -e . --no-build-isolation + pip install -v -e . - name: Run vllm-project/vllm-ascend long term test run: | diff --git a/.github/workflows/vllm_ascend_test_pd.yaml b/.github/workflows/vllm_ascend_test_pd.yaml index fbcd69ec8..c2c76c9f2 100644 --- a/.github/workflows/vllm_ascend_test_pd.yaml +++ b/.github/workflows/vllm_ascend_test_pd.yaml @@ -99,7 +99,7 @@ jobs: - name: Install vllm-project/vllm-ascend run: | pip install -r requirements-dev.txt - pip install -v -e . --no-build-isolation + pip install -v -e . - name: Run vllm-project/vllm-ascend PD Disaggregation test run: | diff --git a/Dockerfile b/Dockerfile index 4400ffd18..952e77fe9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -49,7 +49,7 @@ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /vllm-workspace/vllm RUN source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ source /usr/local/Ascend/nnal/atb/set_env.sh && \ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \ - python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --no-build-isolation --extra-index https://download.pytorch.org/whl/cpu/ && \ + python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ python3 -m pip cache purge # Install modelscope (for fast download) and ray (for multinode) diff --git a/Dockerfile.openEuler b/Dockerfile.openEuler index 71fc28d87..2ff3d0b39 100644 --- a/Dockerfile.openEuler +++ b/Dockerfile.openEuler @@ -46,7 +46,7 @@ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /vllm-workspace/vllm/ - RUN source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ source /usr/local/Ascend/nnal/atb/set_env.sh && \ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \ - python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --no-build-isolation --extra-index https://download.pytorch.org/whl/cpu/ && \ + python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ python3 -m pip cache purge # Install modelscope (for fast download) and ray (for multinode) From c11220f71545b9a2ad5e902040a23c4922355d5f Mon Sep 17 00:00:00 2001 From: Icey <1790571317@qq.com> Date: Mon, 16 Jun 2025 17:11:07 +0800 Subject: [PATCH 3/3] Bump torch-npu version to 2.5.1.post1.dev20250528 Signed-off-by: Icey <1790571317@qq.com> Signed-off-by: MengqingCao --- .github/workflows/accuracy_test.yaml | 2 ++ .github/workflows/image_openeuler.yml | 6 ++++++ .github/workflows/image_ubuntu.yml | 6 ++++++ .github/workflows/nightly_benchmarks.yaml | 2 ++ .github/workflows/vllm_ascend_test.yaml | 5 +++++ .github/workflows/vllm_ascend_test_long_term.yaml | 2 ++ .github/workflows/vllm_ascend_test_pd.yaml | 2 ++ Dockerfile | 3 ++- Dockerfile.openEuler | 3 ++- README.md | 2 
+- README.zh.md | 2 +- docs/source/installation.md | 11 ++++++----- requirements.txt | 7 +++++-- setup.py | 2 +- 14 files changed, 43 insertions(+), 12 deletions(-) diff --git a/.github/workflows/accuracy_test.yaml b/.github/workflows/accuracy_test.yaml index 999fb6ad5..0297f625d 100644 --- a/.github/workflows/accuracy_test.yaml +++ b/.github/workflows/accuracy_test.yaml @@ -173,6 +173,8 @@ jobs: - name: Install vllm-project/vllm-ascend working-directory: ./vllm-ascend + env: + PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi run: | pip install -r requirements-dev.txt pip install -e . diff --git a/.github/workflows/image_openeuler.yml b/.github/workflows/image_openeuler.yml index 0d585e690..690d8145c 100644 --- a/.github/workflows/image_openeuler.yml +++ b/.github/workflows/image_openeuler.yml @@ -19,6 +19,12 @@ on: - '.github/workflows/image_openeuler.yml' - 'Dockerfile.openEuler' - 'vllm_ascend/**' + - 'setup.py' + - 'pyproject.toml' + - 'requirements.txt' + - 'cmake/**' + - 'CMakeLists.txt' + - 'csrc/**' push: # Publish image when tagging, the Dockerfile in tag will be build as tag image branches: diff --git a/.github/workflows/image_ubuntu.yml b/.github/workflows/image_ubuntu.yml index 63dadc64a..a2cfbcefb 100644 --- a/.github/workflows/image_ubuntu.yml +++ b/.github/workflows/image_ubuntu.yml @@ -19,6 +19,12 @@ on: - '.github/workflows/image_ubuntu.yml' - 'Dockerfile' - 'vllm_ascend/**' + - 'setup.py' + - 'pyproject.toml' + - 'requirements.txt' + - 'cmake/**' + - 'CMakeLists.txt' + - 'csrc/**' push: # Publish image when tagging, the Dockerfile in tag will be build as tag image branches: diff --git a/.github/workflows/nightly_benchmarks.yaml b/.github/workflows/nightly_benchmarks.yaml index b060076c9..6e03c1e6b 100644 --- a/.github/workflows/nightly_benchmarks.yaml +++ b/.github/workflows/nightly_benchmarks.yaml @@ -115,6 +115,8 @@ jobs: VLLM_TARGET_DEVICE=empty pip install -e . - name: Install vllm-project/vllm-ascend + env: + PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi run: | pip install -e . pip install -r benchmarks/requirements-bench.txt diff --git a/.github/workflows/vllm_ascend_test.yaml b/.github/workflows/vllm_ascend_test.yaml index 62c91de8c..540680dd2 100644 --- a/.github/workflows/vllm_ascend_test.yaml +++ b/.github/workflows/vllm_ascend_test.yaml @@ -151,6 +151,7 @@ jobs: - name: Install vllm-project/vllm-ascend run: | + export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib python3 -m pip install -r requirements-dev.txt --extra-index https://download.pytorch.org/whl/cpu/ python3 -m pip install -v . --extra-index https://download.pytorch.org/whl/cpu/ @@ -214,6 +215,8 @@ jobs: VLLM_TARGET_DEVICE=empty pip install -e . - name: Install vllm-project/vllm-ascend + env: + PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi run: | pip install -r requirements-dev.txt pip install -v -e . @@ -311,6 +314,8 @@ jobs: VLLM_TARGET_DEVICE=empty pip install -e . - name: Install vllm-project/vllm-ascend + env: + PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi run: | pip install -r requirements-dev.txt pip install -v -e . 
diff --git a/.github/workflows/vllm_ascend_test_long_term.yaml b/.github/workflows/vllm_ascend_test_long_term.yaml index 46123590a..e249849e1 100644 --- a/.github/workflows/vllm_ascend_test_long_term.yaml +++ b/.github/workflows/vllm_ascend_test_long_term.yaml @@ -88,6 +88,8 @@ jobs: VLLM_TARGET_DEVICE=empty pip install -e . - name: Install vllm-project/vllm-ascend + env: + PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi run: | pip install -r requirements-dev.txt pip install -v -e . diff --git a/.github/workflows/vllm_ascend_test_pd.yaml b/.github/workflows/vllm_ascend_test_pd.yaml index c2c76c9f2..932b3e59b 100644 --- a/.github/workflows/vllm_ascend_test_pd.yaml +++ b/.github/workflows/vllm_ascend_test_pd.yaml @@ -97,6 +97,8 @@ jobs: VLLM_TARGET_DEVICE=empty pip install -e . - name: Install vllm-project/vllm-ascend + env: + PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi run: | pip install -r requirements-dev.txt pip install -v -e . diff --git a/Dockerfile b/Dockerfile index 952e77fe9..e6ec009d2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -46,7 +46,8 @@ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /vllm-workspace/vllm # Install vllm-ascend # Append `libascend_hal.so` path (devlib) to LD_LIBRARY_PATH -RUN source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ +RUN export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi && \ + source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ source /usr/local/Ascend/nnal/atb/set_env.sh && \ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \ python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ diff --git a/Dockerfile.openEuler b/Dockerfile.openEuler index 2ff3d0b39..4e414e04b 100644 --- a/Dockerfile.openEuler +++ b/Dockerfile.openEuler @@ -43,7 +43,8 @@ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /vllm-workspace/vllm/ - python3 -m pip cache purge # Install vllm-ascend -RUN source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ +RUN export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi && \ + source /usr/local/Ascend/ascend-toolkit/set_env.sh && \ source /usr/local/Ascend/nnal/atb/set_env.sh && \ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \ python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ diff --git a/README.md b/README.md index f3f9296c1..7d0966c8d 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,7 @@ By using vLLM Ascend plugin, popular open-source models, including Transformer-l - Software: * Python >= 3.9, < 3.12 * CANN >= 8.1.RC1 - * PyTorch >= 2.5.1, torch-npu >= 2.5.1 + * PyTorch >= 2.5.1, torch-npu >= 2.5.1.post1.dev20250528 * vLLM (the same version as vllm-ascend) ## Getting Started diff --git a/README.zh.md b/README.zh.md index 6adb0e183..2d2062a8b 100644 --- a/README.zh.md +++ b/README.zh.md @@ -39,7 +39,7 @@ vLLM 昇腾插件 (`vllm-ascend`) 是一个由社区维护的让vLLM在Ascend NP - 软件: * Python >= 3.9, < 3.12 * CANN >= 8.1.RC1 - * PyTorch >= 2.5.1, torch-npu >= 2.5.1 + * PyTorch >= 2.5.1, torch-npu >= 2.5.1.post1.dev20250528 * vLLM (与vllm-ascend版本一致) ## 开始使用 diff --git a/docs/source/installation.md b/docs/source/installation.md index c9f684491..79e4a88db 100644 --- a/docs/source/installation.md +++ b/docs/source/installation.md @@ -9,11 +9,11 @@ This document describes how to install vllm-ascend 
manually.
 
 - Hardware with an Ascend NPU. It's usually the Atlas 800 A2 series.
 - Software:
 
-  | Software  | Supported version | Note                                   |
-  |-----------|-------------------|----------------------------------------|
-  | CANN      | >= 8.1.RC1        | Required for vllm-ascend and torch-npu |
-  | torch-npu | >= 2.5.1          | Required for vllm-ascend               |
-  | torch     | >= 2.5.1          | Required for torch-npu and vllm        |
+  | Software      | Supported version          | Note                                    |
+  |---------------|----------------------------|-----------------------------------------|
+  | CANN          | >= 8.1.RC1                 | Required for vllm-ascend and torch-npu  |
+  | torch-npu     | >= 2.5.1.post1.dev20250528 | Required for vllm-ascend                |
+  | torch         | >= 2.5.1                   | Required for torch-npu and vllm         |
 
 You have 2 ways to install:
 - **Using pip**: first prepare env manually or via CANN image, then install `vllm-ascend` using pip.
@@ -156,6 +156,7 @@ cd ..
 # Install vLLM Ascend
 git clone --depth 1 --branch |vllm_ascend_version| https://github.com/vllm-project/vllm-ascend.git
 cd vllm-ascend
+export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi
 pip install -v -e .
 cd ..
 ```
diff --git a/requirements.txt b/requirements.txt
index 6b4abd6bc..eadb96f1e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,8 +10,6 @@ pyyaml
 scipy
 setuptools>=64
 setuptools-scm>=8
---extra-index-url https://mirrors.huaweicloud.com/ascend/repos/pypi
-torch-npu==2.5.1.post1.dev20250528
 torch>=2.5.1
 torchvision<0.21.0
 wheel
@@ -22,3 +20,8 @@ quart
 
 # Required for N-gram speculative decoding
 numba
+
+# Install torch_npu
+--pre
+--extra-index-url https://mirrors.huaweicloud.com/ascend/repos/pypi
+torch-npu==2.5.1.post1.dev20250528
diff --git a/setup.py b/setup.py
index 631e55c1b..327c2f879 100644
--- a/setup.py
+++ b/setup.py
@@ -152,7 +152,7 @@ def configure(self, ext: CMakeExtension) -> None:
             # if pybind11 is installed via pip
             pybind11_cmake_path = (subprocess.check_output(
                 [python_executable, "-m", "pybind11",
-                 "--cmake"]).decode().strip())
+                 "--cmakedir"]).decode().strip())
         except subprocess.CalledProcessError as e:
             # else specify pybind11 path installed from source code on CI container
             raise RuntimeError(f"CMake configuration failed: {e}")
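
To validate the series end-to-end, the new test from patch 1 can be run directly on a multi-NPU host. Below is a minimal local invocation sketch, not a CI recipe: the 2-NPU requirement comes from `data_parallel_size=2` in the test, and pinning the env var is only needed because the test self-skips when `VLLM_USE_V1` is "0".

```bash
# Sketch for local verification, not part of the series itself.
# Requires at least 2 NPUs, since the DP run uses data_parallel_size=2.
export VLLM_USE_V1=1   # the test is skipped when this is "0"
pytest -sv tests/multicard/test_data_parallel.py
```

If the DP and non-DP outputs agree, the rank fix is doing its job: with the `_init_data_parallel` override removed, vLLM's own engine-core setup presumably scopes each DP engine to its own visible devices, so `npu:{self.local_rank}` in `worker_v1.py` indexes the correct card without the old `local_dp_rank * world_size + local_rank` arithmetic.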