From cf26a500f5a0f5429d7e95dec5683a281eb58b68 Mon Sep 17 00:00:00 2001 From: Yikun Jiang Date: Sun, 8 Jun 2025 07:31:27 +0800 Subject: [PATCH 1/3] Move numba/quart from requirements-dev to requirements Signed-off-by: Yikun Jiang --- pyproject.toml | 3 +++ requirements-dev.txt | 2 -- requirements.txt | 3 +++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index df5b6a1201..b44197038b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,5 +16,8 @@ requires = [ "torch>=2.5.1", "torchvision<0.21.0", "wheel", + "msgpack", + "quart", + "numba", ] build-backend = "setuptools.build_meta" diff --git a/requirements-dev.txt b/requirements-dev.txt index 6770a005d9..133d4604c2 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -9,6 +9,4 @@ ray types-jsonschema xgrammar zmq -numba -quart types-psutil diff --git a/requirements.txt b/requirements.txt index 58afc4023b..2f844df74a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,3 +18,6 @@ wheel # requirements for disaggregated prefill msgpack quart + +# Required for N-gram speculative decoding numba From 73ff936638d3d8fdee9a1179dc06f58ed3873049 Mon Sep 17 00:00:00 2001 From: Yikun Jiang Date: Sun, 8 Jun 2025 09:11:32 +0800 Subject: [PATCH 2/3] Update DeepSeek-V2-Lite accuracy baseline Signed-off-by: Yikun Jiang --- tests/long_term/test_deepseek_v2_lite_tp2_accuracy.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/long_term/test_deepseek_v2_lite_tp2_accuracy.py b/tests/long_term/test_deepseek_v2_lite_tp2_accuracy.py index 6a3118d141..27986cb149 100644 --- a/tests/long_term/test_deepseek_v2_lite_tp2_accuracy.py +++ b/tests/long_term/test_deepseek_v2_lite_tp2_accuracy.py @@ -34,8 +34,7 @@ # 3% relative tolerance for numerical accuracy. RTOL = 0.03 # Baseline accuracy after VLLM optimization. 
-# FIXME: fix the accuracy issue -EXPECTED_VALUE = 0.000758150113722517 +EXPECTED_VALUE = 0.3843821076573162 def run_test(model_name, queue, more_args=None): From b668d96a14063d327262b9d5570c7ec3ee1c032f Mon Sep 17 00:00:00 2001 From: Yikun Jiang Date: Sun, 8 Jun 2025 20:02:00 +0800 Subject: [PATCH 3/3] Follow vllm-project/vllm@eaa2e51 Signed-off-by: Yikun Jiang --- tests/singlecard/compile/test_simple.py | 32 +++++++++++++++----- vllm_ascend/compilation/piecewise_backend.py | 7 ++++- 2 files changed, 30 insertions(+), 9 deletions(-) diff --git a/tests/singlecard/compile/test_simple.py b/tests/singlecard/compile/test_simple.py index cb544224e3..64d4cba672 100644 --- a/tests/singlecard/compile/test_simple.py +++ b/tests/singlecard/compile/test_simple.py @@ -14,6 +14,8 @@ set_current_vllm_config) from vllm.utils import direct_register_custom_op +from vllm_ascend.utils import vllm_version_is + global_counter = 0 # create a library to hold the custom op @@ -92,14 +94,28 @@ def test_simple_piecewise_compile(): inputs = torch.randn(100).npu() - with compilation_counter.expect( - num_graphs_seen=1, # one graph for the model - num_piecewise_graphs_seen=5, # 2 * num_layers + 1 - num_piecewise_capturable_graphs_seen=3, # 1 + num_layers - num_backend_compilations=3, # num_piecewise_capturable_graphs_seen - num_cudagraph_caputured= - 6, # num_cudagraph_sizes * num_piecewise_capturable_graphs_seen - ): + if vllm_version_is("0.9.0"): + kwargs = { + "num_graphs_seen": 1, # one graph for the model + "num_piecewise_graphs_seen": 5, # 2 * num_layers + 1 + "num_piecewise_capturable_graphs_seen": 3, # 1 + num_layers + "num_backend_compilations": + 3, # num_piecewise_capturable_graphs_seen + "num_cudagraph_caputured": + 6 # num_cudagraph_sizes * num_piecewise_capturable_graphs_seen + } + else: + kwargs = { + "num_graphs_seen": 1, # one graph for the model + "num_piecewise_graphs_seen": 5, # 2 * num_layers + 1 + "num_piecewise_capturable_graphs_seen": 3, # 1 + num_layers + 
"num_backend_compilations": + 3, # num_piecewise_capturable_graphs_seen + "num_cudagraph_captured": + 6 # num_cudagraph_sizes * num_piecewise_capturable_graphs_seen + } + + with compilation_counter.expect(kwargs): model(inputs) diff --git a/vllm_ascend/compilation/piecewise_backend.py b/vllm_ascend/compilation/piecewise_backend.py index fc95983d29..95ce693933 100644 --- a/vllm_ascend/compilation/piecewise_backend.py +++ b/vllm_ascend/compilation/piecewise_backend.py @@ -31,6 +31,8 @@ from vllm.logger import logger from vllm.utils import weak_ref_tensors +from vllm_ascend.utils import vllm_version_is + @dataclasses.dataclass class ConcreteSizeEntry: @@ -205,7 +207,10 @@ def __call__(self, *args) -> Any: entry.output = weak_ref_tensors(output) entry.aclgraph = aclgraph - compilation_counter.num_cudagraph_caputured += 1 + if vllm_version_is("0.9.0"): + compilation_counter.num_cudagraph_caputured += 1 + else: + compilation_counter.num_cudagraph_captured += 1 # important: we need to return the output, rather than # the weak ref of the output, so that pytorch can correctly