
Commit cf6ab42

[CI] Add guided decoding test (#422)
### What this PR does / why we need it?

After extensive testing, we are happy to report that guided decoding is fully supported on NPU. This PR adds guided decoding to our integration tests; it mainly does the following:

1. Test the backends supported on V0: `"outlines"`, `"lm-format-enforcer"`, `"xgrammar"`.
2. Test the backends supported on V1: `"guidance"`, `"xgrammar"`.

---------

Signed-off-by: wangli <wangli858794774@gmail.com>
1 parent 538a69c commit cf6ab42
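
For context on what the new tests exercise: in vLLM, guided decoding is requested per request through `GuidedDecodingParams` on `SamplingParams`, while the backend is chosen when the engine is created (the same knobs the tests below pass to `VllmRunner`). A minimal sketch using the plain offline `LLM` API, assuming a vLLM build that accepts `guided_decoding_backend` as an engine argument; the prompt and regex are illustrative and not taken from this PR:

```python
# Minimal sketch of guided decoding with vLLM's offline LLM API (illustrative,
# not part of this PR). The model matches the one used in the new test.
from vllm import LLM, SamplingParams
from vllm.sampling_params import GuidedDecodingParams

llm = LLM(model="Qwen/Qwen2.5-1.5B-Instruct",
          guided_decoding_backend="xgrammar")  # one of the backends under test

sampling_params = SamplingParams(
    temperature=0.0,
    max_tokens=32,
    # Constrain the output to an IPv4-shaped string.
    guided_decoding=GuidedDecodingParams(regex=r"\d{1,3}(\.\d{1,3}){3}"),
)

outputs = llm.generate(["Give an example IPv4 address:"], sampling_params)
print(outputs[0].outputs[0].text)
```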

File tree

2 files changed: +176 −0 lines


requirements-dev.txt

Lines changed: 1 addition & 0 deletions

@@ -4,3 +4,4 @@ pytest >= 6.0
 pytest-asyncio
 lm-eval
 ray
+types-jsonschema
Lines changed: 175 additions & 0 deletions

@@ -0,0 +1,175 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# This file is a part of the vllm-ascend project.
# Adapted from vllm/tests/entrypoints/llm/test_guided_generate.py
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import re

import jsonschema
import pytest
from vllm.outputs import RequestOutput
from vllm.sampling_params import GuidedDecodingParams, SamplingParams

from tests.conftest import VllmRunner

os.environ["PYTORCH_NPU_ALLOC_CONF"] = "max_split_size_mb:256"
MODEL_NAME = "Qwen/Qwen2.5-1.5B-Instruct"
GuidedDecodingBackendV0 = [
    "outlines",
    "lm-format-enforcer",
    "xgrammar",
]
GuidedDecodingBackendV1 = ["xgrammar", "guidance:disable-any-whitespace"]
GuidedDecodingBackend = list(
    set(GuidedDecodingBackendV0 + GuidedDecodingBackendV1))


@pytest.fixture(scope="module")
def sample_regex():
    return (r"((25[0-5]|(2[0-4]|1\d|[1-9]|)\d)\.){3}"
            r"(25[0-5]|(2[0-4]|1\d|[1-9]|)\d)")


@pytest.fixture(scope="module")
def sample_json_schema():
    return {
        "type": "object",
        "properties": {
            "name": {
                "type": "string"
            },
            "age": {
                "type": "integer"
            },
            "skills": {
                "type": "array",
                "items": {
                    "type": "string",
                    "maxLength": 10
                },
                "minItems": 3
            },
            "work_history": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "company": {
                            "type": "string"
                        },
                        "duration": {
                            "type": "number"
                        },
                        "position": {
                            "type": "string"
                        }
                    },
                    "required": ["company", "position"]
                }
            }
        },
        "required": ["name", "age", "skills", "work_history"]
    }


@pytest.mark.parametrize("guided_decoding_backend", GuidedDecodingBackend)
def test_guided_json_completion(guided_decoding_backend: str,
                                sample_json_schema):
    if guided_decoding_backend == "xgrammar":
        # xgrammar does not support this json schema and falls back to
        # outlines, so skip it
        pytest.skip(
            f"{guided_decoding_backend} will fall back to outlines, skip it")
    if guided_decoding_backend not in GuidedDecodingBackendV0 and os.getenv(
            "VLLM_USE_V1") == "0":
        # guidance is not supported on v0, skip it
        pytest.skip(
            f"{guided_decoding_backend} is not supported on v0, skip it")
    if guided_decoding_backend not in GuidedDecodingBackendV1 and os.getenv(
            "VLLM_USE_V1") == "1":
        pytest.skip(
            f"{guided_decoding_backend} is not supported on v1, skip it")

    sampling_params = SamplingParams(
        temperature=1.0,
        max_tokens=1000,
        guided_decoding=GuidedDecodingParams(json=sample_json_schema))
    with VllmRunner(
            MODEL_NAME,
            seed=0,
            dtype="auto",
            guided_decoding_backend=guided_decoding_backend,
    ) as vllm_model:
        prompts = [
            f"Give an example JSON for an employee profile "
            f"that fits this schema: {sample_json_schema}"
        ] * 2
        inputs = vllm_model.get_inputs(prompts)
        outputs = vllm_model.model.generate(inputs,
                                            sampling_params=sampling_params)

        assert outputs is not None

        for output in outputs:
            assert output is not None
            assert isinstance(output, RequestOutput)
            prompt = output.prompt

            generated_text = output.outputs[0].text
            assert generated_text is not None
            print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
            output_json = json.loads(generated_text)
            jsonschema.validate(instance=output_json,
                                schema=sample_json_schema)


@pytest.mark.parametrize("guided_decoding_backend", GuidedDecodingBackend)
def test_guided_regex(guided_decoding_backend: str, sample_regex):
    if guided_decoding_backend not in GuidedDecodingBackendV0 and os.getenv(
            "VLLM_USE_V1") == "0":
        # guidance is not supported on v0, skip it
        pytest.skip(
            f"{guided_decoding_backend} is not supported on v0, skip it")
    if guided_decoding_backend not in GuidedDecodingBackendV1 and os.getenv(
            "VLLM_USE_V1") == "1":
        pytest.skip(
            f"{guided_decoding_backend} is not supported on v1, skip it")

    sampling_params = SamplingParams(temperature=0.8,
                                     top_p=0.95,
                                     guided_decoding=GuidedDecodingParams(
                                         regex=sample_regex))
    with VllmRunner(
            MODEL_NAME,
            seed=0,
            dtype="auto",
            guided_decoding_backend=guided_decoding_backend,
    ) as vllm_model:
        prompts = [
            f"Give an example IPv4 address with this regex: {sample_regex}"
        ] * 2
        inputs = vllm_model.get_inputs(prompts)
        outputs = vllm_model.model.generate(inputs,
                                            sampling_params=sampling_params)
        assert outputs is not None
        for output in outputs:
            assert output is not None
            assert isinstance(output, RequestOutput)
            prompt = output.prompt
            generated_text = output.outputs[0].text
            print(generated_text)
            assert generated_text is not None
            assert re.fullmatch(".*", generated_text) is not None
            print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
