Skip to content

Commit 494b0f4

Browse files
authored
[CI] Fix broken CI (#1773)
This PR fixes the broken CI. It requires vllm-project/vllm#20900 to be merged first. - vLLM version: v0.9.2 - vLLM main: vllm-project/vllm@e8cc53a Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
1 parent afcfe91 commit 494b0f4

File tree

6 files changed

+22
-392
lines changed

6 files changed

+22
-392
lines changed

tests/conftest.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@
4747
from vllm_ascend.utils import adapt_patch # noqa E402
4848

4949
adapt_patch(True)
50+
adapt_patch(False)
5051

5152
from vllm.distributed.parallel_state import ( # noqa E402
5253
destroy_distributed_environment, destroy_model_parallel)

tests/e2e/singlecard/core/ascend_scheduler/test_ascend_scheduler.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616
from vllm.v1.structured_output import StructuredOutputManager
1717

1818
from vllm_ascend.core.scheduler import AscendScheduler
19+
from vllm_ascend.utils import vllm_version_is
1920

2021
EOS_TOKEN_ID = 50256
2122

@@ -303,6 +304,8 @@ def test_stop_via_update_from_output():
303304
req.num_computed_tokens = req.num_tokens
304305
scheduler.requests[req.request_id] = req
305306
scheduler.running.append(req)
307+
if not vllm_version_is("0.9.2"):
308+
req.status = RequestStatus.RUNNING
306309

307310
scheduler_output = SchedulerOutput(scheduled_new_reqs=[],
308311
scheduled_cached_reqs=[],
@@ -355,6 +358,8 @@ def test_stop_via_update_from_output():
355358
req.num_computed_tokens = req.num_tokens
356359
scheduler.requests[req.request_id] = req
357360
scheduler.running.append(req)
361+
if not vllm_version_is("0.9.2"):
362+
req.status = RequestStatus.RUNNING
358363

359364
scheduler_output = SchedulerOutput(scheduled_new_reqs=[],
360365
scheduled_cached_reqs=[],
@@ -405,6 +410,8 @@ def test_stop_via_update_from_output():
405410
req.num_computed_tokens = req.num_tokens
406411
scheduler.requests[req.request_id] = req
407412
scheduler.running.append(req)
413+
if not vllm_version_is("0.9.2"):
414+
req.status = RequestStatus.RUNNING
408415

409416
scheduler_output = SchedulerOutput(scheduled_new_reqs=[],
410417
scheduled_cached_reqs=[],

tests/e2e/singlecard/test_prompt_embedding.py

Lines changed: 0 additions & 259 deletions
This file was deleted.

tests/e2e/singlecard/test_scheduler.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@
3131
from vllm.v1.structured_output import StructuredOutputManager
3232

3333
from vllm_ascend.core.scheduler import AscendScheduler
34+
from vllm_ascend.utils import vllm_version_is
3435

3536
EOS_TOKEN_ID = 50256
3637

@@ -213,6 +214,8 @@ def test_stop_via_update_from_output():
213214
scheduler.requests[req.request_id] = req
214215
scheduler.running.append(req)
215216
scheduler.scheduled_req_ids.add(req.request_id)
217+
if not vllm_version_is("0.9.2"):
218+
req.status = RequestStatus.RUNNING
216219

217220
scheduler_output = SchedulerOutput(scheduled_new_reqs=[],
218221
scheduled_cached_reqs=[],
@@ -263,6 +266,8 @@ def test_stop_via_update_from_output():
263266
scheduler.requests[req.request_id] = req
264267
scheduler.running.append(req)
265268
scheduler.scheduled_req_ids.add(req.request_id)
269+
if not vllm_version_is("0.9.2"):
270+
req.status = RequestStatus.RUNNING
266271

267272
scheduler_output = SchedulerOutput(scheduled_new_reqs=[],
268273
scheduled_cached_reqs=[],
@@ -311,6 +316,8 @@ def test_stop_via_update_from_output():
311316
scheduler.requests[req.request_id] = req
312317
scheduler.running.append(req)
313318
scheduler.scheduled_req_ids.add(req.request_id)
319+
if not vllm_version_is("0.9.2"):
320+
req.status = RequestStatus.RUNNING
314321

315322
scheduler_output = SchedulerOutput(scheduled_new_reqs=[],
316323
scheduled_cached_reqs=[],

0 commit comments

Comments
 (0)