
Commit 54f3a05

feat: support compile torchair graph while warming up
Signed-off-by: boying <897013703@qq.com>
1 parent 3442fbd commit 54f3a05

File tree

7 files changed (+207, -215 lines)


.github/workflows/vllm_ascend_test.yaml

Lines changed: 2 additions & 4 deletions

@@ -108,8 +108,7 @@ jobs:
         run: |
           if [[ "${{ matrix.os }}" == "linux-arm64-npu-1" ]]; then
             VLLM_USE_MODELSCOPE=True pytest -sv tests/singlecard/test_offline_inference.py
-            # AscendScheduler doesn't work, fix it later
-            # pytest -sv tests/singlecard/tets_schedule.py
+            pytest -sv tests/singlecard/test_scheduler.py
             # guided decoding doesn't work, fix it later
             # pytest -sv tests/singlecard/test_guided_decoding.py.py
             pytest -sv tests/singlecard/ --ignore=tests/singlecard/test_offline_inference.py --ignore=tests/singlecard/test_scheduler.py --ignore=tests/singlecard/test_guided_decoding.py
@@ -124,8 +123,7 @@ jobs:
         run: |
           if [[ "${{ matrix.os }}" == "linux-arm64-npu-1" ]]; then
             VLLM_USE_MODELSCOPE=True pytest -sv tests/singlecard/test_offline_inference.py
-            # AscendScheduler doesn't work, fix it later
-            # pytest -sv tests/singlecard/tets_schedule.py
+            pytest -sv tests/singlecard/test_scheduler.py
             # guided decoding doesn't work, fix it later
             # pytest -sv tests/singlecard/test_guided_decoding.py.py
             pytest -sv tests/singlecard/ --ignore=tests/singlecard/test_offline_inference.py --ignore=tests/singlecard/test_scheduler.py --ignore=tests/singlecard/test_guided_decoding.py

tests/singlecard/test_scheduler.py

Lines changed: 5 additions & 26 deletions

@@ -83,11 +83,10 @@ def create_scheduler(
         cache_dtype="auto",
         **kwargs_cache,
     )
-    vllm_config = VllmConfig(
-        scheduler_config=scheduler_config,
-        model_config=model_config,
-        cache_config=cache_config,
-    )
+    vllm_config = VllmConfig(scheduler_config=scheduler_config,
+                             model_config=model_config,
+                             cache_config=cache_config)
+
     kv_cache_config = KVCacheConfig(
         num_blocks=10000,  # A large number of blocks to hold all requests
         tensors={},
@@ -98,10 +97,7 @@ def create_scheduler(
     )
     cache_config.num_gpu_blocks = 10000
     return AscendScheduler(
-        scheduler_config,
-        model_config,
-        cache_config,
-        lora_config=None,
+        vllm_config,
         kv_cache_config=kv_cache_config,
         log_stats=True,
         structured_output_manager=StructuredOutputManager(vllm_config),
@@ -128,14 +124,12 @@ def create_requests(num_requests: int,
            mm_inputs = None
        request = Request(
            request_id=f"{i}",
-            prompt=None,
            prompt_token_ids=[i] * num_tokens,
            sampling_params=sampling_params,
            multi_modal_inputs=mm_inputs,
            multi_modal_placeholders=mm_position,
            multi_modal_hashes=None,
            eos_token_id=EOS_TOKEN_ID,
-            arrival_time=0,
        )
        requests.append(request)
    return requests
@@ -227,10 +221,6 @@ def test_stop_via_update_from_output():
        },
        total_num_scheduled_tokens=3,
        scheduled_encoder_inputs={},
-        scheduled_spec_decode_tokens={
-            requests[0].request_id: [],
-            requests[1].request_id: [10]
-        },
        num_common_prefix_blocks=0,
        finished_req_ids=set(),
        free_encoder_input_ids=[],
@@ -277,10 +267,6 @@ def test_stop_via_update_from_output():
        },
        total_num_scheduled_tokens=5,
        scheduled_encoder_inputs={},
-        scheduled_spec_decode_tokens={
-            requests[0].request_id: [10, 42],
-            requests[1].request_id: [13]
-        },
        num_common_prefix_blocks=0,
        finished_req_ids=set(),
        free_encoder_input_ids=[],
@@ -325,10 +311,6 @@ def test_stop_via_update_from_output():
        },
        total_num_scheduled_tokens=4,
        scheduled_encoder_inputs={},
-        scheduled_spec_decode_tokens={
-            requests[0].request_id: [10, 11],
-            requests[1].request_id: []
-        },
        num_common_prefix_blocks=0,
        finished_req_ids=set(),
        free_encoder_input_ids=[],
@@ -371,9 +353,6 @@ def test_stop_via_update_from_output():
        num_scheduled_tokens={requests[0].request_id: 3},
        total_num_scheduled_tokens=3,
        scheduled_encoder_inputs={},
-        scheduled_spec_decode_tokens={
-            requests[0].request_id: [EOS_TOKEN_ID, 10]
-        },
        num_common_prefix_blocks=0,
        finished_req_ids=set(),
        free_encoder_input_ids=[],
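
The test changes track two interface updates visible in the hunks above: Request no longer accepts prompt or arrival_time, and AscendScheduler is now built from one bundled VllmConfig rather than separate scheduler/model/cache configs. A minimal sketch of the new construction path (import locations assumed from the file layout; the individual config objects are elided):

# Sketch of the constructor shape the test now exercises; the config objects
# are assumed to be built as in create_scheduler() above.
from vllm.config import VllmConfig
from vllm.v1.structured_output import StructuredOutputManager

from vllm_ascend.core.scheduler import AscendScheduler


def make_scheduler(scheduler_config, model_config, cache_config,
                   kv_cache_config):
    # One VllmConfig bundles what used to be passed as separate arguments.
    vllm_config = VllmConfig(scheduler_config=scheduler_config,
                             model_config=model_config,
                             cache_config=cache_config)
    return AscendScheduler(
        vllm_config,
        kv_cache_config=kv_cache_config,
        log_stats=True,
        structured_output_manager=StructuredOutputManager(vllm_config),
    )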

vllm_ascend/attention/mla_v1.py

Lines changed: 39 additions & 2 deletions

@@ -241,7 +241,44 @@ def _get_graph_runner_block_tables(
                            max_blocks] = block_tables[:num_seqs, :
                                                       max_blocks]

-        return graph_block_tables
+        return graph_block_tables[:num_seqs, :max_blocks]
+
+    def build_dummy(self, num_reqs: int,
+                    num_actual_tokens: int) -> AscendMLAMetadata:
+        device = self.runner.device
+        _, max_blocks = self.runner.graph_block_tables.shape
+        block_table = torch.zeros((num_reqs, max_blocks),
+                                  dtype=torch.int32,
+                                  device=device)
+        block_table = self._get_graph_runner_block_tables(
+            num_reqs, block_table)
+        seq_lens = torch.ones(num_reqs, dtype=torch.int32, device=device)
+        input_positions = torch.zeros(num_reqs,
+                                      dtype=torch.int32,
+                                      device=device).long()
+        slot_mapping = torch.full((num_reqs, ),
+                                  PAD_SLOT_ID,
+                                  dtype=torch.int32,
+                                  device=device)
+        decode_metadata = AscendMLADecodeMetadata(
+            input_positions=input_positions,
+            block_table=block_table,
+            seq_lens=seq_lens,
+            seq_lens_list=seq_lens.tolist(),
+            max_seq_lens=1)
+        return self.metadata_cls(  # type: ignore
+            num_input_tokens=num_actual_tokens,
+            num_actual_tokens=num_actual_tokens,
+            slot_mapping=slot_mapping,
+            head_dim=self.runner.model_config.get_head_size(),
+            num_decodes=1,
+            num_decode_tokens=1,
+            num_prefills=0,
+            attn_mask=self.runner.attn_mask,
+            attn_state=AscendAttentionState.DecodeOnly,
+            prefill=None,
+            decode=decode_metadata,
+        )

     def build(self,
               num_reqs: int,
@@ -324,7 +361,7 @@ def build(self,
             block_table = torch.cat([block_table, block_table_padding],
                                     dim=0)
             block_table = self._get_graph_runner_block_tables(
-                num_seqs, block_table)
+                num_seqs + graph_pad_size, block_table)
             padding_0 = torch.zeros(graph_pad_size,
                                     dtype=input_positions.dtype,
                                     device=input_positions.device)
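
The new build_dummy() above is the piece that lets the torchair graph be compiled during warmup: it fabricates decode-only AscendMLAMetadata (padded block table, PAD_SLOT_ID slot mapping, length-1 sequences) without any live requests. A rough sketch of how a warmup loop might drive it follows; the runner attributes (attn_metadata_builder, model, graph_batch_sizes) and the model call signature are assumptions for illustration, not code from this commit.

# Hypothetical warmup sketch: only build_dummy() and its arguments come from
# the diff above; the runner/model attribute names are illustrative.
import torch


def warm_up_torchair_graphs(runner, graph_batch_sizes):
    """Compile the decode-only graph once per padded batch size."""
    for num_reqs in graph_batch_sizes:
        # Decode-only warmup: assume one token per request.
        num_tokens = num_reqs
        attn_metadata = runner.attn_metadata_builder.build_dummy(
            num_reqs=num_reqs, num_actual_tokens=num_tokens)
        input_ids = torch.zeros(num_tokens,
                                dtype=torch.int32,
                                device=runner.device)
        positions = torch.zeros(num_tokens,
                                dtype=torch.int64,
                                device=runner.device)
        with torch.no_grad():
            # Running the model against the dummy metadata is what triggers
            # torchair graph compilation for this batch size.
            runner.model(input_ids=input_ids,
                         positions=positions,
                         attn_metadata=attn_metadata)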

vllm_ascend/core/scheduler.py

Lines changed: 6 additions & 139 deletions

@@ -15,20 +15,18 @@
 # This file is a part of the vllm-ascend project.
 #
 from collections import deque
-from typing import Iterable, Optional, Union
+from typing import Iterable, Union

 from vllm.config import VllmConfig
 from vllm.logger import logger
 from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry
 from vllm.utils import cdiv
 from vllm.v1.core.sched.output import NewRequestData, SchedulerOutput
 from vllm.v1.core.sched.scheduler import Scheduler
-from vllm.v1.core.sched.utils import check_stop
-from vllm.v1.engine import EngineCoreOutput, EngineCoreOutputs
+from vllm.v1.engine import EngineCoreOutputs
 from vllm.v1.kv_cache_interface import KVCacheConfig
 from vllm.v1.outputs import ModelRunnerOutput
 from vllm.v1.request import Request, RequestStatus
-from vllm.v1.spec_decode.metrics import SpecDecodingStats
 from vllm.v1.structured_output import StructuredOutputManager


@@ -101,6 +99,7 @@ def skip_cur_request():
             # Get already-cached tokens.
             computed_blocks, num_computed_tokens = (
                 self.kv_cache_manager.get_computed_blocks(request))
+            computed_blocks = computed_blocks.blocks
             num_new_tokens = request.num_tokens - num_computed_tokens
             if (0 < self.scheduler_config.long_prefill_token_threshold <
                     num_new_tokens):
@@ -365,41 +364,22 @@ def finish_requests(
        For example, the API server can abort a request when the client
        disconnects.
        """
-        assert RequestStatus.is_finished(finished_status)
-        if isinstance(request_ids, str):
-            request_ids = (request_ids, )
-        else:
-            request_ids = set(request_ids)
-
        for req_id in request_ids:
            request = self.requests.get(req_id)
            if request is None:
                # Invalid request ID.
                continue
-
            if request.status == RequestStatus.RUNNING:
-                self.running.remove(request)
                self.scheduled_req_ids.discard(request.request_id)
-            else:
-                self.waiting.remove(request)
-            request.status = finished_status
-            self._free_request(request)
+        super().finish_requests(request_ids, finished_status)

    def update_from_output(
        self,
        scheduler_output: SchedulerOutput,
        model_runner_output: ModelRunnerOutput,
    ) -> EngineCoreOutputs:
-        sampled_token_ids = model_runner_output.sampled_token_ids
-        spec_token_ids = model_runner_output.spec_token_ids
-        logprobs = model_runner_output.logprobs
-        prompt_logprobs_dict = model_runner_output.prompt_logprobs_dict
        num_scheduled_tokens = scheduler_output.num_scheduled_tokens

-        new_running: list[Request] = []
-        outputs: list[EngineCoreOutput] = []
-        spec_decoding_stats: Optional[SpecDecodingStats] = None
-
        # NOTE(woosuk): As len(self.running) can be up to 1K or more, the below
        # loop can be a performance bottleneck. We should do our best to avoid
        # expensive operations inside the loop.
@@ -408,121 +388,8 @@ def update_from_output(
            num_tokens_scheduled = num_scheduled_tokens.get(req_id, 0)
            if num_tokens_scheduled == 0:
                # The request was not scheduled in this step.
-                new_running.append(request)
                continue
-
-            req_index = model_runner_output.req_id_to_index[req_id]
-            generated_token_ids = sampled_token_ids[req_index]
-
-            scheduled_spec_token_ids = (
-                scheduler_output.scheduled_spec_decode_tokens.get(req_id))
-            if scheduled_spec_token_ids:
-                # num_computed_tokens represents the number of tokens
-                # processed in the current step, considering scheduled
-                # tokens and rejections. If some tokens are rejected,
-                # num_computed_tokens is decreased by the number of rejected
-                # tokens, where is given by:
-                # len(scheduled_spec_token_ids) + 1 - len(generated_token_ids).
-                num_tokens_rejected = (len(scheduled_spec_token_ids) + 1 -
-                                       len(generated_token_ids))
-                request.num_computed_tokens -= num_tokens_rejected
-                spec_decoding_stats = self.make_spec_decoding_stats(
-                    spec_decoding_stats,
-                    num_draft_tokens=len(scheduled_spec_token_ids),
-                    num_accepted_tokens=len(generated_token_ids) - 1)
-
-            cached_encoder_input_ids = (
-                self.encoder_cache_manager.get_cached_input_ids(request))
-            # OPTIMIZATION: Avoid list(set) if the set is empty.
-            if cached_encoder_input_ids:
-                for input_id in list(cached_encoder_input_ids):
-                    mm_positions = request.mm_positions[input_id]
-                    start_pos = mm_positions.offset
-                    num_tokens = mm_positions.length
-                    if start_pos + num_tokens <= request.num_computed_tokens:
-                        # The encoder output is already processed and stored
-                        # in the decoder's KV cache.
-                        self.encoder_cache_manager.free_encoder_input(
-                            request, input_id)
-
-            stopped = False
-            new_logprobs = None
-            new_token_ids = generated_token_ids
-
-            # Append generated tokens and check for stop. Note that if
-            # a request is still being prefilled, we expect the model runner
-            # to return empty token ids for the request.
-            for num_new, output_token_id in enumerate(new_token_ids, 1):
-                request.append_output_token_ids(output_token_id)
-
-                # Check for stop and update request state.
-                # This must be called before we make the EngineCoreOutput.
-                stopped = check_stop(request, self.max_model_len)
-                if stopped:
-                    self._free_request(request)
-                    del new_token_ids[num_new:]  # Trim new tokens if needed.
-                    break
-
-            # Extract sample logprobs if needed.
-            if request.sampling_params.logprobs is not None and logprobs:
-                # NOTE: once we support N tokens per step (spec decode),
-                # the outer lists can be of length > 1.
-                new_logprobs = logprobs.slice(req_index, req_index + 1)
-
-            if new_token_ids and request.use_structured_output:
-                # NOTE: structured_output_request
-                # should not be None if use_structured_output, we have
-                # check above, so safe to ignore type warning
-                request.structured_output_request.grammar.accept_tokens(  # type: ignore[union-attr]
-                    req_id, new_token_ids)
-
-            # Add newly generated spec token ids to the request.
-            if spec_token_ids is not None:
-                if request.use_structured_output:
-                    metadata = request.structured_output_request
-                    assert metadata is not None and metadata.grammar is not None
-                    # Needs to happen after new_token_ids are accepted.
-                    request.spec_token_ids = metadata.grammar.validate_tokens(
-                        spec_token_ids[req_index])
-                else:
-                    request.spec_token_ids = spec_token_ids[req_index]
-
-            # Get prompt logprobs for this request.
-            prompt_logprobs_tensors = prompt_logprobs_dict.get(req_id)
-            if new_token_ids:
-                # Add EngineCoreOutput for this Request.
-                outputs.append(
-                    EngineCoreOutput(
-                        request_id=req_id,
-                        new_token_ids=new_token_ids,
-                        finish_reason=request.get_finished_reason(),
-                        new_logprobs=new_logprobs,
-                        new_prompt_logprobs_tensors=prompt_logprobs_tensors,
-                        stop_reason=request.stop_reason,
-                        events=request.take_events()))
-            else:
-                # Invariant: EngineCore returns no partial prefill outputs.
-                assert not prompt_logprobs_tensors
-
            self.scheduled_req_ids.remove(req_id)
-            if not stopped:
-                new_running.append(request)
-
-        # Return the cached request data to the queue so they can be reused.
-        for req_data in scheduler_output.scheduled_cached_reqs:
-            # NOTE(rob): since we free stopped reqs above, adding stopped reqs
-            # to _cached_reqs_data will cause a memory leak.
-            if req_data.req_id not in self.finished_req_ids:
-                self._cached_reqs_data[req_data.req_id].append(req_data)
-
-        self.running = new_running
-        engine_core_outputs = EngineCoreOutputs(
-            outputs=outputs,
-            scheduler_stats=self.make_stats(spec_decoding_stats),
-        )
-        if self.include_finished_set:
-            #TODO currently sending duplicates here, improve this
-            engine_core_outputs.finished_requests = (
-                scheduler_output.finished_req_ids | self.finished_req_ids)

-        return engine_core_outputs
+        return super().update_from_output(scheduler_output,
+                                          model_runner_output)
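
Taken together, the deletions above mean AscendScheduler no longer duplicates upstream logic: both methods keep only the NPU-specific scheduled_req_ids bookkeeping and defer everything else to vllm's base Scheduler. A condensed restatement of the post-diff methods (paraphrased from the hunks above; the surrounding loop header is assumed to iterate self.running):

# Condensed restatement of the two methods as they read after this diff.
from vllm.v1.core.sched.scheduler import Scheduler
from vllm.v1.request import RequestStatus


class AscendScheduler(Scheduler):

    def finish_requests(self, request_ids, finished_status):
        for req_id in request_ids:
            request = self.requests.get(req_id)
            if request is None:
                continue  # Invalid request ID.
            if request.status == RequestStatus.RUNNING:
                # Drop the NPU-side bookkeeping entry, then let the base
                # class update its queues and free the request.
                self.scheduled_req_ids.discard(request.request_id)
        super().finish_requests(request_ids, finished_status)

    def update_from_output(self, scheduler_output, model_runner_output):
        num_scheduled_tokens = scheduler_output.num_scheduled_tokens
        for request in self.running:  # loop header assumed from context
            req_id = request.request_id
            if num_scheduled_tokens.get(req_id, 0) == 0:
                continue  # Not scheduled in this step.
            self.scheduled_req_ids.remove(req_id)
        # Token appending, stop checks, logprobs, spec decode and output
        # assembly are now handled entirely by the upstream Scheduler.
        return super().update_from_output(scheduler_output,
                                          model_runner_output)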

vllm_ascend/envs.py

Lines changed: 2 additions & 0 deletions

@@ -66,6 +66,8 @@
     lambda: os.getenv("C_COMPILER", None),
     "VLLM_VERSION":
     lambda: os.getenv("VLLM_VERSION", None),
+    "VLLM_ASCEND_TRACE_RECOMPILES":
+    lambda: bool(int(os.getenv("VLLM_ASCEND_TRACE_RECOMPILES", '0'))),
 }

 # end-env-vars-definition
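
The new entry follows the existing pattern in this file: each variable maps to a lazily evaluated lambda, so the environment string is read and coerced to bool only when the flag is accessed. A minimal standalone sketch of that lookup pattern (not the actual vllm_ascend/envs.py module):

# Standalone illustration of the lazy env-var table; not the real module.
import os

env_variables = {
    "VLLM_ASCEND_TRACE_RECOMPILES":
    lambda: bool(int(os.getenv("VLLM_ASCEND_TRACE_RECOMPILES", '0'))),
}

# The lambda is evaluated at access time, so setting the variable first
# changes the result of the lookup.
os.environ["VLLM_ASCEND_TRACE_RECOMPILES"] = "1"
assert env_variables["VLLM_ASCEND_TRACE_RECOMPILES"]() is True

del os.environ["VLLM_ASCEND_TRACE_RECOMPILES"]
assert env_variables["VLLM_ASCEND_TRACE_RECOMPILES"]() is False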
