Skip to content

Commit 9386057

Browse files
[ModelRunner][MultiModal] Remove legacy input mapper/processor from V0 (#951)
### What this PR does / why we need it?

Remove legacy input mapper/processor from V0. Find more details at #673 and vllm-project/vllm#15686.

### Does this PR introduce _any_ user-facing change?

No.

### How was this patch tested?

Launch online service:

```bash
vllm serve Qwen/Qwen2.5-VL-7B-Instruct \
  --dtype bfloat16 \
  --max_model_len 32768 \
  --max-num-batched-tokens 32768
```

Query the server:

```bash
curl http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "Qwen/Qwen2.5-VL-7B-Instruct",
    "messages": [
      {"role": "system", "content": "You are a helpful assistant."},
      {"role": "user", "content": [
        {"type": "image_url", "image_url": {"url": "https://modelscope.oss-cn-beijing.aliyuncs.com/resource/qwen.png"}},
        {"type": "text", "text": "What is the text in the illustrate?"}
      ]}
    ]
  }'
```

Result:

```bash
{"id":"chatcmpl-619e70733ed148b3be3a0b6524ee0ef3","object":"chat.completion","created":1748226332,"model":"/home/sss/.cache/modelscope/hub/models/Qwen/Qwen2___5-VL-7B-Instruct","choices":[{"index":0,"message":{"role":"assistant","reasoning_content":null,"content":"The text in the illustration reads \"TONGYI Qwen.\"","tool_calls":[]},"logprobs":null,"finish_reason":"stop","stop_reason":null}],"usage":{"pro
```

Signed-off-by: shen-shanshan <467638484@qq.com>
Co-authored-by: wangxiyuan <wangxiyuan1007@gmail.com>
1 parent 6ec64a3 commit 9386057

File tree

1 file changed

+5
-17
lines changed

1 file changed

+5
-17
lines changed

vllm_ascend/worker/model_runner.py

Lines changed: 5 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -391,7 +391,6 @@ def __init__(self,
391391
self.sliding_window = self.runner.sliding_window
392392
self.block_size = self.runner.block_size
393393
self.enable_lora = self.runner.lora_config is not None
394-
self.multi_modal_input_mapper = self.runner.multi_modal_input_mapper
395394
self.finished_requests_ids = finished_requests_ids
396395
self.decode_only = True
397396
self.is_encoder_decoder = self.runner.model_config.is_encoder_decoder
@@ -786,23 +785,15 @@ def _compute_lora_input(self, inter_data: InterDataForSeqGroup,
786785
def _compute_multi_modal_input(self, inter_data: InterDataForSeqGroup,
787786
seq_group_metadata: SequenceGroupMetadata):
788787
"""If multi-modal data is given, add it to the input."""
789-
# NOTE: mm_data only includes the subset of multi-modal items that
788+
# NOTE: mm_kwargs only includes the subset of multi-modal items that
790789
# intersect with the current prefill positions.
791790
positions = inter_data.input_positions[0]
792-
mm_data, placeholder_maps = MultiModalPlaceholderMap.from_seq_group(
791+
mm_kwargs, placeholder_maps = MultiModalPlaceholderMap.from_seq_group(
793792
seq_group_metadata,
794793
range(positions[0], positions[0] + len(positions)))
795-
if not mm_data:
794+
if not mm_kwargs:
796795
return
797796

798-
if self.runner.mm_registry.has_processor(self.runner.model_config):
799-
mm_kwargs = mm_data
800-
else:
801-
mm_kwargs = self.multi_modal_input_mapper(
802-
mm_data,
803-
seq_group_metadata.mm_processor_kwargs,
804-
)
805-
806797
inter_data.multi_modal_kwargs = mm_kwargs
807798
inter_data.multi_modal_placeholder_maps = placeholder_maps
808799

@@ -918,9 +909,6 @@ def __init__(
918909
# Multi-modal data support
919910
self.input_registry = input_registry
920911
self.mm_registry = mm_registry
921-
self.multi_modal_input_mapper = mm_registry \
922-
.create_input_mapper(model_config)
923-
self.mm_registry.init_mm_limits_per_prompt(self.model_config)
924912

925913
# Lazy initialization
926914
self.model: nn.Module # Set after load_model
@@ -1116,8 +1104,8 @@ def profile_run(self) -> None:
11161104

11171105
dummy_data = self.input_registry \
11181106
.dummy_data_for_profiling(self.model_config,
1119-
seq_len,
1120-
self.mm_registry)
1107+
seq_len,
1108+
self.mm_registry)
11211109

11221110
seq = SequenceGroupMetadata(
11231111
request_id=str(group_id),

0 commit comments

Comments (0)