Commit b9fca83

[Bugfix] Fix GLM-4.1-V video prompt update (#20635)
Signed-off-by: Isotr0py <2037008807@qq.com>
1 parent 32dffc2 commit b9fca83

1 file changed, 5 additions(+), 2 deletions(-)

vllm/model_executor/models/glm4_1v.py

@@ -65,7 +65,7 @@
                                     MultiModalDataParser)
 from vllm.multimodal.processing import (BaseMultiModalProcessor,
                                         BaseProcessingInfo, PromptReplacement,
-                                        PromptUpdate)
+                                        PromptUpdate, PromptUpdateDetails)
 from vllm.multimodal.profiling import BaseDummyInputsBuilder
 from vllm.platforms import _Backend
 from vllm.sequence import IntermediateTensors
@@ -1213,7 +1213,10 @@ def get_video_replacement_glm4v(item_idx: int):
             placeholder.append(eoi_token_id)
             placeholder.extend(frame_idx)
             placeholder.append(eov_token_id)
-            return placeholder
+            return PromptUpdateDetails.select_token_id(
+                placeholder,
+                embed_token_id=hf_processor.video_token_id,
+            )
 
         return [
             PromptReplacement(
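
The gist of the fix: the video replacement previously returned the raw placeholder token list, which gives downstream code no way to distinguish the actual video placeholder tokens from the surrounding marker and frame-index tokens in the replacement. Wrapping the list in PromptUpdateDetails.select_token_id(..., embed_token_id=...) attaches a mask selecting only the tokens equal to embed_token_id as the positions where video embeddings are injected. Below is a minimal sketch of the selection semantics, not vLLM's implementation; the token ids are hypothetical (in the real code they come from the HF processor, e.g. hf_processor.video_token_id):

    # Hypothetical token ids for illustration only.
    BOI, VIDEO, EOI, EOV = 100, 101, 102, 103
    FRAME_IDX = [9000]  # hypothetical frame-index tokens

    # A replacement sequence like the one built in get_video_replacement_glm4v:
    # markers and frame-index tokens surround the video placeholder tokens.
    placeholder = [BOI, VIDEO, VIDEO, EOI, *FRAME_IDX, EOV]

    # select_token_id(placeholder, embed_token_id=VIDEO) marks only the
    # positions holding the video placeholder token as embedding slots:
    is_embed = [tok == VIDEO for tok in placeholder]
    assert is_embed == [False, True, True, False, False, False]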
