
Commit e730323

forgot to add @support_torch_compile decorator
Signed-off-by: raushan <raushan@huggingface.co>
1 parent be850dc commit e730323

File tree: 1 file changed, +5 −0 lines changed


vllm/model_executor/models/transformers.py

Lines changed: 5 additions & 0 deletions
```diff
@@ -750,6 +750,7 @@ def load_weights(self, weights: Iterable[tuple[str,
     MultiModalProcessor,
     info=MultiModalProcessingInfo,
     dummy_inputs=MultiModalDummyInputsBuilder)
+@support_torch_compile
 class TransformersForMultimodalLM(nn.Module, SupportsQuant, SupportsLoRA,
                                   SupportsPP, SupportsMultiModal):
     embedding_padding_modules = ["lm_head"]
@@ -790,6 +791,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
 
     @property
     def hf_to_vllm_mapper(self):
+        # Backwards compatibility for prev released models
+        # State dicts back then had different formats
+        # and cannot be loaded with `AutoModel` mapping
+        # as is
         prefix_mapper = {
             "language_model.model": "model.language_model",
             "text_model.model": "model.text_model",
```

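The second hunk only adds comments explaining why `hf_to_vllm_mapper` keeps a prefix map: previously released checkpoints stored weights under different key prefixes and cannot be loaded through the `AutoModel` mapping as is. Below is a minimal, hypothetical sketch of that kind of prefix remapping; the helper `remap_prefixes` and the example keys are made up for illustration and do not reproduce vLLM's actual mapper utility.

```python
def remap_prefixes(state_dict: dict, prefix_mapper: dict) -> dict:
    """Rewrite the leading prefix of each checkpoint key per prefix_mapper."""
    remapped = {}
    for key, value in state_dict.items():
        for old, new in prefix_mapper.items():
            if key.startswith(old):
                key = new + key[len(old):]
                break
        remapped[key] = value
    return remapped


# Older checkpoints used e.g. "language_model.model.*"; the current layout
# expects "model.language_model.*" (mapping taken from the diff above).
old_ckpt = {"language_model.model.layers.0.weight": 0}
mapper = {"language_model.model": "model.language_model"}
print(remap_prefixes(old_ckpt, mapper))
# {'model.language_model.layers.0.weight': 0}
```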