We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent a0f8a79 · commit a931b4c
vllm/transformers_utils/config.py
@@ -733,13 +733,6 @@ def get_hf_text_config(config: PretrainedConfig):
733
"""Get the "sub" config relevant to llm for multi modal models.
734
No op for pure text models.
735
"""
736
- # This block should be unnecessary after https://github.com/huggingface/transformers/pull/37517
737
- if hasattr(config, "thinker_config"):
738
- # TODO(suyang.fy): Refactor code.
739
- # For Qwen2.5-Omni, change hf_text_config to
740
- # thinker_config.text_config.
741
- return config.thinker_config.text_config
742
-
743
text_config = config.get_text_config()
744
745
if text_config is not config:
0 commit comments