Skip to content

Commit 8c1f220

Browse files
committed
I don't get the style guidelines
Signed-off-by: raushan <raushan@huggingface.co>
1 parent 2c73f88 commit 8c1f220

File tree

2 files changed

+10
-13
lines changed

2 files changed

+10
-13
lines changed

vllm/model_executor/models/transformers.py

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -163,7 +163,8 @@ def wrapper(*args, **kwargs):
163163
yield
164164
finally:
165165
nn.Module.register_parameter = old_register_parameter
166-
for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():
166+
for torch_function_name, old_torch_function in tensor_constructors_to_patch.items(
167+
):
167168
setattr(torch, torch_function_name, old_torch_function)
168169

169170

@@ -256,7 +257,8 @@ def _get_mm_fields_config(
256257
# HF Processors always return a mask but vLLM doesn't need it
257258
hf_inputs.pop("attention_mask", None)
258259
mm_fields = {
259-
key: MultiModalFieldConfig.flat_from_sizes("image", num_image_patches)
260+
key: MultiModalFieldConfig.flat_from_sizes("image",
261+
num_image_patches)
260262
for key in hf_inputs
261263
}
262264
mm_fields["image_embeds"] = MultiModalFieldConfig.flat_from_sizes(
@@ -310,16 +312,13 @@ def apply(
310312
if return_mm_hashes:
311313
raise ValueError(
312314
"TransformersMultimodalLM doesn't support mm hashing yet! "
313-
"Probably you did not set `disable_mm_preprocessor_cache=True`")
315+
"Probably you didn't set `disable_mm_preprocessor_cache=True`")
314316

315317
mm_items = self._to_mm_items(mm_data)
316318
hf_processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)
317319

318-
(
319-
prompt_ids,
320-
processed_data,
321-
mm_token_type_ids
322-
) = self._apply_hf_processor_text_mm(
320+
(prompt_ids, processed_data,
321+
mm_token_type_ids) = self._apply_hf_processor_text_mm(
323322
prompt_text=prompt,
324323
mm_items=mm_items,
325324
hf_processor_mm_kwargs=hf_processor_mm_kwargs,

vllm/v1/engine/mm_input_cache.py

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -35,12 +35,10 @@ class MirroredProcessingCache:
3535
def __init__(self, model_config):
3636
mm_config = model_config.multimodal_config
3737
disable_mm_preprocessor_cache = (
38-
mm_config is not None and mm_config.disable_mm_preprocessor_cache
39-
)
38+
mm_config is not None and mm_config.disable_mm_preprocessor_cache)
4039
self.use_cache = not disable_mm_preprocessor_cache
41-
self.mm_cache = ProcessingCache.get_lru_cache(
42-
VLLM_MM_INPUT_CACHE_GIB, MultiModalKwargs
43-
)
40+
self.mm_cache = ProcessingCache.get_lru_cache(VLLM_MM_INPUT_CACHE_GIB,
41+
MultiModalKwargs)
4442

4543
def get_and_update_p0(
4644
self,

0 commit comments

Comments (0)