@@ -163,7 +163,8 @@ def wrapper(*args, **kwargs):
         yield
     finally:
         nn.Module.register_parameter = old_register_parameter
-        for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():
+        for torch_function_name, old_torch_function in tensor_constructors_to_patch.items(
+        ):
             setattr(torch, torch_function_name, old_torch_function)


@@ -256,7 +257,8 @@ def _get_mm_fields_config(
         # HF Processors always return a mask but vLLM doesn't need it
         hf_inputs.pop("attention_mask", None)
         mm_fields = {
-            key: MultiModalFieldConfig.flat_from_sizes("image", num_image_patches)
+            key: MultiModalFieldConfig.flat_from_sizes("image",
+                                                       num_image_patches)
             for key in hf_inputs
         }
         mm_fields["image_embeds"] = MultiModalFieldConfig.flat_from_sizes(
@@ -310,16 +312,13 @@ def apply(
         if return_mm_hashes:
             raise ValueError(
                 "TransformersMultimodalLM doesn't support mm hashing yet! "
-                "Probably you did not set `disable_mm_preprocessor_cache=True`")
+                "Probably you didn't set `disable_mm_preprocessor_cache=True`")

         mm_items = self._to_mm_items(mm_data)
         hf_processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)

-        (
-            prompt_ids,
-            processed_data,
-            mm_token_type_ids
-        ) = self._apply_hf_processor_text_mm(
+        (prompt_ids, processed_data,
+         mm_token_type_ids) = self._apply_hf_processor_text_mm(
             prompt_text=prompt,
             mm_items=mm_items,
             hf_processor_mm_kwargs=hf_processor_mm_kwargs,