We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 476844d · commit d81edde — Copy full SHA for d81edde
vllm/v1/engine/mm_input_cache.py
@@ -34,8 +34,8 @@ class MirroredProcessingCache:
34
35
def __init__(self, model_config):
36
mm_config = model_config.multimodal_config
37
- disable_mm_preprocessor_cache = mm_config is not None and \
38
- not mm_config.disable_mm_preprocessor_cache
+ disable_mm_preprocessor_cache = (
+ mm_config is not None and mm_config.disable_mm_preprocessor_cache)
39
self.use_cache = not disable_mm_preprocessor_cache
40
self.mm_cache = ProcessingCache.get_lru_cache(VLLM_MM_INPUT_CACHE_GIB,
41
MultiModalKwargs)
0 commit comments