Skip to content

Commit 673aeb0

Browse files
Make pre-commits pass
Signed-off-by: Christian Pinto <christian.pinto@ibm.com>
1 parent 97c11e6 commit 673aeb0

File tree

2 files changed

+5

-4

lines changed

vllm/v1/core/kv_cache_coordinator.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -390,7 +390,7 @@ def get_kv_cache_coordinator(
390390
enable_kv_cache_events: bool) -> KVCacheCoordinator:
391391
if not enable_caching or len(kv_cache_config.kv_cache_groups) == 0:
392392
# We instantiate this coordinator also for attention free models that
393-
# have 0 kv_cache_groups
393+
# have 0 kv_cache_groups
394394
return KVCacheCoordinatorNoPrefixCache(kv_cache_config, max_model_len,
395395
use_eagle, caching_hash_fn,
396396
enable_kv_cache_events)

vllm/v1/core/kv_cache_utils.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -752,12 +752,14 @@ def is_kv_cache_page_size_uniform(
752752
page_sizes = {layer.page_size_bytes for layer in kv_cache_spec.values()}
753753
return len(page_sizes) == 1
754754

755+
755756
def is_kv_cache_type_attention_free(
756757
kv_cache_spec: dict[str, KVCacheSpec]) -> bool:
757758

758759
# kv_cache_spec is an empty dict for attention free models
759760
return not kv_cache_spec
760761

762+
761763
def _get_kv_cache_config_uniform_page_size(
762764
vllm_config: VllmConfig, kv_cache_spec: dict[str, KVCacheSpec],
763765
available_memory: int) -> KVCacheConfig:
@@ -901,9 +903,8 @@ def _get_kv_cache_config_uniform_page_size(
901903

902904

903905
def _get_kv_cache_config_attention_free() -> KVCacheConfig:
904-
return KVCacheConfig(num_blocks=1,
905-
kv_cache_tensors=[],
906-
kv_cache_groups=[])
906+
return KVCacheConfig(num_blocks=1, kv_cache_tensors=[], kv_cache_groups=[])
907+
907908

908909
def unify_hybrid_kv_cache_specs(kv_cache_spec: dict[str, KVCacheSpec]):
909910
"""

0 commit comments

Comments (0)