Commit 5fe55fd

Few more changes to solve some other pre-commit hooks failures
Signed-off-by: Christian Pinto <christian.pinto@ibm.com>
1 parent 8f27e28 commit 5fe55fd

3 files changed: +17 -13 lines changed

3 files changed

+17
-13
lines changed

tests/models/multimodal/pooling/test_prithvi_mae.py (14 additions, 10 deletions)

@@ -6,31 +6,35 @@
 
 from ....conftest import VllmRunner
 
+
 def generate_test_mm_data():
     mm_data = {
         "pixel_values": torch.full((6, 512, 512), 1.0, dtype=torch.float16),
         "location_coords": torch.full((1, 2), 1.0, dtype=torch.float16),
     }
     return mm_data
-
+
+
 def _run_test(
     vllm_runner: type[VllmRunner],
     model: str,
-) -> None:
+) -> None:
 
     mm_data = generate_test_mm_data()
     prompt = {
         # This model deals with no text input
         "prompt_token_ids": [1],
         "multi_modal_data": mm_data
     }
-    with vllm_runner(model, task="embed",
-                     dtype=torch.float16,
-                     enforce_eager=True,
-                     skip_tokenizer_init=True) as vllm_model:
-        output = vllm_model.encode(prompt)
-
-MODELS=["christian-pinto/Prithvi-EO-2.0-300M-TL-VLLM"]
+    with vllm_runner(model,
+                     task="embed",
+                     dtype=torch.float16,
+                     enforce_eager=True,
+                     skip_tokenizer_init=True) as vllm_model:
+        vllm_model.encode(prompt)
+
+MODELS = ["christian-pinto/Prithvi-EO-2.0-300M-TL-VLLM"]
+
 @pytest.mark.parametrize("model", MODELS)
 def test_models_image(
     hf_runner,
@@ -41,4 +45,4 @@ def test_models_image(
     _run_test(
         vllm_runner,
         model,
-    )
+    )
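
The changes in this file appear to be formatter-driven, in line with the commit message: two blank lines now separate top-level definitions, the unused assignment to output is dropped, the vllm_runner(...) arguments are wrapped one per line, and MODELS = [...] gains spaces around the equals sign. For readers who do not want to apply the diff mentally, here is a condensed, self-contained sketch of what the test prepares; only torch is needed to run it, while the real encode() step still relies on the VllmRunner fixture from the repository's conftest:

    import torch

    def generate_test_mm_data() -> dict:
        # Dummy Prithvi-style inputs: a 6-band 512x512 "image" plus location coordinates.
        return {
            "pixel_values": torch.full((6, 512, 512), 1.0, dtype=torch.float16),
            "location_coords": torch.full((1, 2), 1.0, dtype=torch.float16),
        }

    # The model takes no text input, so a single placeholder token id stands in for the prompt.
    prompt = {
        "prompt_token_ids": [1],
        "multi_modal_data": generate_test_mm_data(),
    }
    print(sorted(prompt["multi_modal_data"].keys()))  # ['location_coords', 'pixel_values']

In the repository, the test itself would be run with something like pytest tests/models/multimodal/pooling/test_prithvi_mae.py (invocation shown for illustration).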

vllm/v1/core/kv_cache_manager.py (1 addition, 1 deletion)

@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: Apache-2.0
 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 
+from abc import ABC, abstractmethod
 from collections import defaultdict
 from dataclasses import dataclass
 from typing import Optional
@@ -65,7 +66,6 @@ def new_empty(self) -> "KVCacheBlocks":
 
 
 class KVCacheManager:
-
     def __init__(
         self,
         kv_cache_config: KVCacheConfig,
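
The only substantive line here is the new from abc import ABC, abstractmethod import; the other change just drops a blank line after the class KVCacheManager: header. As a generic reminder of the pattern that import supports, here is a minimal sketch; the class names below are illustrative stand-ins, not declarations taken from this file:

    from abc import ABC, abstractmethod

    class BlockPoolBase(ABC):  # hypothetical name, for illustration only
        @abstractmethod
        def new_empty(self) -> "BlockPoolBase":
            """Subclasses must return an empty collection of blocks."""
            raise NotImplementedError

    class ListBlockPool(BlockPoolBase):  # hypothetical concrete subclass
        def new_empty(self) -> "ListBlockPool":
            return ListBlockPool()

    pool = ListBlockPool().new_empty()  # instantiable because new_empty is implemented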

vllm/v1/core/sched/scheduler.py (2 additions, 2 deletions)

@@ -488,8 +488,8 @@ def schedule(self) -> SchedulerOutput:
 
             if self.lora_config and request.lora_request:
                 scheduled_loras.add(request.lora_request.lora_int_id)
-            req_to_new_block_ids[request.request_id] = (
-                self.kv_cache_manager.get_block_ids(request.request_id))
+            req_to_new_block_ids[request.request_id] = \
+                self.kv_cache_manager.get_block_ids(request.request_id)
             num_scheduled_tokens[request.request_id] = num_new_tokens
             token_budget -= num_new_tokens
             request.status = RequestStatus.RUNNING
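
This hunk only changes the line-continuation style of one assignment: wrapping parentheses are replaced by a trailing backslash, presumably to satisfy the formatter run by pre-commit. A minimal, self-contained sketch of the two equivalent forms follows; the stub class and bare names are stand-ins for the scheduler's real attributes:

    class _StubKVCacheManager:
        # Stand-in so the snippet runs without vLLM installed.
        def get_block_ids(self, request_id: str) -> list[int]:
            return [0, 1, 2]

    kv_cache_manager = _StubKVCacheManager()
    request_id = "req-0"
    req_to_new_block_ids: dict[str, list[int]] = {}

    # Before this commit: continuation via wrapping parentheses.
    req_to_new_block_ids[request_id] = (
        kv_cache_manager.get_block_ids(request_id))

    # After this commit: continuation via a trailing backslash.
    req_to_new_block_ids[request_id] = \
        kv_cache_manager.get_block_ids(request_id)

    assert req_to_new_block_ids[request_id] == [0, 1, 2]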
