Skip to content

Commit 267a57f

Browse files
committed
add tests
1 parent ba1143a commit 267a57f

File tree

1 file changed

+22
-0
lines changed

1 file changed

+22
-0
lines changed

tests/models/test_transformers.py

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
from ..core.block.e2e.test_correctness_sliding_window import prep_prompts
1111
from ..utils import multi_gpu_test
1212
from .utils import check_logprobs_close
13+
from transformers import AutoModelForImageTextToText
1314

1415

1516
def check_implementation(
@@ -71,6 +72,27 @@ def test_models(
7172
model_impl=model_impl)
7273

7374

75+
@pytest.mark.parametrize(
    "model,model_impl",
    [
        # dynamic image length and number of patches
        ("llava-hf/llava-onevision-qwen2-0.5b-ov-hf", "transformers"),
        # has col/row special token between patches
        ("HuggingFaceTB/SmolVLM-256M-Instruct", "transformers"),
        # pixel values from processor are not 4D or 5D arrays
        ("Qwen/Qwen2.5-VL-3B-Instruct", "transformers"),
    ])  # no custom code support because custom models don't follow the standard yet!
def test_models_multimodal(
    hf_runner: type[HfRunner],
    vllm_runner: type[VllmRunner],
    example_prompts: list[str],
    model: str,
    model_impl: str,
) -> None:
    """Check that multimodal models run correctly via the Transformers backend.

    Compares vLLM output against the HuggingFace reference implementation for
    each parametrized (model, model_impl) pair. The HF reference model is
    loaded with ``AutoModelForImageTextToText`` since these are image+text
    models rather than plain causal LMs.
    """
    check_implementation(
        hf_runner,
        vllm_runner,
        example_prompts,
        model,
        model_impl=model_impl,
        kwargs_ref={"auto_cls": AutoModelForImageTextToText},
    )
95+
7496
def test_hybrid_attention(vllm_runner: type[VllmRunner]) -> None:
7597
prompts, _, _ = prep_prompts(4, (800, 801))
7698
kwargs_ref = {"max_model_len": 8192, "enforce_eager": True}

0 commit comments

Comments
 (0)