Commit feab23b (1 parent: d29081f)
tests/conftest.py
@@ -33,6 +33,7 @@
 from vllm.logger import init_logger
 from vllm.outputs import RequestOutput
 from vllm.sampling_params import BeamSearchParams
+from vllm.transformers_utils.utils import maybe_model_redirect
 from vllm.utils import cuda_device_count_stateless
 
 logger = init_logger(__name__)
@@ -321,6 +322,7 @@ def __init__(
         skip_tokenizer_init: bool = False,
         auto_cls: type[_BaseAutoModelClass] = AutoModelForCausalLM,
     ) -> None:
+        model_name = maybe_model_redirect(model_name)
         self.model_name = model_name
 
         self.config = AutoConfig.from_pretrained(
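
For context, a minimal sketch of the call pattern this commit introduces, assuming maybe_model_redirect returns a substitute model identifier (for example, a locally mirrored copy) when a redirect is configured and returns its argument unchanged otherwise; the example model ID and the fallback behaviour described here are assumptions, not part of the commit.

from transformers import AutoConfig

from vllm.transformers_utils.utils import maybe_model_redirect

# Hypothetical usage mirroring the test fixture change above: resolve the
# requested model name once, then pass the resolved name to everything that
# loads the model, so any configured redirect applies consistently.
requested = "facebook/opt-125m"               # illustrative model ID (assumption)
model_name = maybe_model_redirect(requested)  # assumed to be a no-op when no redirect is set
config = AutoConfig.from_pretrained(model_name)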