1 parent d6902ce commit 299252e
vllm/entrypoints/openai/serving_score.py
@@ -216,8 +216,8 @@ async def _cross_encoding_score(
             # cross_encoder models defaults to using pad_token.
             tokenized_prompts = await asyncio.gather(*(
                 tokenize_async(
-                    text=t1,  # type: ignore[arg-type]
-                    text_pair=t2,  # type: ignore[arg-type]
+                    text=t1,  # type: ignore[arg-type]
+                    text_pair=t2,  # type: ignore[arg-type]
                     **tokenization_kwargs) for t1, t2 in input_pairs))
         else:
             # `llm as reranker` models defaults to not using pad_token.
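For context, the `text` / `text_pair` arguments in the hunk above join a (query, document) pair into a single input sequence for a cross-encoder model. The sketch below shows that kind of pair tokenization using the Hugging Face tokenizer API outside of vLLM; the model name and example strings are illustrative assumptions and are not part of this commit.

```python
# Minimal sketch (not part of this commit): tokenizing a (query, document)
# pair with text / text_pair, analogous to the tokenize_async call above.
# The model name and inputs below are illustrative assumptions.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("cross-encoder/ms-marco-MiniLM-L-6-v2")

query = "What is the capital of France?"
document = "Paris is the capital and largest city of France."

# text and text_pair are concatenated into one sequence with the model's
# separator token, so the cross-encoder can score the pair jointly.
encoded = tokenizer(text=query, text_pair=document, truncation=True)
print(tokenizer.decode(encoded["input_ids"]))
```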