
Commit 4848f49

fix: Resolve type checking issues in output logging
Fix type annotation and variable naming issues identified by mypy:

- Change output_token_ids parameter type from list[int] to Sequence[int] to handle compatibility with different sequence types from output objects
- Fix variable naming conflict in tool call logging (tool_call_info -> tool_call_descriptions)
- Add proper type conversion in log_outputs method for truncation
- Update test imports to include Sequence type

These fixes ensure the output logging feature passes mypy type checking while maintaining full functionality and backward compatibility.

Signed-off-by: Adrian Garcia <adrian.garcia@inceptionai.ai>
1 parent f2263b7 commit 4848f49
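For context on the first bullet: output objects may hand back token IDs as a tuple or another sequence type rather than a plain list, and a parameter annotated list[int] rejects those under mypy. Below is a minimal sketch of the difference; the stand-in function names are hypothetical, not the real vLLM signatures.

from typing import Optional, Sequence

def log_with_list(output_token_ids: Optional[list[int]]) -> None:
    """Hypothetical old-style signature: accepts only a real list."""

def log_with_sequence(output_token_ids: Optional[Sequence[int]]) -> None:
    """Hypothetical new-style signature: accepts any read-only sequence."""

token_ids: tuple[int, ...] = (101, 7592, 2088)  # hypothetical token IDs

log_with_list(token_ids)      # mypy error: expected Optional[list[int]]
log_with_sequence(token_ids)  # accepted: tuple[int, ...] is a Sequence[int]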

File tree

3 files changed (+8, -7 lines changed)

tests/test_logger.py

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@
 from dataclasses import dataclass
 from json.decoder import JSONDecodeError
 from tempfile import NamedTemporaryFile
-from typing import Any
+from typing import Any, Sequence
 from unittest.mock import patch, MagicMock
 from uuid import uuid4

vllm/entrypoints/logger.py

Lines changed: 4 additions & 3 deletions
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: Apache-2.0
 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project

-from typing import Optional, Union
+from typing import Optional, Union, Sequence

 import torch

@@ -53,7 +53,7 @@ def log_outputs(
         self,
         request_id: str,
         outputs: str,
-        output_token_ids: Optional[list[int]],
+        output_token_ids: Optional[Sequence[int]],
         finish_reason: Optional[str] = None,
         is_streaming: bool = False,
         delta: bool = False,
@@ -64,7 +64,8 @@ def log_outputs(
             outputs = outputs[:max_log_len]

             if output_token_ids is not None:
-                output_token_ids = output_token_ids[:max_log_len]
+                # Convert to list and apply truncation
+                output_token_ids = list(output_token_ids)[:max_log_len]

         stream_info = ""
         if is_streaming:
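The list() conversion above ties back to the mypy complaint cited in the commit message: slicing a generic Sequence yields another Sequence, not necessarily a list, so converting first guarantees the logged value is a concrete list[int] whatever the caller passed. A rough standalone sketch, with a hypothetical truncate_token_ids helper that is not part of vLLM:

from typing import Optional, Sequence

def truncate_token_ids(output_token_ids: Optional[Sequence[int]],
                       max_log_len: Optional[int]) -> Optional[list[int]]:
    # Convert to list before slicing so the result is always list[int],
    # even when the caller passed a tuple or other sequence type.
    if output_token_ids is None:
        return None
    ids = list(output_token_ids)
    return ids if max_log_len is None else ids[:max_log_len]

print(truncate_token_ids((1, 2, 3, 4), 2))  # [1, 2]
print(truncate_token_ids(None, 2))          # None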

vllm/entrypoints/openai/serving_chat.py

Lines changed: 3 additions & 3 deletions
@@ -1197,11 +1197,11 @@ async def chat_completion_full_generator(
                     output_text = choice.message.content
                 elif choice.message.tool_calls:
                     # For tool calls, log the function name and arguments
-                    tool_call_info = []
+                    tool_call_descriptions = []
                     for tool_call in choice.message.tool_calls:
                         if hasattr(tool_call.function, 'name') and hasattr(tool_call.function, 'arguments'):
-                            tool_call_info.append(f"{tool_call.function.name}({tool_call.function.arguments})")
-                    output_text = f"[tool_calls: {', '.join(tool_call_info)}]"
+                            tool_call_descriptions.append(f"{tool_call.function.name}({tool_call.function.arguments})")
+                    output_text = f"[tool_calls: {', '.join(tool_call_descriptions)}]"

                 if output_text:
                     # Get the corresponding output token IDs
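The rename in this hunk only changes the local variable to resolve the naming conflict; the formatting logic is unchanged. A standalone sketch of what the logged string looks like, using a hypothetical FunctionCall dataclass in place of the real tool-call objects:

from dataclasses import dataclass

@dataclass
class FunctionCall:  # hypothetical stand-in for the real tool-call object
    name: str
    arguments: str

calls = [FunctionCall("get_weather", '{"city": "Paris"}'),
         FunctionCall("get_time", '{"tz": "UTC"}')]

tool_call_descriptions = [f"{c.name}({c.arguments})" for c in calls]
output_text = f"[tool_calls: {', '.join(tool_call_descriptions)}]"
print(output_text)
# [tool_calls: get_weather({"city": "Paris"}), get_time({"tz": "UTC"})]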
