15 changes: 6 additions & 9 deletions camel/agents/_types.py
@@ -11,14 +11,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional
 
-from openai import AsyncStream, Stream
-from openai.types.chat import ChatCompletionChunk
 from pydantic import BaseModel, ConfigDict
 
 from camel.messages import BaseMessage
-from camel.types import ChatCompletion
 
 
 class ToolCallRequest(BaseModel):
@@ -33,11 +30,11 @@ class ModelResponse(BaseModel):
     r"""The response from the model."""
 
     model_config = ConfigDict(arbitrary_types_allowed=True)
-    response: Union[
-        ChatCompletion,
-        Stream[ChatCompletionChunk],
-        AsyncStream[ChatCompletionChunk],
-    ]
+    # Phase 1: relax the annotation to decouple from provider schemas.
+    # Existing call sites do not rely on static typing here and tests
+    # often pass MagicMock; this change avoids tight coupling to
+    # ChatCompletion when adapters introduce unified responses.
+    response: Any
     tool_call_requests: Optional[List[ToolCallRequest]]
     output_messages: List[BaseMessage]
     finish_reasons: List[str]
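For context on the relaxed field: with `response: Any`, a test can now inject an arbitrary stand-in payload. A minimal sketch, assuming only the constructor fields visible in this diff; the values are illustrative:

from unittest.mock import MagicMock

from camel.agents._types import ModelResponse
from camel.messages import BaseMessage
from camel.types import RoleType

# Any object is accepted now; previously this had to be a ChatCompletion
# or an openai Stream/AsyncStream of chunks.
resp = ModelResponse(
    response=MagicMock(),
    tool_call_requests=None,
    output_messages=[
        BaseMessage(
            role_name="assistant",
            role_type=RoleType.ASSISTANT,
            meta_dict={},
            content="Hello!",
        )
    ],
    finish_reasons=["stop"],
    usage_dict={"prompt_tokens": 1, "completion_tokens": 2, "total_tokens": 3},
    response_id="resp-123",
)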
107 changes: 95 additions & 12 deletions camel/agents/chat_agent.py
@@ -84,6 +84,10 @@
 )
 from camel.prompts import TextPrompt
 from camel.responses import ChatAgentResponse
+from camel.responses.adapters.chat_completions import (
+    adapt_chat_to_camel_response,
+)
+from camel.responses.model_response import CamelModelResponse
 from camel.storages import JsonStorage
 from camel.toolkits import FunctionTool, RegisteredAgentToolkit
 from camel.types import (
@@ -2566,12 +2570,8 @@ def _get_model_response(
             f"[{current_iteration}]: {sanitized}"
         )
 
-        if not isinstance(response, ChatCompletion):
-            raise TypeError(
-                f"Expected ChatCompletion, got {type(response).__name__}"
-            )
-
-        return self._handle_batch_response(response)
+        camel_resp = self._normalize_to_camel_response(response)
+        return self._handle_camel_response(camel_resp)
Collaborator comment: maybe we can unify _handle_camel_response and _handle_batch_response

     @observe()
     async def _aget_model_response(
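For readers unfamiliar with the adapter imported above: `adapt_chat_to_camel_response` converts a provider ChatCompletion into the unified `CamelModelResponse`. Its implementation is not shown in this diff; the following self-contained sketch, built on hypothetical simplified stand-in types rather than the real schema, illustrates the general shape of such an adapter:

from dataclasses import dataclass
from typing import Any, Dict, List, Optional


@dataclass
class UnifiedUsage:  # hypothetical stand-in, not the real schema
    input_tokens: int
    output_tokens: int
    total_tokens: int
    raw: Optional[Dict[str, Any]] = None


@dataclass
class UnifiedResponse:  # hypothetical stand-in for CamelModelResponse
    id: str
    contents: List[str]
    finish_reasons: List[str]
    usage: Optional[UnifiedUsage] = None
    raw: Any = None


def adapt_chat_completion(chat: Any) -> UnifiedResponse:
    # Flatten a ChatCompletion-shaped object into the unified schema,
    # keeping the provider payload on `raw` for compatibility.
    usage = None
    raw_usage = getattr(chat, "usage", None)
    if raw_usage is not None:
        usage = UnifiedUsage(
            input_tokens=raw_usage.prompt_tokens,
            output_tokens=raw_usage.completion_tokens,
            total_tokens=raw_usage.total_tokens,
            raw=raw_usage.model_dump()
            if hasattr(raw_usage, "model_dump")
            else None,
        )
    return UnifiedResponse(
        id=chat.id or "",
        contents=[c.message.content or "" for c in chat.choices],
        finish_reasons=[c.finish_reason or "" for c in chat.choices],
        usage=usage,
        raw=chat,
    )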
@@ -2631,12 +2631,8 @@ async def _aget_model_response(
             f"[{current_iteration}]: {sanitized}"
         )
 
-        if not isinstance(response, ChatCompletion):
-            raise TypeError(
-                f"Expected ChatCompletion, got {type(response).__name__}"
-            )
-
-        return self._handle_batch_response(response)
+        camel_resp = self._normalize_to_camel_response(response)
+        return self._handle_camel_response(camel_resp)

     def _sanitize_messages_for_logging(
         self, messages, prev_num_openai_messages: int
@@ -2907,6 +2903,93 @@ def _handle_batch_response(
             response_id=response.id or "",
         )
 
+    def _normalize_to_camel_response(self, resp: Any) -> CamelModelResponse:
+        """Normalize a backend response into a CamelModelResponse.
+
+        Accepts a ChatCompletion (legacy) or an already-normalized
+        CamelModelResponse.
+        """
+        if isinstance(resp, CamelModelResponse):
+            return resp
+        # Best-effort detection of ChatCompletion without tight import
+        # coupling.
+        try:
+            from camel.types import (
+                ChatCompletion as _CC,  # local import to avoid cycles
+            )
+
+            if isinstance(resp, _CC):
+                return adapt_chat_to_camel_response(resp)
+        except Exception:
+            pass
+        raise TypeError(
+            f"Unsupported response type for normalization: {type(resp).__name__}"  # noqa: E501
+        )
Collaborator comment on lines +2922 to +2926: likewise


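Both review comments point the same way; one possible unification, sketched here purely for illustration and not part of this PR, is to collapse the legacy batch path into the new one so a single conversion routine remains:

def _handle_batch_response(self, response: ChatCompletion) -> ModelResponse:
    # Adapt the provider payload once, then reuse the shared
    # CamelModelResponse -> ModelResponse conversion path.
    camel_resp = self._normalize_to_camel_response(response)
    return self._handle_camel_response(camel_resp)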
+
+    def _handle_camel_response(
+        self, response: CamelModelResponse
+    ) -> ModelResponse:
+        """Process a CamelModelResponse and build the legacy ModelResponse.
+
+        Mirrors _handle_batch_response semantics to keep behavior identical.
+        """
+        output_messages: List[BaseMessage] = []
+        for msg in response.output_messages:
+            # Re-wrap to preserve agent role naming convention
+            chat_message = BaseMessage(
+                role_name=self.role_name,
+                role_type=self.role_type,
+                meta_dict=msg.meta_dict,
+                content=msg.content,
+                parsed=msg.parsed,
+            )
+            output_messages.append(chat_message)
+
+        finish_reasons = response.finish_reasons or []
+
+        usage: Dict[str, Any] = {}
+        if response.usage and response.usage.raw:
+            usage = dict(response.usage.raw)
+        else:
+            # Synthesize from normalized fields if raw is missing
+            usage = {
+                "prompt_tokens": response.usage.input_tokens
+                if response.usage
+                else 0,
+                "completion_tokens": response.usage.output_tokens
+                if response.usage
+                else 0,
+                "total_tokens": response.usage.total_tokens
+                if response.usage
+                else 0,
+            }
+
+        tool_call_requests: Optional[List[ToolCallRequest]] = None
+        if response.tool_call_requests:
+            tool_call_requests = []
+            for tc in response.tool_call_requests:
+                tool_call_requests.append(
+                    ToolCallRequest(
+                        tool_name=tc.name,
+                        args=tc.args,
+                        tool_call_id=tc.id,
+                    )
+                )
+
+        # For compatibility, return original provider payload when available
+        provider_payload = getattr(response, "raw", None)
+        response_id = response.id or ""
+
+        return ModelResponse(
+            response=provider_payload
+            if provider_payload is not None
+            else response,
+            tool_call_requests=tool_call_requests,
+            output_messages=output_messages,
+            finish_reasons=finish_reasons,
+            usage_dict=usage,
+            response_id=response_id,
+        )

     def _step_terminate(
         self,
         num_tokens: int,
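The usage fallback in `_handle_camel_response` is worth seeing in isolation: prefer the provider's raw usage mapping when present, otherwise synthesize the three legacy keys from the normalized counts. A standalone sketch with a hypothetical `Usage` stand-in for the normalized usage type:

from dataclasses import dataclass
from typing import Any, Dict, Optional


@dataclass
class Usage:  # hypothetical stand-in for the normalized usage type
    input_tokens: int = 0
    output_tokens: int = 0
    total_tokens: int = 0
    raw: Optional[Dict[str, Any]] = None


def to_legacy_usage_dict(usage: Optional[Usage]) -> Dict[str, Any]:
    # Prefer the provider's raw usage mapping when present ...
    if usage and usage.raw:
        return dict(usage.raw)
    # ... otherwise synthesize the legacy keys from normalized counts.
    return {
        "prompt_tokens": usage.input_tokens if usage else 0,
        "completion_tokens": usage.output_tokens if usage else 0,
        "total_tokens": usage.total_tokens if usage else 0,
    }


assert to_legacy_usage_dict(None)["total_tokens"] == 0
assert to_legacy_usage_dict(Usage(3, 5, 8))["prompt_tokens"] == 3
assert to_legacy_usage_dict(Usage(raw={"prompt_tokens": 7}))["prompt_tokens"] == 7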
20 changes: 20 additions & 0 deletions camel/core/__init__.py
@@ -0,0 +1,20 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+"""Core abstractions for the CAMEL runtime.
+
+This package hosts model-agnostic message types that can be adapted to
+either legacy Chat Completions or the newer OpenAI Responses API.
+"""
+
+__all__: list[str] = []
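The package is empty apart from its docstring. For illustration only, here is a hypothetical example of the kind of model-agnostic message type it could eventually host; `CoreMessage` and both methods are invented for this sketch and do not appear anywhere in the PR:

from dataclasses import dataclass
from typing import Any, Dict


@dataclass(frozen=True)
class CoreMessage:  # hypothetical; not part of this PR
    role: str
    content: str

    def to_chat_completions(self) -> Dict[str, Any]:
        # Message shape consumed by the legacy Chat Completions endpoint.
        return {"role": self.role, "content": self.content}

    def to_responses_input(self) -> Dict[str, Any]:
        # Item shape accepted in the Responses API `input` list.
        return {"role": self.role, "content": self.content}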