Skip to content

Commit c248666

Browse files
authored
Merge branch 'master' into workforce_pipeline
2 parents 92804d6 + 85d2de1 commit c248666

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

49 files changed

+4711
-1376
lines changed

.github/ISSUE_TEMPLATE/bug_report.yml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -26,7 +26,7 @@ body:
2626
attributes:
2727
label: What version of camel are you using?
2828
description: Run command `python3 -c 'print(__import__("camel").__version__)'` in your shell and paste the output here.
29-
placeholder: E.g., 0.2.78
29+
placeholder: E.g., 0.2.79a0
3030
validations:
3131
required: true
3232

.pre-commit-config.yaml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -4,7 +4,7 @@ repos:
44
hooks:
55
- id: ruff
66
args: [--fix, --exit-non-zero-on-fix, --show-fixes]
7-
exclude: ^(docs/cookbooks/|examples/usecases/) # Ignore files under docs/cookbooks and examples/usecases
7+
exclude: ^(docs/cookbooks/|examples/usecases/|examples/custom_client_usage\.py) # Ignore files under docs/cookbooks and examples/usecases
88
- id: ruff-format
99
exclude: ^(docs/cookbooks/|examples/usecases/) # Ignore files under docs/cookbooks and examples/usecases
1010

camel/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -14,7 +14,7 @@
1414

1515
from camel.logger import disable_logging, enable_logging, set_log_level
1616

17-
__version__ = '0.2.78'
17+
__version__ = '0.2.79a0'
1818

1919
__all__ = [
2020
'__version__',

camel/agents/chat_agent.py

Lines changed: 75 additions & 18 deletions
Original file line number | Diff line number | Diff line change
@@ -491,9 +491,7 @@ def __init__(
491491

492492
# Set up system message and initialize messages
493493
self._original_system_message = (
494-
BaseMessage.make_assistant_message(
495-
role_name="Assistant", content=system_message
496-
)
494+
BaseMessage.make_system_message(system_message)
497495
if isinstance(system_message, str)
498496
else system_message
499497
)
@@ -1629,10 +1627,7 @@ def _generate_system_message_for_output_language(
16291627
content = self._original_system_message.content + language_prompt
16301628
return self._original_system_message.create_new_instance(content)
16311629
else:
1632-
return BaseMessage.make_assistant_message(
1633-
role_name="Assistant",
1634-
content=language_prompt,
1635-
)
1630+
return BaseMessage.make_system_message(language_prompt)
16361631

16371632
def init_messages(self) -> None:
16381633
r"""Initializes the stored messages list with the current system
@@ -1650,6 +1645,62 @@ def init_messages(self) -> None:
16501645
)
16511646
)
16521647

1648+
def update_system_message(
1649+
self,
1650+
system_message: Union[BaseMessage, str],
1651+
reset_memory: bool = True,
1652+
) -> None:
1653+
r"""Update the system message.
1654+
It will reset conversation with new system message.
1655+
1656+
Args:
1657+
system_message (Union[BaseMessage, str]): The new system message.
1658+
Can be either a BaseMessage object or a string.
1659+
If a string is provided, it will be converted
1660+
into a BaseMessage object.
1661+
reset_memory (bool):
1662+
Whether to reinitialize conversation messages after updating
1663+
the system message. Defaults to True.
1664+
"""
1665+
if system_message is None:
1666+
raise ValueError("system_message is required and cannot be None. ")
1667+
self._original_system_message = (
1668+
BaseMessage.make_system_message(system_message)
1669+
if isinstance(system_message, str)
1670+
else system_message
1671+
)
1672+
self._system_message = (
1673+
self._generate_system_message_for_output_language()
1674+
)
1675+
if reset_memory:
1676+
self.init_messages()
1677+
1678+
def append_to_system_message(
1679+
self, content: str, reset_memory: bool = True
1680+
) -> None:
1681+
"""Append additional context to existing system message.
1682+
1683+
Args:
1684+
content (str): The additional system message.
1685+
reset_memory (bool):
1686+
Whether to reinitialize conversation messages after appending
1687+
additional context. Defaults to True.
1688+
"""
1689+
original_content = (
1690+
self._original_system_message.content
1691+
if self._original_system_message
1692+
else ""
1693+
)
1694+
new_system_message = original_content + '\n' + content
1695+
self._original_system_message = BaseMessage.make_system_message(
1696+
new_system_message
1697+
)
1698+
self._system_message = (
1699+
self._generate_system_message_for_output_language()
1700+
)
1701+
if reset_memory:
1702+
self.init_messages()
1703+
16531704
def reset_to_original_system_message(self) -> None:
16541705
r"""Reset system message to original, removing any appended context.
16551706
@@ -4468,23 +4519,29 @@ def _clone_tools(
44684519
# Toolkit doesn't support cloning, use original
44694520
cloned_toolkits[toolkit_id] = toolkit_instance
44704521

4471-
if getattr(
4472-
tool.func, "__message_integration_enhanced__", False
4473-
):
4474-
cloned_tools.append(
4475-
FunctionTool(
4476-
func=tool.func,
4477-
openai_tool_schema=tool.get_openai_tool_schema(),
4478-
)
4479-
)
4480-
continue
4481-
44824522
# Get the method from the cloned (or original) toolkit
44834523
toolkit = cloned_toolkits[toolkit_id]
44844524
method_name = tool.func.__name__
44854525

4526+
# Check if toolkit was actually cloned or just reused
4527+
toolkit_was_cloned = toolkit is not toolkit_instance
4528+
44864529
if hasattr(toolkit, method_name):
44874530
new_method = getattr(toolkit, method_name)
4531+
4532+
# If toolkit wasn't cloned (stateless), preserve the
4533+
# original function to maintain any enhancements/wrappers
4534+
if not toolkit_was_cloned:
4535+
# Toolkit is stateless, safe to reuse original function
4536+
cloned_tools.append(
4537+
FunctionTool(
4538+
func=tool.func,
4539+
openai_tool_schema=tool.get_openai_tool_schema(),
4540+
)
4541+
)
4542+
continue
4543+
4544+
# Toolkit was cloned, use the new method
44884545
# Wrap cloned method into a new FunctionTool,
44894546
# preserving schema
44904547
try:

camel/messages/base.py

Lines changed: 26 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -178,6 +178,32 @@ def make_assistant_message(
178178
OpenAIVisionDetailType(video_detail).value,
179179
)
180180

181+
@classmethod
182+
def make_system_message(
183+
cls,
184+
content: str,
185+
role_name: str = "System",
186+
meta_dict: Optional[Dict[str, str]] = None,
187+
) -> "BaseMessage":
188+
r"""Create a new system message.
189+
190+
Args:
191+
content (str): The content of the system message.
192+
role_name (str): The name of the system role.
193+
(default: :obj:`"System"`)
194+
meta_dict (Optional[Dict[str, str]]): Additional metadata
195+
dictionary for the message.
196+
197+
Returns:
198+
BaseMessage: The new system message.
199+
"""
200+
return cls(
201+
role_name,
202+
RoleType.SYSTEM,
203+
meta_dict,
204+
content,
205+
)
206+
181207
def create_new_instance(self, content: str) -> "BaseMessage":
182208
r"""Create a new instance of the :obj:`BaseMessage` with updated
183209
content.

camel/models/aws_bedrock_model.py

Lines changed: 1 addition & 17 deletions
Original file line number | Diff line number | Diff line change
@@ -13,17 +13,11 @@
1313
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
1414

1515
import os
16-
from typing import Any, Dict, List, Optional, Type, Union
17-
18-
from openai import AsyncStream
19-
from pydantic import BaseModel
16+
from typing import Any, Dict, Optional, Union
2017

2118
from camel.configs import BedrockConfig
22-
from camel.messages import OpenAIMessage
2319
from camel.models.openai_compatible_model import OpenAICompatibleModel
2420
from camel.types import (
25-
ChatCompletion,
26-
ChatCompletionChunk,
2721
ModelType,
2822
)
2923
from camel.utils import BaseTokenCounter, api_keys_required
@@ -93,13 +87,3 @@ def __init__(
9387
max_retries=max_retries,
9488
**kwargs,
9589
)
96-
97-
async def _arun(
98-
self,
99-
messages: List[OpenAIMessage],
100-
response_format: Optional[Type[BaseModel]] = None,
101-
tools: Optional[List[Dict[str, Any]]] = None,
102-
) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
103-
raise NotImplementedError(
104-
"AWS Bedrock does not support async inference."
105-
)

camel/models/azure_openai_model.py

Lines changed: 78 additions & 49 deletions
Original file line number | Diff line number | Diff line change
@@ -88,8 +88,19 @@ class AzureOpenAIModel(BaseModelBackend):
8888
(default: :obj:`None`)
8989
max_retries (int, optional): Maximum number of retries for API calls.
9090
(default: :obj:`3`)
91+
client (Optional[Any], optional): A custom synchronous AzureOpenAI
92+
client instance. If provided, this client will be used instead of
93+
creating a new one. Useful for RL frameworks like AReaL or rLLM
94+
that provide Azure OpenAI-compatible clients. The client should
95+
implement the AzureOpenAI client interface with
96+
`.chat.completions.create()` and `.beta.chat.completions.parse()`
97+
methods. (default: :obj:`None`)
98+
async_client (Optional[Any], optional): A custom asynchronous
99+
AzureOpenAI client instance. If provided, this client will be
100+
used instead of creating a new one. The client should implement
101+
the AsyncAzureOpenAI client interface. (default: :obj:`None`)
91102
**kwargs (Any): Additional arguments to pass to the client
92-
initialization.
103+
initialization. Ignored if custom clients are provided.
93104
94105
References:
95106
https://learn.microsoft.com/en-us/azure/ai-services/openai/
@@ -108,6 +119,8 @@ def __init__(
108119
azure_ad_token_provider: Optional["AzureADTokenProvider"] = None,
109120
azure_ad_token: Optional[str] = None,
110121
max_retries: int = 3,
122+
client: Optional[Any] = None,
123+
async_client: Optional[Any] = None,
111124
**kwargs: Any,
112125
) -> None:
113126
if model_config_dict is None:
@@ -138,56 +151,72 @@ def __init__(
138151
"or `AZURE_DEPLOYMENT_NAME` environment variable."
139152
)
140153

141-
if is_langfuse_available():
142-
from langfuse.openai import AsyncAzureOpenAI as LangfuseAsyncOpenAI
143-
from langfuse.openai import AzureOpenAI as LangfuseOpenAI
144-
145-
self._client = LangfuseOpenAI(
146-
azure_endpoint=str(self._url),
147-
azure_deployment=self._azure_deployment_name,
148-
api_version=self.api_version,
149-
api_key=self._api_key,
150-
azure_ad_token=self._azure_ad_token,
151-
azure_ad_token_provider=self.azure_ad_token_provider,
152-
timeout=self._timeout,
153-
max_retries=max_retries,
154-
**kwargs,
155-
)
156-
self._async_client = LangfuseAsyncOpenAI(
157-
azure_endpoint=str(self._url),
158-
azure_deployment=self._azure_deployment_name,
159-
api_version=self.api_version,
160-
api_key=self._api_key,
161-
azure_ad_token=self._azure_ad_token,
162-
azure_ad_token_provider=self.azure_ad_token_provider,
163-
timeout=self._timeout,
164-
max_retries=max_retries,
165-
**kwargs,
166-
)
154+
# Use custom clients if provided, otherwise create new ones
155+
if client is not None:
156+
# Use the provided custom sync client
157+
self._client = client
167158
else:
168-
self._client = AzureOpenAI(
169-
azure_endpoint=str(self._url),
170-
azure_deployment=self._azure_deployment_name,
171-
api_version=self.api_version,
172-
api_key=self._api_key,
173-
azure_ad_token=self._azure_ad_token,
174-
azure_ad_token_provider=self.azure_ad_token_provider,
175-
timeout=self._timeout,
176-
max_retries=max_retries,
177-
**kwargs,
178-
)
159+
# Create default sync client
160+
if is_langfuse_available():
161+
from langfuse.openai import AzureOpenAI as LangfuseOpenAI
162+
163+
self._client = LangfuseOpenAI(
164+
azure_endpoint=str(self._url),
165+
azure_deployment=self._azure_deployment_name,
166+
api_version=self.api_version,
167+
api_key=self._api_key,
168+
azure_ad_token=self._azure_ad_token,
169+
azure_ad_token_provider=self.azure_ad_token_provider,
170+
timeout=self._timeout,
171+
max_retries=max_retries,
172+
**kwargs,
173+
)
174+
else:
175+
self._client = AzureOpenAI(
176+
azure_endpoint=str(self._url),
177+
azure_deployment=self._azure_deployment_name,
178+
api_version=self.api_version,
179+
api_key=self._api_key,
180+
azure_ad_token=self._azure_ad_token,
181+
azure_ad_token_provider=self.azure_ad_token_provider,
182+
timeout=self._timeout,
183+
max_retries=max_retries,
184+
**kwargs,
185+
)
179186

180-
self._async_client = AsyncAzureOpenAI(
181-
azure_endpoint=str(self._url),
182-
azure_deployment=self._azure_deployment_name,
183-
api_version=self.api_version,
184-
api_key=self._api_key,
185-
azure_ad_token=self._azure_ad_token,
186-
azure_ad_token_provider=self.azure_ad_token_provider,
187-
timeout=self._timeout,
188-
max_retries=max_retries,
189-
**kwargs,
190-
)
187+
if async_client is not None:
188+
# Use the provided custom async client
189+
self._async_client = async_client
190+
else:
191+
# Create default async client
192+
if is_langfuse_available():
193+
from langfuse.openai import (
194+
AsyncAzureOpenAI as LangfuseAsyncOpenAI,
195+
)
196+
197+
self._async_client = LangfuseAsyncOpenAI(
198+
azure_endpoint=str(self._url),
199+
azure_deployment=self._azure_deployment_name,
200+
api_version=self.api_version,
201+
api_key=self._api_key,
202+
azure_ad_token=self._azure_ad_token,
203+
azure_ad_token_provider=self.azure_ad_token_provider,
204+
timeout=self._timeout,
205+
max_retries=max_retries,
206+
**kwargs,
207+
)
208+
else:
209+
self._async_client = AsyncAzureOpenAI(
210+
azure_endpoint=str(self._url),
211+
azure_deployment=self._azure_deployment_name,
212+
api_version=self.api_version,
213+
api_key=self._api_key,
214+
azure_ad_token=self._azure_ad_token,
215+
azure_ad_token_provider=self.azure_ad_token_provider,
216+
timeout=self._timeout,
217+
max_retries=max_retries,
218+
**kwargs,
219+
)
191220

192221
@property
193222
def token_counter(self) -> BaseTokenCounter:

0 commit comments

Comments (0)