diff --git a/python/instrumentation/openinference-instrumentation-autogen/CHANGELOG.md b/python/instrumentation/openinference-instrumentation-autogen/CHANGELOG.md
deleted file mode 100644
index 2e4576a06..000000000
--- a/python/instrumentation/openinference-instrumentation-autogen/CHANGELOG.md
+++ /dev/null
@@ -1,71 +0,0 @@
-# Changelog
-
-## [0.1.8](https://github.com/Arize-ai/openinference/compare/python-openinference-instrumentation-autogen-v0.1.7...python-openinference-instrumentation-autogen-v0.1.8) (2025-04-11)
-
-
-### Bug Fixes
-
-* increased minimum supported version of openinference-instrumentation to 0.1.27 ([#1507](https://github.com/Arize-ai/openinference/issues/1507)) ([a55edfa](https://github.com/Arize-ai/openinference/commit/a55edfa8900c1f36a73385c7d03f91cffadd85c4))
-
-## [0.1.7](https://github.com/Arize-ai/openinference/compare/python-openinference-instrumentation-autogen-v0.1.6...python-openinference-instrumentation-autogen-v0.1.7) (2025-03-14)
-
-
-### Documentation
-
-* fix license to be openinference ([#1353](https://github.com/Arize-ai/openinference/issues/1353)) ([85d435b](https://github.com/Arize-ai/openinference/commit/85d435be3af3de5424494cfbdd654454688b7377))
-
-## [0.1.6](https://github.com/Arize-ai/openinference/compare/python-openinference-instrumentation-autogen-v0.1.5...python-openinference-instrumentation-autogen-v0.1.6) (2025-02-18)
-
-
-### Features
-
-* define openinference_instrumentor entry points for all libraries ([#1290](https://github.com/Arize-ai/openinference/issues/1290)) ([4b69fdc](https://github.com/Arize-ai/openinference/commit/4b69fdc13210048009e51639b01e7c0c9550c9d1))
-
-## [0.1.5](https://github.com/Arize-ai/openinference/compare/python-openinference-instrumentation-autogen-v0.1.4...python-openinference-instrumentation-autogen-v0.1.5) (2025-02-11)
-
-
-### Features
-
-* add entrypoint for use in opentelemetry-instrument ([#1278](https://github.com/Arize-ai/openinference/issues/1278)) ([2106acf](https://github.com/Arize-ai/openinference/commit/2106acfd6648804abe9b95e41a49df26a500435c))
-
-## [0.1.4](https://github.com/Arize-ai/openinference/compare/python-openinference-instrumentation-autogen-v0.1.3...python-openinference-instrumentation-autogen-v0.1.4) (2025-02-04)
-
-
-### Bug Fixes
-
-* support python 3.13 and drop python 3.8 ([#1263](https://github.com/Arize-ai/openinference/issues/1263)) ([5bfaa90](https://github.com/Arize-ai/openinference/commit/5bfaa90d800a8f725b3ac7444d16972ed7821738))
-
-## [0.1.3](https://github.com/Arize-ai/openinference/compare/python-openinference-instrumentation-autogen-v0.1.2...python-openinference-instrumentation-autogen-v0.1.3) (2025-02-04)
-
-
-### Bug Fixes
-
-* **AG2:** CI fix ([#1221](https://github.com/Arize-ai/openinference/issues/1221)) ([db27dfd](https://github.com/Arize-ai/openinference/commit/db27dfd0e9eea601cd7e4b1048fe1d35983de019))
-
-## [0.1.2](https://github.com/Arize-ai/openinference/compare/python-openinference-instrumentation-autogen-v0.1.1...python-openinference-instrumentation-autogen-v0.1.2) (2025-01-17)
-
-
-### Bug Fixes
-
-* **autogen2:** tool calling fix ([#1218](https://github.com/Arize-ai/openinference/issues/1218))
([c8c9ade](https://github.com/Arize-ai/openinference/commit/c8c9ade8267e3ba2fa1e48b2d1d77a42444222e4)) - -## [0.1.1](https://github.com/Arize-ai/openinference/compare/python-openinference-instrumentation-autogen-v0.1.0...python-openinference-instrumentation-autogen-v0.1.1) (2025-01-17) - - -### Features - -* **autogen2:** basic tool calling ([#1216](https://github.com/Arize-ai/openinference/issues/1216)) ([2a638f7](https://github.com/Arize-ai/openinference/commit/2a638f7ca51a2d77b27a556bed75aa6318aa805b)) - -## 0.1.0 (2025-01-17) - - -### Features - -* basic instrumentation for autogen (ag2) ([#1211](https://github.com/Arize-ai/openinference/issues/1211)) ([e02a055](https://github.com/Arize-ai/openinference/commit/e02a0553eb84ee253c7931738c5116e0a310194a)) - -## 0.1.0 (2025-01-16) - - -### Features - -* **python:** autogen (ag2) instrumentation ([#1211]https://github.com/Arize-ai/openinference/pull/1211) diff --git a/python/instrumentation/openinference-instrumentation-autogen/LICENSE b/python/instrumentation/openinference-instrumentation-autogen/LICENSE index 1525436c1..3c0e05ceb 100644 --- a/python/instrumentation/openinference-instrumentation-autogen/LICENSE +++ b/python/instrumentation/openinference-instrumentation-autogen/LICENSE @@ -1,4 +1,4 @@ - Apache License + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -198,4 +198,4 @@ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and -limitations under the License. +limitations under the License. \ No newline at end of file diff --git a/python/instrumentation/openinference-instrumentation-autogen/README.md b/python/instrumentation/openinference-instrumentation-autogen/README.md index 1c0bc8640..2936d72db 100644 --- a/python/instrumentation/openinference-instrumentation-autogen/README.md +++ b/python/instrumentation/openinference-instrumentation-autogen/README.md @@ -1,3 +1,3 @@ -# OpenInference Autogen (ag2) Instrumentation +# OpenInference Autogen Instrumentation EXPERIMENTAL diff --git a/python/instrumentation/openinference-instrumentation-autogen/pyproject.toml b/python/instrumentation/openinference-instrumentation-autogen/pyproject.toml index e7907a560..678aa9483 100644 --- a/python/instrumentation/openinference-instrumentation-autogen/pyproject.toml +++ b/python/instrumentation/openinference-instrumentation-autogen/pyproject.toml @@ -8,43 +8,41 @@ dynamic = ["version"] description = "OpenInference Autogen Instrumentation" readme = "README.md" license = "Apache-2.0" -requires-python = ">=3.9, <3.14" +requires-python = ">=3.10, <3.14" authors = [ - { name = "OpenInference Authors", email = "oss@arize.com" }, + { name = "OpenInference Authors", email = "oss@arize.com" }, ] classifiers = [ - "Development Status :: 5 - Production/Stable", - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Programming Language :: Python :: 3.13", + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: 
Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", ] dependencies = [ - "opentelemetry-api", - "opentelemetry-instrumentation", - "opentelemetry-semantic-conventions", - "openinference-instrumentation>=0.1.27", - "openinference-semantic-conventions>=0.1.12", + "opentelemetry-api", + "opentelemetry-instrumentation", + "opentelemetry-semantic-conventions", + "openinference-instrumentation>=0.1.17", + "openinference-semantic-conventions", + "wrapt", + "typing-extensions", ] [project.optional-dependencies] instruments = [ - "autogen >= 0.5.0", -] -test = [ - "autogen>=0.5.0", + "autogen-agentchat >= 0.4.2", ] [project.entry-points.opentelemetry_instrumentor] autogen = "openinference.instrumentation.autogen:AutogenInstrumentor" [project.entry-points.openinference_instrumentor] -autogen = "openinference.instrumentation.autogen:AutogenInstrumentor" +autogen= "openinference.instrumentation.autogen:AutogenInstrumentor" [project.urls] Homepage = "https://github.com/Arize-ai/openinference/tree/main/python/instrumentation/openinference-instrumentation-autogen" @@ -54,7 +52,7 @@ path = "src/openinference/instrumentation/autogen/version.py" [tool.hatch.build.targets.sdist] include = [ - "/src", + "/src", ] [tool.hatch.build.targets.wheel] @@ -64,27 +62,28 @@ packages = ["src/openinference"] asyncio_mode = "auto" asyncio_default_fixture_loop_scope = "function" testpaths = [ - "tests", + "tests", ] [tool.mypy] strict = true explicit_package_bases = true exclude = [ - "examples", - "dist", - "sdist", + "examples", + "dist", + "sdist", ] [[tool.mypy.overrides]] ignore_missing_imports = true module = [ - "wrapt", + "autogen", + "wrapt", ] [tool.ruff] line-length = 100 -target-version = "py38" +target-version = "py310" [tool.ruff.lint.per-file-ignores] "*.ipynb" = ["E402", "E501"] diff --git a/python/instrumentation/openinference-instrumentation-autogen/src/openinference/instrumentation/autogen/__init__.py b/python/instrumentation/openinference-instrumentation-autogen/src/openinference/instrumentation/autogen/__init__.py index e224d008e..eef1532ff 100644 --- a/python/instrumentation/openinference-instrumentation-autogen/src/openinference/instrumentation/autogen/__init__.py +++ b/python/instrumentation/openinference-instrumentation-autogen/src/openinference/instrumentation/autogen/__init__.py @@ -1,235 +1,74 @@ -import json -from typing import Any, Callable, Dict, Optional, Union - -from opentelemetry import trace -from opentelemetry.trace import Link, SpanContext, Status, StatusCode - -from autogen import ConversableAgent # type: ignore - - -class AutogenInstrumentor: - def __init__(self) -> None: - self.tracer = trace.get_tracer(__name__) - self._original_generate: Optional[Callable[..., Any]] = None - self._original_initiate_chat: Optional[Callable[..., Any]] = None - self._original_execute_function: Optional[Callable[..., Any]] = None - - def _safe_json_dumps(self, obj: Any) -> str: - try: - return json.dumps(obj) - except (TypeError, ValueError): - return json.dumps(str(obj)) - - def instrument(self) -> "AutogenInstrumentor": - # Save original methods - self._original_generate = ConversableAgent.generate_reply - self._original_initiate_chat = ConversableAgent.initiate_chat - self._original_execute_function = ConversableAgent.execute_function - - instrumentor = self - - def wrapped_generate( - agent_self: ConversableAgent, - messages: 
Optional[Any] = None, - sender: Optional[str] = None, - **kwargs: Any, - ) -> Any: - try: - current_span = trace.get_current_span() - current_context: SpanContext = current_span.get_span_context() - - with instrumentor.tracer.start_as_current_span( - agent_self.__class__.__name__, - context=trace.set_span_in_context(current_span), - links=[Link(current_context)], - ) as span: - span.set_attribute(SpanAttributes.OPENINFERENCE_SPAN_KIND, "AGENT") - span.set_attribute( - SpanAttributes.INPUT_VALUE, - instrumentor._safe_json_dumps(messages), - ) - span.set_attribute(SpanAttributes.INPUT_MIME_TYPE, "application/json") - span.set_attribute("agent.type", agent_self.__class__.__name__) - - if instrumentor._original_generate is not None: - response = instrumentor._original_generate( - agent_self, messages=messages, sender=sender, **kwargs - ) - else: - # Fallback or raise an error if needed - response = None - - span.set_attribute( - SpanAttributes.OUTPUT_VALUE, - instrumentor._safe_json_dumps(response), - ) - span.set_attribute(SpanAttributes.OUTPUT_MIME_TYPE, "application/json") - - return response - except Exception as e: - if span is not None: - span.set_status(Status(StatusCode.ERROR)) - span.record_exception(e) - raise - - def wrapped_initiate_chat( - agent_self: ConversableAgent, recipient: Any, *args: Any, **kwargs: Any - ) -> Any: - try: - message = kwargs.get("message", args[0] if args else None) - current_span = trace.get_current_span() - current_context: SpanContext = current_span.get_span_context() - - with instrumentor.tracer.start_as_current_span( - "Autogen", - context=trace.set_span_in_context(current_span), - links=[Link(current_context)], - ) as span: - span.set_attribute(SpanAttributes.OPENINFERENCE_SPAN_KIND, "AGENT") - span.set_attribute( - SpanAttributes.INPUT_VALUE, - instrumentor._safe_json_dumps(message), - ) - span.set_attribute(SpanAttributes.INPUT_MIME_TYPE, "application/json") - - if instrumentor._original_initiate_chat is not None: - result = instrumentor._original_initiate_chat( - agent_self, recipient, *args, **kwargs - ) - else: - result = None - - if hasattr(result, "chat_history") and result.chat_history: - last_message = result.chat_history[-1]["content"] - span.set_attribute( - SpanAttributes.OUTPUT_VALUE, - instrumentor._safe_json_dumps(last_message), - ) - else: - span.set_attribute( - SpanAttributes.OUTPUT_VALUE, - instrumentor._safe_json_dumps(result), - ) - - span.set_attribute(SpanAttributes.OUTPUT_MIME_TYPE, "application/json") - - return result - except Exception as e: - if span is not None: - span.set_status(Status(StatusCode.ERROR)) - span.record_exception(e) - raise - - def wrapped_execute_function( - agent_self: ConversableAgent, - func_call: Union[str, Dict[str, Any]], - call_id: Optional[str] = None, - verbose: bool = False, - ) -> Any: - try: - current_span = trace.get_current_span() - current_context: SpanContext = current_span.get_span_context() - - # Handle both dictionary and string inputs - if isinstance(func_call, str): - function_name = func_call - func_call = {"name": function_name} - else: - function_name = func_call.get("name", "unknown") - - with instrumentor.tracer.start_as_current_span( - f"{function_name}", - context=trace.set_span_in_context(current_span), - links=[Link(current_context)], - ) as span: - span.set_attribute(SpanAttributes.OPENINFERENCE_SPAN_KIND, "TOOL") - span.set_attribute(SpanAttributes.TOOL_NAME, function_name) - - # Record input - span.set_attribute( - SpanAttributes.INPUT_VALUE, - 
instrumentor._safe_json_dumps(func_call),
-                    )
-                    span.set_attribute(SpanAttributes.INPUT_MIME_TYPE, "application/json")
-
-                    # If the agent stores a function map, you can store annotations
-                    if hasattr(agent_self, "_function_map"):
-                        function_map = getattr(agent_self, "_function_map", {})
-                        if function_name in function_map:
-                            func = function_map[function_name]
-                            if hasattr(func, "__annotations__"):
-                                span.set_attribute(
-                                    SpanAttributes.TOOL_PARAMETERS,
-                                    instrumentor._safe_json_dumps(func.__annotations__),
-                                )
-
-                    # Record function call details
-                    if isinstance(func_call, dict):
-                        # Record function arguments
-                        if "arguments" in func_call:
-                            span.set_attribute(
-                                SpanAttributes.TOOL_CALL_FUNCTION_ARGUMENTS,
-                                instrumentor._safe_json_dumps(func_call["arguments"]),
-                            )
-
-                        # Record function name
-                        span.set_attribute(SpanAttributes.TOOL_CALL_FUNCTION_NAME, function_name)
-
-                    # Execute function
-                    if instrumentor._original_execute_function is not None:
-                        result = instrumentor._original_execute_function(
-                            agent_self, func_call, call_id=call_id, verbose=verbose
-                        )
-                    else:
-                        result = None
-
-                    # Record output
-                    span.set_attribute(
-                        SpanAttributes.OUTPUT_VALUE,
-                        instrumentor._safe_json_dumps(result),
-                    )
-                    span.set_attribute(SpanAttributes.OUTPUT_MIME_TYPE, "application/json")
-
-                    return result
-
-            except Exception as e:
-                if span is not None:
-                    span.set_status(Status(StatusCode.ERROR))
-                    span.record_exception(e)
-                raise
-
-        # Replace methods on ConversableAgent with wrapped versions
-        ConversableAgent.generate_reply = wrapped_generate
-        ConversableAgent.initiate_chat = wrapped_initiate_chat
-        ConversableAgent.execute_function = wrapped_execute_function
-
-        return self
-
-    def uninstrument(self) -> "AutogenInstrumentor":
-        """Restore original behavior."""
-        if (
-            self._original_generate
-            and self._original_initiate_chat
-            and self._original_execute_function
-        ):
-            ConversableAgent.generate_reply = self._original_generate
-            ConversableAgent.initiate_chat = self._original_initiate_chat
-            ConversableAgent.execute_function = self._original_execute_function
-            self._original_generate = None
-            self._original_initiate_chat = None
-            self._original_execute_function = None
-        return self
-
-
-class SpanAttributes:
-    OPENINFERENCE_SPAN_KIND: str = "openinference.span.kind"
-    INPUT_VALUE: str = "input.value"
-    INPUT_MIME_TYPE: str = "input.mime_type"
-    OUTPUT_VALUE: str = "output.value"
-    OUTPUT_MIME_TYPE: str = "output.mime_type"
-    TOOL_NAME: str = "tool.name"
-    TOOL_ARGS: str = "tool.args"
-    TOOL_KWARGS: str = "tool.kwargs"
-    TOOL_PARAMETERS: str = "tool.parameters"
-    TOOL_CALL_FUNCTION_ARGUMENTS: str = "tool_call.function.arguments"
-    TOOL_CALL_FUNCTION_NAME: str = "tool_call.function.name"
+import logging
+from typing import Any, Collection
+
+from opentelemetry import trace as trace_api
+from opentelemetry.instrumentation.instrumentor import (  # type: ignore[attr-defined]
+    BaseInstrumentor,
+)
+from wrapt import wrap_function_wrapper
+
+from openinference.instrumentation import (
+    OITracer,
+    TraceConfig,
+)
+
+from ._wrappers import _AssistantAgentWrapper
+from .version import __version__
+
+_instruments = ("autogen-agentchat >= 0.5.1",)
+
+logger = logging.getLogger(__name__)
+
+
+class AutogenInstrumentor(BaseInstrumentor):  # type: ignore[misc]
+    """An instrumentor for the Autogen framework."""
+
+    __slots__ = (
+        "_tracer",
+        "_original_on_messages",
+        "_original_call_llm",
+        "_original_execute_tool",
+    )
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _instrument(self, **kwargs: Any) -> None:
+        if not (tracer_provider := kwargs.get("tracer_provider")):
+            tracer_provider = trace_api.get_tracer_provider()
+        if not (config := kwargs.get("config")):
+            config = TraceConfig()
+        else:
+            assert isinstance(config, TraceConfig)
+        self._tracer = OITracer(
+            trace_api.get_tracer(__name__, __version__, tracer_provider),
+            config=config,
+        )
+        from autogen_agentchat.agents import AssistantAgent
+
+        # Keep references to the unwrapped methods so _uninstrument can restore them.
+        self._original_on_messages = AssistantAgent.on_messages
+        self._original_call_llm = AssistantAgent._call_llm
+        self._original_execute_tool = AssistantAgent._execute_tool_call
+
+        # Create wrapper instance
+        wrapper = _AssistantAgentWrapper(tracer=self._tracer)
+
+        # Wrap AssistantAgent methods
+        wrap_function_wrapper(
+            module="autogen_agentchat.agents",
+            name="AssistantAgent.on_messages",
+            wrapper=wrapper.on_messages_wrapper,
+        )
+        wrap_function_wrapper(
+            module="autogen_agentchat.agents",
+            name="AssistantAgent._execute_tool_call",
+            wrapper=wrapper.execute_tool_wrapper,
+        )
+        wrap_function_wrapper(
+            module="autogen_agentchat.agents",
+            name="AssistantAgent._call_llm",
+            wrapper=wrapper.call_llm_wrapper,
+        )
+
+    def _uninstrument(self, **kwargs: Any) -> None:
+        from autogen_agentchat.agents import AssistantAgent
+
+        # Restore the same private attributes that _instrument wrapped.
+        if self._original_on_messages is not None:
+            AssistantAgent.on_messages = self._original_on_messages
+        if self._original_call_llm is not None:
+            AssistantAgent._call_llm = self._original_call_llm
+        if self._original_execute_tool is not None:
+            AssistantAgent._execute_tool_call = self._original_execute_tool
diff --git a/python/instrumentation/openinference-instrumentation-autogen/src/openinference/instrumentation/autogen/_wrappers.py b/python/instrumentation/openinference-instrumentation-autogen/src/openinference/instrumentation/autogen/_wrappers.py
new file mode 100644
index 000000000..0e7819a49
--- /dev/null
+++ b/python/instrumentation/openinference-instrumentation-autogen/src/openinference/instrumentation/autogen/_wrappers.py
@@ -0,0 +1,77 @@
+from typing import Any, Dict, Tuple
+
+from opentelemetry.trace import Status, StatusCode
+
+from openinference.instrumentation import OITracer
+
+
+class _AssistantAgentWrapper:
+    """Wrappers for AssistantAgent entry points. The wrapped methods are
+    coroutines (or async generators), so each wrapper is async as well;
+    a sync wrapper would close the span before the wrapped call runs."""
+
+    def __init__(self, tracer: OITracer) -> None:
+        self._tracer = tracer
+
+    async def on_messages_wrapper(
+        self, wrapped: Any, instance: Any, args: Tuple[Any, ...], kwargs: Dict[str, Any]
+    ) -> Any:
+        with self._tracer.start_as_current_span("AssistantAgent.on_messages") as span:
+            try:
+                # Messages arrive as the first positional argument or as a kwarg.
+                messages = args[0] if args else kwargs.get("messages")
+                if messages:
+                    span.set_attribute("messages.count", len(messages))
+                return await wrapped(*args, **kwargs)
+            except Exception as e:
+                span.record_exception(e)
+                span.set_status(Status(StatusCode.ERROR, str(e)))
+                raise
+
+    async def on_messages_stream_wrapper(
+        self, wrapped: Any, instance: Any, args: Tuple[Any, ...], kwargs: Dict[str, Any]
+    ) -> Any:
+        # on_messages_stream is an async generator; re-yield its items so the
+        # span stays open for the full lifetime of the stream.
+        with self._tracer.start_as_current_span("AssistantAgent.on_messages_stream") as span:
+            try:
+                messages = args[0] if args else kwargs.get("messages")
+                if messages:
+                    span.set_attribute("messages.count", len(messages))
+                async for item in wrapped(*args, **kwargs):
+                    yield item
+            except Exception as e:
+                span.record_exception(e)
+                span.set_status(Status(StatusCode.ERROR, str(e)))
+                raise
+
+    async def call_llm_wrapper(
+        self, wrapped: Any, instance: Any, args: Tuple[Any, ...], kwargs: Dict[str, Any]
+    ) -> Any:
+        with self._tracer.start_as_current_span("AssistantAgent.call_llm") as span:
+            try:
+                # Best-effort extraction of the model client passed to _call_llm.
+                model_client = args[0] if args else kwargs.get("model_client")
+                if model_client:
+                    span.set_attribute("model.name", getattr(model_client, "model", "unknown"))
+                return await wrapped(*args, **kwargs)
+            except Exception as e:
+                span.record_exception(e)
+                span.set_status(Status(StatusCode.ERROR, str(e)))
+                raise
+
+    async def execute_tool_wrapper(
+        self, wrapped: Any, instance: Any, args: Tuple[Any, ...], kwargs: Dict[str, Any]
+    ) -> Any:
+        with self._tracer.start_as_current_span("AssistantAgent.execute_tool") as span:
+            try:
+                # Best-effort extraction of the FunctionCall being executed.
+                tool_call = args[0] if args else kwargs.get("tool_call")
+                if tool_call:
+                    span.set_attribute("tool.name", getattr(tool_call, "name", "unknown"))
+                    span.set_attribute("tool.arguments", str(getattr(tool_call, "arguments", "")))
+                return await wrapped(*args, **kwargs)
+            except Exception as e:
+                span.record_exception(e)
+                span.set_status(Status(StatusCode.ERROR, str(e)))
+                raise
diff --git a/python/instrumentation/openinference-instrumentation-autogen/test-requirements.txt b/python/instrumentation/openinference-instrumentation-autogen/test-requirements.txt
new file mode 100644
index 000000000..34645f930
--- /dev/null
+++ b/python/instrumentation/openinference-instrumentation-autogen/test-requirements.txt
@@ -0,0 +1,7 @@
+autogen-agentchat==0.5.4
+autogen-core==0.5.4
+autogen-ext[openai, anthropic]==0.5.4
+opentelemetry-sdk
+pytest-asyncio
+pytest-vcr
+httpx<0.28
diff --git a/python/instrumentation/openinference-instrumentation-autogen/tests/cassettes/test_autogen_chat_agent.yaml b/python/instrumentation/openinference-instrumentation-autogen/tests/cassettes/test_autogen_chat_agent.yaml
new file mode 100644
index 000000000..4a3ffdf06
--- /dev/null
+++ b/python/instrumentation/openinference-instrumentation-autogen/tests/cassettes/test_autogen_chat_agent.yaml
@@ -0,0 +1,210 @@
+interactions:
+- request:
+    body: '{"messages": [{"content": "You are a helpful assistant that can check the
+      weather.", "role": "system"}, {"name": "user", "role": "user", "content": "What
+      is the weather in New York?"}], "model": "gpt-3.5-turbo", "stream": true, "tools":
+      [{"type": "function", "function": {"name": "get_weather", "description": "Get
+      the weather for a given city.", "parameters": {"type": "object", "properties":
+      {"city": {"description": "city", "title": "City", "type": "string"}}, "required":
+      ["city"], "additionalProperties": false}, "strict": false}}]}'
+    headers: {}
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: 'data: {"id":"chatcmpl-BPk3thaMgHdQ9k9uzxbBZ5nW695pL","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"call_R3XKcCzBkKSM6HG8zx5EUYpw","type":"function","function":{"name":"get_weather","arguments":""}}],"refusal":null},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-BPk3thaMgHdQ9k9uzxbBZ5nW695pL","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}]}
+
+
+        data: 
{"id":"chatcmpl-BPk3thaMgHdQ9k9uzxbBZ5nW695pL","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"city"}}]},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BPk3thaMgHdQ9k9uzxbBZ5nW695pL","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BPk3thaMgHdQ9k9uzxbBZ5nW695pL","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"New"}}]},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BPk3thaMgHdQ9k9uzxbBZ5nW695pL","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" + York"}}]},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BPk3thaMgHdQ9k9uzxbBZ5nW695pL","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"}"}}]},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BPk3thaMgHdQ9k9uzxbBZ5nW695pL","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]} + + + data: [DONE] + + + ' + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 93536e6baad46446-SJC + Connection: + - keep-alive + Content-Type: + - text/event-stream; charset=utf-8 + Date: + - Thu, 24 Apr 2025 06:09:21 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=nSCVMKpQND0Ibksu.XFvDMw8r7XxwuL4XTxb0j.nHX4-1745474961-1.0.1.1-Tg1vhaa0rAxULvTJFPZr4ySWJ2dE5YYPh4MjSN.4VvUXPEZOPUgJ73Mh9Qn.1NVGLZKUiwFYu7utkK5m0hHN.dKfaRHcxiWrv8.niJt42fk; + path=/; expires=Thu, 24-Apr-25 06:39:21 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=X7oz_lPFn3SnxwFKAyO.4squhX.TpgShk_0E6yBMd3Y-1745474961611-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + openai-organization: + - user-gif6bpt2ys8quwhhzo6dg5aw + openai-processing-ms: + - '239' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '5000' + x-ratelimit-limit-tokens: + - '4000000' + x-ratelimit-remaining-requests: + - '4999' + x-ratelimit-remaining-tokens: + - '3999975' + x-ratelimit-reset-requests: + - 12ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_80a849b8c450e631fc20379ee17388a1 + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"content": "You are a helpful assistant that can check the + weather.", "role": "system"}, {"name": "user", "role": "user", "content": "What + is the weather in New York?"}, {"name": "weather_agent", "role": "assistant", + 
"tool_calls": [{"id": "call_R3XKcCzBkKSM6HG8zx5EUYpw", "function": {"arguments": + "{\"city\":\"New York\"}", "name": "get_weather"}, "type": "function"}]}, {"content": + "The weather in New York is 73 degrees and Sunny.", "role": "tool", "tool_call_id": + "call_R3XKcCzBkKSM6HG8zx5EUYpw"}], "model": "gpt-3.5-turbo", "stream": true}' + headers: {} + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: 'data: {"id":"chatcmpl-BPk3tZ8AuuGd9i0D6kckRBe1ll8EV","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BPk3tZ8AuuGd9i0D6kckRBe1ll8EV","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"The"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BPk3tZ8AuuGd9i0D6kckRBe1ll8EV","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + weather"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BPk3tZ8AuuGd9i0D6kckRBe1ll8EV","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + in"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BPk3tZ8AuuGd9i0D6kckRBe1ll8EV","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + New"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BPk3tZ8AuuGd9i0D6kckRBe1ll8EV","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + York"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BPk3tZ8AuuGd9i0D6kckRBe1ll8EV","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + is"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BPk3tZ8AuuGd9i0D6kckRBe1ll8EV","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + "},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BPk3tZ8AuuGd9i0D6kckRBe1ll8EV","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"73"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BPk3tZ8AuuGd9i0D6kckRBe1ll8EV","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + degrees"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BPk3tZ8AuuGd9i0D6kckRBe1ll8EV","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + 
and"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BPk3tZ8AuuGd9i0D6kckRBe1ll8EV","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + Sunny"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BPk3tZ8AuuGd9i0D6kckRBe1ll8EV","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BPk3tZ8AuuGd9i0D6kckRBe1ll8EV","object":"chat.completion.chunk","created":1745474961,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + + + data: [DONE] + + + ' + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 93536e6ebd786446-SJC + Connection: + - keep-alive + Content-Type: + - text/event-stream; charset=utf-8 + Date: + - Thu, 24 Apr 2025 06:09:21 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + openai-organization: + - user-gif6bpt2ys8quwhhzo6dg5aw + openai-processing-ms: + - '121' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '5000' + x-ratelimit-limit-tokens: + - '4000000' + x-ratelimit-remaining-requests: + - '4999' + x-ratelimit-remaining-tokens: + - '3999961' + x-ratelimit-reset-requests: + - 12ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_acff1594d255870d4a862b332d3e4422 + status: + code: 200 + message: OK +version: 1 diff --git a/python/instrumentation/openinference-instrumentation-autogen/tests/test_instrumentor.py b/python/instrumentation/openinference-instrumentation-autogen/tests/test_instrumentor.py index 2f8e2bf40..b4200f506 100644 --- a/python/instrumentation/openinference-instrumentation-autogen/tests/test_instrumentor.py +++ b/python/instrumentation/openinference-instrumentation-autogen/tests/test_instrumentor.py @@ -1,8 +1,38 @@ +from typing import Any, Generator + +import pytest +from opentelemetry.sdk.resources import Resource +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import SimpleSpanProcessor +from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter from opentelemetry.util._importlib_metadata import entry_points +from openinference.instrumentation import OITracer from openinference.instrumentation.autogen import AutogenInstrumentor +@pytest.fixture() +def in_memory_span_exporter() -> InMemorySpanExporter: + return InMemorySpanExporter() + + +@pytest.fixture() +def tracer_provider(in_memory_span_exporter: InMemorySpanExporter) -> TracerProvider: + resource = Resource(attributes={}) + tracer_provider = TracerProvider(resource=resource) + tracer_provider.add_span_processor(SimpleSpanProcessor(in_memory_span_exporter)) + return tracer_provider + + +@pytest.fixture() +def setup_autogen_instrumentation( + tracer_provider: TracerProvider, +) -> Generator[None, None, None]: + AutogenInstrumentor().instrument(tracer_provider=tracer_provider) + yield + AutogenInstrumentor().uninstrument() + + class TestInstrumentor: def test_entrypoint_for_opentelemetry_instrument(self) -> None: (instrumentor_entrypoint,) = 
entry_points(
@@ -10,3 +40,55 @@ def test_entrypoint_for_opentelemetry_instrument(self) -> None:
         )
         instrumentor = instrumentor_entrypoint.load()()
         assert isinstance(instrumentor, AutogenInstrumentor)
+
+    # Ensure we're using the common OITracer from the shared openinference-instrumentation package
+    def test_oitracer(self, setup_autogen_instrumentation: Any) -> None:
+        assert isinstance(AutogenInstrumentor()._tracer, OITracer)
+
+
+@pytest.mark.asyncio
+@pytest.mark.vcr(
+    decode_compressed_response=True,
+    before_record_request=lambda _: _.headers.clear() or _,
+)
+async def test_autogen_chat_agent(
+    tracer_provider: TracerProvider,
+    in_memory_span_exporter: InMemorySpanExporter,
+    setup_autogen_instrumentation: Any,
+) -> None:
+    from autogen_agentchat.agents import AssistantAgent
+    from autogen_ext.models.openai import OpenAIChatCompletionClient
+
+    # Define a model client; the key is a placeholder because VCR replays recorded responses
+    model_client = OpenAIChatCompletionClient(
+        model="gpt-3.5-turbo",
+        api_key="sk-proj",  # placeholder key; request headers are filtered by vcr
+    )
+
+    # Define a simple function tool that the agent can use
+    def get_weather(city: str) -> str:
+        """Get the weather for a given city."""
+        return f"The weather in {city} is 73 degrees and Sunny."
+
+    # Define an AssistantAgent with the model, tool, system message, and reflection enabled
+    agent = AssistantAgent(
+        name="weather_agent",
+        model_client=model_client,
+        tools=[get_weather],
+        system_message="You are a helpful assistant that can check the weather.",
+        reflect_on_tool_use=True,
+        model_client_stream=True,
+    )
+
+    # Run the agent to completion, then close the model client
+    result = await agent.run(task="What is the weather in New York?")
+    await model_client.close()
+    assert result.messages
+
+    # Verify that spans were created
+    spans = in_memory_span_exporter.get_finished_spans()
+    assert len(spans) > 0, "Expected spans to be created"
+
+    # Verify the tool execution was traced (span name set in _AssistantAgentWrapper)
+    tool_spans = [span for span in spans if span.name == "AssistantAgent.execute_tool"]
+    assert len(tool_spans) > 0, "Expected the tool execution to be traced"
diff --git a/python/tox.ini b/python/tox.ini
index 814b2cc88..8613f86a9 100644
--- a/python/tox.ini
+++ b/python/tox.ini
@@ -23,6 +23,6 @@ envlist =
-    py3{9,13}-ci-{autogen,autogen-latest}
+    py3{10,13}-ci-{autogen,autogen-latest}
     py3{11,13}-ci-{beeai,beeai-latest}
     py3{9,13}-ci-{portkey,portkey-latest}
     py39-mypy-langchain_core
     py3{10,13}-ci-{mcp,mcp-latest}
@@ -106,6 +107,10 @@ commands_pre =
     anthropic: uv pip install -r test-requirements.txt
     anthropic-latest: uv pip install -U anthropic
    smolagents: uv pip install --reinstall {toxinidir}/instrumentation/openinference-instrumentation-smolagents[test]
+    autogen: uv pip uninstall -r test-requirements.txt
+    autogen: uv pip install --reinstall-package openinference-instrumentation-autogen .
+    autogen: python -c 'import openinference.instrumentation.autogen'
+    autogen: uv pip install -r test-requirements.txt
+    autogen-latest: uv pip install -U autogen-agentchat autogen-ext
-    autogen: uv pip install --reinstall {toxinidir}/instrumentation/openinference-instrumentation-autogen[test]
     portkey: uv pip uninstall -r test-requirements.txt
     portkey: uv pip install --reinstall {toxinidir}/instrumentation/openinference-instrumentation-portkey[test]
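A reviewer-facing usage sketch follows, added as a hypothetical examples/ file: its path, the placeholder index, and the console-exporter wiring are assumptions rather than part of the recorded changeset. It shows the flow the tests above exercise, with the instrumentor handed a TracerProvider before an AssistantAgent run.
diff --git a/python/instrumentation/openinference-instrumentation-autogen/examples/trace_assistant_agent.py b/python/instrumentation/openinference-instrumentation-autogen/examples/trace_assistant_agent.py
new file mode 100644
index 000000000..000000000
--- /dev/null
+++ b/python/instrumentation/openinference-instrumentation-autogen/examples/trace_assistant_agent.py
@@ -0,0 +1,41 @@
+"""Usage sketch for AutogenInstrumentor. Illustrative only: the file name,
+the console-exporter wiring, and the model choice are assumptions; it expects
+autogen-agentchat, autogen-ext[openai], and OPENAI_API_KEY to be available."""
+import asyncio
+
+from autogen_agentchat.agents import AssistantAgent
+from autogen_ext.models.openai import OpenAIChatCompletionClient
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
+
+from openinference.instrumentation.autogen import AutogenInstrumentor
+
+# Export spans to the console so the sketch needs no collector.
+tracer_provider = TracerProvider()
+tracer_provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
+AutogenInstrumentor().instrument(tracer_provider=tracer_provider)
+
+
+def get_weather(city: str) -> str:
+    """Toy tool returning a canned answer."""
+    return f"The weather in {city} is 73 degrees and Sunny."
+
+
+async def main() -> None:
+    model_client = OpenAIChatCompletionClient(model="gpt-3.5-turbo")
+    agent = AssistantAgent(
+        name="weather_agent",
+        model_client=model_client,
+        tools=[get_weather],
+        system_message="You are a helpful assistant that can check the weather.",
+        reflect_on_tool_use=True,
+    )
+    # on_messages, _call_llm, and _execute_tool_call are wrapped, so this run
+    # emits AssistantAgent.* spans through the console exporter.
+    result = await agent.run(task="What is the weather in New York?")
+    print(result.messages[-1])
+    await model_client.close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())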