diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6ffdb94d5..82664f910 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,13 +1,13 @@ { "python/openinference-semantic-conventions": "0.1.17", - "python/instrumentation/openinference-instrumentation-agno": "0.1.3", + "python/instrumentation/openinference-instrumentation-agno": "0.1.4", "python/instrumentation/openinference-instrumentation-openai": "0.1.28", "python/instrumentation/openinference-instrumentation-llama-index": "4.2.1", "python/instrumentation/openinference-instrumentation-dspy": "0.1.23", - "python/instrumentation/openinference-instrumentation-langchain": "0.1.42", + "python/instrumentation/openinference-instrumentation-langchain": "0.1.43", "python/instrumentation/openinference-instrumentation-bedrock": "0.1.22", "python/instrumentation/openinference-instrumentation-mistralai": "1.3.3", - "python/openinference-instrumentation": "0.1.29", + "python/openinference-instrumentation": "0.1.30", "python/instrumentation/openinference-instrumentation-guardrails": "0.1.9", "python/instrumentation/openinference-instrumentation-vertexai": "0.1.11", "python/instrumentation/openinference-instrumentation-crewai": "0.1.9", @@ -15,12 +15,13 @@ "python/instrumentation/openinference-instrumentation-litellm": "0.1.19", "python/instrumentation/openinference-instrumentation-groq": "0.1.11", "python/instrumentation/openinference-instrumentation-instructor": "0.1.9", - "python/instrumentation/openinference-instrumentation-anthropic": "0.1.17", + "python/instrumentation/openinference-instrumentation-anthropic": "0.1.18", "python/instrumentation/openinference-instrumentation-smolagents": "0.1.11", "python/instrumentation/openinference-instrumentation-autogen": "0.1.9", "python/instrumentation/openinference-instrumentation-openai-agents": "0.1.12", "python/instrumentation/openinference-instrumentation-portkey": "0.1.1", 
"python/instrumentation/openinference-instrumentation-beeai": "0.1.6", - "python/instrumentation/openinference-instrumentation-mcp": "1.2.1", - "python/instrumentation/openinference-instrumentation-google-genai": "0.1.1" + "python/instrumentation/openinference-instrumentation-mcp": "1.3.0", + "python/instrumentation/openinference-instrumentation-google-genai": "0.1.1", + "python/instrumentation/openinference-instrumentation-autogen-agentchat": "0.1.0" } \ No newline at end of file diff --git a/README.md b/README.md index a4be7d2b2..9fc19b324 100644 --- a/README.md +++ b/README.md @@ -10,48 +10,56 @@

-OpenInference is a set of conventions and plugins that is complimentary to [OpenTelemetry](https://opentelemetry.io/) to enable tracing of AI applications. OpenInference is natively supported by [arize-phoenix](https://github.com/Arize-ai/phoenix), but can be used with any OpenTelemetry-compatible backend as well. +OpenInference is a set of conventions and plugins that is complimentary to [OpenTelemetry](https://opentelemetry.io/) to +enable tracing of AI applications. OpenInference is natively supported +by [arize-phoenix](https://github.com/Arize-ai/phoenix), but can be used with any OpenTelemetry-compatible backend as +well. ## Specification -The OpenInference specification is edited in markdown files found in the [spec directory](./spec/). It's designed to provide insight into the invocation of LLMs and the surrounding application context such as retrieval from vector stores and the usage of external tools such as search engines or APIs. The specification is transport and file-format agnostic, and is intended to be used in conjunction with other specifications such as JSON, ProtoBuf, and DataFrames. +The OpenInference specification is edited in markdown files found in the [spec directory](./spec/). It's designed to +provide insight into the invocation of LLMs and the surrounding application context such as retrieval from vector stores +and the usage of external tools such as search engines or APIs. The specification is transport and file-format agnostic, +and is intended to be used in conjunction with other specifications such as JSON, ProtoBuf, and DataFrames. ## Instrumentation -OpenInference provides a set of instrumentations for popular machine learning SDKs and frameworks in a variety of languages. +OpenInference provides a set of instrumentations for popular machine learning SDKs and frameworks in a variety of +languages. 
## Python ### Libraries -| Package | Description | Version | -|-----------------------------------------------------------------------------------------------------------------------|------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [`openinference-semantic-conventions`](./python/openinference-semantic-conventions) | Semantic conventions for tracing of LLM Apps. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-semantic-conventions.svg)](https://pypi.python.org/pypi/openinference-semantic-conventions) | -| [`openinference-instrumentation-agno`](./python/instrumentation/openinference-instrumentation-agno) | OpenInference Instrumentation for Agno Agents. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-agno.svg)](https://pypi.python.org/pypi/openinference-instrumentation-agno) | -| [`openinference-instrumentation-openai`](./python/instrumentation/openinference-instrumentation-openai) | OpenInference Instrumentation for OpenAI SDK. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-openai.svg)](https://pypi.python.org/pypi/openinference-instrumentation-openai) | -| [`openinference-instrumentation-openai-agents`](./python/instrumentation/openinference-instrumentation-openai-agents) | OpenInference Instrumentation for OpenAI Agents SDK. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-openai-agents.svg)](https://pypi.python.org/pypi/openinference-instrumentation-openai-agents) | -| [`openinference-instrumentation-llama-index`](./python/instrumentation/openinference-instrumentation-llama-index) | OpenInference Instrumentation for LlamaIndex. 
| [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-llama-index.svg)](https://pypi.python.org/pypi/openinference-instrumentation-llama-index) | -| [`openinference-instrumentation-dspy`](./python/instrumentation/openinference-instrumentation-dspy) | OpenInference Instrumentation for DSPy. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-dspy.svg)](https://pypi.python.org/pypi/openinference-instrumentation-dspy) | -| [`openinference-instrumentation-bedrock`](./python/instrumentation/openinference-instrumentation-bedrock) | OpenInference Instrumentation for AWS Bedrock. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-bedrock.svg)](https://pypi.python.org/pypi/openinference-instrumentation-bedrock) | -| [`openinference-instrumentation-langchain`](./python/instrumentation/openinference-instrumentation-langchain) | OpenInference Instrumentation for LangChain. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-langchain.svg)](https://pypi.python.org/pypi/openinference-instrumentation-langchain) | -| [`openinference-instrumentation-mcp`](./python/instrumentation/openinference-instrumentation-mcp) | OpenInference Instrumentation for MCP. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-mcp.svg)](https://pypi.python.org/pypi/openinference-instrumentation-mcp) | -| [`openinference-instrumentation-mistralai`](./python/instrumentation/openinference-instrumentation-mistralai) | OpenInference Instrumentation for MistralAI. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-mistralai.svg)](https://pypi.python.org/pypi/openinference-instrumentation-mistralai) | -| [`openinference-instrumentation-portkey`](./python/instrumentation/openinference-instrumentation-portkey) | OpenInference Instrumentation for Portkey. 
| [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-portkey.svg)](https://pypi.python.org/pypi/openinference-instrumentation-portkey) | -| [`openinference-instrumentation-guardrails`](./python/instrumentation/openinference-instrumentation-guardrails) | OpenInference Instrumentation for Guardrails. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-guardrails.svg)](https://pypi.python.org/pypi/openinference-instrumentation-guardrails) | -| [`openinference-instrumentation-vertexai`](./python/instrumentation/openinference-instrumentation-vertexai) | OpenInference Instrumentation for VertexAI. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-vertexai.svg)](https://pypi.python.org/pypi/openinference-instrumentation-vertexai) | -| [`openinference-instrumentation-crewai`](./python/instrumentation/openinference-instrumentation-crewai) | OpenInference Instrumentation for CrewAI. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-crewai.svg)](https://pypi.python.org/pypi/openinference-instrumentation-crewai) | -| [`openinference-instrumentation-haystack`](./python/instrumentation/openinference-instrumentation-haystack) | OpenInference Instrumentation for Haystack. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-haystack.svg)](https://pypi.python.org/pypi/openinference-instrumentation-haystack) | -| [`openinference-instrumentation-litellm`](./python/instrumentation/openinference-instrumentation-litellm) | OpenInference Instrumentation for liteLLM. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-litellm.svg)](https://pypi.python.org/pypi/openinference-instrumentation-litellm) | -| [`openinference-instrumentation-groq`](./python/instrumentation/openinference-instrumentation-groq) | OpenInference Instrumentation for Groq. 
| [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-groq.svg)](https://pypi.python.org/pypi/openinference-instrumentation-groq) | -| [`openinference-instrumentation-instructor`](./python/instrumentation/openinference-instrumentation-instructor) | OpenInference Instrumentation for Instructor. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-instructor.svg)](https://pypi.python.org/pypi/openinference-instrumentation-instructor) | -| [`openinference-instrumentation-anthropic`](./python/instrumentation/openinference-instrumentation-anthropic) | OpenInference Instrumentation for Anthropic. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-anthropic.svg)](https://pypi.python.org/pypi/openinference-instrumentation-anthropic) | -| [`openinference-instrumentation-beeai`](./python/instrumentation/openinference-instrumentation-beeai) | OpenInference Instrumentation for BeeAI. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-beeai.svg)](https://pypi.python.org/pypi/openinference-instrumentation-beeai) | -| [`openinference-instrumentation-google-genai`](./python/instrumentation/openinference-instrumentation-google-genai) | OpenInference Instrumentation for Google GenAI. 
| [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-google-genai.svg)](https://pypi.python.org/pypi/openinference-instrumentation-google-genai) | +| Package | Description | Version | +|-------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [`openinference-semantic-conventions`](./python/openinference-semantic-conventions) | Semantic conventions for tracing of LLM Apps. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-semantic-conventions.svg)](https://pypi.python.org/pypi/openinference-semantic-conventions) | +| [`openinference-instrumentation-agno`](./python/instrumentation/openinference-instrumentation-agno) | OpenInference Instrumentation for Agno Agents. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-agno.svg)](https://pypi.python.org/pypi/openinference-instrumentation-agno) | +| [`openinference-instrumentation-openai`](./python/instrumentation/openinference-instrumentation-openai) | OpenInference Instrumentation for OpenAI SDK. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-openai.svg)](https://pypi.python.org/pypi/openinference-instrumentation-openai) | +| [`openinference-instrumentation-openai-agents`](./python/instrumentation/openinference-instrumentation-openai-agents) | OpenInference Instrumentation for OpenAI Agents SDK. 
| [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-openai-agents.svg)](https://pypi.python.org/pypi/openinference-instrumentation-openai-agents) | +| [`openinference-instrumentation-llama-index`](./python/instrumentation/openinference-instrumentation-llama-index) | OpenInference Instrumentation for LlamaIndex. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-llama-index.svg)](https://pypi.python.org/pypi/openinference-instrumentation-llama-index) | +| [`openinference-instrumentation-dspy`](./python/instrumentation/openinference-instrumentation-dspy) | OpenInference Instrumentation for DSPy. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-dspy.svg)](https://pypi.python.org/pypi/openinference-instrumentation-dspy) | +| [`openinference-instrumentation-bedrock`](./python/instrumentation/openinference-instrumentation-bedrock) | OpenInference Instrumentation for AWS Bedrock. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-bedrock.svg)](https://pypi.python.org/pypi/openinference-instrumentation-bedrock) | +| [`openinference-instrumentation-langchain`](./python/instrumentation/openinference-instrumentation-langchain) | OpenInference Instrumentation for LangChain. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-langchain.svg)](https://pypi.python.org/pypi/openinference-instrumentation-langchain) | +| [`openinference-instrumentation-mcp`](./python/instrumentation/openinference-instrumentation-mcp) | OpenInference Instrumentation for MCP. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-mcp.svg)](https://pypi.python.org/pypi/openinference-instrumentation-mcp) | +| [`openinference-instrumentation-mistralai`](./python/instrumentation/openinference-instrumentation-mistralai) | OpenInference Instrumentation for MistralAI. 
| [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-mistralai.svg)](https://pypi.python.org/pypi/openinference-instrumentation-mistralai) | +| [`openinference-instrumentation-portkey`](./python/instrumentation/openinference-instrumentation-portkey) | OpenInference Instrumentation for Portkey. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-portkey.svg)](https://pypi.python.org/pypi/openinference-instrumentation-portkey) | +| [`openinference-instrumentation-guardrails`](./python/instrumentation/openinference-instrumentation-guardrails) | OpenInference Instrumentation for Guardrails. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-guardrails.svg)](https://pypi.python.org/pypi/openinference-instrumentation-guardrails) | +| [`openinference-instrumentation-vertexai`](./python/instrumentation/openinference-instrumentation-vertexai) | OpenInference Instrumentation for VertexAI. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-vertexai.svg)](https://pypi.python.org/pypi/openinference-instrumentation-vertexai) | +| [`openinference-instrumentation-crewai`](./python/instrumentation/openinference-instrumentation-crewai) | OpenInference Instrumentation for CrewAI. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-crewai.svg)](https://pypi.python.org/pypi/openinference-instrumentation-crewai) | +| [`openinference-instrumentation-haystack`](./python/instrumentation/openinference-instrumentation-haystack) | OpenInference Instrumentation for Haystack. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-haystack.svg)](https://pypi.python.org/pypi/openinference-instrumentation-haystack) | +| [`openinference-instrumentation-litellm`](./python/instrumentation/openinference-instrumentation-litellm) | OpenInference Instrumentation for liteLLM. 
| [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-litellm.svg)](https://pypi.python.org/pypi/openinference-instrumentation-litellm) | +| [`openinference-instrumentation-groq`](./python/instrumentation/openinference-instrumentation-groq) | OpenInference Instrumentation for Groq. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-groq.svg)](https://pypi.python.org/pypi/openinference-instrumentation-groq) | +| [`openinference-instrumentation-instructor`](./python/instrumentation/openinference-instrumentation-instructor) | OpenInference Instrumentation for Instructor. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-instructor.svg)](https://pypi.python.org/pypi/openinference-instrumentation-instructor) | +| [`openinference-instrumentation-anthropic`](./python/instrumentation/openinference-instrumentation-anthropic) | OpenInference Instrumentation for Anthropic. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-anthropic.svg)](https://pypi.python.org/pypi/openinference-instrumentation-anthropic) | +| [`openinference-instrumentation-beeai`](./python/instrumentation/openinference-instrumentation-beeai) | OpenInference Instrumentation for BeeAI. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-beeai.svg)](https://pypi.python.org/pypi/openinference-instrumentation-beeai) | +| [`openinference-instrumentation-google-genai`](./python/instrumentation/openinference-instrumentatimn-google-genai) | OpenInference Instrumentation for Google GenAI. | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-google-genai.svg)](https://pypi.python.org/pypi/openinference-instrumentation-google-genai) | +| [`openinference-instrumentation-autogen-agentchat`](./python/instrumentation/openinference-instrumentation-autogen-agentchat) | OpenInference Instrumentation for Microsoft Autogen AgentChat. 
| [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-autogen-agentchat.svg)](https://pypi.python.org/pypi/openinference-instrumentation-autogen-agentchat) | ### Examples | Name | Description | Complexity Level | -|-------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------| ---------------- | +|-------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------|------------------| | [Agno](python/instrumentation/openinference-instrumentation-agno/examples/) | Agno agent examples | Beginner | | [OpenAI SDK](python/instrumentation/openinference-instrumentation-openai/examples/) | OpenAI Python SDK, including chat completions and embeddings | Beginner | | [MistralAI SDK](python/instrumentation/openinference-instrumentation-mistralai/examples/) | MistralAI Python SDK | Beginner | @@ -70,13 +78,14 @@ OpenInference provides a set of instrumentations for popular machine learning SD | [DSPy](python/examples/dspy-rag-fastapi/) | A DSPy RAG application using FastAPI, Weaviate, and Cohere | Intermediate | | [Haystack](python/instrumentation/openinference-instrumentation-haystack/examples/) | A Haystack QA RAG application | Intermediate | | [OpenAI Agents](python/instrumentation/openinference-instrumentation-openai-agents/examples/) | OpenAI Agents with handoffs | Intermediate | +| [Autogen AgentChat](python/instrumentation/openinference-instrumentation-autogen-agentchat/examples/) | Microsoft Autogen Assistant Agent and Team Chat | Intermediate | ## JavaScript ### Libraries | Package | Description | Version | -| ----------------------------------------------------------------------------------------------------------- | ----------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +|-------------------------------------------------------------------------------------------------------------|-------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | [`@arizeai/openinference-semantic-conventions`](./js/packages/openinference-semantic-conventions) | Semantic conventions for tracing of LLM Apps. | [![NPM Version](https://img.shields.io/npm/v/@arizeai/openinference-semantic-conventions.svg)](https://www.npmjs.com/package/@arizeai/openinference-semantic-conventions) | | [`@arizeai/openinference-core`](./js/packages/openinference-core) | Core utility functions for instrumentation | [![NPM Version](https://img.shields.io/npm/v/@arizeai/openinference-core.svg)](https://www.npmjs.com/package/@arizeai/openinference-core) | | [`@arizeai/openinference-instrumentation-beeai`](./js/packages/openinference-instrumentation-beeai) | OpenInference Instrumentation for BeeAI. 
| [![NPM Version](https://img.shields.io/npm/v/@arizeai/openinference-instrumentation-beeai)](https://www.npmjs.com/package/@arizeai/openinference-instrumentation-beeai) | @@ -88,7 +97,7 @@ OpenInference provides a set of instrumentations for popular machine learning SD ### Examples | Name | Description | Complexity Level | -| ------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- | +|--------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------| | [OpenAI SDK](js/examples/openai) | OpenAI Node.js client | Beginner | | [BeeAI framework - ReAct agent](js/packages/openinference-instrumentation-beeai/examples/run-react-agent.ts) | Agentic `ReActAgent` instrumentation in the BeeAI framework | Beginner | | [BeeAI framework - ToolCalling agent](js/packages/openinference-instrumentation-beeai/examples/run-toolcalling-agent.ts) | Agentic `ToolCallingAgent` instrumentation in the BeeAI framework | Beginner | @@ -102,17 +111,18 @@ OpenInference provides a set of instrumentations for popular machine learning SD OpenInference supports the following destinations as span collectors. 
-- ✅ [Arize-Phoenix](https://github.com/Arize-ai/phoenix) -- ✅ [Arize](https://arize.com/) -- ✅ Any OTEL-compatible collector +- ✅ [Arize-Phoenix](https://github.com/Arize-ai/phoenix) +- ✅ [Arize](https://arize.com/) +- ✅ Any OTEL-compatible collector ## Community Join our community to connect with thousands of machine learning practitioners and LLM observability enthusiasts! -- 🌍 Join our [Slack community](https://arize-ai.slack.com/join/shared_invite/zt-11t1vbu4x-xkBIHmOREQnYnYDH1GDfCg?__hstc=259489365.a667dfafcfa0169c8aee4178d115dc81.1733501603539.1733501603539.1733501603539.1&__hssc=259489365.1.1733501603539&__hsfp=3822854628&submissionGuid=381a0676-8f38-437b-96f2-fc10875658df#/shared-invite/email). -- 💡 Ask questions and provide feedback in the _#phoenix-support_ channel. -- 🌟 Leave a star on our [GitHub](https://github.com/Arize-ai/openinference). -- 🐞 Report bugs with [GitHub Issues](https://github.com/Arize-ai/openinference/issues). -- 𝕏 Follow us on [X](https://twitter.com/ArizePhoenix). -- 🗺️ Check out our [roadmap](https://github.com/orgs/Arize-ai/projects/45) to see where we're heading next. +- 🌍 Join + our [Slack community](https://arize-ai.slack.com/join/shared_invite/zt-11t1vbu4x-xkBIHmOREQnYnYDH1GDfCg?__hstc=259489365.a667dfafcfa0169c8aee4178d115dc81.1733501603539.1733501603539.1733501603539.1&__hssc=259489365.1.1733501603539&__hsfp=3822854628&submissionGuid=381a0676-8f38-437b-96f2-fc10875658df#/shared-invite/email). +- 💡 Ask questions and provide feedback in the _#phoenix-support_ channel. +- 🌟 Leave a star on our [GitHub](https://github.com/Arize-ai/openinference). +- 🐞 Report bugs with [GitHub Issues](https://github.com/Arize-ai/openinference/issues). +- 𝕏 Follow us on [X](https://twitter.com/ArizePhoenix). +- 🗺️ Check out our [roadmap](https://github.com/orgs/Arize-ai/projects/45) to see where we're heading next. 
diff --git a/python/instrumentation/openinference-instrumentation-agno/CHANGELOG.md b/python/instrumentation/openinference-instrumentation-agno/CHANGELOG.md index a143f4fa8..1c20c6572 100644 --- a/python/instrumentation/openinference-instrumentation-agno/CHANGELOG.md +++ b/python/instrumentation/openinference-instrumentation-agno/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [0.1.4](https://github.com/Arize-ai/openinference/compare/python-openinference-instrumentation-agno-v0.1.3...python-openinference-instrumentation-agno-v0.1.4) (2025-05-20) + + +### Features + +* Updates for new Agno verison ([#1647](https://github.com/Arize-ai/openinference/issues/1647)) ([8292d0d](https://github.com/Arize-ai/openinference/commit/8292d0d5620a9c58c4646e553704a31fd3f8cba3)) + ## [0.1.3](https://github.com/Arize-ai/openinference/compare/python-openinference-instrumentation-agno-v0.1.2...python-openinference-instrumentation-agno-v0.1.3) (2025-05-13) diff --git a/python/instrumentation/openinference-instrumentation-agno/examples/requirements.txt b/python/instrumentation/openinference-instrumentation-agno/examples/requirements.txt index 7df1db884..8f29e53a2 100644 --- a/python/instrumentation/openinference-instrumentation-agno/examples/requirements.txt +++ b/python/instrumentation/openinference-instrumentation-agno/examples/requirements.txt @@ -1,5 +1,5 @@ duckduckgo-search -agno>=1.4.6 +agno>=1.5.2 opentelemetry-sdk opentelemetry-exporter-otlp openinference-instrumentation-openai diff --git a/python/instrumentation/openinference-instrumentation-agno/pyproject.toml b/python/instrumentation/openinference-instrumentation-agno/pyproject.toml index 2f0590130..5e2883128 100644 --- a/python/instrumentation/openinference-instrumentation-agno/pyproject.toml +++ b/python/instrumentation/openinference-instrumentation-agno/pyproject.toml @@ -35,10 +35,10 @@ dependencies = [ [project.optional-dependencies] instruments = [ - "agno>=1.4.5", + "agno>=1.5.2", ] test = [ - "agno==1.4.5", + 
"agno==1.5.2", "opentelemetry-sdk", "pytest-recording", "openai", diff --git a/python/instrumentation/openinference-instrumentation-agno/src/openinference/instrumentation/agno/__init__.py b/python/instrumentation/openinference-instrumentation-agno/src/openinference/instrumentation/agno/__init__.py index 2b896255f..df69a7457 100644 --- a/python/instrumentation/openinference-instrumentation-agno/src/openinference/instrumentation/agno/__init__.py +++ b/python/instrumentation/openinference-instrumentation-agno/src/openinference/instrumentation/agno/__init__.py @@ -15,7 +15,7 @@ ) from openinference.instrumentation.agno.version import __version__ -_instruments = ("agno >= 1.4.5",) +_instruments = ("agno >= 1.5.2",) # Find all model classes in agno.models that inherit from BaseModel @@ -59,9 +59,13 @@ def find_model_subclasses() -> List[Type[Any]]: class AgnoInstrumentor(BaseInstrumentor): # type: ignore __slots__ = ( "_original_run_method", + "_original_run_stream_method", "_original_arun_method", + "_original_arun_stream_method", "_original_team_run_method", + "_original_team_run_stream_method", "_original_team_arun_method", + "_original_team_arun_stream_method", "_original_function_execute_method", "_original_function_aexecute_method", "_original_model_call_methods", @@ -94,29 +98,50 @@ def _instrument(self, **kwargs: Any) -> None: name="_run", wrapper=run_wrapper.run, ) - - # Register async wrapper + self._original_run_stream_method = getattr(Agent, "_run_stream", None) + wrap_function_wrapper( + module=Agent, + name="_run_stream", + wrapper=run_wrapper.run_stream, + ) self._original_arun_method = getattr(Agent, "_arun", None) wrap_function_wrapper( module=Agent, name="_arun", wrapper=run_wrapper.arun, ) + self._original_arun_stream_method = getattr(Agent, "_arun_stream", None) + wrap_function_wrapper( + module=Agent, + name="_arun_stream", + wrapper=run_wrapper.arun_stream, + ) + # Register wrapper for team self._original_team_run_method = getattr(Team, "_run", 
None) wrap_function_wrapper( module=Team, name="_run", wrapper=run_wrapper.run, ) - - # Register async wrapper for team + self._original_team_run_stream_method = getattr(Team, "_run_stream", None) + wrap_function_wrapper( + module=Team, + name="_run_stream", + wrapper=run_wrapper.run_stream, + ) self._original_team_arun_method = getattr(Team, "_arun", None) wrap_function_wrapper( module=Team, name="_arun", wrapper=run_wrapper.arun, ) + self._original_team_arun_stream_method = getattr(Team, "_arun_stream", None) + wrap_function_wrapper( + module=Team, + name="_arun_stream", + wrapper=run_wrapper.arun_stream, + ) self._original_model_call_methods: Optional[dict[type, dict[str, Callable[..., Any]]]] = {} diff --git a/python/instrumentation/openinference-instrumentation-agno/src/openinference/instrumentation/agno/_wrappers.py b/python/instrumentation/openinference-instrumentation-agno/src/openinference/instrumentation/agno/_wrappers.py index a73318414..354237c77 100644 --- a/python/instrumentation/openinference-instrumentation-agno/src/openinference/instrumentation/agno/_wrappers.py +++ b/python/instrumentation/openinference-instrumentation-agno/src/openinference/instrumentation/agno/_wrappers.py @@ -118,7 +118,6 @@ def run( ) -> Any: if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY): return wrapped(*args, **kwargs) - agent = instance if hasattr(agent, "name") and agent.name: agent_name = agent.name.replace(" ", "_").replace("-", "_") @@ -144,20 +143,56 @@ def run( ), ) as span: try: - if "stream" in kwargs and kwargs["stream"] is True: - yield from wrapped(*args, **kwargs) - run_response = agent.run_response - span.set_status(trace_api.StatusCode.OK) - span.set_attribute(OUTPUT_VALUE, run_response.to_json()) - span.set_attribute(OUTPUT_MIME_TYPE, JSON) - else: - response = wrapped(*args, **kwargs) + run_response = wrapped(*args, **kwargs) + span.set_status(trace_api.StatusCode.OK) + span.set_attribute(OUTPUT_VALUE, run_response.to_json()) + 
span.set_attribute(OUTPUT_MIME_TYPE, JSON) + return run_response + + except Exception as e: + span.set_status(trace_api.StatusCode.ERROR, str(e)) + raise + + def run_stream( + self, + wrapped: Callable[..., Any], + instance: Any, + args: Tuple[Any, ...], + kwargs: Mapping[str, Any], + ) -> Any: + if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY): + return wrapped(*args, **kwargs) - for run_response in response: - span.set_status(trace_api.StatusCode.OK) - span.set_attribute(OUTPUT_VALUE, run_response.to_json()) - span.set_attribute(OUTPUT_MIME_TYPE, JSON) - yield run_response + agent = instance + if hasattr(agent, "name") and agent.name: + agent_name = agent.name.replace(" ", "_").replace("-", "_") + else: + agent_name = "Agent" + span_name = f"{agent_name}.run" + + with self._tracer.start_as_current_span( + span_name, + attributes=dict( + _flatten( + { + OPENINFERENCE_SPAN_KIND: AGENT, + INPUT_VALUE: _get_input_value( + wrapped, + *args, + **kwargs, + ), + **dict(_agent_run_attributes(agent)), + **dict(get_attributes_from_context()), + } + ) + ), + ) as span: + try: + yield from wrapped(*args, **kwargs) + run_response = agent.run_response + span.set_status(trace_api.StatusCode.OK) + span.set_attribute(OUTPUT_VALUE, run_response.to_json()) + span.set_attribute(OUTPUT_MIME_TYPE, JSON) except Exception as e: span.set_status(trace_api.StatusCode.ERROR, str(e)) @@ -171,15 +206,11 @@ async def arun( kwargs: Mapping[str, Any], ) -> Any: if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY): - if "stream" in kwargs and kwargs["stream"] is True: - async for response in await wrapped(*args, **kwargs): - yield response - else: - response = await wrapped(*args, **kwargs) - yield response + response = await wrapped(*args, **kwargs) + return response agent = instance - if hasattr(agent, "name") and agent.name: + if hasattr(agent, "name"): agent_name = agent.name.replace(" ", "_").replace("-", "_") else: agent_name = "Agent" @@ -203,21 +234,57 @@ 
async def arun( ), ) as span: try: - if "stream" in kwargs and kwargs["stream"] is True: - span.set_status(trace_api.StatusCode.OK) - async for response in wrapped(*args, **kwargs): # type: ignore[attr-defined] - yield response - run_response = agent.run_response - span.set_status(trace_api.StatusCode.OK) - span.set_attribute(OUTPUT_VALUE, run_response.to_json()) - else: - response = wrapped(*args, **kwargs) + run_response = await wrapped(*args, **kwargs) + span.set_status(trace_api.StatusCode.OK) + span.set_attribute(OUTPUT_VALUE, run_response.to_json()) + span.set_attribute(OUTPUT_MIME_TYPE, JSON) + return run_response + except Exception as e: + span.set_status(trace_api.StatusCode.ERROR, str(e)) + raise - run_response = await response.__anext__() - span.set_status(trace_api.StatusCode.OK) - span.set_attribute(OUTPUT_VALUE, run_response.to_json()) - yield run_response + async def arun_stream( + self, + wrapped: Callable[..., Awaitable[Any]], + instance: Any, + args: Tuple[Any, ...], + kwargs: Mapping[str, Any], + ) -> Any: + if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY): + async for response in await wrapped(*args, **kwargs): + yield response + + agent = instance + if hasattr(agent, "name") and agent.name: + agent_name = agent.name.replace(" ", "_").replace("-", "_") + else: + agent_name = "Agent" + span_name = f"{agent_name}.run" + with self._tracer.start_as_current_span( + span_name, + attributes=dict( + _flatten( + { + OPENINFERENCE_SPAN_KIND: AGENT, + INPUT_VALUE: _get_input_value( + wrapped, + *args, + **kwargs, + ), + **dict(_agent_run_attributes(agent)), + **dict(get_attributes_from_context()), + } + ) + ), + ) as span: + try: + async for response in wrapped(*args, **kwargs): # type: ignore[attr-defined] + yield response + run_response = agent.run_response + span.set_status(trace_api.StatusCode.OK) + span.set_attribute(OUTPUT_VALUE, run_response.to_json()) + span.set_attribute(OUTPUT_MIME_TYPE, JSON) except Exception as e: 
span.set_status(trace_api.StatusCode.ERROR, str(e)) raise diff --git a/python/instrumentation/openinference-instrumentation-agno/src/openinference/instrumentation/agno/version.py b/python/instrumentation/openinference-instrumentation-agno/src/openinference/instrumentation/agno/version.py index ae7362549..bbab0242f 100644 --- a/python/instrumentation/openinference-instrumentation-agno/src/openinference/instrumentation/agno/version.py +++ b/python/instrumentation/openinference-instrumentation-agno/src/openinference/instrumentation/agno/version.py @@ -1 +1 @@ -__version__ = "0.1.3" +__version__ = "0.1.4" diff --git a/python/instrumentation/openinference-instrumentation-anthropic/CHANGELOG.md b/python/instrumentation/openinference-instrumentation-anthropic/CHANGELOG.md index 235d475c6..516a743fa 100644 --- a/python/instrumentation/openinference-instrumentation-anthropic/CHANGELOG.md +++ b/python/instrumentation/openinference-instrumentation-anthropic/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [0.1.18](https://github.com/Arize-ai/openinference/compare/python-openinference-instrumentation-anthropic-v0.1.17...python-openinference-instrumentation-anthropic-v0.1.18) (2025-05-19) + + +### Features + +* **anthropic:** add stream wrapper and tests ([#1572](https://github.com/Arize-ai/openinference/issues/1572)) ([918aa01](https://github.com/Arize-ai/openinference/commit/918aa017441fd4c8cffdbcaab287913349a41a60)) + ## [0.1.17](https://github.com/Arize-ai/openinference/compare/python-openinference-instrumentation-anthropic-v0.1.16...python-openinference-instrumentation-anthropic-v0.1.17) (2025-04-28) diff --git a/python/instrumentation/openinference-instrumentation-anthropic/examples/sync_streams.py b/python/instrumentation/openinference-instrumentation-anthropic/examples/sync_streams.py new file mode 100644 index 000000000..7ae4d7c18 --- /dev/null +++ b/python/instrumentation/openinference-instrumentation-anthropic/examples/sync_streams.py @@ -0,0 +1,23 @@ +from anthropic 
import Anthropic +from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter +from opentelemetry.sdk import trace as trace_sdk +from opentelemetry.sdk.trace.export import SimpleSpanProcessor + +from openinference.instrumentation.anthropic import AnthropicInstrumentor + +# Configure AnthropicInstrumentor with Phoenix endpoint +endpoint = "http://127.0.0.1:6006/v1/traces" +tracer_provider = trace_sdk.TracerProvider() +tracer_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint))) + +AnthropicInstrumentor().instrument(tracer_provider=tracer_provider) + +client = Anthropic() + +with client.messages.stream( + max_tokens=1024, + messages=[{"role": "user", "content": "Hello!"}], + model="claude-3-7-sonnet-20250219", +) as stream: + for text in stream: + print(text, end="", flush=True) diff --git a/python/instrumentation/openinference-instrumentation-anthropic/src/openinference/instrumentation/anthropic/__init__.py b/python/instrumentation/openinference-instrumentation-anthropic/src/openinference/instrumentation/anthropic/__init__.py index 5765a9585..59d200af0 100644 --- a/python/instrumentation/openinference-instrumentation-anthropic/src/openinference/instrumentation/anthropic/__init__.py +++ b/python/instrumentation/openinference-instrumentation-anthropic/src/openinference/instrumentation/anthropic/__init__.py @@ -12,6 +12,7 @@ _AsyncCompletionsWrapper, _AsyncMessagesWrapper, _CompletionsWrapper, + _MessagesStreamWrapper, _MessagesWrapper, ) from openinference.instrumentation.anthropic.version import __version__ @@ -29,6 +30,7 @@ class AnthropicInstrumentor(BaseInstrumentor): # type: ignore[misc] "_original_async_completions_create", "_original_messages_create", "_original_async_messages_create", + "_original_messages_stream", "_instruments", "_tracer", ) @@ -79,6 +81,13 @@ def _instrument(self, **kwargs: Any) -> None: wrapper=_AsyncMessagesWrapper(tracer=self._tracer), ) + self._original_messages_stream = Messages.stream + 
wrap_function_wrapper( + module="anthropic.resources.messages", + name="Messages.stream", + wrapper=_MessagesStreamWrapper(tracer=self._tracer), + ) + def _uninstrument(self, **kwargs: Any) -> None: from anthropic.resources.completions import AsyncCompletions, Completions from anthropic.resources.messages import AsyncMessages, Messages diff --git a/python/instrumentation/openinference-instrumentation-anthropic/src/openinference/instrumentation/anthropic/_wrappers.py b/python/instrumentation/openinference-instrumentation-anthropic/src/openinference/instrumentation/anthropic/_wrappers.py index 6e56ed56b..25e4831c8 100644 --- a/python/instrumentation/openinference-instrumentation-anthropic/src/openinference/instrumentation/anthropic/_wrappers.py +++ b/python/instrumentation/openinference-instrumentation-anthropic/src/openinference/instrumentation/anthropic/_wrappers.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from abc import ABC from contextlib import contextmanager from itertools import chain @@ -6,6 +8,7 @@ import opentelemetry.context as context_api from opentelemetry import trace as trace_api from opentelemetry.trace import INVALID_SPAN +from wrapt import ObjectProxy from openinference.instrumentation import get_attributes_from_context, safe_json_dumps from openinference.instrumentation.anthropic._stream import ( @@ -29,6 +32,7 @@ if TYPE_CHECKING: from pydantic import BaseModel + from anthropic.lib.streaming import MessageStreamManager from anthropic.types import Message, Usage @@ -295,6 +299,61 @@ async def __call__( return response +class _MessagesStreamWrapper(_WithTracer): + def __call__( + self, + wrapped: Callable[..., Any], + instance: Any, + args: Tuple[Any, ...], + kwargs: Mapping[str, Any], + ) -> Any: + if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY): + return wrapped(*args, **kwargs) + + arguments = kwargs + llm_input_messages = dict(arguments).pop("messages", None) + invocation_parameters = 
_get_invocation_parameters(arguments) + + with self._start_as_current_span( + span_name="MessagesStream", + attributes=dict( + chain( + get_attributes_from_context(), + _get_llm_model_name_from_input(arguments), + _get_llm_provider(), + _get_llm_system(), + _get_llm_span_kind(), + _get_llm_input_messages(llm_input_messages), + _get_llm_invocation_parameters(invocation_parameters), + _get_llm_tools(invocation_parameters), + _get_inputs(arguments), + ) + ), + ) as span: + try: + response = wrapped(*args, **kwargs) + except Exception as exception: + span.set_status(trace_api.Status(trace_api.StatusCode.ERROR)) + span.record_exception(exception) + raise + + return _MessageStreamManager(response, span) + + +class _MessageStreamManager(ObjectProxy): # type: ignore + def __init__( + self, + manager: MessageStreamManager, + with_span: _WithSpan, + ) -> None: + super().__init__(manager) + self._self_with_span = with_span + + def __enter__(self) -> Iterator[str]: + raw = self.__api_request() + return _MessagesStream(raw, self._self_with_span) + + def _get_inputs(arguments: Mapping[str, Any]) -> Iterator[Tuple[str, Any]]: yield INPUT_VALUE, safe_json_dumps(arguments) yield INPUT_MIME_TYPE, JSON diff --git a/python/instrumentation/openinference-instrumentation-anthropic/src/openinference/instrumentation/anthropic/version.py b/python/instrumentation/openinference-instrumentation-anthropic/src/openinference/instrumentation/anthropic/version.py index 86205cbac..08f934f45 100644 --- a/python/instrumentation/openinference-instrumentation-anthropic/src/openinference/instrumentation/anthropic/version.py +++ b/python/instrumentation/openinference-instrumentation-anthropic/src/openinference/instrumentation/anthropic/version.py @@ -1 +1 @@ -__version__ = "0.1.17" +__version__ = "0.1.18" diff --git a/python/instrumentation/openinference-instrumentation-anthropic/tests/openinference/anthropic/cassettes/test_instrumentor/test_anthropic_instrumentation_stream_message.yaml 
b/python/instrumentation/openinference-instrumentation-anthropic/tests/openinference/anthropic/cassettes/test_instrumentor/test_anthropic_instrumentation_stream_message.yaml new file mode 100644 index 000000000..23e068294 --- /dev/null +++ b/python/instrumentation/openinference-instrumentation-anthropic/tests/openinference/anthropic/cassettes/test_instrumentor/test_anthropic_instrumentation_stream_message.yaml @@ -0,0 +1,57 @@ +interactions: +- request: + body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": "What''s + the capital of France?"}], "model": "claude-3-opus-latest", "stream": true}' + headers: {} + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: 'event: message_start + + data: {"type":"message_start","message":{"id":"msg_01EdTbzEsQHdxkVoFKSAFGUS","type":"message","role":"assistant","model":"claude-3-opus-latest","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":14,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"output_tokens":4}} } + + + event: content_block_start + + data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""} } + + + event: ping + + data: {"type": "ping"} + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"The + capital of France"} } + + + event: content_block_delta + + data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" + is Paris."} } + + + event: content_block_stop + + data: {"type":"content_block_stop","index":0 } + + + event: message_delta + + data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"output_tokens":10} } + + + event: message_stop + + data: {"type":"message_stop" } + + + ' + headers: {} + status: + code: 200 + message: OK +version: 1 diff --git a/python/instrumentation/openinference-instrumentation-anthropic/tests/openinference/anthropic/test_instrumentor.py 
b/python/instrumentation/openinference-instrumentation-anthropic/tests/openinference/anthropic/test_instrumentor.py index 94bcb1770..8bc8e4dda 100644 --- a/python/instrumentation/openinference-instrumentation-anthropic/tests/openinference/anthropic/test_instrumentor.py +++ b/python/instrumentation/openinference-instrumentation-anthropic/tests/openinference/anthropic/test_instrumentor.py @@ -169,6 +169,66 @@ def test_anthropic_instrumentation_completions_streaming( assert not attributes +@pytest.mark.vcr( + decode_compressed_response=True, + before_record_request=remove_all_vcr_request_headers, + before_record_response=remove_all_vcr_response_headers, +) +def test_anthropic_instrumentation_stream_message( + tracer_provider: TracerProvider, + in_memory_span_exporter: InMemorySpanExporter, + setup_anthropic_instrumentation: Any, +) -> None: + client = Anthropic(api_key="fake") + input_message = "What's the capital of France?" + chat = [{"role": "user", "content": input_message}] + invocation_params = {"max_tokens": 1024, "model": "claude-3-opus-latest"} + + with client.messages.stream( + max_tokens=1024, + messages=chat, # type: ignore + model="claude-3-opus-latest", + ) as stream: + for _ in stream: + pass + + spans = in_memory_span_exporter.get_finished_spans() + assert len(spans) == 1 + + span = spans[0] + assert span.name == "MessagesStream" + + attributes = dict(span.attributes or {}) + + assert attributes.pop(OPENINFERENCE_SPAN_KIND) == "LLM" + assert attributes.pop(LLM_PROVIDER) == LLM_PROVIDER_ANTHROPIC + assert attributes.pop(LLM_SYSTEM) == LLM_SYSTEM_ANTHROPIC + + assert attributes.pop(f"{LLM_INPUT_MESSAGES}.0.{MESSAGE_CONTENT}") == input_message + assert attributes.pop(f"{LLM_INPUT_MESSAGES}.0.{MESSAGE_ROLE}") == "user" + + msg_out = attributes.pop(f"{LLM_OUTPUT_MESSAGES}.0.{MESSAGE_CONTENT}") + assert isinstance(msg_out, str) + assert "paris" in msg_out.lower() + assert attributes.pop(f"{LLM_OUTPUT_MESSAGES}.0.{MESSAGE_ROLE}") == "assistant" + + assert 
isinstance(attributes.pop(LLM_TOKEN_COUNT_PROMPT), int) + assert isinstance(attributes.pop(LLM_TOKEN_COUNT_COMPLETION), int) + + assert isinstance(attributes.pop(INPUT_VALUE), str) + assert attributes.pop(INPUT_MIME_TYPE) == JSON + assert isinstance(attributes.pop(OUTPUT_VALUE), str) + assert attributes.pop(OUTPUT_MIME_TYPE) == JSON + assert isinstance(attributes.pop("llm.token_count.total"), int) + + assert attributes.pop(LLM_MODEL_NAME) == "claude-3-opus-latest" + raw_inv = attributes.pop(LLM_INVOCATION_PARAMETERS) + assert isinstance(raw_inv, str) + assert json.loads(raw_inv) == invocation_params + + assert not attributes + + @pytest.mark.asyncio @pytest.mark.vcr( decode_compressed_response=True, diff --git a/python/instrumentation/openinference-instrumentation-autogen-agentchat/CHANGELOG.md b/python/instrumentation/openinference-instrumentation-autogen-agentchat/CHANGELOG.md new file mode 100644 index 000000000..c403e5b1e --- /dev/null +++ b/python/instrumentation/openinference-instrumentation-autogen-agentchat/CHANGELOG.md @@ -0,0 +1,8 @@ +# Changelog + +## 0.1.0 (2025-05-21) + + +### Features + +* **autogen-agentchat:** auto instrumentation ([#1611](https://github.com/Arize-ai/openinference/issues/1611)) ([3c5857c](https://github.com/Arize-ai/openinference/commit/3c5857c2864c3b367888683d1fd470ec631c389f)) diff --git a/python/instrumentation/openinference-instrumentation-autogen-agentchat/LICENSE b/python/instrumentation/openinference-instrumentation-autogen-agentchat/LICENSE new file mode 100644 index 000000000..3c0e05ceb --- /dev/null +++ b/python/instrumentation/openinference-instrumentation-autogen-agentchat/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright The OpenInference Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/python/instrumentation/openinference-instrumentation-autogen-agentchat/README.md b/python/instrumentation/openinference-instrumentation-autogen-agentchat/README.md new file mode 100644 index 000000000..5da51f2ea --- /dev/null +++ b/python/instrumentation/openinference-instrumentation-autogen-agentchat/README.md @@ -0,0 +1,156 @@ +# OpenInference Autogen-Agentchat Instrumentation + +[![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-autogen-agentchat.svg)](https://pypi.python.org/pypi/openinference-instrumentation-autogen-agentchat) + +OpenTelemetry instrumentation for Autogen AgentChat, enabling tracing of agent interactions and conversations. 
+ +The traces emitted by this instrumentation are fully OpenTelemetry compatible and can be sent to an OpenTelemetry collector for viewing, such as [`arize-phoenix`](https://github.com/Arize-ai/phoenix) + +## Installation + +```shell +pip install openinference-instrumentation-autogen-agentchat +``` + +## Quickstart + +In this example we will instrument a simple Autogen AgentChat application and observe the traces via [`arize-phoenix`](https://github.com/Arize-ai/phoenix). + +Install required packages. + +```shell +pip install openinference-instrumentation-autogen-agentchat autogen-agentchat arize-phoenix opentelemetry-sdk opentelemetry-exporter-otlp +``` + +Start the phoenix server so that it is ready to collect traces. +The Phoenix server runs entirely on your machine and does not send data over the internet. + +```shell +phoenix serve +``` + +Here's a simple example using a single assistant agent: + +```python +import asyncio +from autogen_agentchat.agents import AssistantAgent +from autogen_ext.models.openai import OpenAIChatCompletionClient +from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter +from opentelemetry.sdk import trace as trace_sdk +from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor + +from openinference.instrumentation.autogen_agentchat import AutogenAgentChatInstrumentor + +# Set up the tracer provider +endpoint = "http://127.0.0.1:6006/v1/traces" +tracer_provider = trace_sdk.TracerProvider() +tracer_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint))) + +# Instrument AutogenAgentChat +AutogenAgentChatInstrumentor().instrument(tracer_provider=tracer_provider) + +async def main(): + model_client = OpenAIChatCompletionClient( + model="gpt-3.5-turbo", + ) + + def get_weather(city: str) -> str: + """Get the weather for a given city.""" + return f"The weather in {city} is 73 degrees and Sunny." 
+ + # Create an assistant agent with tools + agent = AssistantAgent( + name="weather_agent", + model_client=model_client, + tools=[get_weather], + system_message="You are a helpful assistant that can check the weather.", + reflect_on_tool_use=True, + model_client_stream=True, + ) + + result = await agent.run(task="What is the weather in New York?") + await model_client.close() + print(result) + +if __name__ == "__main__": + asyncio.run(main()) +``` + +For a more complex example using multiple agents in a team: + +```python +import asyncio +from autogen_agentchat.agents import AssistantAgent +from autogen_agentchat.conditions import TextMentionTermination +from autogen_agentchat.teams import RoundRobinGroupChat +from autogen_ext.models.openai import OpenAIChatCompletionClient +from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter +from opentelemetry.sdk import trace as trace_sdk +from opentelemetry.sdk.trace.export import SimpleSpanProcessor + +from openinference.instrumentation.autogen_agentchat import AutogenAgentChatInstrumentor + +# Set up the tracer provider +endpoint = "http://127.0.0.1:6006/v1/traces" +tracer_provider = trace_sdk.TracerProvider() +tracer_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint))) + +# Instrument AutogenAgentChat +AutogenAgentChatInstrumentor().instrument(tracer_provider=tracer_provider) + +async def main(): + model_client = OpenAIChatCompletionClient( + model="gpt-4", + ) + + # Create two agents: a primary and a critic + primary_agent = AssistantAgent( + "primary", + model_client=model_client, + system_message="You are a helpful AI assistant.", + ) + + critic_agent = AssistantAgent( + "critic", + model_client=model_client, + system_message=""" + Provide constructive feedback. + Respond with 'APPROVE' when your feedbacks are addressed. 
+ """, + ) + + # Termination condition: stop when the critic says "APPROVE" + text_termination = TextMentionTermination("APPROVE") + + # Create a team with both agents + team = RoundRobinGroupChat( + [primary_agent, critic_agent], + termination_condition=text_termination + ) + + # Run the team on a task + result = await team.run(task="Write a short poem about the fall season.") + await model_client.close() + print(result) + +if __name__ == "__main__": + asyncio.run(main()) +``` + +Since we are using OpenAI, we must set the `OPENAI_API_KEY` environment variable to authenticate with the OpenAI API. + +```shell +export OPENAI_API_KEY=[your_key_here] +``` + +Now simply run the python file and observe the traces in Phoenix. + +```shell +python your_file.py +``` + +## More Info + +- [More info on OpenInference and Phoenix](https://docs.arize.com/phoenix) +- [How to customize spans to track sessions, metadata, etc.](https://github.com/Arize-ai/openinference/tree/main/python/openinference-instrumentation#customizing-spans) +- [How to account for private information and span payload customization](https://github.com/Arize-ai/openinference/tree/main/python/openinference-instrumentation#tracing-configuration) diff --git a/python/instrumentation/openinference-instrumentation-autogen-agentchat/examples/assistant_agent_run.py b/python/instrumentation/openinference-instrumentation-autogen-agentchat/examples/assistant_agent_run.py new file mode 100644 index 000000000..d59f39275 --- /dev/null +++ b/python/instrumentation/openinference-instrumentation-autogen-agentchat/examples/assistant_agent_run.py @@ -0,0 +1,44 @@ +import asyncio + +from autogen_agentchat.agents import AssistantAgent +from autogen_ext.models.openai import OpenAIChatCompletionClient +from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter +from opentelemetry.sdk import trace as trace_sdk +from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor + +from 
openinference.instrumentation.autogen_agentchat import AutogenAgentChatInstrumentor + +endpoint = "http://127.0.0.1:6006/v1/traces" +tracer_provider = trace_sdk.TracerProvider() +tracer_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint))) +tracer_provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter())) + +AutogenAgentChatInstrumentor().instrument(tracer_provider=tracer_provider) + + +async def main() -> None: + model_client = OpenAIChatCompletionClient( + model="gpt-3.5-turbo", + ) + + def get_weather(city: str) -> str: + """Get the weather for a given city.""" + return f"The weather in {city} is 73 degrees and Sunny." + + # Define an AssistantAgent with the model, tool, system message, and reflection enabled + agent = AssistantAgent( + name="weather_agent", + model_client=model_client, + tools=[get_weather], + system_message="You are a helpful assistant that can check the weather.", + reflect_on_tool_use=True, + model_client_stream=True, + ) + + result = await agent.run(task="What is the weather in New York?") + await model_client.close() + print(result) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/instrumentation/openinference-instrumentation-autogen-agentchat/examples/requirements.txt b/python/instrumentation/openinference-instrumentation-autogen-agentchat/examples/requirements.txt new file mode 100644 index 000000000..ccc0e8996 --- /dev/null +++ b/python/instrumentation/openinference-instrumentation-autogen-agentchat/examples/requirements.txt @@ -0,0 +1,6 @@ +autogen-agentchat +autogen-ext +opentelemetry-api +opentelemetry-sdk +opentelemetry-exporter-otlp-proto-http +openinference-instrumentation-autogen-agentchat diff --git a/python/instrumentation/openinference-instrumentation-autogen-agentchat/examples/team_run.py b/python/instrumentation/openinference-instrumentation-autogen-agentchat/examples/team_run.py new file mode 100644 index 000000000..803b0df7c --- /dev/null +++ 
b/python/instrumentation/openinference-instrumentation-autogen-agentchat/examples/team_run.py @@ -0,0 +1,55 @@ +import asyncio + +from autogen_agentchat.agents import AssistantAgent +from autogen_agentchat.conditions import TextMentionTermination +from autogen_agentchat.teams import RoundRobinGroupChat +from autogen_ext.models.openai import OpenAIChatCompletionClient +from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter +from opentelemetry.sdk import trace as trace_sdk +from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor + +from openinference.instrumentation.autogen_agentchat import AutogenAgentChatInstrumentor + +# Set up the tracer provider with both OTLP and Console exporters +endpoint = "http://127.0.0.1:6006/v1/traces" +tracer_provider = trace_sdk.TracerProvider() +tracer_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint))) +tracer_provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter())) + +# Instrument the AutogenAgentChat +AutogenAgentChatInstrumentor().instrument(tracer_provider=tracer_provider) + + +async def main() -> None: + model_client = OpenAIChatCompletionClient( + model="gpt-4", + ) + + primary_agent = AssistantAgent( + "primary", + model_client=model_client, + system_message="You are a helpful AI assistant.", + ) + + critic_agent = AssistantAgent( + "critic", + model_client=model_client, + system_message=""" + Provide constructive feedback. + Respond with 'APPROVE' when your feedbacks are addressed. 
+ """, + ) + + text_termination = TextMentionTermination("APPROVE") + + team = RoundRobinGroupChat( + [primary_agent, critic_agent], termination_condition=text_termination + ) + + result = await team.run(task="Write a short poem about the fall season.") + await model_client.close() + print(result) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/instrumentation/openinference-instrumentation-autogen-agentchat/examples/team_run_stream.py b/python/instrumentation/openinference-instrumentation-autogen-agentchat/examples/team_run_stream.py new file mode 100644 index 000000000..55f652fc8 --- /dev/null +++ b/python/instrumentation/openinference-instrumentation-autogen-agentchat/examples/team_run_stream.py @@ -0,0 +1,37 @@ +import asyncio + +from autogen_agentchat.agents import AssistantAgent +from autogen_agentchat.conditions import MaxMessageTermination +from autogen_agentchat.teams import RoundRobinGroupChat +from autogen_ext.models.openai import OpenAIChatCompletionClient +from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter +from opentelemetry.sdk import trace as trace_sdk +from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor + +from openinference.instrumentation.autogen_agentchat import AutogenAgentChatInstrumentor + +# Set up the tracer provider with both OTLP and Console exporters +endpoint = "http://127.0.0.1:6006/v1/traces" +tracer_provider = trace_sdk.TracerProvider() +tracer_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint))) +tracer_provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter())) + +# Instrument the AutogenAgentChat +AutogenAgentChatInstrumentor().instrument(tracer_provider=tracer_provider) + + +async def main() -> None: + model_client = OpenAIChatCompletionClient(model="gpt-4o") + + agent1 = AssistantAgent("Assistant1", model_client=model_client) + agent2 = AssistantAgent("Assistant2", model_client=model_client) + termination 
= MaxMessageTermination(3) + team = RoundRobinGroupChat([agent1, agent2], termination_condition=termination) + + stream = team.run_stream(task="Count from 1 to 10, respond one at a time.") + async for message in stream: + print(message) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/instrumentation/openinference-instrumentation-autogen-agentchat/pyproject.toml b/python/instrumentation/openinference-instrumentation-autogen-agentchat/pyproject.toml new file mode 100644 index 000000000..88b83f25d --- /dev/null +++ b/python/instrumentation/openinference-instrumentation-autogen-agentchat/pyproject.toml @@ -0,0 +1,95 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "openinference-instrumentation-autogen-agentchat" +dynamic = ["version"] +description = "OpenInference Autogen-Agentchat Instrumentation" +readme = "README.md" +license = "Apache-2.0" +requires-python = ">=3.10, <3.14" +authors = [ + { name = "OpenInference Authors", email = "oss@arize.com" }, +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", +] +dependencies = [ + "opentelemetry-api", + "opentelemetry-instrumentation", + "opentelemetry-semantic-conventions", + "openinference-instrumentation>=0.1.27", + "openinference-semantic-conventions>=0.1.17", + "autogen-agentchat>=0.5.0", + "autogen-core>=0.5.0", +] + +[project.optional-dependencies] +instruments = [ + "autogen-agentchat >= 0.5.0", +] + +[project.entry-points.opentelemetry_instrumentor] +autogen_agentchat = "openinference.instrumentation.autogen_agentchat:AutogenAgentChatInstrumentor" + 
+[project.entry-points.openinference_instrumentor] +autogen_agentchat = "openinference.instrumentation.autogen_agentchat:AutogenAgentChatInstrumentor" + +[project.urls] +Homepage = "https://github.com/Arize-ai/openinference/tree/main/python/instrumentation/openinference-instrumentation-autogen-agentchat" + +[tool.hatch.version] +path = "src/openinference/instrumentation/autogen_agentchat/version.py" + +[tool.hatch.build.targets.sdist] +include = [ + "/src", +] + +[tool.hatch.build.targets.wheel] +packages = ["src/openinference"] + +[tool.pytest.ini_options] +asyncio_mode = "auto" +asyncio_default_fixture_loop_scope = "function" +testpaths = [ + "tests", +] + +[tool.mypy] +strict = true +explicit_package_bases = true +exclude = [ + "examples", + "dist", + "sdist", + "tests", +] + +[[tool.mypy.overrides]] +ignore_missing_imports = true +module = [ + "wrapt", +] + +[tool.ruff] +line-length = 100 +target-version = "py310" + +[tool.ruff.lint.per-file-ignores] +"*.ipynb" = ["E402", "E501"] + +[tool.ruff.lint] +select = ["E", "F", "W", "I"] + +[tool.ruff.lint.isort] +force-single-line = false diff --git a/python/instrumentation/openinference-instrumentation-autogen-agentchat/src/openinference/instrumentation/autogen_agentchat/__init__.py b/python/instrumentation/openinference-instrumentation-autogen-agentchat/src/openinference/instrumentation/autogen_agentchat/__init__.py new file mode 100644 index 000000000..e1f7652bb --- /dev/null +++ b/python/instrumentation/openinference-instrumentation-autogen-agentchat/src/openinference/instrumentation/autogen_agentchat/__init__.py @@ -0,0 +1,138 @@ +import logging +from importlib import import_module +from typing import Any, Collection + +from opentelemetry import trace as trace_api +from opentelemetry.instrumentation.instrumentor import ( # type: ignore[attr-defined] + BaseInstrumentor, +) +from wrapt import wrap_function_wrapper + +from openinference.instrumentation import OITracer, TraceConfig +from 
openinference.instrumentation.autogen_agentchat._wrappers import ( + _BaseAgentRunWrapper, + _BaseGroupChatRunStreamWrapper, + _PublishMessageWrapper, + _SendMessageWrapper, + _ToolsRunJSONWrapper, +) +from openinference.instrumentation.autogen_agentchat.version import __version__ + +_instruments = ("autogen-agentchat >= 0.5.1",) + +logger = logging.getLogger(__name__) + + +class AutogenAgentChatInstrumentor(BaseInstrumentor): # type: ignore + """An instrumentor for autogen-agentchat""" + + __slots__ = ( + "_tracer", + "_original_base_agent_run_method", + "_original_tools_run_json", + "_original_group_chat_run_stream_method", + "_original_single_threaded_agent_runtime_send_message", + "_original_single_threaded_agent_runtime_publish_message", + ) + + def instrumentation_dependencies(self) -> Collection[str]: + return _instruments + + def _instrument(self, **kwargs: Any) -> None: + if not (tracer_provider := kwargs.get("tracer_provider")): + tracer_provider = trace_api.get_tracer_provider() + if not (config := kwargs.get("config")): + config = TraceConfig() + else: + assert isinstance(config, TraceConfig) + self._tracer = OITracer( + trace_api.get_tracer(__name__, __version__, tracer_provider), + config=config, + ) + + self._original_base_agent_run_method = getattr( + import_module("autogen_agentchat.agents").BaseChatAgent, "run", None + ) + wrap_function_wrapper( + module="autogen_agentchat.agents", + name="BaseChatAgent.run", + wrapper=_BaseAgentRunWrapper(tracer=self._tracer), + ) + + self._original_tools_run_json = getattr( + import_module("autogen_core.tools").BaseTool, "run_json", None + ) + wrap_function_wrapper( + module="autogen_core.tools", + name="BaseTool.run_json", + wrapper=_ToolsRunJSONWrapper(tracer=self._tracer), + ) + + self._original_group_chat_run_stream_method = getattr( + import_module("autogen_agentchat.teams._group_chat._base_group_chat").BaseGroupChat, + "run_stream", + None, + ) + wrap_function_wrapper( + 
module="autogen_agentchat.teams._group_chat._base_group_chat", + name="BaseGroupChat.run_stream", + wrapper=_BaseGroupChatRunStreamWrapper(tracer=self._tracer), + ) + + self._original_single_threaded_agent_runtime_send_message = getattr( + import_module("autogen_core._single_threaded_agent_runtime").SingleThreadedAgentRuntime, + "send_message", + None, + ) + wrap_function_wrapper( + module="autogen_core._single_threaded_agent_runtime", + name="SingleThreadedAgentRuntime.send_message", + wrapper=_SendMessageWrapper(tracer=self._tracer), + ) + + self._original_single_threaded_agent_runtime_publish_message = getattr( + import_module("autogen_core._single_threaded_agent_runtime").SingleThreadedAgentRuntime, + "publish_message", + None, + ) + wrap_function_wrapper( + module="autogen_core._single_threaded_agent_runtime", + name="SingleThreadedAgentRuntime.publish_message", + wrapper=_PublishMessageWrapper(tracer=self._tracer), + ) + + def _uninstrument(self, **kwargs: Any) -> None: + if self._original_base_agent_run_method is not None: + agent_module = import_module("autogen_agentchat.agents") + agent_module.BaseChatAgent.run = self._original_base_agent_run_method + self._original_base_agent_run_method = None + + if self._original_tools_run_json is not None: + tools_module = import_module("autogen_core.tools") + tools_module.BaseTool.run_json = self._original_tools_run_json + self._original_tools_run_json = None + + if self._original_group_chat_run_stream_method is not None: + group_chat_module = import_module( + "autogen_agentchat.teams._group_chat._base_group_chat" + ) + group_chat_module.BaseGroupChat.run_stream = self._original_group_chat_run_stream_method + self._original_group_chat_run_stream_method = None + + if self._original_single_threaded_agent_runtime_send_message is not None: + single_threaded_agent_runtime_module = import_module( + "autogen_core._single_threaded_agent_runtime" + ) + 
single_threaded_agent_runtime_module.SingleThreadedAgentRuntime.send_message = ( + self._original_single_threaded_agent_runtime_send_message + ) + self._original_single_threaded_agent_runtime_send_message = None + + if self._original_single_threaded_agent_runtime_publish_message is not None: + single_threaded_agent_runtime_module = import_module( + "autogen_core._single_threaded_agent_runtime" + ) + single_threaded_agent_runtime_module.SingleThreadedAgentRuntime.publish_message = ( + self._original_single_threaded_agent_runtime_publish_message + ) + self._original_single_threaded_agent_runtime_publish_message = None diff --git a/python/instrumentation/openinference-instrumentation-autogen-agentchat/src/openinference/instrumentation/autogen_agentchat/_wrappers.py b/python/instrumentation/openinference-instrumentation-autogen-agentchat/src/openinference/instrumentation/autogen_agentchat/_wrappers.py new file mode 100644 index 000000000..7313b9c3e --- /dev/null +++ b/python/instrumentation/openinference-instrumentation-autogen-agentchat/src/openinference/instrumentation/autogen_agentchat/_wrappers.py @@ -0,0 +1,337 @@ +import json +from enum import Enum +from inspect import signature +from typing import Any, Callable, Iterator, List, Mapping, Optional, Tuple + +from opentelemetry import context as context_api +from opentelemetry import trace as trace_api +from opentelemetry.util.types import AttributeValue + +from autogen_agentchat.base import TaskResult +from openinference.instrumentation import ( + get_attributes_from_context, + get_output_attributes, + safe_json_dumps, +) +from openinference.semconv.trace import OpenInferenceSpanKindValues, SpanAttributes + + +class SafeJSONEncoder(json.JSONEncoder): + """ + Safely encodes non-JSON-serializable objects. 
+ """ + + def default(self, o: Any) -> Any: + try: + return super().default(o) + except TypeError: + if hasattr(o, "dict") and callable(o.dict): # pydantic v1 models, e.g., from Cohere + return o.dict() + return repr(o) + + +def _flatten(mapping: Optional[Mapping[str, Any]]) -> Iterator[Tuple[str, AttributeValue]]: + if not mapping: + return + for key, value in mapping.items(): + if value is None: + continue + if isinstance(value, Mapping): + for sub_key, sub_value in _flatten(value): + yield f"{key}.{sub_key}", sub_value + elif isinstance(value, List) and any(isinstance(item, Mapping) for item in value): + for index, sub_mapping in enumerate(value): + for sub_key, sub_value in _flatten(sub_mapping): + yield f"{key}.{index}.{sub_key}", sub_value + else: + if isinstance(value, Enum): + value = value.value + yield key, value + + +def _get_input_value(method: Callable[..., Any], *args: Any, **kwargs: Any) -> str: + """ + Parses a method call's inputs into a JSON string. Ensures a consistent + output regardless of whether the those inputs are passed as positional or + keyword arguments. + """ + + # For typical class methods, the corresponding instance of inspect.Signature + # does not include the self parameter. However, the inspect.Signature + # instance for __call__ does include the self parameter. 
+ method_signature = signature(method) + first_parameter_name = next(iter(method_signature.parameters), None) + signature_contains_self_parameter = first_parameter_name in ["self"] + bound_arguments = method_signature.bind( + *( + [None] # the value bound to the method's self argument is discarded below, so pass None + if signature_contains_self_parameter + else [] # no self parameter, so no need to pass a value + ), + *args, + **kwargs, + ) + return safe_json_dumps( + { + **{ + argument_name: argument_value + for argument_name, argument_value in bound_arguments.arguments.items() + if argument_name not in ["self", "kwargs"] + }, + **bound_arguments.arguments.get("kwargs", {}), + }, + cls=SafeJSONEncoder, + ) + + +class _BaseAgentRunWrapper: + def __init__(self, tracer: trace_api.Tracer) -> None: + self._tracer = tracer + + async def __call__( + self, + wrapped: Callable[..., Any], + instance: Any, + args: Tuple[Any, ...], + kwargs: Mapping[str, Any], + ) -> Any: + if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY): + return wrapped(*args, **kwargs) + agent = instance + if agent: + span_name = f"{agent.__class__.__name__}.{wrapped.__name__}" + else: + span_name = wrapped.__name__ + with self._tracer.start_as_current_span( + span_name, + attributes=dict( + _flatten( + { + OPENINFERENCE_SPAN_KIND: OpenInferenceSpanKindValues.AGENT, + SpanAttributes.INPUT_VALUE: _get_input_value( + wrapped, + *args, + **kwargs, + ), + } + ) + ), + record_exception=False, + set_status_on_exception=False, + ) as span: + if agent: + span.set_attribute("agent_name", agent.name) + span.set_attribute("agent_description", agent.description) + try: + response = await wrapped(*args, **kwargs) + except Exception as exception: + span.set_status(trace_api.Status(trace_api.StatusCode.ERROR, str(exception))) + span.record_exception(exception) + raise + span.set_status(trace_api.StatusCode.OK) + span.set_attributes(dict(get_output_attributes(response))) + 
span.set_attributes(dict(get_attributes_from_context())) + return response + + +class _ToolsRunJSONWrapper: + def __init__(self, tracer: trace_api.Tracer) -> None: + self._tracer = tracer + + async def __call__( + self, + wrapped: Callable[..., Any], + instance: Any, + args: Tuple[Any, ...], + kwargs: Mapping[str, Any], + ) -> Any: + if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY): + return await wrapped(*args, **kwargs) + if instance: + span_name = f"{instance.__class__.__name__}.{wrapped.__name__}" + else: + span_name = wrapped.__name__ + + with self._tracer.start_as_current_span( + span_name, + attributes=dict( + _flatten( + { + SpanAttributes.OPENINFERENCE_SPAN_KIND: OpenInferenceSpanKindValues.TOOL, + SpanAttributes.INPUT_VALUE: _get_input_value( + wrapped, + *args, + **kwargs, + ), + } + ) + ), + record_exception=False, + set_status_on_exception=False, + ) as span: + if instance: + span.set_attribute("tool_name", instance.name) + span.set_attribute("tool_description", instance.description) + try: + response = await wrapped(*args, **kwargs) + except Exception as exception: + span.set_status(trace_api.Status(trace_api.StatusCode.ERROR, str(exception))) + span.record_exception(exception) + raise + span.set_status(trace_api.StatusCode.OK) + span.set_attributes(dict(get_output_attributes(response))) + span.set_attributes(dict(get_attributes_from_context())) + return response + + +class _BaseGroupChatRunStreamWrapper: + def __init__(self, tracer: trace_api.Tracer) -> None: + self._tracer = tracer + + async def __call__( + self, + wrapped: Callable[..., Any], + instance: Any, + args: Tuple[Any, ...], + kwargs: Mapping[str, Any], + ) -> Any: + if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY): + async for res in wrapped(*args, **kwargs): + yield res + return + + span_name = f"{instance.__class__.__name__}.run_stream" + with self._tracer.start_as_current_span( + span_name, + attributes=dict( + _flatten( + { + 
SpanAttributes.OPENINFERENCE_SPAN_KIND: OpenInferenceSpanKindValues.AGENT, + SpanAttributes.INPUT_VALUE: _get_input_value( + wrapped, + *args, + **kwargs, + ), + } + ) + ), + record_exception=False, + set_status_on_exception=False, + ) as span: + group_chat = instance + team_id = getattr(group_chat, "_team_id", None) + participant_names = getattr(group_chat, "_participant_names", None) + participant_descriptions = getattr(group_chat, "_participant_descriptions", None) + + if team_id: + span.set_attribute("team_id", team_id) + if participant_names: + span.set_attribute("participant_names", participant_names) + if participant_descriptions: + span.set_attribute("participant_descriptions", participant_descriptions) + + try: + async for res in wrapped(*args, **kwargs): + if isinstance(res, TaskResult): + span.set_attributes(dict(get_output_attributes(res))) + yield res + except Exception as exception: + span.set_status(trace_api.Status(trace_api.StatusCode.ERROR, str(exception))) + span.record_exception(exception) + raise + span.set_status(trace_api.StatusCode.OK) + span.set_attributes(dict(get_attributes_from_context())) + + +class _SendMessageWrapper: + def __init__(self, tracer: trace_api.Tracer) -> None: + self._tracer = tracer + + async def __call__( + self, + wrapped: Callable[..., Any], + instance: Any, + args: Tuple[Any, ...], + kwargs: Mapping[str, Any], + ) -> Any: + if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY): + return await wrapped(*args, **kwargs) + + span_name = f"{instance.__class__.__name__}.send_message" + with self._tracer.start_as_current_span( + span_name, + attributes=dict( + _flatten( + { + SpanAttributes.OPENINFERENCE_SPAN_KIND: OpenInferenceSpanKindValues.CHAIN, + SpanAttributes.INPUT_VALUE: _get_input_value( + wrapped, + *args, + **kwargs, + ), + } + ) + ), + record_exception=False, + set_status_on_exception=False, + ) as span: + try: + response = await wrapped(*args, **kwargs) + except Exception as exception: + 
span.set_status(trace_api.Status(trace_api.StatusCode.ERROR, str(exception))) + span.record_exception(exception) + raise + span.set_status(trace_api.StatusCode.OK) + span.set_attributes(dict(get_output_attributes(response))) + span.set_attributes(dict(get_attributes_from_context())) + return response + + +class _PublishMessageWrapper: + def __init__(self, tracer: trace_api.Tracer) -> None: + self._tracer = tracer + + async def __call__( + self, + wrapped: Callable[..., Any], + instance: Any, + args: Tuple[Any, ...], + kwargs: Mapping[str, Any], + ) -> Any: + if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY): + return await wrapped(*args, **kwargs) + + span_name = f"{instance.__class__.__name__}.publish_message" + with self._tracer.start_as_current_span( + span_name, + attributes=dict( + _flatten( + { + SpanAttributes.OPENINFERENCE_SPAN_KIND: OpenInferenceSpanKindValues.CHAIN, + SpanAttributes.INPUT_VALUE: _get_input_value( + wrapped, + *args, + **kwargs, + ), + } + ) + ), + record_exception=False, + set_status_on_exception=False, + ) as span: + try: + response = await wrapped(*args, **kwargs) + except Exception as exception: + span.set_status(trace_api.Status(trace_api.StatusCode.ERROR, str(exception))) + span.record_exception(exception) + raise + span.set_status(trace_api.StatusCode.OK) + span.set_attributes(dict(get_output_attributes(response))) + span.set_attributes(dict(get_attributes_from_context())) + return response + + +INPUT_VALUE = SpanAttributes.INPUT_VALUE +OPENINFERENCE_SPAN_KIND = SpanAttributes.OPENINFERENCE_SPAN_KIND +OUTPUT_VALUE = SpanAttributes.OUTPUT_VALUE +OUTPUT_MIME_TYPE = SpanAttributes.OUTPUT_MIME_TYPE diff --git a/python/instrumentation/openinference-instrumentation-autogen-agentchat/src/openinference/instrumentation/autogen_agentchat/version.py b/python/instrumentation/openinference-instrumentation-autogen-agentchat/src/openinference/instrumentation/autogen_agentchat/version.py new file mode 100644 index 
000000000..3dc1f76bc --- /dev/null +++ b/python/instrumentation/openinference-instrumentation-autogen-agentchat/src/openinference/instrumentation/autogen_agentchat/version.py @@ -0,0 +1 @@ +__version__ = "0.1.0" diff --git a/python/instrumentation/openinference-instrumentation-autogen-agentchat/test-requirements.txt b/python/instrumentation/openinference-instrumentation-autogen-agentchat/test-requirements.txt new file mode 100644 index 000000000..03ab0acdc --- /dev/null +++ b/python/instrumentation/openinference-instrumentation-autogen-agentchat/test-requirements.txt @@ -0,0 +1,6 @@ +autogen-agentchat==0.5.1 +autogen-ext[openai]==0.5.1 +opentelemetry-sdk +pytest-asyncio +pytest-vcr +httpx<0.28 diff --git a/python/instrumentation/openinference-instrumentation-autogen-agentchat/tests/__init__.py b/python/instrumentation/openinference-instrumentation-autogen-agentchat/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/instrumentation/openinference-instrumentation-autogen-agentchat/tests/cassettes/TestAssistantAgent.test_agent_run.yaml b/python/instrumentation/openinference-instrumentation-autogen-agentchat/tests/cassettes/TestAssistantAgent.test_agent_run.yaml new file mode 100644 index 000000000..c113a8c72 --- /dev/null +++ b/python/instrumentation/openinference-instrumentation-autogen-agentchat/tests/cassettes/TestAssistantAgent.test_agent_run.yaml @@ -0,0 +1,115 @@ +interactions: +- request: + body: '{"messages":[{"content":"You are a helpful assistant that can check the + weather.","role":"system"},{"name":"user","role":"user","content":"What is the + weather in New York?"}],"model":"gpt-3.5-turbo","stream":true,"tools":[{"type":"function","function":{"name":"get_weather","description":"Get + the weather for a given city.","parameters":{"type":"object","properties":{"city":{"description":"city","title":"City","type":"string"}},"required":["city"],"additionalProperties":false},"strict":false}}]}' + headers: {} + method: POST + 
uri: https://api.openai.com/v1/chat/completions + response: + body: + string: 'data: {"id":"chatcmpl-BYyTJaUbY5GaJAQmYj5hOIzn4lCll","object":"chat.completion.chunk","created":1747675305,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"call_rOzPHDXJlI50BqN305vbzckW","type":"function","function":{"name":"get_weather","arguments":""}}],"refusal":null},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BYyTJaUbY5GaJAQmYj5hOIzn4lCll","object":"chat.completion.chunk","created":1747675305,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BYyTJaUbY5GaJAQmYj5hOIzn4lCll","object":"chat.completion.chunk","created":1747675305,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"city"}}]},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BYyTJaUbY5GaJAQmYj5hOIzn4lCll","object":"chat.completion.chunk","created":1747675305,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BYyTJaUbY5GaJAQmYj5hOIzn4lCll","object":"chat.completion.chunk","created":1747675305,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"New"}}]},"logprobs":null,"finish_reason":null}]} + + + data: 
{"id":"chatcmpl-BYyTJaUbY5GaJAQmYj5hOIzn4lCll","object":"chat.completion.chunk","created":1747675305,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" + York"}}]},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BYyTJaUbY5GaJAQmYj5hOIzn4lCll","object":"chat.completion.chunk","created":1747675305,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"}"}}]},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BYyTJaUbY5GaJAQmYj5hOIzn4lCll","object":"chat.completion.chunk","created":1747675305,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]} + + + data: [DONE] + + + ' + headers: {} + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"content":"You are a helpful assistant that can check the + weather.","role":"system"},{"name":"user","role":"user","content":"What is the + weather in New York?"},{"name":"weather_agent","role":"assistant","tool_calls":[{"id":"call_rOzPHDXJlI50BqN305vbzckW","function":{"arguments":"{\"city\":\"New + York\"}","name":"get_weather"},"type":"function"}]},{"content":"The weather + in New York is 73 degrees and Sunny.","role":"tool","tool_call_id":"call_rOzPHDXJlI50BqN305vbzckW"}],"model":"gpt-3.5-turbo","stream":true}' + headers: {} + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: 'data: {"id":"chatcmpl-BYyTLO8qM1pBtnlTEXtTFyJAsAHWc","object":"chat.completion.chunk","created":1747675307,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]} + + + data: 
{"id":"chatcmpl-BYyTLO8qM1pBtnlTEXtTFyJAsAHWc","object":"chat.completion.chunk","created":1747675307,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"The"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BYyTLO8qM1pBtnlTEXtTFyJAsAHWc","object":"chat.completion.chunk","created":1747675307,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + weather"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BYyTLO8qM1pBtnlTEXtTFyJAsAHWc","object":"chat.completion.chunk","created":1747675307,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + in"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BYyTLO8qM1pBtnlTEXtTFyJAsAHWc","object":"chat.completion.chunk","created":1747675307,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + New"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BYyTLO8qM1pBtnlTEXtTFyJAsAHWc","object":"chat.completion.chunk","created":1747675307,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + York"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BYyTLO8qM1pBtnlTEXtTFyJAsAHWc","object":"chat.completion.chunk","created":1747675307,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + is"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BYyTLO8qM1pBtnlTEXtTFyJAsAHWc","object":"chat.completion.chunk","created":1747675307,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + "},"logprobs":null,"finish_reason":null}]} + + + data: 
{"id":"chatcmpl-BYyTLO8qM1pBtnlTEXtTFyJAsAHWc","object":"chat.completion.chunk","created":1747675307,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"73"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BYyTLO8qM1pBtnlTEXtTFyJAsAHWc","object":"chat.completion.chunk","created":1747675307,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + degrees"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BYyTLO8qM1pBtnlTEXtTFyJAsAHWc","object":"chat.completion.chunk","created":1747675307,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + and"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BYyTLO8qM1pBtnlTEXtTFyJAsAHWc","object":"chat.completion.chunk","created":1747675307,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":" + sunny"},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BYyTLO8qM1pBtnlTEXtTFyJAsAHWc","object":"chat.completion.chunk","created":1747675307,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]} + + + data: {"id":"chatcmpl-BYyTLO8qM1pBtnlTEXtTFyJAsAHWc","object":"chat.completion.chunk","created":1747675307,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + + + data: [DONE] + + + ' + headers: {} + status: + code: 200 + message: OK +version: 1 diff --git a/python/instrumentation/openinference-instrumentation-autogen-agentchat/tests/cassettes/TestTeam.test_team_run.yaml 
b/python/instrumentation/openinference-instrumentation-autogen-agentchat/tests/cassettes/TestTeam.test_team_run.yaml new file mode 100644 index 000000000..935cec589 --- /dev/null +++ b/python/instrumentation/openinference-instrumentation-autogen-agentchat/tests/cassettes/TestTeam.test_team_run.yaml @@ -0,0 +1,196 @@ +interactions: +- request: + body: '{"messages":[{"content":"You are a helpful AI assistant.","role":"system"},{"name":"user","role":"user","content":"Write + a short poem about the fall season."}],"model":"gpt-4o-2024-08-06","stream":false}' + headers: {} + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-BYyLLeDhMFrGavW6mUdFHDruJv24r\",\n \"object\": + \"chat.completion\",\n \"created\": 1747674811,\n \"model\": \"gpt-4o-2024-08-06\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Leaves of amber gently fall, \\nWhispering + secrets to the ground. \\nCrisp air and golden light enthrall, \\nAs nature's + painted hues astound. \\n\\nPumpkins perch on doorsteps bright, \\nHarvest + moons in twilight glow. \\nWarm sweaters ward off the night, \\nWhile fires + crackle, warm, and slow. \\n\\nMigrating birds in skies take flight, \\nCarving + paths through autumn's chill. \\nA tapestry in colors alight, \\nAs the + world turns, serene and still. 
\",\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 28,\n \"completion_tokens\": + 107,\n \"total_tokens\": 135,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_76544d79cb\"\n}\n" + headers: {} + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"content":"\n Provide constructive feedback.\n Respond + with ''APPROVE'' when your feedbacks are addressed.\n ","role":"system"},{"name":"user","role":"user","content":"Write + a short poem about the fall season."},{"name":"primary","role":"user","content":"Leaves + of amber gently fall, \nWhispering secrets to the ground. \nCrisp air and + golden light enthrall, \nAs nature''s painted hues astound. \n\nPumpkins perch + on doorsteps bright, \nHarvest moons in twilight glow. \nWarm sweaters ward + off the night, \nWhile fires crackle, warm, and slow. \n\nMigrating birds + in skies take flight, \nCarving paths through autumn''s chill. \nA tapestry + in colors alight, \nAs the world turns, serene and still. "}],"model":"gpt-4o-2024-08-06","stream":false}' + headers: {} + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-BYyLOS0He5eDpBmmalPylWmzhhwPa\",\n \"object\": + \"chat.completion\",\n \"created\": 1747674814,\n \"model\": \"gpt-4o-2024-08-06\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Your poem beautifully captures the + essence of the fall season. The imagery of \\\"leaves of amber,\\\" \\\"crisp + air,\\\" and \\\"golden light\\\" effectively conveys the season's visual + and sensory richness. 
The structure of the stanzas is consistent, and the + rhyme scheme helps maintain a pleasant rhythm. Here are a few suggestions + for enhancement:\\n\\n1. Consider adding more sensory details beyond the visual, + like the sound of rustling leaves or the scent of fresh rain, to deepen the + reader's immersion.\\n\\n2. The phrase \\\"Crisp air and golden light enthrall\\\" + is effective, but perhaps exploring a more unique or unexpected metaphor could + add an additional layer of depth.\\n\\n3. Explore more variation in verbs + or adjectives to enhance the imagery. For example, replacing \\\"warm sweaters\\\" + with \\\"cozy knits\\\" could intrigue readers more.\\n\\n4. The poem might + benefit from a closing line that leaves a lingering thought or emotion about + autumn's transition into winter.\\n\\nOverall, the poem is engaging and successfully + evokes the fall season. Addressing these points could further enhance its + impact. \\n\\nFeel free to make revisions, and I'll be happy to review again!\",\n + \ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": + null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 156,\n \"completion_tokens\": 231,\n \"total_tokens\": 387,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_76544d79cb\"\n}\n" + headers: {} + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"content":"You are a helpful AI assistant.","role":"system"},{"name":"user","role":"user","content":"Write + a short poem about the fall season."},{"name":"primary","role":"assistant","content":"Leaves + of amber gently fall, \nWhispering secrets to the ground. \nCrisp air and + golden light enthrall, \nAs nature''s painted hues astound. 
\n\nPumpkins perch + on doorsteps bright, \nHarvest moons in twilight glow. \nWarm sweaters ward + off the night, \nWhile fires crackle, warm, and slow. \n\nMigrating birds + in skies take flight, \nCarving paths through autumn''s chill. \nA tapestry + in colors alight, \nAs the world turns, serene and still. "},{"name":"critic","role":"user","content":"Your + poem beautifully captures the essence of the fall season. The imagery of \"leaves + of amber,\" \"crisp air,\" and \"golden light\" effectively conveys the season''s + visual and sensory richness. The structure of the stanzas is consistent, and + the rhyme scheme helps maintain a pleasant rhythm. Here are a few suggestions + for enhancement:\n\n1. Consider adding more sensory details beyond the visual, + like the sound of rustling leaves or the scent of fresh rain, to deepen the + reader''s immersion.\n\n2. The phrase \"Crisp air and golden light enthrall\" + is effective, but perhaps exploring a more unique or unexpected metaphor could + add an additional layer of depth.\n\n3. Explore more variation in verbs or adjectives + to enhance the imagery. For example, replacing \"warm sweaters\" with \"cozy + knits\" could intrigue readers more.\n\n4. The poem might benefit from a closing + line that leaves a lingering thought or emotion about autumn''s transition into + winter.\n\nOverall, the poem is engaging and successfully evokes the fall season. + Addressing these points could further enhance its impact. 
\n\nFeel free to + make revisions, and I''ll be happy to review again!"}],"model":"gpt-4o-2024-08-06","stream":false}' + headers: {} + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-BYyLUsrkxWBgnYHxEcOi89hhV9Sza\",\n \"object\": + \"chat.completion\",\n \"created\": 1747674820,\n \"model\": \"gpt-4o-2024-08-06\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thank you for the insightful feedback! + I'll make some revisions to incorporate more sensory details, vivid imagery, + and a reflective closing line.\\n\\n---\\n\\nLeaves of amber gently cast, + \ \\nMurmuring secrets to the ground, \\nThe crisp scent of autumn's past, + \ \\nRides the breeze without a sound. \\n\\nHarvest moons in twilight sights, + \ \\nCozier knits fend off the chill, \\nAs whispering rains greet the nights, + \ \\nNature's stillness feels surreal. \\n\\nBirds on journeys trace the + skies, \\nTheir songs a fading farewell, \\nCrimson fires in hearts arise, + \ \\nAs time weaves its subtle spell. \\n\\nIn the dance from fall to frost, + \ \\nA quiet promise softly calls, \\nIn winter's grip, no warmth is lost, + \ \\nFor autumn's flame within us thralls. 
\\n\\n---\\n\\nLet me know your + thoughts!\",\n \"refusal\": null,\n \"annotations\": []\n },\n + \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n + \ \"usage\": {\n \"prompt_tokens\": 378,\n \"completion_tokens\": 173,\n + \ \"total_tokens\": 551,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_76544d79cb\"\n}\n" + headers: {} + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"content":"\n Provide constructive feedback.\n Respond + with ''APPROVE'' when your feedbacks are addressed.\n ","role":"system"},{"name":"user","role":"user","content":"Write + a short poem about the fall season."},{"name":"primary","role":"user","content":"Leaves + of amber gently fall, \nWhispering secrets to the ground. \nCrisp air and + golden light enthrall, \nAs nature''s painted hues astound. \n\nPumpkins perch + on doorsteps bright, \nHarvest moons in twilight glow. \nWarm sweaters ward + off the night, \nWhile fires crackle, warm, and slow. \n\nMigrating birds + in skies take flight, \nCarving paths through autumn''s chill. \nA tapestry + in colors alight, \nAs the world turns, serene and still. "},{"name":"critic","role":"assistant","content":"Your + poem beautifully captures the essence of the fall season. The imagery of \"leaves + of amber,\" \"crisp air,\" and \"golden light\" effectively conveys the season''s + visual and sensory richness. The structure of the stanzas is consistent, and + the rhyme scheme helps maintain a pleasant rhythm. Here are a few suggestions + for enhancement:\n\n1. Consider adding more sensory details beyond the visual, + like the sound of rustling leaves or the scent of fresh rain, to deepen the + reader''s immersion.\n\n2. 
The phrase \"Crisp air and golden light enthrall\" + is effective, but perhaps exploring a more unique or unexpected metaphor could + add an additional layer of depth.\n\n3. Explore more variation in verbs or adjectives + to enhance the imagery. For example, replacing \"warm sweaters\" with \"cozy + knits\" could intrigue readers more.\n\n4. The poem might benefit from a closing + line that leaves a lingering thought or emotion about autumn''s transition into + winter.\n\nOverall, the poem is engaging and successfully evokes the fall season. + Addressing these points could further enhance its impact. \n\nFeel free to + make revisions, and I''ll be happy to review again!"},{"name":"primary","role":"user","content":"Thank + you for the insightful feedback! I''ll make some revisions to incorporate more + sensory details, vivid imagery, and a reflective closing line.\n\n---\n\nLeaves + of amber gently cast, \nMurmuring secrets to the ground, \nThe crisp scent + of autumn''s past, \nRides the breeze without a sound. \n\nHarvest moons in + twilight sights, \nCozier knits fend off the chill, \nAs whispering rains + greet the nights, \nNature''s stillness feels surreal. \n\nBirds on journeys + trace the skies, \nTheir songs a fading farewell, \nCrimson fires in hearts + arise, \nAs time weaves its subtle spell. \n\nIn the dance from fall to frost, \nA + quiet promise softly calls, \nIn winter''s grip, no warmth is lost, \nFor + autumn''s flame within us thralls. 
\n\n---\n\nLet me know your thoughts!"}],"model":"gpt-4o-2024-08-06","stream":false}' + headers: {} + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-BYyLZwtrua9ikNhaqWg2S3PLv2HWU\",\n \"object\": + \"chat.completion\",\n \"created\": 1747674825,\n \"model\": \"gpt-4o-2024-08-06\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"This revised version of your poem beautifully + incorporates the suggestions! \\n\\n1. You've successfully enhanced the sensory + experience with phrases like \\\"the crisp scent of autumn's past\\\" and + \\\"whispering rains,\\\" adding depth to the poem.\\n\\n2. The imagery is + vivid and vivid, elevating the reader's visual and emotional journey through + autumn.\\n\\n3. The reflective closing lines, especially \\\"In winter's grip, + no warmth is lost,\\\" provide a thoughtful conclusion, leaving the reader + with a sense of continuity and warmth.\\n\\n4. \\\"Cozier knits\\\" and \\\"Crimson + fires\\\" are evocative word choices that amplify the overall imagery and + sentiment of the stanza.\\n\\nWell done! The revisions have enhanced the poem's + impact, making it even more immersive and resonant. 
\\n\\nAPPROVE\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 572,\n \"completion_tokens\": 157,\n \"total_tokens\": 729,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_76544d79cb\"\n}\n" + headers: {} + status: + code: 200 + message: OK +version: 1 diff --git a/python/instrumentation/openinference-instrumentation-autogen-agentchat/tests/test_instrumentor.py b/python/instrumentation/openinference-instrumentation-autogen-agentchat/tests/test_instrumentor.py new file mode 100644 index 000000000..01e0515d3 --- /dev/null +++ b/python/instrumentation/openinference-instrumentation-autogen-agentchat/tests/test_instrumentor.py @@ -0,0 +1,213 @@ +import json +from typing import Any, Dict, Generator, Mapping, cast + +import pytest +from opentelemetry import trace as trace_api +from opentelemetry.sdk import trace as trace_sdk +from opentelemetry.sdk.resources import Resource +from opentelemetry.sdk.trace.export import SimpleSpanProcessor +from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter +from opentelemetry.util._importlib_metadata import entry_points +from opentelemetry.util.types import AttributeValue + +from openinference.instrumentation import OITracer +from openinference.instrumentation.autogen_agentchat import AutogenAgentChatInstrumentor + + +def remove_all_vcr_request_headers(request: Any) -> Any: + """ + Removes all request headers. 
+ + Example: + ``` + @pytest.mark.vcr( + before_record_request=remove_all_vcr_request_headers + ) + def test_openai() -> None: + # make request to OpenAI + """ + request.headers.clear() + return request + + +def remove_all_vcr_response_headers(response: Dict[str, Any]) -> Dict[str, Any]: + """ + Removes all response headers. + + Example: + ``` + @pytest.mark.vcr( + before_record_response=remove_all_vcr_response_headers + ) + def test_openai() -> None: + # make request to OpenAI + """ + response["headers"] = {} + return response + + +@pytest.fixture() +def in_memory_span_exporter() -> InMemorySpanExporter: + return InMemorySpanExporter() + + +@pytest.fixture() +def tracer_provider(in_memory_span_exporter: InMemorySpanExporter) -> trace_api.TracerProvider: + resource = Resource(attributes={}) + tracer_provider = trace_sdk.TracerProvider(resource=resource) + span_processor = SimpleSpanProcessor(span_exporter=in_memory_span_exporter) + tracer_provider.add_span_processor(span_processor=span_processor) + + return tracer_provider + + +@pytest.fixture(autouse=True) +def instrument( + tracer_provider: trace_api.TracerProvider, + in_memory_span_exporter: InMemorySpanExporter, +) -> Generator[None, None, None]: + AutogenAgentChatInstrumentor().instrument(tracer_provider=tracer_provider) + yield + AutogenAgentChatInstrumentor().uninstrument() + in_memory_span_exporter.clear() + + +class TestInstrumentor: + def test_entrypoint_for_opentelemetry_instrument(self) -> None: + (instrumentor_entrypoint,) = entry_points( + group="opentelemetry_instrumentor", name="autogen_agentchat" + ) + instrumentor = instrumentor_entrypoint.load()() + assert isinstance(instrumentor, AutogenAgentChatInstrumentor) + + # Ensure we're using the common OITracer from common openinference-instrumentation pkg + def test_oitracer(self) -> None: + assert isinstance(AutogenAgentChatInstrumentor()._tracer, OITracer) + + +class TestAssistantAgent: + @pytest.mark.asyncio + @pytest.mark.vcr( + 
before_record_response=remove_all_vcr_response_headers, + before_record_request=remove_all_vcr_request_headers, + ) + async def test_agent_run( + self, + tracer_provider: trace_api.TracerProvider, + in_memory_span_exporter: InMemorySpanExporter, + ) -> None: + from autogen_agentchat.agents import AssistantAgent + from autogen_ext.models.openai import OpenAIChatCompletionClient + + model_client = OpenAIChatCompletionClient( + model="gpt-3.5-turbo", + api_key="fake-key", + ) + + # Define a simple function tool that the agent can use + def get_weather(city: str) -> str: + """Get the weather for a given city.""" + return f"The weather in {city} is 73 degrees and Sunny." + + # Define an AssistantAgent with the model, tool, system message, and reflection enabled + agent = AssistantAgent( + name="weather_agent", + model_client=model_client, + tools=[get_weather], + system_message="You are a helpful assistant that can check the weather.", + reflect_on_tool_use=True, + model_client_stream=True, + ) + + # Run the agent and stream the messages to the console + results = await agent.run(task="What is the weather in New York?") + await model_client.close() + + # Verify that spans were created + spans = in_memory_span_exporter.get_finished_spans() + assert len(spans) == 2 + final_span = spans[-1] + assert final_span.status.is_ok + + attributes = dict(cast(Mapping[str, AttributeValue], final_span.attributes)) + assert attributes["agent_name"] == "weather_agent" + assert ( + attributes["agent_description"] + == "An agent that provides assistance with ability to use tools." 
+ ) + assert json.loads(attributes["input.value"]) == {"task": "What is the weather in New York?"} + assert attributes["openinference.span.kind"] == "AGENT" + assert attributes["output.mime_type"] == "application/json" + result_messages = results.messages + captured_output_messages = json.loads(attributes["output.value"])["messages"] + assert len(result_messages) == len(captured_output_messages) + + +class TestTeam: + @pytest.mark.asyncio + @pytest.mark.vcr( + before_record_response=remove_all_vcr_response_headers, + before_record_request=remove_all_vcr_request_headers, + decode_compressed_response=True, + ) + async def test_team_run( + self, + tracer_provider: trace_api.TracerProvider, + in_memory_span_exporter: InMemorySpanExporter, + ) -> None: + from autogen_agentchat.agents import AssistantAgent + from autogen_agentchat.conditions import TextMentionTermination + from autogen_agentchat.teams import RoundRobinGroupChat + from autogen_ext.models.openai import OpenAIChatCompletionClient + + model_client = OpenAIChatCompletionClient( + model="gpt-4o-2024-08-06", + api_key="fake-key", + ) + + # Create two agents: a primary and a critic + primary_agent = AssistantAgent( + "primary", + model_client=model_client, + system_message="You are a helpful AI assistant.", + ) + + critic_agent = AssistantAgent( + "critic", + model_client=model_client, + system_message=""" + Provide constructive feedback. + Respond with 'APPROVE' when your feedbacks are addressed. 
+ """, + ) + + # Termination condition: stop when the critic says "APPROVE" + text_termination = TextMentionTermination("APPROVE") + + # Create a team with both agents + team = RoundRobinGroupChat( + [primary_agent, critic_agent], termination_condition=text_termination + ) + + # Run the team on a task + _ = await team.run(task="Write a short poem about the fall season.") + await model_client.close() + + # Verify that spans were created + spans = in_memory_span_exporter.get_finished_spans() + assert len(spans) == 17 + final_span = spans[-1] + assert "RoundRobinGroupChat" in final_span.name + assert final_span.status.is_ok + + attributes = dict(cast(Mapping[str, AttributeValue], final_span.attributes)) + # Verify input value + input_value = json.loads(attributes["input.value"]) + assert isinstance(input_value, dict) + assert "task" in input_value + assert input_value["task"] == "Write a short poem about the fall season." + + # Verify participant information + assert attributes["participant_names"] == ("primary", "critic") + assert len(attributes["participant_descriptions"]) == 2 + assert attributes["openinference.span.kind"] == "AGENT" diff --git a/python/instrumentation/openinference-instrumentation-langchain/CHANGELOG.md b/python/instrumentation/openinference-instrumentation-langchain/CHANGELOG.md index 3c2797357..53386cdfa 100644 --- a/python/instrumentation/openinference-instrumentation-langchain/CHANGELOG.md +++ b/python/instrumentation/openinference-instrumentation-langchain/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [0.1.43](https://github.com/Arize-ai/openinference/compare/python-openinference-instrumentation-langchain-v0.1.42...python-openinference-instrumentation-langchain-v0.1.43) (2025-05-20) + + +### Features + +* **langchain:** track tool schemas from LLM invocation parameters ([#1643](https://github.com/Arize-ai/openinference/issues/1643)) ([65d3a82](https://github.com/Arize-ai/openinference/commit/65d3a8219a1a061a4fdea19b17760096b5aca76a)) + ## 
[0.1.42](https://github.com/Arize-ai/openinference/compare/python-openinference-instrumentation-langchain-v0.1.41...python-openinference-instrumentation-langchain-v0.1.42) (2025-04-28) diff --git a/python/instrumentation/openinference-instrumentation-langchain/src/openinference/instrumentation/langchain/_tracer.py b/python/instrumentation/openinference-instrumentation-langchain/src/openinference/instrumentation/langchain/_tracer.py index 92fd6d316..0091f34cf 100644 --- a/python/instrumentation/openinference-instrumentation-langchain/src/openinference/instrumentation/langchain/_tracer.py +++ b/python/instrumentation/openinference-instrumentation-langchain/src/openinference/instrumentation/langchain/_tracer.py @@ -51,6 +51,7 @@ OpenInferenceSpanKindValues, RerankerAttributes, SpanAttributes, + ToolAttributes, ToolCallAttributes, ) @@ -599,6 +600,9 @@ def _invocation_parameters(run: Run) -> Iterator[Tuple[str, str]]: f"expected Mapping, found {type(invocation_parameters)}" ) yield LLM_INVOCATION_PARAMETERS, safe_json_dumps(invocation_parameters) + tools = invocation_parameters.get("tools", []) + for idx, tool in enumerate(tools): + yield f"{LLM_TOOLS}.{idx}.{TOOL_JSON_SCHEMA}", safe_json_dumps(tool) @stop_on_exception @@ -959,3 +963,5 @@ def _get_attributes_from_image( TOOL_DESCRIPTION = SpanAttributes.TOOL_DESCRIPTION TOOL_NAME = SpanAttributes.TOOL_NAME TOOL_PARAMETERS = SpanAttributes.TOOL_PARAMETERS +TOOL_JSON_SCHEMA = ToolAttributes.TOOL_JSON_SCHEMA +LLM_TOOLS = SpanAttributes.LLM_TOOLS diff --git a/python/instrumentation/openinference-instrumentation-langchain/src/openinference/instrumentation/langchain/version.py b/python/instrumentation/openinference-instrumentation-langchain/src/openinference/instrumentation/langchain/version.py index 6728003b0..fe54c5e00 100644 --- a/python/instrumentation/openinference-instrumentation-langchain/src/openinference/instrumentation/langchain/version.py +++ 
b/python/instrumentation/openinference-instrumentation-langchain/src/openinference/instrumentation/langchain/version.py @@ -1 +1 @@ -__version__ = "0.1.42" +__version__ = "0.1.43" diff --git a/python/instrumentation/openinference-instrumentation-langchain/tests/cassettes/test_instrumentor/test_tool_call_with_function.yaml b/python/instrumentation/openinference-instrumentation-langchain/tests/cassettes/test_instrumentor/test_tool_call_with_function.yaml new file mode 100644 index 000000000..845de7ecb --- /dev/null +++ b/python/instrumentation/openinference-instrumentation-langchain/tests/cassettes/test_instrumentor/test_tool_call_with_function.yaml @@ -0,0 +1,33 @@ +interactions: +- request: + body: '{"messages":[{"content":"What is 3 * 12? Also, what is 11 + 49?","role":"user"}],"model":"gpt-4o-mini","stream":false,"tools":[{"type":"function","function":{"name":"add","description":"Adds + a and b.","parameters":{"properties":{"a":{"type":"integer"},"b":{"type":"integer"}},"required":["a","b"],"type":"object"}}},{"type":"function","function":{"name":"multiply","description":"Multiplies + a and b.","parameters":{"properties":{"a":{"type":"integer"},"b":{"type":"integer"}},"required":["a","b"],"type":"object"}}}]}' + headers: {} + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-BZ8blO4x3ynTj8awD7YPWEqaUByIy\",\n \"object\": + \"chat.completion\",\n \"created\": 1747714269,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n + \ \"id\": \"call_Kd281qYCh92tVGg73bPsF1HZ\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"multiply\",\n + \ \"arguments\": \"{\\\"a\\\": 3, \\\"b\\\": 12}\"\n }\n + \ },\n {\n \"id\": \"call_Z3ns3Wjn6WnUADUhPhlommYN\",\n + \ \"type\": \"function\",\n \"function\": {\n \"name\": + \"add\",\n \"arguments\": \"{\\\"a\\\": 11, \\\"b\\\": 49}\"\n + 
\ }\n }\n ],\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 87,\n \"completion_tokens\": + 50,\n \"total_tokens\": 137,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_dbaca60df0\"\n}\n" + headers: {} + status: + code: 200 + message: OK +version: 1 diff --git a/python/instrumentation/openinference-instrumentation-langchain/tests/test_instrumentor.py b/python/instrumentation/openinference-instrumentation-langchain/tests/test_instrumentor.py index 62b861535..818d8f5ea 100644 --- a/python/instrumentation/openinference-instrumentation-langchain/tests/test_instrumentor.py +++ b/python/instrumentation/openinference-instrumentation-langchain/tests/test_instrumentor.py @@ -61,6 +61,7 @@ OpenInferenceMimeTypeValues, OpenInferenceSpanKindValues, SpanAttributes, + ToolAttributes, ToolCallAttributes, ) @@ -933,6 +934,98 @@ def test_token_counts( assert attr.pop(LLM_TOKEN_COUNT_TOTAL) == 24 +@pytest.mark.vcr( + decode_compressed_response=True, + before_record_request=remove_all_vcr_request_headers, + before_record_response=remove_all_vcr_response_headers, + cassette_library_dir="tests/cassettes/test_instrumentor", # Explicitly set the directory +) +def test_tool_call_with_function( + in_memory_span_exporter: InMemorySpanExporter, +) -> None: + from langchain.chat_models import init_chat_model + from langchain_core.messages import AIMessage + from langchain_core.tools import tool + + @tool + def add(a: int, b: int) -> int: + """Adds a and b.""" + return a + b + + @tool + def multiply(a: int, b: int) -> int: + """Multiplies a and b.""" + return a * b + + tools = [add, multiply] + + llm = init_chat_model( 
+ "gpt-4o-mini", + model_provider="openai", + api_key="sk-fake-key", + ) + llm_with_tools = llm.bind_tools(tools) + query = "What is 3 * 12? Also, what is 11 + 49?" + result = llm_with_tools.invoke(query) + assert isinstance(result, AIMessage) + _ = result.tool_calls + spans = in_memory_span_exporter.get_finished_spans() + assert len(spans) == 1 + span = spans[0] + attributes = dict(span.attributes or {}) + + # Test input message + assert attributes.pop("llm.input_messages.0.message.role") == "user" + assert attributes.pop("llm.input_messages.0.message.content") == query + + # Test output message and tool calls + assert attributes.pop("llm.output_messages.0.message.role") == "assistant" + + # Test first tool call (multiply) + assert ( + attributes.pop("llm.output_messages.0.message.tool_calls.0.tool_call.function.name") + == "multiply" + ) + multiply_args = attributes.pop( + "llm.output_messages.0.message.tool_calls.0.tool_call.function.arguments" + ) + assert isinstance(multiply_args, str) + assert json.loads(multiply_args) == {"a": 3, "b": 12} + + # Test second tool call (add) + assert ( + attributes.pop("llm.output_messages.0.message.tool_calls.1.tool_call.function.name") + == "add" + ) + add_args = attributes.pop( + "llm.output_messages.0.message.tool_calls.1.tool_call.function.arguments" + ) + assert isinstance(add_args, str) + assert json.loads(add_args) == {"a": 11, "b": 49} + + # Test tool schemas + tool1_schema = attributes.pop(f"{LLM_TOOLS}.0.{TOOL_JSON_SCHEMA}", None) + tool2_schema = attributes.pop(f"{LLM_TOOLS}.1.{TOOL_JSON_SCHEMA}", None) + assert tool1_schema is not None + assert tool2_schema is not None + assert isinstance(tool1_schema, str) + assert isinstance(tool2_schema, str) + + tool1_schema_dict = json.loads(tool1_schema) + assert tool1_schema_dict["type"] == "function" + assert tool1_schema_dict["function"]["name"] == "add" + assert tool1_schema_dict["function"]["description"] == "Adds a and b." 
+ assert tool1_schema_dict["function"]["parameters"]["properties"]["a"]["type"] == "integer" + assert tool1_schema_dict["function"]["parameters"]["properties"]["b"]["type"] == "integer" + + tool2_schema_dict = json.loads(tool2_schema) + assert tool2_schema_dict["type"] == "function" + assert tool2_schema_dict["function"]["name"] == "multiply" + assert tool2_schema_dict["function"]["description"] == "Multiplies a and b." + assert tool2_schema_dict["function"]["parameters"]["properties"]["a"]["type"] == "integer" + assert tool2_schema_dict["function"]["parameters"]["properties"]["b"]["type"] == "integer" + + def _check_context_attributes( attributes: Dict[str, Any], session_id: Optional[str], @@ -1127,6 +1220,8 @@ async def __aiter__(self) -> AsyncIterator[bytes]: RETRIEVAL_DOCUMENTS = SpanAttributes.RETRIEVAL_DOCUMENTS TOOL_CALL_FUNCTION_ARGUMENTS_JSON = ToolCallAttributes.TOOL_CALL_FUNCTION_ARGUMENTS_JSON TOOL_CALL_FUNCTION_NAME = ToolCallAttributes.TOOL_CALL_FUNCTION_NAME +TOOL_JSON_SCHEMA = ToolAttributes.TOOL_JSON_SCHEMA +LLM_TOOLS = SpanAttributes.LLM_TOOLS CHAIN = OpenInferenceSpanKindValues.CHAIN LLM = OpenInferenceSpanKindValues.LLM diff --git a/python/instrumentation/openinference-instrumentation-mcp/CHANGELOG.md b/python/instrumentation/openinference-instrumentation-mcp/CHANGELOG.md index c76adf9eb..e28c6752f 100644 --- a/python/instrumentation/openinference-instrumentation-mcp/CHANGELOG.md +++ b/python/instrumentation/openinference-instrumentation-mcp/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [1.3.0](https://github.com/Arize-ai/openinference/compare/python-openinference-instrumentation-mcp-v1.2.1...python-openinference-instrumentation-mcp-v1.3.0) (2025-05-19) + + +### Features + +* **mcp:** add instrumentation support for Streamable-HTTP transport and bump mcp dependency to ≥1.8.1 ([#1640](https://github.com/Arize-ai/openinference/issues/1640)) ([4df8976](https://github.com/Arize-ai/openinference/commit/4df8976066f1911321ee31f7732854787079e981)) + ## 
[1.2.1](https://github.com/Arize-ai/openinference/compare/python-openinference-instrumentation-mcp-v1.2.0...python-openinference-instrumentation-mcp-v1.2.1) (2025-05-14) diff --git a/python/instrumentation/openinference-instrumentation-mcp/src/openinference/instrumentation/mcp/__init__.py b/python/instrumentation/openinference-instrumentation-mcp/src/openinference/instrumentation/mcp/__init__.py index a22d05d89..75f3935ea 100644 --- a/python/instrumentation/openinference-instrumentation-mcp/src/openinference/instrumentation/mcp/__init__.py +++ b/python/instrumentation/openinference-instrumentation-mcp/src/openinference/instrumentation/mcp/__init__.py @@ -61,6 +61,14 @@ def _instrument(self, **kwargs: Any) -> None: ), "mcp.server.stdio", ) + register_post_import_hook( + lambda _: wrap_function_wrapper( + "mcp.server.lowlevel.server", + "Server._handle_request", + self._wrap_handle_request, + ), + "mcp.server.lowlevel.server", + ) # While we prefer to instrument the lowest level primitive, the transports above, it doesn't # mean context will be propagated to handlers automatically. 
Notably, the MCP SDK passes @@ -98,6 +106,21 @@ async def _wrap_plain_transport( async with wrapped(*args, **kwargs) as (read_stream, write_stream): yield InstrumentedStreamReader(read_stream), InstrumentedStreamWriter(write_stream) + async def _wrap_handle_request( + self, wrapped: Callable[..., Any], instance: Any, args: Any, kwargs: Any + ) -> Any: + token = None + try: + # Message has been deserialized; we need to extract the traceparent + _meta = {"traceparent": args[1].params.meta.traceparent} + ctx = propagate.extract(_meta) + token = context.attach(ctx) + finally: + res = await wrapped(*args, **kwargs) + if token: + context.detach(token) + return res + def _base_session_init_wrapper( self, wrapped: Callable[..., None], instance: Any, args: Any, kwargs: Any ) -> None: diff --git a/python/instrumentation/openinference-instrumentation-mcp/src/openinference/instrumentation/mcp/version.py b/python/instrumentation/openinference-instrumentation-mcp/src/openinference/instrumentation/mcp/version.py index a955fdae1..9c73af26b 100644 --- a/python/instrumentation/openinference-instrumentation-mcp/src/openinference/instrumentation/mcp/version.py +++ b/python/instrumentation/openinference-instrumentation-mcp/src/openinference/instrumentation/mcp/version.py @@ -1 +1 @@ -__version__ = "1.2.1" +__version__ = "1.3.0" diff --git a/python/openinference-instrumentation/CHANGELOG.md b/python/openinference-instrumentation/CHANGELOG.md index 8c59483bf..ac72d2f99 100644 --- a/python/openinference-instrumentation/CHANGELOG.md +++ b/python/openinference-instrumentation/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [0.1.30](https://github.com/Arize-ai/openinference/compare/python-openinference-instrumentation-v0.1.29...python-openinference-instrumentation-v0.1.30) (2025-05-19) + + +### Documentation + +* update readmes for development setup and fixes ([#1629](https://github.com/Arize-ai/openinference/issues/1629))
([7b211b3](https://github.com/Arize-ai/openinference/commit/7b211b3b1624ab433ba35568112f0a1d8964456f)) + ## [0.1.29](https://github.com/Arize-ai/openinference/compare/python-openinference-instrumentation-v0.1.28...python-openinference-instrumentation-v0.1.29) (2025-05-12) diff --git a/python/openinference-instrumentation/src/openinference/instrumentation/version.py b/python/openinference-instrumentation/src/openinference/instrumentation/version.py index a5f376226..887b2e7d0 100644 --- a/python/openinference-instrumentation/src/openinference/instrumentation/version.py +++ b/python/openinference-instrumentation/src/openinference/instrumentation/version.py @@ -1 +1 @@ -__version__ = "0.1.29" +__version__ = "0.1.30" diff --git a/python/tox.ini b/python/tox.ini index 3c458dfde..3e178d957 100644 --- a/python/tox.ini +++ b/python/tox.ini @@ -27,6 +27,8 @@ envlist = py3{9,13}-ci-{google_genai,google_genai-latest} py39-mypy-langchain_core py3{10,13}-ci-{mcp,mcp-latest} + py3{10,13}-ci-{autogen_agentchat,autogen_agentchat-latest} + [testenv] package = wheel @@ -59,6 +61,8 @@ changedir = portkey: instrumentation/openinference-instrumentation-portkey/ mcp: instrumentation/openinference-instrumentation-mcp/ google_genai: instrumentation/openinference-instrumentation-google-genai/ + autogen_agentchat: instrumentation/openinference-instrumentation-autogen-agentchat/ + commands_pre = agno: uv pip install --reinstall {toxinidir}/instrumentation/openinference-instrumentation-agno[test] agno-latest: uv pip install -U agno @@ -121,6 +125,12 @@ commands_pre = google_genai: uv pip install --reinstall-package openinference-instrumentation-google-genai . 
google_genai: uv pip install -r test-requirements.txt google_genai-latest: uv pip install -U google-genai + autogen_agentchat: uv pip install --reinstall {toxinidir}/instrumentation/openinference-instrumentation-autogen-agentchat + autogen_agentchat: uv pip uninstall -r test-requirements.txt + autogen_agentchat: uv pip install --reinstall-package openinference-instrumentation-autogen-agentchat . + autogen_agentchat: python -c 'import openinference.instrumentation.autogen_agentchat' + autogen_agentchat: uv pip install -r test-requirements.txt + autogen_agentchat-latest: uv pip install -U autogen-agentchat uv pip install --reinstall {toxinidir}/openinference-instrumentation # reinstall comes last to ensure it is installed from source instrumentation: uv pip install --reinstall {toxinidir}/openinference-instrumentation[test] uv pip install --reinstall {toxinidir}/openinference-semantic-conventions # reinstall comes last to ensure it is installed from source @@ -130,6 +140,7 @@ commands_pre = mcp: python -c 'import openinference.instrumentation.mcp' mcp: uv pip install -r test-requirements.txt mcp-latest: uv pip install -U mcp + commands = ruff: ruff format . ruff: ruff check --fix . diff --git a/release-please-config.json b/release-please-config.json index 4881ae4e3..a29bb55a7 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -98,6 +98,10 @@ "python/instrumentation/openinference-instrumentation-google-genai": { "package-name": "python-openinference-instrumentation-google-genai", "release-type": "python" + }, + "python/instrumentation/openinference-instrumentation-autogen-agentchat": { + "package-name": "python-openinference-instrumentation-autogen-agentchat", + "release-type": "python" } } }