From 28e31cf2150c88221590db26d4f19db8eff18323 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 16 May 2025 08:45:31 -0700 Subject: [PATCH 1/8] Set version to 1.0.0b12. Update dependency on azure-ai-agents to 1.0.0 --- sdk/ai/azure-ai-projects/CHANGELOG.md | 10 ++++++++++ sdk/ai/azure-ai-projects/azure/ai/projects/_version.py | 2 +- sdk/ai/azure-ai-projects/setup.py | 2 +- 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index 804fd7baa0b1..a85b4f48620a 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -1,5 +1,15 @@ # Release History +## 1.0.0b12 (Unreleased) + +### Features added + +### Breaking changes + +### Bugs Fixed + +### Sample updates + ## 1.0.0b11 (2025-05-15) There have been significant updates with the release of version 1.0.0b11, including breaking changes. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py index 319889e447e0..46f199f51a87 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b11" +VERSION = "1.0.0b12" diff --git a/sdk/ai/azure-ai-projects/setup.py b/sdk/ai/azure-ai-projects/setup.py index bbc27d4b682c..3172316ce47d 100644 --- a/sdk/ai/azure-ai-projects/setup.py +++ b/sdk/ai/azure-ai-projects/setup.py @@ -71,7 +71,7 @@ "azure-core>=1.30.0", "typing-extensions>=4.12.2", "azure-storage-blob>=12.15.0", - "azure-ai-agents>=1.0.0b1", + "azure-ai-agents>=1.0.0", ], python_requires=">=3.9", extras_require={ From 9ac1655e74f71a73068709786fb6fa37dee29b13 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 19 May 2025 12:57:27 -0700 Subject: [PATCH 2/8] Re-emit from TypeSpec (#41147) --- .../azure-ai-projects/apiview-properties.json | 40 +++++++++---------- .../azure/ai/projects/_client.py | 2 +- .../azure/ai/projects/aio/_client.py | 2 +- .../ai/projects/operations/_operations.py | 12 +++--- ...pletions_with_azure_openai_client_async.py | 1 + ...at_completions_with_azure_openai_client.py | 1 + sdk/ai/azure-ai-projects/tsp-location.yaml | 2 +- 7 files changed, 31 insertions(+), 29 deletions(-) diff --git a/sdk/ai/azure-ai-projects/apiview-properties.json b/sdk/ai/azure-ai-projects/apiview-properties.json index 29de53e42619..bc33249851dd 100644 --- a/sdk/ai/azure-ai-projects/apiview-properties.json +++ b/sdk/ai/azure-ai-projects/apiview-properties.json @@ -55,30 +55,30 @@ "azure.ai.projects.aio.operations.EvaluationsOperations.create": "Azure.AI.Projects.Evaluations.create", "azure.ai.projects.operations.EvaluationsOperations.create_agent_evaluation": "Azure.AI.Projects.Evaluations.createAgentEvaluation", "azure.ai.projects.aio.operations.EvaluationsOperations.create_agent_evaluation": "Azure.AI.Projects.Evaluations.createAgentEvaluation", - "azure.ai.projects.operations.DatasetsOperations.list_versions": "Azure.AI.Projects.ServicePatterns.Datasets.listVersions", - 
"azure.ai.projects.aio.operations.DatasetsOperations.list_versions": "Azure.AI.Projects.ServicePatterns.Datasets.listVersions", - "azure.ai.projects.operations.DatasetsOperations.list": "Azure.AI.Projects.ServicePatterns.Datasets.listLatest", - "azure.ai.projects.aio.operations.DatasetsOperations.list": "Azure.AI.Projects.ServicePatterns.Datasets.listLatest", - "azure.ai.projects.operations.DatasetsOperations.get": "Azure.AI.Projects.ServicePatterns.Datasets.getVersion", - "azure.ai.projects.aio.operations.DatasetsOperations.get": "Azure.AI.Projects.ServicePatterns.Datasets.getVersion", - "azure.ai.projects.operations.DatasetsOperations.delete": "Azure.AI.Projects.ServicePatterns.Datasets.deleteVersion", - "azure.ai.projects.aio.operations.DatasetsOperations.delete": "Azure.AI.Projects.ServicePatterns.Datasets.deleteVersion", - "azure.ai.projects.operations.DatasetsOperations.create_or_update": "Azure.AI.Projects.ServicePatterns.Datasets.createOrUpdateVersion", - "azure.ai.projects.aio.operations.DatasetsOperations.create_or_update": "Azure.AI.Projects.ServicePatterns.Datasets.createOrUpdateVersion", + "azure.ai.projects.operations.DatasetsOperations.list_versions": "Azure.AI.Projects.Datasets.listVersions", + "azure.ai.projects.aio.operations.DatasetsOperations.list_versions": "Azure.AI.Projects.Datasets.listVersions", + "azure.ai.projects.operations.DatasetsOperations.list": "Azure.AI.Projects.Datasets.listLatest", + "azure.ai.projects.aio.operations.DatasetsOperations.list": "Azure.AI.Projects.Datasets.listLatest", + "azure.ai.projects.operations.DatasetsOperations.get": "Azure.AI.Projects.Datasets.getVersion", + "azure.ai.projects.aio.operations.DatasetsOperations.get": "Azure.AI.Projects.Datasets.getVersion", + "azure.ai.projects.operations.DatasetsOperations.delete": "Azure.AI.Projects.Datasets.deleteVersion", + "azure.ai.projects.aio.operations.DatasetsOperations.delete": "Azure.AI.Projects.Datasets.deleteVersion", + 
"azure.ai.projects.operations.DatasetsOperations.create_or_update": "Azure.AI.Projects.Datasets.createOrUpdateVersion", + "azure.ai.projects.aio.operations.DatasetsOperations.create_or_update": "Azure.AI.Projects.Datasets.createOrUpdateVersion", "azure.ai.projects.operations.DatasetsOperations.pending_upload": "Azure.AI.Projects.Datasets.startPendingUploadVersion", "azure.ai.projects.aio.operations.DatasetsOperations.pending_upload": "Azure.AI.Projects.Datasets.startPendingUploadVersion", "azure.ai.projects.operations.DatasetsOperations.get_credentials": "Azure.AI.Projects.Datasets.getCredentials", "azure.ai.projects.aio.operations.DatasetsOperations.get_credentials": "Azure.AI.Projects.Datasets.getCredentials", - "azure.ai.projects.operations.IndexesOperations.list_versions": "Azure.AI.Projects.ServicePatterns.Indexes.listVersions", - "azure.ai.projects.aio.operations.IndexesOperations.list_versions": "Azure.AI.Projects.ServicePatterns.Indexes.listVersions", - "azure.ai.projects.operations.IndexesOperations.list": "Azure.AI.Projects.ServicePatterns.Indexes.listLatest", - "azure.ai.projects.aio.operations.IndexesOperations.list": "Azure.AI.Projects.ServicePatterns.Indexes.listLatest", - "azure.ai.projects.operations.IndexesOperations.get": "Azure.AI.Projects.ServicePatterns.Indexes.getVersion", - "azure.ai.projects.aio.operations.IndexesOperations.get": "Azure.AI.Projects.ServicePatterns.Indexes.getVersion", - "azure.ai.projects.operations.IndexesOperations.delete": "Azure.AI.Projects.ServicePatterns.Indexes.deleteVersion", - "azure.ai.projects.aio.operations.IndexesOperations.delete": "Azure.AI.Projects.ServicePatterns.Indexes.deleteVersion", - "azure.ai.projects.operations.IndexesOperations.create_or_update": "Azure.AI.Projects.ServicePatterns.Indexes.createOrUpdateVersion", - "azure.ai.projects.aio.operations.IndexesOperations.create_or_update": "Azure.AI.Projects.ServicePatterns.Indexes.createOrUpdateVersion", + 
"azure.ai.projects.operations.IndexesOperations.list_versions": "Azure.AI.Projects.Indexes.listVersions", + "azure.ai.projects.aio.operations.IndexesOperations.list_versions": "Azure.AI.Projects.Indexes.listVersions", + "azure.ai.projects.operations.IndexesOperations.list": "Azure.AI.Projects.Indexes.listLatest", + "azure.ai.projects.aio.operations.IndexesOperations.list": "Azure.AI.Projects.Indexes.listLatest", + "azure.ai.projects.operations.IndexesOperations.get": "Azure.AI.Projects.Indexes.getVersion", + "azure.ai.projects.aio.operations.IndexesOperations.get": "Azure.AI.Projects.Indexes.getVersion", + "azure.ai.projects.operations.IndexesOperations.delete": "Azure.AI.Projects.Indexes.deleteVersion", + "azure.ai.projects.aio.operations.IndexesOperations.delete": "Azure.AI.Projects.Indexes.deleteVersion", + "azure.ai.projects.operations.IndexesOperations.create_or_update": "Azure.AI.Projects.Indexes.createOrUpdateVersion", + "azure.ai.projects.aio.operations.IndexesOperations.create_or_update": "Azure.AI.Projects.Indexes.createOrUpdateVersion", "azure.ai.projects.operations.DeploymentsOperations.get": "Azure.AI.Projects.Deployments.get", "azure.ai.projects.aio.operations.DeploymentsOperations.get": "Azure.AI.Projects.Deployments.get", "azure.ai.projects.operations.DeploymentsOperations.list": "Azure.AI.Projects.Deployments.list", diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py index 217cb41b875a..4f134a04a6b9 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py @@ -29,7 +29,7 @@ from azure.core.credentials import TokenCredential -class AIProjectClient: # pylint: disable=too-many-instance-attributes +class AIProjectClient: """AIProjectClient. 
:ivar connections: ConnectionsOperations operations diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py index 7fc978f8c178..52a42cebdeab 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py @@ -29,7 +29,7 @@ from azure.core.credentials_async import AsyncTokenCredential -class AIProjectClient: # pylint: disable=too-many-instance-attributes +class AIProjectClient: """AIProjectClient. :ivar connections: ConnectionsOperations operations diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py index de607445c337..5bb2c2ef9a09 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py @@ -633,7 +633,7 @@ class ConnectionsOperations: :attr:`connections` attribute. """ - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") @@ -878,7 +878,7 @@ class EvaluationsOperations: :attr:`evaluations` attribute. """ - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") @@ -1295,7 +1295,7 @@ class DatasetsOperations: :attr:`datasets` attribute. 
""" - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") @@ -1951,7 +1951,7 @@ class IndexesOperations: :attr:`indexes` attribute. """ - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") @@ -2400,7 +2400,7 @@ class DeploymentsOperations: :attr:`deployments` attribute. """ - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") @@ -2583,7 +2583,7 @@ class RedTeamsOperations: :attr:`red_teams` attribute. 
""" - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: input_args = list(args) self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py index f04b27b79b94..cc8c953e7dff 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py +++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client.py index 2a43b20b8a32..981f177d7b28 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client.py @@ -1,3 +1,4 @@ +# pylint: disable=line-too-long,useless-suppression # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-projects/tsp-location.yaml b/sdk/ai/azure-ai-projects/tsp-location.yaml index 22421471ffc1..cf6504417959 100644 --- a/sdk/ai/azure-ai-projects/tsp-location.yaml +++ b/sdk/ai/azure-ai-projects/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Projects -commit: 07a63adf249cb199d5abd179448c92cd6e3446c8 +commit: c7f02183c56d9539034c3668a6e6cc8eeade55e9 repo: Azure/azure-rest-api-specs additionalDirectories: From 78e8978f7e3781cc35b7018ae3eeba2577516b6e Mon Sep 17 00:00:00 2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Tue, 20 May 2025 13:43:20 -0500 Subject: [PATCH 3/8] project enable telemetry fix for agents (#41223) * projects enable_telemetry fix for agents * updating changelog --- sdk/ai/azure-ai-projects/CHANGELOG.md | 2 ++ sdk/ai/azure-ai-projects/azure/ai/projects/_patch_telemetry.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index a85b4f48620a..2590c1e1a417 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -8,6 +8,8 @@ ### Bugs Fixed +* Fix for enable_telemetry to correctly instrument azure-ai-agents + ### Sample updates ## 1.0.0b11 (2025-05-15) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch_telemetry.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch_telemetry.py index 4d67af1a22f3..0f48f15959a7 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch_telemetry.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch_telemetry.py @@ -72,7 +72,7 @@ def enable_telemetry( ) try: - from azure.ai.agents.tracing import AIAgentsInstrumentor # pylint: disable=import-error,no-name-in-module + from azure.ai.agents.telemetry import AIAgentsInstrumentor # pylint: disable=import-error,no-name-in-module agents_instrumentor = AIAgentsInstrumentor() if not agents_instrumentor.is_instrumented(): From 
45650a305409360fc3db7eeaad7b0ac850e629fe Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 21 May 2025 13:21:07 -0700 Subject: [PATCH 4/8] Assortment of changes to Python Projects SDK (#41204) Update README.md to mention what REST APIs the client library is using, and provide link to REST API reference docs Make sure sync and async inference operations use the same method to calculate inference URL (remove duplicate) As a workaround for service bug, until there is a service fix, modify auto-emitted code to accept 200 as a success code for Datasets DELETE operation. The service should be returning 204 according to TypeSpec, since there is no response payload on success of the delete operation. --- sdk/ai/azure-ai-projects/README.md | 2 + .../aio/operations/_patch_inference_async.py | 52 ++---------- .../ai/projects/operations/_operations.py | 2 +- .../projects/operations/_patch_inference.py | 85 +++++++++---------- 4 files changed, 50 insertions(+), 91 deletions(-) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 413efc268855..a1973156c4f5 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -14,6 +14,8 @@ resources in your Azure AI Foundry Project. Use it to: * **Run Evaluations** to assess the performance of generative AI applications, using the `evaluations` operations. * **Enable OpenTelemetry tracing** using the `enable_telemetry` function. +The client library uses version `2025-05-15-preview` of the AI Foundry [data plane REST APIs](https://aka.ms/azsdk/azure-ai-projects/rest-api-reference). + > **Note:** There have been significant updates with the release of version 1.0.0b11, including breaking changes. please see new code snippets below and the samples folder. Agents are now implemented in a separate package `azure-ai-agents` which will get installed automatically when you install `azure-ai-projects`. 
You can continue using ".agents" diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_inference_async.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_inference_async.py index 22f3301844e6..dd9b9d7657d6 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_inference_async.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_inference_async.py @@ -18,6 +18,8 @@ EntraIDCredentials, ) from ...models._enums import ConnectionType +from ...operations._patch_inference import _get_aoai_inference_url +from ...operations._patch_inference import _get_inference_url if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports @@ -40,48 +42,6 @@ class InferenceOperations: def __init__(self, outer_instance: "azure.ai.projects.aio.AIProjectClient") -> None: # type: ignore[name-defined] self._outer_instance = outer_instance - # TODO: Use a common method for both the sync and async operations - @classmethod - def _get_inference_url(cls, input_url: str) -> str: - """ - Converts an input URL in the format: - https:/// - to: - https:///models - - :param input_url: The input endpoint URL used to construct AIProjectClient. - :type input_url: str - - :return: The endpoint URL required to construct inference clients from the azure-ai-inference package. - :rtype: str - """ - parsed = urlparse(input_url) - if parsed.scheme != "https" or not parsed.netloc: - raise ValueError("Invalid endpoint URL format. Must be an https URL with a host.") - new_url = f"https://{parsed.netloc}/models" - return new_url - - # TODO: Use a common method for both the sync and async operations - @classmethod - def _get_aoai_inference_url(cls, input_url: str) -> str: - """ - Converts an input URL in the format: - https:/// - to: - https:// - - :param input_url: The input endpoint URL used to construct AIProjectClient. 
- :type input_url: str - - :return: The endpoint URL required to construct an AzureOpenAI client from the `openai` package. - :rtype: str - """ - parsed = urlparse(input_url) - if parsed.scheme != "https" or not parsed.netloc: - raise ValueError("Invalid endpoint URL format. Must be an https URL with a host.") - new_url = f"https://{parsed.netloc}" - return new_url - @distributed_trace def get_chat_completions_client(self, **kwargs: Any) -> "ChatCompletionsClient": # type: ignore[name-defined] """Get an authenticated asynchronous ChatCompletionsClient (from the package azure-ai-inference) to use with @@ -107,7 +67,7 @@ def get_chat_completions_client(self, **kwargs: Any) -> "ChatCompletionsClient": "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" ) from e - endpoint = self._get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access + endpoint = _get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access client = ChatCompletionsClient( endpoint=endpoint, @@ -146,7 +106,7 @@ def get_embeddings_client(self, **kwargs: Any) -> "EmbeddingsClient": # type: i "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" ) from e - endpoint = self._get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access + endpoint = _get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access client = EmbeddingsClient( endpoint=endpoint, @@ -185,7 +145,7 @@ def get_image_embeddings_client(self, **kwargs: Any) -> "ImageEmbeddingsClient": "Azure AI Inference SDK is not installed. 
Please install it using 'pip install azure-ai-inference'" ) from e - endpoint = self._get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access + endpoint = _get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access client = ImageEmbeddingsClient( endpoint=endpoint, @@ -300,7 +260,7 @@ async def get_azure_openai_client( "azure.identity package not installed. Please install it using 'pip install azure.identity'" ) from e - azure_endpoint = self._get_aoai_inference_url( + azure_endpoint = _get_aoai_inference_url( self._outer_instance._config.endpoint # pylint: disable=protected-access ) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py index 5bb2c2ef9a09..74a910c924e4 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py @@ -1576,7 +1576,7 @@ def delete(self, name: str, version: str, **kwargs: Any) -> None: # pylint: dis response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [204, 200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_inference.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_inference.py index 4e5739d7114d..3618267cab84 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_inference.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_inference.py @@ -21,6 +21,43 @@ logger = logging.getLogger(__name__) +def _get_inference_url(input_url: str) -> str: + """ + Converts an input URL in the format: + https:/// + to: + https:///models + + :param input_url: The input endpoint URL used to construct AIProjectClient. 
+ :type input_url: str + + :return: The endpoint URL required to construct inference clients from the `azure-ai-inference` package. + :rtype: str + """ + parsed = urlparse(input_url) + if parsed.scheme != "https" or not parsed.netloc: + raise ValueError("Invalid endpoint URL format. Must be an https URL with a host.") + new_url = f"https://{parsed.netloc}/models" + return new_url + +def _get_aoai_inference_url(input_url: str) -> str: + """ + Converts an input URL in the format: + https:/// + to: + https:// + + :param input_url: The input endpoint URL used to construct AIProjectClient. + :type input_url: str + + :return: The endpoint URL required to construct an AzureOpenAI client from the `openai` package. + :rtype: str + """ + parsed = urlparse(input_url) + if parsed.scheme != "https" or not parsed.netloc: + raise ValueError("Invalid endpoint URL format. Must be an https URL with a host.") + new_url = f"https://{parsed.netloc}" + return new_url class InferenceOperations: """ @@ -35,46 +72,6 @@ class InferenceOperations: def __init__(self, outer_instance: "azure.ai.projects.AIProjectClient") -> None: # type: ignore[name-defined] self._outer_instance = outer_instance - @classmethod - def _get_inference_url(cls, input_url: str) -> str: - """ - Converts an input URL in the format: - https:/// - to: - https:///models - - :param input_url: The input endpoint URL used to construct AIProjectClient. - :type input_url: str - - :return: The endpoint URL required to construct inference clients from the `azure-ai-inference` package. - :rtype: str - """ - parsed = urlparse(input_url) - if parsed.scheme != "https" or not parsed.netloc: - raise ValueError("Invalid endpoint URL format. 
Must be an https URL with a host.") - new_url = f"https://{parsed.netloc}/models" - return new_url - - @classmethod - def _get_aoai_inference_url(cls, input_url: str) -> str: - """ - Converts an input URL in the format: - https:/// - to: - https:// - - :param input_url: The input endpoint URL used to construct AIProjectClient. - :type input_url: str - - :return: The endpoint URL required to construct an AzureOpenAI client from the `openai` package. - :rtype: str - """ - parsed = urlparse(input_url) - if parsed.scheme != "https" or not parsed.netloc: - raise ValueError("Invalid endpoint URL format. Must be an https URL with a host.") - new_url = f"https://{parsed.netloc}" - return new_url - @distributed_trace def get_chat_completions_client(self, **kwargs: Any) -> "ChatCompletionsClient": # type: ignore[name-defined] """Get an authenticated ChatCompletionsClient (from the package azure-ai-inference) to use with @@ -100,7 +97,7 @@ def get_chat_completions_client(self, **kwargs: Any) -> "ChatCompletionsClient": "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" ) from e - endpoint = self._get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access + endpoint = _get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access client = ChatCompletionsClient( endpoint=endpoint, @@ -139,7 +136,7 @@ def get_embeddings_client(self, **kwargs: Any) -> "EmbeddingsClient": # type: i "Azure AI Inference SDK is not installed. 
Please install it using 'pip install azure-ai-inference'" ) from e - endpoint = self._get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access + endpoint = _get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access client = EmbeddingsClient( endpoint=endpoint, @@ -178,7 +175,7 @@ def get_image_embeddings_client(self, **kwargs: Any) -> "ImageEmbeddingsClient": "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" ) from e - endpoint = self._get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access + endpoint = _get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access client = ImageEmbeddingsClient( endpoint=endpoint, @@ -291,7 +288,7 @@ def get_azure_openai_client( "azure.identity package not installed. Please install it using 'pip install azure.identity'" ) from e - azure_endpoint = self._get_aoai_inference_url( + azure_endpoint = _get_aoai_inference_url( self._outer_instance._config.endpoint # pylint: disable=protected-access ) From 26c7de8559140132d3de3e67d663bd75bf4a3137 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 6 Jun 2025 18:08:21 -0700 Subject: [PATCH 5/8] New automated tests for Azure AI Projects SDK (#41254) --- sdk/ai/azure-ai-projects/assets.json | 6 + .../ai/projects/aio/operations/_operations.py | 2 +- .../aio/operations/_patch_inference_async.py | 1 - .../projects/operations/_patch_inference.py | 3 + .../azure_ai_projects_tests.template.env | 15 ++ sdk/ai/azure-ai-projects/cspell.json | 1 + .../generated_tests/conftest.py | 35 --- .../test_ai_project_connections_operations.py | 22 -- ...ai_project_connections_operations_async.py | 23 -- .../test_ai_project_datasets_operations.py | 105 --------- ...st_ai_project_datasets_operations_async.py | 106 ---------- .../test_ai_project_deployments_operations.py | 33 --- 
...ai_project_deployments_operations_async.py | 34 --- ...i_project_evaluation_results_operations.py | 125 ----------- ...ect_evaluation_results_operations_async.py | 126 ----------- .../test_ai_project_evaluations_operations.py | 71 ------- ...ai_project_evaluations_operations_async.py | 72 ------- .../test_ai_project_indexes_operations.py | 87 -------- ...est_ai_project_indexes_operations_async.py | 88 -------- .../test_ai_project_red_teams_operations.py | 56 ----- ...t_ai_project_red_teams_operations_async.py | 57 ----- .../generated_tests/testpreparer.py | 26 --- .../generated_tests/testpreparer_async.py | 20 -- sdk/ai/azure-ai-projects/tests/conftest.py | 99 +++++++++ .../tests/connections/test_connections.py | 10 - .../tests/samples/test_samples.py | 3 + sdk/ai/azure-ai-projects/tests/test_agents.py | 45 ++++ .../tests/test_agents_async.py | 45 ++++ sdk/ai/azure-ai-projects/tests/test_base.py | 192 +++++++++++++++++ .../tests/test_connections.py | 64 ++++++ .../tests/test_connections_async.py | 64 ++++++ .../tests/test_data/datasets/data_file1.txt | 1 + .../tests/test_data/datasets/data_file2.txt | 1 + .../datasets/data_subfolder/data_file3.txt | 1 + .../datasets/data_subfolder/data_file4.txt | 1 + .../azure-ai-projects/tests/test_datasets.py | 194 +++++++++++++++++ .../tests/test_datasets_async.py | 199 ++++++++++++++++++ .../tests/test_deployments.py | 54 +++++ .../tests/test_deployments_async.py | 54 +++++ .../azure-ai-projects/tests/test_indexes.py | 85 ++++++++ .../tests/test_indexes_async.py | 85 ++++++++ .../azure-ai-projects/tests/test_inference.py | 100 +++++++++ .../tests/test_inference_async.py | 100 +++++++++ .../azure-ai-projects/tests/test_telemetry.py | 35 +++ .../tests/test_telemetry_async.py | 36 ++++ 45 files changed, 1484 insertions(+), 1098 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/assets.json create mode 100644 sdk/ai/azure-ai-projects/azure_ai_projects_tests.template.env delete mode 100644 
sdk/ai/azure-ai-projects/generated_tests/conftest.py delete mode 100644 sdk/ai/azure-ai-projects/generated_tests/test_ai_project_connections_operations.py delete mode 100644 sdk/ai/azure-ai-projects/generated_tests/test_ai_project_connections_operations_async.py delete mode 100644 sdk/ai/azure-ai-projects/generated_tests/test_ai_project_datasets_operations.py delete mode 100644 sdk/ai/azure-ai-projects/generated_tests/test_ai_project_datasets_operations_async.py delete mode 100644 sdk/ai/azure-ai-projects/generated_tests/test_ai_project_deployments_operations.py delete mode 100644 sdk/ai/azure-ai-projects/generated_tests/test_ai_project_deployments_operations_async.py delete mode 100644 sdk/ai/azure-ai-projects/generated_tests/test_ai_project_evaluation_results_operations.py delete mode 100644 sdk/ai/azure-ai-projects/generated_tests/test_ai_project_evaluation_results_operations_async.py delete mode 100644 sdk/ai/azure-ai-projects/generated_tests/test_ai_project_evaluations_operations.py delete mode 100644 sdk/ai/azure-ai-projects/generated_tests/test_ai_project_evaluations_operations_async.py delete mode 100644 sdk/ai/azure-ai-projects/generated_tests/test_ai_project_indexes_operations.py delete mode 100644 sdk/ai/azure-ai-projects/generated_tests/test_ai_project_indexes_operations_async.py delete mode 100644 sdk/ai/azure-ai-projects/generated_tests/test_ai_project_red_teams_operations.py delete mode 100644 sdk/ai/azure-ai-projects/generated_tests/test_ai_project_red_teams_operations_async.py delete mode 100644 sdk/ai/azure-ai-projects/generated_tests/testpreparer.py delete mode 100644 sdk/ai/azure-ai-projects/generated_tests/testpreparer_async.py create mode 100644 sdk/ai/azure-ai-projects/tests/conftest.py delete mode 100644 sdk/ai/azure-ai-projects/tests/connections/test_connections.py create mode 100644 sdk/ai/azure-ai-projects/tests/test_agents.py create mode 100644 sdk/ai/azure-ai-projects/tests/test_agents_async.py create mode 100644 
sdk/ai/azure-ai-projects/tests/test_base.py create mode 100644 sdk/ai/azure-ai-projects/tests/test_connections.py create mode 100644 sdk/ai/azure-ai-projects/tests/test_connections_async.py create mode 100644 sdk/ai/azure-ai-projects/tests/test_data/datasets/data_file1.txt create mode 100644 sdk/ai/azure-ai-projects/tests/test_data/datasets/data_file2.txt create mode 100644 sdk/ai/azure-ai-projects/tests/test_data/datasets/data_subfolder/data_file3.txt create mode 100644 sdk/ai/azure-ai-projects/tests/test_data/datasets/data_subfolder/data_file4.txt create mode 100644 sdk/ai/azure-ai-projects/tests/test_datasets.py create mode 100644 sdk/ai/azure-ai-projects/tests/test_datasets_async.py create mode 100644 sdk/ai/azure-ai-projects/tests/test_deployments.py create mode 100644 sdk/ai/azure-ai-projects/tests/test_deployments_async.py create mode 100644 sdk/ai/azure-ai-projects/tests/test_indexes.py create mode 100644 sdk/ai/azure-ai-projects/tests/test_indexes_async.py create mode 100644 sdk/ai/azure-ai-projects/tests/test_inference.py create mode 100644 sdk/ai/azure-ai-projects/tests/test_inference_async.py create mode 100644 sdk/ai/azure-ai-projects/tests/test_telemetry.py create mode 100644 sdk/ai/azure-ai-projects/tests/test_telemetry_async.py diff --git a/sdk/ai/azure-ai-projects/assets.json b/sdk/ai/azure-ai-projects/assets.json new file mode 100644 index 000000000000..752d2238c55f --- /dev/null +++ b/sdk/ai/azure-ai-projects/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "python", + "TagPrefix": "python/ai/azure-ai-projects", + "Tag": "python/ai/azure-ai-projects_25a915bc4c" +} diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py index 45f5d3d15a03..3fc29230c783 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py +++ 
b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py @@ -1022,7 +1022,7 @@ async def delete(self, name: str, version: str, **kwargs: Any) -> None: response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [204, 200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_inference_async.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_inference_async.py index dd9b9d7657d6..974ec5855ca6 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_inference_async.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch_inference_async.py @@ -9,7 +9,6 @@ """ import logging from typing import Optional, TYPE_CHECKING, Any -from urllib.parse import urlparse from azure.core.tracing.decorator_async import distributed_trace_async from azure.core.tracing.decorator import distributed_trace diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_inference.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_inference.py index 3618267cab84..a3f72eaf8f3f 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_inference.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch_inference.py @@ -21,6 +21,7 @@ logger = logging.getLogger(__name__) + def _get_inference_url(input_url: str) -> str: """ Converts an input URL in the format: @@ -40,6 +41,7 @@ def _get_inference_url(input_url: str) -> str: new_url = f"https://{parsed.netloc}/models" return new_url + def _get_aoai_inference_url(input_url: str) -> str: """ Converts an input URL in the format: @@ -59,6 +61,7 @@ def _get_aoai_inference_url(input_url: str) -> str: new_url = f"https://{parsed.netloc}" return new_url + class InferenceOperations: """ .. 
warning:: diff --git a/sdk/ai/azure-ai-projects/azure_ai_projects_tests.template.env b/sdk/ai/azure-ai-projects/azure_ai_projects_tests.template.env new file mode 100644 index 000000000000..c34a3dda4a7f --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure_ai_projects_tests.template.env @@ -0,0 +1,15 @@ +# +# Environment variables that define secrets required for running tests. +# +# All values should be empty by default in this template. +# +# To run tests locally on your device: +# 1. Rename the file to azure_ai_projects_tests.env +# 2. Fill in the values for the environment variables below (do not commit these changes to the repository!) +# 3. Run the test (`pytest`) +# + +# Project endpoint has the format: +# `https://.services.ai.azure.com/api/projects/` +AZURE_AI_PROJECTS_TESTS_PROJECT_ENDPOINT= + diff --git a/sdk/ai/azure-ai-projects/cspell.json b/sdk/ai/azure-ai-projects/cspell.json index 71bd8a696481..ed393add5d13 100644 --- a/sdk/ai/azure-ai-projects/cspell.json +++ b/sdk/ai/azure-ai-projects/cspell.json @@ -12,6 +12,7 @@ "getconnectionwithcredentials", "quantitive", "balapvbyostoragecanary", + "fspath", ], "ignorePaths": [ ] diff --git a/sdk/ai/azure-ai-projects/generated_tests/conftest.py b/sdk/ai/azure-ai-projects/generated_tests/conftest.py deleted file mode 100644 index dd8e527abab1..000000000000 --- a/sdk/ai/azure-ai-projects/generated_tests/conftest.py +++ /dev/null @@ -1,35 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import os -import pytest -from dotenv import load_dotenv -from devtools_testutils import ( - test_proxy, - add_general_regex_sanitizer, - add_body_key_sanitizer, - add_header_regex_sanitizer, -) - -load_dotenv() - - -# For security, please avoid record sensitive identity information in recordings -@pytest.fixture(scope="session", autouse=True) -def add_sanitizers(test_proxy): - aiproject_subscription_id = os.environ.get("AIPROJECT_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") - aiproject_tenant_id = os.environ.get("AIPROJECT_TENANT_ID", "00000000-0000-0000-0000-000000000000") - aiproject_client_id = os.environ.get("AIPROJECT_CLIENT_ID", "00000000-0000-0000-0000-000000000000") - aiproject_client_secret = os.environ.get("AIPROJECT_CLIENT_SECRET", "00000000-0000-0000-0000-000000000000") - add_general_regex_sanitizer(regex=aiproject_subscription_id, value="00000000-0000-0000-0000-000000000000") - add_general_regex_sanitizer(regex=aiproject_tenant_id, value="00000000-0000-0000-0000-000000000000") - add_general_regex_sanitizer(regex=aiproject_client_id, value="00000000-0000-0000-0000-000000000000") - add_general_regex_sanitizer(regex=aiproject_client_secret, value="00000000-0000-0000-0000-000000000000") - - add_header_regex_sanitizer(key="Set-Cookie", value="[set-cookie;]") - add_header_regex_sanitizer(key="Cookie", value="cookie;") - add_body_key_sanitizer(json_path="$..access_token", value="access_token") diff --git a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_connections_operations.py b/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_connections_operations.py deleted file mode 100644 index d93e0e240cca..000000000000 --- a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_connections_operations.py +++ /dev/null @@ -1,22 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft 
Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import pytest -from devtools_testutils import recorded_by_proxy -from testpreparer import AIProjectClientTestBase, AIProjectPreparer - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAIProjectConnectionsOperations(AIProjectClientTestBase): - @AIProjectPreparer() - @recorded_by_proxy - def test_connections_list(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.connections.list() - result = [r for r in response] - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_connections_operations_async.py b/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_connections_operations_async.py deleted file mode 100644 index cc08499be0ee..000000000000 --- a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_connections_operations_async.py +++ /dev/null @@ -1,23 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import pytest -from devtools_testutils.aio import recorded_by_proxy_async -from testpreparer import AIProjectPreparer -from testpreparer_async import AIProjectClientTestBaseAsync - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAIProjectConnectionsOperationsAsync(AIProjectClientTestBaseAsync): - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_connections_list(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = client.connections.list() - result = [r async for r in response] - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_datasets_operations.py b/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_datasets_operations.py deleted file mode 100644 index bdd6a44c053b..000000000000 --- a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_datasets_operations.py +++ /dev/null @@ -1,105 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import pytest -from devtools_testutils import recorded_by_proxy -from testpreparer import AIProjectClientTestBase, AIProjectPreparer - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAIProjectDatasetsOperations(AIProjectClientTestBase): - @AIProjectPreparer() - @recorded_by_proxy - def test_datasets_list_versions(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.datasets.list_versions( - name="str", - ) - result = [r for r in response] - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy - def test_datasets_list(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.datasets.list() - result = [r for r in response] - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy - def test_datasets_get(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.datasets.get( - name="str", - version="str", - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy - def test_datasets_delete(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.datasets.delete( - name="str", - version="str", - ) - - # please add some check logic here by yourself - # ... 
- - @AIProjectPreparer() - @recorded_by_proxy - def test_datasets_create_or_update(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.datasets.create_or_update( - name="str", - version="str", - body={ - "dataUri": "str", - "name": "str", - "type": "uri_file", - "version": "str", - "connectionName": "str", - "description": "str", - "id": "str", - "isReference": bool, - "tags": {"str": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy - def test_datasets_pending_upload(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.datasets.pending_upload( - name="str", - version="str", - body={"pendingUploadType": "str", "connectionName": "str", "pendingUploadId": "str"}, - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy - def test_datasets_get_credentials(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.datasets.get_credentials( - name="str", - version="str", - ) - - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_datasets_operations_async.py b/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_datasets_operations_async.py deleted file mode 100644 index 6db1ecba7504..000000000000 --- a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_datasets_operations_async.py +++ /dev/null @@ -1,106 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import pytest -from devtools_testutils.aio import recorded_by_proxy_async -from testpreparer import AIProjectPreparer -from testpreparer_async import AIProjectClientTestBaseAsync - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAIProjectDatasetsOperationsAsync(AIProjectClientTestBaseAsync): - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_datasets_list_versions(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = client.datasets.list_versions( - name="str", - ) - result = [r async for r in response] - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_datasets_list(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = client.datasets.list() - result = [r async for r in response] - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_datasets_get(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.datasets.get( - name="str", - version="str", - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_datasets_delete(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.datasets.delete( - name="str", - version="str", - ) - - # please add some check logic here by yourself - # ... 
- - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_datasets_create_or_update(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.datasets.create_or_update( - name="str", - version="str", - body={ - "dataUri": "str", - "name": "str", - "type": "uri_file", - "version": "str", - "connectionName": "str", - "description": "str", - "id": "str", - "isReference": bool, - "tags": {"str": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_datasets_pending_upload(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.datasets.pending_upload( - name="str", - version="str", - body={"pendingUploadType": "str", "connectionName": "str", "pendingUploadId": "str"}, - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_datasets_get_credentials(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.datasets.get_credentials( - name="str", - version="str", - ) - - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_deployments_operations.py b/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_deployments_operations.py deleted file mode 100644 index b0e1e586d866..000000000000 --- a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_deployments_operations.py +++ /dev/null @@ -1,33 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. 
-# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import pytest -from devtools_testutils import recorded_by_proxy -from testpreparer import AIProjectClientTestBase, AIProjectPreparer - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAIProjectDeploymentsOperations(AIProjectClientTestBase): - @AIProjectPreparer() - @recorded_by_proxy - def test_deployments_get(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.deployments.get( - name="str", - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy - def test_deployments_list(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.deployments.list() - result = [r for r in response] - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_deployments_operations_async.py b/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_deployments_operations_async.py deleted file mode 100644 index 3958d83eab29..000000000000 --- a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_deployments_operations_async.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import pytest -from devtools_testutils.aio import recorded_by_proxy_async -from testpreparer import AIProjectPreparer -from testpreparer_async import AIProjectClientTestBaseAsync - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAIProjectDeploymentsOperationsAsync(AIProjectClientTestBaseAsync): - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_deployments_get(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.deployments.get( - name="str", - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_deployments_list(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = client.deployments.list() - result = [r async for r in response] - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_evaluation_results_operations.py b/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_evaluation_results_operations.py deleted file mode 100644 index b68d4d88d17a..000000000000 --- a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_evaluation_results_operations.py +++ /dev/null @@ -1,125 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import pytest -from devtools_testutils import recorded_by_proxy -from testpreparer import AIProjectClientTestBase, AIProjectPreparer - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAIProjectEvaluationResultsOperations(AIProjectClientTestBase): - @AIProjectPreparer() - @recorded_by_proxy - def test_evaluation_results_list_versions(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.evaluation_results.list_versions( - name="str", - ) - result = [r for r in response] - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy - def test_evaluation_results_list_latest(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.evaluation_results.list_latest() - result = [r for r in response] - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy - def test_evaluation_results_get_version(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.evaluation_results.get_version( - name="str", - version="str", - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy - def test_evaluation_results_delete_version(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.evaluation_results.delete_version( - name="str", - version="str", - ) - - # please add some check logic here by yourself - # ... 
- - @AIProjectPreparer() - @recorded_by_proxy - def test_evaluation_results_create(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.evaluation_results.create( - name="str", - body={ - "name": "str", - "version": "str", - "BlobUri": "str", - "DatasetFamily": "str", - "DatasetName": "str", - "Metrics": {"str": 0.0}, - "ModelAssetId": "str", - "ModelName": "str", - "ModelVersion": "str", - "ResultType": "str", - "description": "str", - "id": "str", - "stage": "str", - "tags": {"str": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy - def test_evaluation_results_create_version(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.evaluation_results.create_version( - name="str", - version="str", - body={ - "name": "str", - "version": "str", - "BlobUri": "str", - "DatasetFamily": "str", - "DatasetName": "str", - "Metrics": {"str": 0.0}, - "ModelAssetId": "str", - "ModelName": "str", - "ModelVersion": "str", - "ResultType": "str", - "description": "str", - "id": "str", - "stage": "str", - "tags": {"str": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy - def test_evaluation_results_start_pending_upload(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.evaluation_results.start_pending_upload( - name="str", - version="str", - body={"pendingUploadType": "str", "connectionName": "str", "pendingUploadId": "str"}, - ) - - # please add some check logic here by yourself - # ... 
diff --git a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_evaluation_results_operations_async.py b/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_evaluation_results_operations_async.py deleted file mode 100644 index b90df81464cd..000000000000 --- a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_evaluation_results_operations_async.py +++ /dev/null @@ -1,126 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import pytest -from devtools_testutils.aio import recorded_by_proxy_async -from testpreparer import AIProjectPreparer -from testpreparer_async import AIProjectClientTestBaseAsync - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAIProjectEvaluationResultsOperationsAsync(AIProjectClientTestBaseAsync): - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_evaluation_results_list_versions(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = client.evaluation_results.list_versions( - name="str", - ) - result = [r async for r in response] - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_evaluation_results_list_latest(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = client.evaluation_results.list_latest() - result = [r async for r in response] - # please add some check logic here by yourself - # ... 
- - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_evaluation_results_get_version(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.evaluation_results.get_version( - name="str", - version="str", - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_evaluation_results_delete_version(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.evaluation_results.delete_version( - name="str", - version="str", - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_evaluation_results_create(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.evaluation_results.create( - name="str", - body={ - "name": "str", - "version": "str", - "BlobUri": "str", - "DatasetFamily": "str", - "DatasetName": "str", - "Metrics": {"str": 0.0}, - "ModelAssetId": "str", - "ModelName": "str", - "ModelVersion": "str", - "ResultType": "str", - "description": "str", - "id": "str", - "stage": "str", - "tags": {"str": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... 
- - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_evaluation_results_create_version(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.evaluation_results.create_version( - name="str", - version="str", - body={ - "name": "str", - "version": "str", - "BlobUri": "str", - "DatasetFamily": "str", - "DatasetName": "str", - "Metrics": {"str": 0.0}, - "ModelAssetId": "str", - "ModelName": "str", - "ModelVersion": "str", - "ResultType": "str", - "description": "str", - "id": "str", - "stage": "str", - "tags": {"str": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_evaluation_results_start_pending_upload(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.evaluation_results.start_pending_upload( - name="str", - version="str", - body={"pendingUploadType": "str", "connectionName": "str", "pendingUploadId": "str"}, - ) - - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_evaluations_operations.py b/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_evaluations_operations.py deleted file mode 100644 index e07aa0e02b47..000000000000 --- a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_evaluations_operations.py +++ /dev/null @@ -1,71 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import pytest -from devtools_testutils import recorded_by_proxy -from testpreparer import AIProjectClientTestBase, AIProjectPreparer - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAIProjectEvaluationsOperations(AIProjectClientTestBase): - @AIProjectPreparer() - @recorded_by_proxy - def test_evaluations_get(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.evaluations.get( - name="str", - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy - def test_evaluations_list(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.evaluations.list() - result = [r for r in response] - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy - def test_evaluations_create(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.evaluations.create( - evaluation={ - "data": "input_data", - "evaluators": {"str": {"id": "str", "dataMapping": {"str": "str"}, "initParams": {"str": {}}}}, - "id": "str", - "description": "str", - "displayName": "str", - "properties": {"str": "str"}, - "status": "str", - "tags": {"str": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... 
- - @AIProjectPreparer() - @recorded_by_proxy - def test_evaluations_create_agent_evaluation(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.evaluations.create_agent_evaluation( - evaluation={ - "appInsightsConnectionString": "str", - "evaluators": {"str": {"id": "str", "dataMapping": {"str": "str"}, "initParams": {"str": {}}}}, - "runId": "str", - "redactionConfiguration": {"redactScoreProperties": bool}, - "samplingConfiguration": {"maxRequestRate": 0.0, "name": "str", "samplingPercent": 0.0}, - "threadId": "str", - }, - ) - - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_evaluations_operations_async.py b/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_evaluations_operations_async.py deleted file mode 100644 index 07f22bd9e58a..000000000000 --- a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_evaluations_operations_async.py +++ /dev/null @@ -1,72 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import pytest -from devtools_testutils.aio import recorded_by_proxy_async -from testpreparer import AIProjectPreparer -from testpreparer_async import AIProjectClientTestBaseAsync - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAIProjectEvaluationsOperationsAsync(AIProjectClientTestBaseAsync): - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_evaluations_get(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.evaluations.get( - name="str", - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_evaluations_list(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = client.evaluations.list() - result = [r async for r in response] - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_evaluations_create(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.evaluations.create( - evaluation={ - "data": "input_data", - "evaluators": {"str": {"id": "str", "dataMapping": {"str": "str"}, "initParams": {"str": {}}}}, - "id": "str", - "description": "str", - "displayName": "str", - "properties": {"str": "str"}, - "status": "str", - "tags": {"str": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... 
- - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_evaluations_create_agent_evaluation(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.evaluations.create_agent_evaluation( - evaluation={ - "appInsightsConnectionString": "str", - "evaluators": {"str": {"id": "str", "dataMapping": {"str": "str"}, "initParams": {"str": {}}}}, - "runId": "str", - "redactionConfiguration": {"redactScoreProperties": bool}, - "samplingConfiguration": {"maxRequestRate": 0.0, "name": "str", "samplingPercent": 0.0}, - "threadId": "str", - }, - ) - - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_indexes_operations.py b/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_indexes_operations.py deleted file mode 100644 index 82f33d5188bd..000000000000 --- a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_indexes_operations.py +++ /dev/null @@ -1,87 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import pytest -from devtools_testutils import recorded_by_proxy -from testpreparer import AIProjectClientTestBase, AIProjectPreparer - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAIProjectIndexesOperations(AIProjectClientTestBase): - @AIProjectPreparer() - @recorded_by_proxy - def test_indexes_list_versions(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.indexes.list_versions( - name="str", - ) - result = [r for r in response] - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy - def test_indexes_list(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.indexes.list() - result = [r for r in response] - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy - def test_indexes_get(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.indexes.get( - name="str", - version="str", - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy - def test_indexes_delete(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.indexes.delete( - name="str", - version="str", - ) - - # please add some check logic here by yourself - # ... 
- - @AIProjectPreparer() - @recorded_by_proxy - def test_indexes_create_or_update(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.indexes.create_or_update( - name="str", - version="str", - body={ - "connectionName": "str", - "indexName": "str", - "name": "str", - "type": "AzureSearch", - "version": "str", - "description": "str", - "fieldMapping": { - "contentFields": ["str"], - "filepathField": "str", - "metadataFields": ["str"], - "titleField": "str", - "urlField": "str", - "vectorFields": ["str"], - }, - "id": "str", - "tags": {"str": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_indexes_operations_async.py b/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_indexes_operations_async.py deleted file mode 100644 index 53812b80aa1d..000000000000 --- a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_indexes_operations_async.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import pytest -from devtools_testutils.aio import recorded_by_proxy_async -from testpreparer import AIProjectPreparer -from testpreparer_async import AIProjectClientTestBaseAsync - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAIProjectIndexesOperationsAsync(AIProjectClientTestBaseAsync): - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_indexes_list_versions(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = client.indexes.list_versions( - name="str", - ) - result = [r async for r in response] - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_indexes_list(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = client.indexes.list() - result = [r async for r in response] - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_indexes_get(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.indexes.get( - name="str", - version="str", - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_indexes_delete(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.indexes.delete( - name="str", - version="str", - ) - - # please add some check logic here by yourself - # ... 
- - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_indexes_create_or_update(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.indexes.create_or_update( - name="str", - version="str", - body={ - "connectionName": "str", - "indexName": "str", - "name": "str", - "type": "AzureSearch", - "version": "str", - "description": "str", - "fieldMapping": { - "contentFields": ["str"], - "filepathField": "str", - "metadataFields": ["str"], - "titleField": "str", - "urlField": "str", - "vectorFields": ["str"], - }, - "id": "str", - "tags": {"str": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_red_teams_operations.py b/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_red_teams_operations.py deleted file mode 100644 index 8cb4893cbb4c..000000000000 --- a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_red_teams_operations.py +++ /dev/null @@ -1,56 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import pytest -from devtools_testutils import recorded_by_proxy -from testpreparer import AIProjectClientTestBase, AIProjectPreparer - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAIProjectRedTeamsOperations(AIProjectClientTestBase): - @AIProjectPreparer() - @recorded_by_proxy - def test_red_teams_get(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.red_teams.get( - name="str", - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy - def test_red_teams_list(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.red_teams.list() - result = [r for r in response] - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy - def test_red_teams_create(self, aiproject_endpoint): - client = self.create_client(endpoint=aiproject_endpoint) - response = client.red_teams.create( - red_team={ - "id": "str", - "target": "target_config", - "applicationScenario": "str", - "attackStrategies": ["str"], - "displayName": "str", - "numTurns": 0, - "properties": {"str": "str"}, - "riskCategories": ["str"], - "simulationOnly": bool, - "status": "str", - "tags": {"str": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_red_teams_operations_async.py b/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_red_teams_operations_async.py deleted file mode 100644 index dc93a4d14181..000000000000 --- a/sdk/ai/azure-ai-projects/generated_tests/test_ai_project_red_teams_operations_async.py +++ /dev/null @@ -1,57 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. 
All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import pytest -from devtools_testutils.aio import recorded_by_proxy_async -from testpreparer import AIProjectPreparer -from testpreparer_async import AIProjectClientTestBaseAsync - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAIProjectRedTeamsOperationsAsync(AIProjectClientTestBaseAsync): - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_red_teams_get(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.red_teams.get( - name="str", - ) - - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_red_teams_list(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = client.red_teams.list() - result = [r async for r in response] - # please add some check logic here by yourself - # ... - - @AIProjectPreparer() - @recorded_by_proxy_async - async def test_red_teams_create(self, aiproject_endpoint): - client = self.create_async_client(endpoint=aiproject_endpoint) - response = await client.red_teams.create( - red_team={ - "id": "str", - "target": "target_config", - "applicationScenario": "str", - "attackStrategies": ["str"], - "displayName": "str", - "numTurns": 0, - "properties": {"str": "str"}, - "riskCategories": ["str"], - "simulationOnly": bool, - "status": "str", - "tags": {"str": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... 
diff --git a/sdk/ai/azure-ai-projects/generated_tests/testpreparer.py b/sdk/ai/azure-ai-projects/generated_tests/testpreparer.py deleted file mode 100644 index 69c9aaa6e8d1..000000000000 --- a/sdk/ai/azure-ai-projects/generated_tests/testpreparer.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from azure.ai.projects import AIProjectClient -from devtools_testutils import AzureRecordedTestCase, PowerShellPreparer -import functools - - -class AIProjectClientTestBase(AzureRecordedTestCase): - - def create_client(self, endpoint): - credential = self.get_credential(AIProjectClient) - return self.create_client_from_credential( - AIProjectClient, - credential=credential, - endpoint=endpoint, - ) - - -AIProjectPreparer = functools.partial( - PowerShellPreparer, "aiproject", aiproject_endpoint="https://fake_aiproject_endpoint.com" -) diff --git a/sdk/ai/azure-ai-projects/generated_tests/testpreparer_async.py b/sdk/ai/azure-ai-projects/generated_tests/testpreparer_async.py deleted file mode 100644 index 56353f9fdd65..000000000000 --- a/sdk/ai/azure-ai-projects/generated_tests/testpreparer_async.py +++ /dev/null @@ -1,20 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -from azure.ai.projects.aio import AIProjectClient -from devtools_testutils import AzureRecordedTestCase - - -class AIProjectClientTestBaseAsync(AzureRecordedTestCase): - - def create_async_client(self, endpoint): - credential = self.get_credential(AIProjectClient, is_async=True) - return self.create_client_from_credential( - AIProjectClient, - credential=credential, - endpoint=endpoint, - ) diff --git a/sdk/ai/azure-ai-projects/tests/conftest.py b/sdk/ai/azure-ai-projects/tests/conftest.py new file mode 100644 index 000000000000..737e4f85c400 --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/conftest.py @@ -0,0 +1,99 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +import os +import pytest +from dotenv import load_dotenv, find_dotenv +from devtools_testutils import remove_batch_sanitizers, add_general_regex_sanitizer, add_body_key_sanitizer + +if not load_dotenv(find_dotenv(filename="azure_ai_projects_tests.env"), override=True): + print( + "Failed to apply environment variables for azure-ai-projects tests. This is expected if running in ADO pipeline." 
+ ) + + +def pytest_collection_modifyitems(items): + if os.environ.get("AZURE_TEST_RUN_LIVE") == "true": + return + for item in items: + if "tests\\evaluation" in item.fspath.strpath or "tests/evaluation" in item.fspath.strpath: + item.add_marker( + pytest.mark.skip( + reason="Skip running Evaluations tests in PR pipeline until we can sort out the failures related to AI Foundry project settings" + ) + ) + + +class SanitizedValues: + SUBSCRIPTION_ID = "00000000-0000-0000-0000-000000000000" + RESOURCE_GROUP_NAME = "sanitized-resource-group-name" + ACCOUNT_NAME = "sanitized-account-name" + PROJECT_NAME = "sanitized-project-name" + COMPONENT_NAME = "sanitized-component-name" + + +@pytest.fixture(scope="session") +def sanitized_values(): + return { + "subscription_id": f"{SanitizedValues.SUBSCRIPTION_ID}", + "resource_group_name": f"{SanitizedValues.RESOURCE_GROUP_NAME}", + "project_name": f"{SanitizedValues.PROJECT_NAME}", + "account_name": f"{SanitizedValues.ACCOUNT_NAME}", + "component_name": f"{SanitizedValues.COMPONENT_NAME}", + } + + +# From: https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md#start-the-test-proxy-server +# autouse=True will trigger this fixture on each pytest run, even if it's not explicitly used by a test method +# test_proxy auto-starts the test proxy +# patch_sleep and patch_async_sleep streamline tests by disabling wait times during LRO polling +@pytest.fixture(scope="session", autouse=True) +def start_proxy(test_proxy): + return + + +@pytest.fixture(scope="session", autouse=True) +def add_sanitizers(test_proxy, sanitized_values): + + def sanitize_url_paths(): + + add_general_regex_sanitizer( + regex=r"/subscriptions/([-\w\._\(\)]+)", + value=sanitized_values["subscription_id"], + group_for_replace="1", + ) + + add_general_regex_sanitizer( + regex=r"/resource[gG]roups/([-\w\._\(\)]+)", + value=sanitized_values["resource_group_name"], + group_for_replace="1", + ) + + add_general_regex_sanitizer( + 
regex=r"/projects/([-\w\._\(\)]+)", value=sanitized_values["project_name"], group_for_replace="1" + ) + + add_general_regex_sanitizer( + regex=r"/accounts/([-\w\._\(\)]+)", value=sanitized_values["account_name"], group_for_replace="1" + ) + + add_general_regex_sanitizer( + regex=r"/components/([-\w\._\(\)]+)", value=sanitized_values["component_name"], group_for_replace="1" + ) + + sanitize_url_paths() + + # Sanitize API key from service response (this includes Application Insights connection string) + add_body_key_sanitizer(json_path="credentials.key", value="Sanitized-api-key") + + # Sanitize SAS URI from Datasets get credential response + add_body_key_sanitizer(json_path="blobReference.credential.sasUri", value="Sanitized-sas-uri") + add_body_key_sanitizer(json_path="blobReferenceForConsumption.credential.sasUri", value="Sanitized-sas-uri") + + # Remove the following sanitizers since certain fields are needed in tests and are non-sensitive: + # - AZSDK3493: $..name + # - AZSDK3430: $..id + remove_batch_sanitizers(["AZSDK3493"]) + remove_batch_sanitizers(["AZSDK3430"]) diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py deleted file mode 100644 index f1e4612563a8..000000000000 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py +++ /dev/null @@ -1,10 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------ - - -class TestConnections: - - def test_connections_get(self, **kwargs): - pass diff --git a/sdk/ai/azure-ai-projects/tests/samples/test_samples.py b/sdk/ai/azure-ai-projects/tests/samples/test_samples.py index 8b12a8ea9c6f..d1d8b69e6228 100644 --- a/sdk/ai/azure-ai-projects/tests/samples/test_samples.py +++ b/sdk/ai/azure-ai-projects/tests/samples/test_samples.py @@ -11,6 +11,9 @@ class TestSamples: + _samples_folder_path: str + _results: dict[str, tuple[bool, str]] + """ Test class for running all samples in the `/sdk/ai/azure-ai-projects/samples` folder. diff --git a/sdk/ai/azure-ai-projects/tests/test_agents.py b/sdk/ai/azure-ai-projects/tests/test_agents.py new file mode 100644 index 000000000000..8e72c929481d --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/test_agents.py @@ -0,0 +1,45 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +from azure.ai.projects import AIProjectClient +from test_base import TestBase, servicePreparer +from devtools_testutils import recorded_by_proxy + +# NOTE: This is just a simple test to verify that the agent can be created and deleted using AIProjectClient. 
+# You can find comprehensive Agent functionally tests here: +# https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-agents/tests + + +class TestAgents(TestBase): + + # To run this test, use the following command in the \sdk\ai\azure-ai-projects folder: + # cls & pytest tests\test_agents.py::TestAgents::test_agents -s + @servicePreparer() + @recorded_by_proxy + def test_agents(self, **kwargs): + + endpoint = kwargs.pop("azure_ai_projects_tests_project_endpoint") + print("\n=====> Endpoint:", endpoint) + + model_deployment_name = self.test_agents_params["model_deployment_name"] + agent_name = self.test_agents_params["agent_name"] + + with AIProjectClient( + endpoint=endpoint, + credential=self.get_credential(AIProjectClient, is_async=False), + ) as project_client: + + print("[test_agents] Create agent") + agent = project_client.agents.create_agent( + model=model_deployment_name, + name=agent_name, + instructions="You are helpful agent", + ) + assert agent.id + assert agent.model == model_deployment_name + assert agent.name == agent_name + + print("[test_agents] Delete agent") + project_client.agents.delete_agent(agent.id) diff --git a/sdk/ai/azure-ai-projects/tests/test_agents_async.py b/sdk/ai/azure-ai-projects/tests/test_agents_async.py new file mode 100644 index 000000000000..76c437c9cd49 --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/test_agents_async.py @@ -0,0 +1,45 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +from azure.ai.projects.aio import AIProjectClient +from test_base import TestBase, servicePreparer +from devtools_testutils.aio import recorded_by_proxy_async + +# NOTE: This is just a simple test to verify that the agent can be created and deleted using AIProjectClient. 
+# You can find comprehensive Agent functionally tests here: +# https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-agents/tests + + +class TestAgentsAsync(TestBase): + + # To run this test, use the following command in the \sdk\ai\azure-ai-projects folder: + # cls & pytest tests\test_agents_async.py::TestAgentsAsync::test_agents -s + @servicePreparer() + @recorded_by_proxy_async + async def test_agents(self, **kwargs): + + endpoint = kwargs.pop("azure_ai_projects_tests_project_endpoint") + print("\n=====> Endpoint:", endpoint) + + model_deployment_name = self.test_agents_params["model_deployment_name"] + agent_name = self.test_agents_params["agent_name"] + + async with AIProjectClient( + endpoint=endpoint, + credential=self.get_credential(AIProjectClient, is_async=True), + ) as project_client: + + print("[test_agents_async] Create agent") + agent = await project_client.agents.create_agent( + model=model_deployment_name, + name=agent_name, + instructions="You are helpful agent", + ) + assert agent.id + assert agent.model == model_deployment_name + assert agent.name == agent_name + + print("[test_agents_async] Delete agent") + await project_client.agents.delete_agent(agent.id) diff --git a/sdk/ai/azure-ai-projects/tests/test_base.py b/sdk/ai/azure-ai-projects/tests/test_base.py new file mode 100644 index 000000000000..ccf63ff4e21c --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/test_base.py @@ -0,0 +1,192 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ +import random +import re +import functools +from typing import Optional +from azure.ai.projects.models import ( + Connection, + ConnectionType, + CredentialType, + ApiKeyCredentials, + Deployment, + DeploymentType, + ModelDeployment, + Index, + IndexType, + AzureAISearchIndex, + DatasetVersion, + DatasetType, + AssetCredentialResponse, +) +from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader, is_live_and_not_recording + + +servicePreparer = functools.partial( + EnvironmentVariableLoader, + "azure_ai_projects_tests", + azure_ai_projects_tests_project_endpoint="https://sanitized.services.ai.azure.com/api/projects/sanitized-project-name", +) + + +class TestBase(AzureRecordedTestCase): + + test_connections_params = { + "connection_name": "connection1", + "connection_type": ConnectionType.AZURE_OPEN_AI, + } + + test_deployments_params = { + "model_publisher": "Cohere", + "model_name": "gpt-4o", + "model_deployment_name": "DeepSeek-V3", + } + + test_agents_params = { + "model_deployment_name": "gpt-4o", + "agent_name": "agent-for-python-projects-sdk-testing", + } + + test_inference_params = { + "connection_name": "connection1", + "model_deployment_name": "gpt-4o", + "aoai_api_version": "2024-10-21", + } + + test_indexes_params = { + "index_name": f"test-index-name", + "index_version": "1", + "ai_search_connection_name": "my-ai-search-connection", + "ai_search_index_name": "my-ai-search-index", + } + + test_datasets_params = { + "dataset_name_1": f"test-dataset-name-{random.randint(0, 99999):05d}", + "dataset_name_2": f"test-dataset-name-{random.randint(0, 99999):05d}", + "dataset_name_3": f"test-dataset-name-{random.randint(0, 99999):05d}", + "dataset_name_4": f"test-dataset-name-{random.randint(0, 99999):05d}", + "dataset_version": 1, + "connection_name": "balapvbyostoragecanary", + } + + # Regular expression describing the pattern of an Application Insights connection string. 
+ REGEX_APPINSIGHTS_CONNECTION_STRING = re.compile( + r"^InstrumentationKey=[0-9a-fA-F-]{36};IngestionEndpoint=https://.+.applicationinsights.azure.com/;LiveEndpoint=https://.+.monitor.azure.com/;ApplicationId=[0-9a-fA-F-]{36}$" + ) + + @staticmethod + def assert_equal_or_not_none(actual, expected=None): + assert actual is not None + if expected is not None: + assert actual == expected + + # Checks that a given dictionary has at least one non-empty (non-whitespace) string key-value pair. + @classmethod + def is_valid_dict(cls, d: dict[str, str]) -> bool: + return bool(d) and all( + isinstance(k, str) and isinstance(v, str) and k.strip() and v.strip() for k, v in d.items() + ) + + @classmethod + def validate_connection( + cls, + connection: Connection, + include_credentials: bool, + *, + expected_connection_type: Optional[ConnectionType] = None, + expected_connection_name: Optional[str] = None, + expected_authentication_type: Optional[CredentialType] = None, + expected_is_default: Optional[bool] = None, + ): + assert connection.id is not None + + TestBase.assert_equal_or_not_none(connection.name, expected_connection_name) + TestBase.assert_equal_or_not_none(connection.type, expected_connection_type) + TestBase.assert_equal_or_not_none(connection.credentials.type, expected_authentication_type) + + if expected_is_default is not None: + assert connection.is_default == expected_is_default + + if include_credentials: + if type(connection.credentials) == ApiKeyCredentials: + assert connection.credentials.type == CredentialType.API_KEY + assert connection.credentials.api_key is not None + + @classmethod + def validate_deployment( + cls, + deployment: Deployment, + *, + expected_model_name: Optional[str] = None, + expected_model_deployment_name: Optional[str] = None, + expected_model_publisher: Optional[str] = None, + ): + assert type(deployment) == ModelDeployment + assert deployment.type == DeploymentType.MODEL_DEPLOYMENT + assert deployment.model_version is not None + # 
Comment out the below, since I see that `Cohere-embed-v3-english` has an empty capabilities dict. + # assert TestBase.is_valid_dict(deployment.capabilities) + assert bool(deployment.sku) # Check none-empty + + TestBase.assert_equal_or_not_none(deployment.model_name, expected_model_name) + TestBase.assert_equal_or_not_none(deployment.name, expected_model_deployment_name) + TestBase.assert_equal_or_not_none(deployment.model_publisher, expected_model_publisher) + + @classmethod + def validate_index( + cls, + index: Index, + *, + expected_index_type: Optional[IndexType] = None, + expected_index_name: Optional[str] = None, + expected_index_version: Optional[str] = None, + expected_ai_search_connection_name: Optional[str] = None, + expected_ai_search_index_name: Optional[str] = None, + ): + + TestBase.assert_equal_or_not_none(index.name, expected_index_name) + TestBase.assert_equal_or_not_none(index.version, expected_index_version) + + if expected_index_type == IndexType.AZURE_SEARCH: + assert type(index) == AzureAISearchIndex + assert index.type == IndexType.AZURE_SEARCH + TestBase.assert_equal_or_not_none(index.connection_name, expected_ai_search_connection_name) + TestBase.assert_equal_or_not_none(index.index_name, expected_ai_search_index_name) + + @classmethod + def validate_dataset( + cls, + dataset: DatasetVersion, + *, + expected_dataset_type: Optional[DatasetType] = None, + expected_dataset_name: Optional[str] = None, + expected_dataset_version: Optional[str] = None, + expected_connection_name: Optional[str] = None, + ): + assert dataset.data_uri is not None + + if expected_dataset_type: + assert dataset.type == expected_dataset_type + else: + assert dataset.type == DatasetType.URI_FILE or dataset.type == DatasetType.URI_FOLDER + + TestBase.assert_equal_or_not_none(dataset.name, expected_dataset_name) + TestBase.assert_equal_or_not_none(dataset.version, expected_dataset_version) + if expected_connection_name: + assert dataset.connection_name == 
expected_connection_name + + @classmethod + def validate_asset_credential(cls, asset_credential: AssetCredentialResponse): + + assert asset_credential.blob_reference is not None + assert asset_credential.blob_reference.blob_uri + assert asset_credential.blob_reference.storage_account_arm_id + + assert asset_credential.blob_reference.credential is not None + assert ( + asset_credential.blob_reference.credential.type == "SAS" + ) # Why is this not of type CredentialType.SAS as defined for Connections? + assert asset_credential.blob_reference.credential.sas_uri diff --git a/sdk/ai/azure-ai-projects/tests/test_connections.py b/sdk/ai/azure-ai-projects/tests/test_connections.py new file mode 100644 index 000000000000..55db3a70288a --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/test_connections.py @@ -0,0 +1,64 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +from azure.ai.projects import AIProjectClient +from test_base import TestBase, servicePreparer +from devtools_testutils import recorded_by_proxy + + +class TestConnections(TestBase): + + # To run this test, use the following command in the \sdk\ai\azure-ai-projects folder: + # cls & pytest tests\test_connections.py::TestConnections::test_connections -s + @servicePreparer() + @recorded_by_proxy + def test_connections(self, **kwargs): + + endpoint = kwargs.pop("azure_ai_projects_tests_project_endpoint") + print("\n=====> Endpoint:", endpoint) + + connection_name = self.test_connections_params["connection_name"] + connection_type = self.test_connections_params["connection_type"] + + with AIProjectClient( + endpoint=endpoint, + credential=self.get_credential(AIProjectClient, is_async=False), + ) as project_client: + + print("[test_connections] List all connections") + empty = True + for connection in project_client.connections.list(): + empty = False + TestBase.validate_connection(connection, False) + 
assert not empty + + print("[test_connections] List all connections of a particular type") + empty = True + for connection in project_client.connections.list( + connection_type=connection_type, + ): + empty = False + TestBase.validate_connection(connection, False, expected_connection_type=connection_type) + assert not empty + + print("[test_connections] Get the default connection of a particular type, without its credentials") + connection = project_client.connections.get_default(connection_type=connection_type) + TestBase.validate_connection(connection, False, expected_connection_type=connection_type) + + print("[test_connections] Get the default connection of a particular type, with its credentials") + connection = project_client.connections.get_default( + connection_type=connection_type, include_credentials=True + ) + TestBase.validate_connection( + connection, True, expected_connection_type=connection_type, expected_is_default=True + ) + + print(f"[test_connections] Get the connection named `{connection_name}`, without its credentials") + connection = project_client.connections.get(connection_name) + TestBase.validate_connection(connection, False, expected_connection_name=connection_name) + + print(f"[test_connections] Get the connection named `{connection_name}`, with its credentials") + connection = project_client.connections.get(connection_name, include_credentials=True) + TestBase.validate_connection(connection, True, expected_connection_name=connection_name) diff --git a/sdk/ai/azure-ai-projects/tests/test_connections_async.py b/sdk/ai/azure-ai-projects/tests/test_connections_async.py new file mode 100644 index 000000000000..147bad39de9b --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/test_connections_async.py @@ -0,0 +1,64 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
class TestConnectionsAsync(TestBase):
    """Recorded tests for the Connections operations of the asynchronous AIProjectClient."""

    # To run this test, use the following command in the \sdk\ai\azure-ai-projects folder:
    # cls & pytest tests\test_connections_async.py::TestConnectionsAsync::test_connections_async -s
    @servicePreparer()
    @recorded_by_proxy_async
    async def test_connections_async(self, **kwargs):

        endpoint = kwargs.pop("azure_ai_projects_tests_project_endpoint")
        print("\n=====> Endpoint:", endpoint)

        connection_name = self.test_connections_params["connection_name"]
        connection_type = self.test_connections_params["connection_type"]

        async with AIProjectClient(
            endpoint=endpoint,
            credential=self.get_credential(AIProjectClient, is_async=True),
        ) as client:

            # Every listing below must yield at least one item; a found-flag
            # guards against a silently empty (and therefore meaningless) pass.
            print("[test_connections_async] List all connections")
            found_any = False
            async for item in client.connections.list():
                found_any = True
                TestBase.validate_connection(item, False)
            assert found_any

            print("[test_connections_async] List all connections of a particular type")
            found_typed = False
            async for item in client.connections.list(connection_type=connection_type):
                found_typed = True
                TestBase.validate_connection(item, False, expected_connection_type=connection_type)
            assert found_typed

            print("[test_connections_async] Get the default connection of a particular type, without its credentials")
            default_connection = await client.connections.get_default(connection_type=connection_type)
            TestBase.validate_connection(default_connection, False, expected_connection_type=connection_type)

            print("[test_connections_async] Get the default connection of a particular type, with its credentials")
            default_connection = await client.connections.get_default(
                connection_type=connection_type, include_credentials=True
            )
            TestBase.validate_connection(
                default_connection, True, expected_connection_type=connection_type, expected_is_default=True
            )

            print(f"[test_connections_async] Get the connection named `{connection_name}`, without its credentials")
            named_connection = await client.connections.get(connection_name)
            TestBase.validate_connection(named_connection, False, expected_connection_name=connection_name)

            print(f"[test_connections_async] Get the connection named `{connection_name}`, with its credentials")
            named_connection = await client.connections.get(connection_name, include_credentials=True)
            TestBase.validate_connection(named_connection, True, expected_connection_name=connection_name)
# Construct the paths to the data folder and data files used in these tests.
script_dir = os.path.dirname(os.path.abspath(__file__))
data_folder = os.environ.get("DATA_FOLDER", os.path.join(script_dir, "test_data/datasets"))
data_file1 = os.path.join(data_folder, "data_file1.txt")
data_file2 = os.path.join(data_folder, "data_file2.txt")


class TestDatasets(TestBase):
    """Recorded tests for the Datasets operations of the synchronous AIProjectClient."""

    # To run this test, use the following command in the \sdk\ai\azure-ai-projects folder:
    # cls & pytest tests\test_datasets.py::TestDatasets::test_datasets_upload_file -s
    @servicePreparer()
    @pytest.mark.skipif(
        not is_live_and_not_recording(),
        reason="Skipped because this test involves network calls from another client (azure.storage.blob) that is not recorded.",
    )
    @recorded_by_proxy
    def test_datasets_upload_file(self, **kwargs):
        """Upload single files as two dataset versions, fetch them and their credentials, then delete them."""

        endpoint = kwargs.pop("azure_ai_projects_tests_project_endpoint")
        print("\n=====> Endpoint:", endpoint)

        connection_name = self.test_datasets_params["connection_name"]
        dataset_name = self.test_datasets_params["dataset_name_1"]
        dataset_version = self.test_datasets_params["dataset_version"]

        with AIProjectClient(
            endpoint=endpoint,
            credential=self.get_credential(AIProjectClient, is_async=False),
        ) as project_client:

            print(
                f"[test_datasets_upload_file] Upload a single file and create a new Dataset `{dataset_name}`, version `{dataset_version}`, to reference the file."
            )
            dataset: DatasetVersion = project_client.datasets.upload_file(
                name=dataset_name,
                version=str(dataset_version),
                file_path=data_file1,
                connection_name=connection_name,
            )
            print(dataset)
            TestBase.validate_dataset(
                dataset,
                expected_dataset_type=DatasetType.URI_FILE,
                expected_dataset_name=dataset_name,
                expected_dataset_version=str(dataset_version),
            )

            print(f"[test_datasets_upload_file] Get an existing Dataset version `{dataset_version}`:")
            # Pass version as `str` for consistency with every other call in this test.
            dataset = project_client.datasets.get(name=dataset_name, version=str(dataset_version))
            print(dataset)
            TestBase.validate_dataset(
                dataset,
                expected_dataset_type=DatasetType.URI_FILE,
                expected_dataset_name=dataset_name,
                expected_dataset_version=str(dataset_version),
            )

            print(
                f"[test_datasets_upload_file] Upload a single file and create a new version in existing Dataset `{dataset_name}`, to reference the file."
            )
            dataset = project_client.datasets.upload_file(
                name=dataset_name,
                version=str(dataset_version + 1),
                file_path=data_file2,
                connection_name=connection_name,
            )
            print(dataset)
            TestBase.validate_dataset(
                dataset,
                expected_dataset_type=DatasetType.URI_FILE,
                expected_dataset_name=dataset_name,
                expected_dataset_version=str(dataset_version + 1),
            )

            print(f"[test_datasets_upload_file] Get credentials of an existing Dataset version `{dataset_version}`:")
            asset_credential = project_client.datasets.get_credentials(name=dataset_name, version=str(dataset_version))
            print(asset_credential)
            TestBase.validate_asset_credential(asset_credential)

            """
            print("[test_datasets_upload_file] List latest versions of all Datasets:")
            empty = True
            for dataset in project_client.datasets.list():
                empty = False
                print(dataset)
                TestBase.validate_dataset(dataset)
            assert not empty

            print(f"[test_datasets_upload_file] Listing all versions of the Dataset named `{dataset_name}`:")
            empty = True
            for dataset in project_client.datasets.list_versions(name=dataset_name):
                empty = False
                print(dataset)
                TestBase.validate_dataset(dataset, expected_dataset_name=dataset_name)
            assert not empty
            """

            print(
                f"[test_datasets_upload_file] Delete Dataset `{dataset_name}`, version `{dataset_version}` that was created above."
            )
            project_client.datasets.delete(name=dataset_name, version=str(dataset_version))
            project_client.datasets.delete(name=dataset_name, version=str(dataset_version + 1))

            print(
                "[test_datasets_upload_file] Delete the same (now non-existing) Dataset. REST API call should return 204 (No content). This call should NOT throw an exception."
            )
            project_client.datasets.delete(name=dataset_name, version=str(dataset_version))

            print(
                f"[test_datasets_upload_file] Try to get a non-existing Dataset `{dataset_name}`, version `{dataset_version}`. This should throw an exception."
            )
            # Initialize the flag BEFORE the try block, so it is guaranteed to be
            # bound even if the service call raises immediately.
            exception_thrown = False
            try:
                dataset = project_client.datasets.get(name=dataset_name, version=str(dataset_version))
            except HttpResponseError as e:
                exception_thrown = True
                print(f"Expected exception occurred: {e}")
                assert "Could not find asset with ID" in e.message
            assert exception_thrown

    # To run this test, use the following command in the \sdk\ai\azure-ai-projects folder:
    # cls & pytest tests\test_datasets.py::TestDatasets::test_datasets_upload_folder -s
    @servicePreparer()
    @pytest.mark.skipif(
        not is_live_and_not_recording(),
        reason="Skipped because this test involves network calls from another client (azure.storage.blob) that is not recorded.",
    )
    @recorded_by_proxy
    def test_datasets_upload_folder(self, **kwargs):
        """Upload a folder as a dataset version, fetch it and its credentials, then delete it."""

        endpoint = kwargs.pop("azure_ai_projects_tests_project_endpoint")
        print("\n=====> Endpoint:", endpoint)

        connection_name = self.test_datasets_params["connection_name"]
        dataset_name = self.test_datasets_params["dataset_name_2"]
        dataset_version = self.test_datasets_params["dataset_version"]

        with AIProjectClient(
            endpoint=endpoint,
            credential=self.get_credential(AIProjectClient, is_async=False),
        ) as project_client:

            print(
                f"[test_datasets_upload_folder] Upload files in a folder (including sub-folders) and create a new version `{dataset_version}` in the same Dataset, to reference the files."
            )
            dataset = project_client.datasets.upload_folder(
                name=dataset_name,
                version=str(dataset_version),
                folder=data_folder,
                connection_name=connection_name,
                file_pattern=re.compile(r"\.(txt|csv|md)$", re.IGNORECASE),
            )
            print(dataset)
            TestBase.validate_dataset(
                dataset,
                expected_dataset_type=DatasetType.URI_FOLDER,
                expected_dataset_name=dataset_name,
                expected_dataset_version=str(dataset_version),
            )

            # Log labels below fixed from `[test_datasets_upload_file]` (copy-paste
            # from the previous test) to `[test_datasets_upload_folder]`.
            print(f"[test_datasets_upload_folder] Get an existing Dataset version `{dataset_version}`:")
            dataset = project_client.datasets.get(name=dataset_name, version=str(dataset_version))
            print(dataset)
            TestBase.validate_dataset(
                dataset,
                expected_dataset_type=DatasetType.URI_FOLDER,
                expected_dataset_name=dataset_name,
                expected_dataset_version=str(dataset_version),
            )

            print(f"[test_datasets_upload_folder] Get credentials of an existing Dataset version `{dataset_version}`:")
            asset_credential = project_client.datasets.get_credentials(name=dataset_name, version=str(dataset_version))
            print(asset_credential)
            TestBase.validate_asset_credential(asset_credential)

            print(
                f"[test_datasets_upload_folder] Delete Dataset `{dataset_name}`, version `{dataset_version}` that was created above."
            )
            project_client.datasets.delete(name=dataset_name, version=str(dataset_version))
# Construct the paths to the data folder and data files used in these tests.
script_dir = os.path.dirname(os.path.abspath(__file__))
data_folder = os.environ.get("DATA_FOLDER", os.path.join(script_dir, "test_data/datasets"))
data_file1 = os.path.join(data_folder, "data_file1.txt")
data_file2 = os.path.join(data_folder, "data_file2.txt")


class TestDatasetsAsync(TestBase):
    """Recorded tests for the Datasets operations of the asynchronous AIProjectClient."""

    # To run this test, use the following command in the \sdk\ai\azure-ai-projects folder:
    # cls & pytest tests\test_datasets_async.py::TestDatasetsAsync::test_datasets_upload_file -s
    # (Comment fixed: it previously referenced a non-existent `test_datasets_upload_file_async` method name.)
    @servicePreparer()
    @pytest.mark.skipif(
        not is_live_and_not_recording(),
        reason="Skipped because this test involves network calls from another client (azure.storage.blob) that is not recorded.",
    )
    @recorded_by_proxy_async
    async def test_datasets_upload_file(self, **kwargs):
        """Upload single files as two dataset versions, fetch them and their credentials, then delete them."""

        endpoint = kwargs.pop("azure_ai_projects_tests_project_endpoint")
        print("\n=====> Endpoint:", endpoint)

        connection_name = self.test_datasets_params["connection_name"]
        dataset_name = self.test_datasets_params["dataset_name_3"]
        dataset_version = self.test_datasets_params["dataset_version"]

        async with AIProjectClient(
            endpoint=endpoint,
            credential=self.get_credential(AIProjectClient, is_async=True),
        ) as project_client:

            print(
                f"[test_datasets_upload_file] Upload a single file and create a new Dataset `{dataset_name}`, version `{dataset_version}`, to reference the file."
            )
            dataset: DatasetVersion = await project_client.datasets.upload_file(
                name=dataset_name,
                version=str(dataset_version),
                file_path=data_file1,
                connection_name=connection_name,
            )
            print(dataset)
            TestBase.validate_dataset(
                dataset,
                expected_dataset_type=DatasetType.URI_FILE,
                expected_dataset_name=dataset_name,
                expected_dataset_version=str(dataset_version),
            )

            print(f"[test_datasets_upload_file] Get an existing Dataset version `{dataset_version}`:")
            # Pass version as `str` for consistency with every other call in this test.
            dataset = await project_client.datasets.get(name=dataset_name, version=str(dataset_version))
            print(dataset)
            TestBase.validate_dataset(
                dataset,
                expected_dataset_type=DatasetType.URI_FILE,
                expected_dataset_name=dataset_name,
                expected_dataset_version=str(dataset_version),
            )

            print(
                f"[test_datasets_upload_file] Upload a single file and create a new version in existing Dataset `{dataset_name}`, to reference the file."
            )
            dataset = await project_client.datasets.upload_file(
                name=dataset_name,
                version=str(dataset_version + 1),
                file_path=data_file2,
                connection_name=connection_name,
            )
            print(dataset)
            TestBase.validate_dataset(
                dataset,
                expected_dataset_type=DatasetType.URI_FILE,
                expected_dataset_name=dataset_name,
                expected_dataset_version=str(dataset_version + 1),
            )

            print(f"[test_datasets_upload_file] Get credentials of an existing Dataset version `{dataset_version}`:")
            asset_credential = await project_client.datasets.get_credentials(
                name=dataset_name, version=str(dataset_version)
            )
            print(asset_credential)
            TestBase.validate_asset_credential(asset_credential)

            """
            print("[test_datasets_upload_file] List latest versions of all Datasets:")
            empty = True
            async for dataset in project_client.datasets.list():
                empty = False
                print(dataset)
                TestBase.validate_dataset(dataset)
            assert not empty

            print(f"[test_datasets_upload_file] Listing all versions of the Dataset named `{dataset_name}`:")
            empty = True
            async for dataset in project_client.datasets.list_versions(name=dataset_name):
                empty = False
                print(dataset)
                TestBase.validate_dataset(dataset, expected_dataset_name=dataset_name)
            assert not empty
            """

            print(
                f"[test_datasets_upload_file] Delete Dataset `{dataset_name}`, version `{dataset_version}` that was created above."
            )
            await project_client.datasets.delete(name=dataset_name, version=str(dataset_version))
            await project_client.datasets.delete(name=dataset_name, version=str(dataset_version + 1))

            print(
                "[test_datasets_upload_file] Delete the same (now non-existing) Dataset. REST API call should return 204 (No content). This call should NOT throw an exception."
            )
            await project_client.datasets.delete(name=dataset_name, version=str(dataset_version))

            print(
                f"[test_datasets_upload_file] Try to get a non-existing Dataset `{dataset_name}`, version `{dataset_version}`. This should throw an exception."
            )
            # Initialize the flag BEFORE the try block, so it is guaranteed to be
            # bound even if the service call raises immediately.
            exception_thrown = False
            try:
                dataset = await project_client.datasets.get(name=dataset_name, version=str(dataset_version))
            except HttpResponseError as e:
                exception_thrown = True
                print(f"Expected exception occurred: {e}")
                assert "Could not find asset with ID" in e.message
            assert exception_thrown

    # To run this test, use the following command in the \sdk\ai\azure-ai-projects folder:
    # cls & pytest tests\test_datasets_async.py::TestDatasetsAsync::test_datasets_upload_folder_async -s
    @servicePreparer()
    @pytest.mark.skipif(
        not is_live_and_not_recording(),
        reason="Skipped because this test involves network calls from another client (azure.storage.blob) that is not recorded.",
    )
    @recorded_by_proxy_async
    async def test_datasets_upload_folder_async(self, **kwargs):
        """Upload a folder as a dataset version, fetch it and its credentials, then delete it."""

        endpoint = kwargs.pop("azure_ai_projects_tests_project_endpoint")
        print("\n=====> Endpoint:", endpoint)

        connection_name = self.test_datasets_params["connection_name"]
        dataset_name = self.test_datasets_params["dataset_name_4"]
        dataset_version = self.test_datasets_params["dataset_version"]

        async with AIProjectClient(
            endpoint=endpoint,
            credential=self.get_credential(AIProjectClient, is_async=True),
        ) as project_client:

            print(
                f"[test_datasets_upload_folder] Upload files in a folder (including sub-folders) and create a new version `{dataset_version}` in the same Dataset, to reference the files."
            )
            dataset = await project_client.datasets.upload_folder(
                name=dataset_name,
                version=str(dataset_version),
                folder=data_folder,
                connection_name=connection_name,
                file_pattern=re.compile(r"\.(txt|csv|md)$", re.IGNORECASE),
            )
            print(dataset)
            TestBase.validate_dataset(
                dataset,
                expected_dataset_type=DatasetType.URI_FOLDER,
                expected_dataset_name=dataset_name,
                expected_dataset_version=str(dataset_version),
            )

            # Log labels below fixed from `[test_datasets_upload_file]` (copy-paste
            # from the previous test) to `[test_datasets_upload_folder]`.
            print(f"[test_datasets_upload_folder] Get an existing Dataset version `{dataset_version}`:")
            dataset = await project_client.datasets.get(name=dataset_name, version=str(dataset_version))
            print(dataset)
            TestBase.validate_dataset(
                dataset,
                expected_dataset_type=DatasetType.URI_FOLDER,
                expected_dataset_name=dataset_name,
                expected_dataset_version=str(dataset_version),
            )

            print(f"[test_datasets_upload_folder] Get credentials of an existing Dataset version `{dataset_version}`:")
            asset_credential = await project_client.datasets.get_credentials(
                name=dataset_name, version=str(dataset_version)
            )
            print(asset_credential)
            TestBase.validate_asset_credential(asset_credential)

            print(
                f"[test_datasets_upload_folder] Delete Dataset `{dataset_name}`, version `{dataset_version}` that was created above."
            )
            await project_client.datasets.delete(name=dataset_name, version=str(dataset_version))
empty = True + for deployment in project_client.deployments.list(model_name=model_name): + empty = False + TestBase.validate_deployment(deployment, expected_model_name=model_name) + assert not empty + + print(f"[test_deployments] Get a single deployment named `{model_deployment_name}`") + deployment = project_client.deployments.get(model_deployment_name) + TestBase.validate_deployment(deployment, expected_model_deployment_name=model_deployment_name) diff --git a/sdk/ai/azure-ai-projects/tests/test_deployments_async.py b/sdk/ai/azure-ai-projects/tests/test_deployments_async.py new file mode 100644 index 000000000000..493d71935993 --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/test_deployments_async.py @@ -0,0 +1,54 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +from azure.ai.projects.aio import AIProjectClient +from test_base import TestBase, servicePreparer +from devtools_testutils.aio import recorded_by_proxy_async + + +class TestDeploymentsAsync(TestBase): + + # To run this test, use the following command in the \sdk\ai\azure-ai-projects folder: + # cls & pytest tests\test_deployments_async.py::TestDeploymentsAsync::test_deployments_async -s + @servicePreparer() + @recorded_by_proxy_async + async def test_deployments_async(self, **kwargs): + + endpoint = kwargs.pop("azure_ai_projects_tests_project_endpoint") + print("\n=====> Endpoint:", endpoint) + + model_publisher = self.test_deployments_params["model_publisher"] + model_name = self.test_deployments_params["model_name"] + model_deployment_name = self.test_deployments_params["model_deployment_name"] + + async with AIProjectClient( + endpoint=endpoint, + credential=self.get_credential(AIProjectClient, is_async=True), + ) as project_client: + + print("[test_deployments_async] List all deployments") + empty = True + async for deployment in project_client.deployments.list(): + empty = False + 
TestBase.validate_deployment(deployment) + assert not empty + + print(f"[test_deployments_async] List all deployments by the model publisher `{model_publisher}`") + empty = True + async for deployment in project_client.deployments.list(model_publisher=model_publisher): + empty = False + TestBase.validate_deployment(deployment, expected_model_publisher=model_publisher) + assert not empty + + print(f"[test_deployments_async] List all deployments of model `{model_name}`") + empty = True + async for deployment in project_client.deployments.list(model_name=model_name): + empty = False + TestBase.validate_deployment(deployment, expected_model_name=model_name) + assert not empty + + print(f"[test_deployments_async] Get a single deployment named `{model_deployment_name}`") + deployment = await project_client.deployments.get(model_deployment_name) + TestBase.validate_deployment(deployment, expected_model_deployment_name=model_deployment_name) diff --git a/sdk/ai/azure-ai-projects/tests/test_indexes.py b/sdk/ai/azure-ai-projects/tests/test_indexes.py new file mode 100644 index 000000000000..0bb5f33f0e0b --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/test_indexes.py @@ -0,0 +1,85 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
class TestIndexes(TestBase):
    """Recorded tests for the Indexes operations of the synchronous AIProjectClient."""

    # To run this test, use the following command in the \sdk\ai\azure-ai-projects folder:
    # cls & pytest tests\test_indexes.py::TestIndexes::test_indexes -s
    @servicePreparer()
    @recorded_by_proxy
    def test_indexes(self, **kwargs):
        """Create, get, list and delete an Azure AI Search index reference."""

        endpoint = kwargs.pop("azure_ai_projects_tests_project_endpoint")
        print("\n=====> Endpoint:", endpoint)

        index_name = self.test_indexes_params["index_name"]
        index_version = self.test_indexes_params["index_version"]
        ai_search_connection_name = self.test_indexes_params["ai_search_connection_name"]
        ai_search_index_name = self.test_indexes_params["ai_search_index_name"]

        with AIProjectClient(
            endpoint=endpoint,
            credential=self.get_credential(AIProjectClient, is_async=False),
        ) as project_client:

            print(
                f"[test_indexes] Create Index `{index_name}` with version `{index_version}`, referencing an existing AI Search resource:"
            )
            index = project_client.indexes.create_or_update(
                name=index_name,
                version=index_version,
                body=AzureAISearchIndex(connection_name=ai_search_connection_name, index_name=ai_search_index_name),
            )
            print(index)
            TestBase.validate_index(
                index,
                expected_index_type=IndexType.AZURE_SEARCH,
                expected_index_name=index_name,
                expected_index_version=index_version,
                expected_ai_search_connection_name=ai_search_connection_name,
                expected_ai_search_index_name=ai_search_index_name,
            )

            print(f"[test_indexes] Get Index `{index_name}` version `{index_version}`:")
            index = project_client.indexes.get(name=index_name, version=index_version)
            print(index)
            TestBase.validate_index(
                index,
                expected_index_type=IndexType.AZURE_SEARCH,
                expected_index_name=index_name,
                expected_index_version=index_version,
                expected_ai_search_connection_name=ai_search_connection_name,
                expected_ai_search_index_name=ai_search_index_name,
            )

            # Loop variables renamed so they no longer shadow the `index` result above.
            print("[test_indexes] List latest versions of all Indexes:")
            empty = True
            for listed_index in project_client.indexes.list():
                empty = False
                print(listed_index)
                TestBase.validate_index(listed_index)
            assert not empty

            print(f"[test_indexes] Listing all versions of the Index named `{index_name}`:")
            empty = True
            for index_version_item in project_client.indexes.list_versions(name=index_name):
                empty = False
                print(index_version_item)
                # Versions listed under `index_name` must carry that name; assert it.
                TestBase.validate_index(index_version_item, expected_index_name=index_name)
            assert not empty

            print(f"[test_indexes] Delete Index `{index_name}` version `{index_version}`.")
            project_client.indexes.delete(name=index_name, version=index_version)

            print(
                f"[test_indexes] Again delete Index `{index_name}` version `{index_version}`. Since it does not exist, the REST API should return 204 (No content). This call should NOT throw an exception."
            )
            project_client.indexes.delete(name=index_name, version=index_version)
class TestIndexesAsync(TestBase):
    """Recorded tests for the Indexes operations of the asynchronous AIProjectClient."""

    # To run this test, use the following command in the \sdk\ai\azure-ai-projects folder:
    # cls & pytest tests\test_indexes_async.py::TestIndexesAsync::test_indexes_async -s
    @servicePreparer()
    @recorded_by_proxy_async
    async def test_indexes_async(self, **kwargs):
        """Create, get, list and delete an Azure AI Search index reference (async client)."""

        endpoint = kwargs.pop("azure_ai_projects_tests_project_endpoint")
        print("\n=====> Endpoint:", endpoint)

        index_name = self.test_indexes_params["index_name"]
        index_version = self.test_indexes_params["index_version"]
        ai_search_connection_name = self.test_indexes_params["ai_search_connection_name"]
        ai_search_index_name = self.test_indexes_params["ai_search_index_name"]

        async with AIProjectClient(
            endpoint=endpoint,
            credential=self.get_credential(AIProjectClient, is_async=True),
        ) as client:

            print(
                f"[test_indexes] Create Index `{index_name}` with version `{index_version}`, referencing an existing AI Search resource:"
            )
            search_index_body = AzureAISearchIndex(
                connection_name=ai_search_connection_name, index_name=ai_search_index_name
            )
            created_index = await client.indexes.create_or_update(
                name=index_name,
                version=index_version,
                body=search_index_body,
            )
            print(created_index)
            TestBase.validate_index(
                created_index,
                expected_index_type=IndexType.AZURE_SEARCH,
                expected_index_name=index_name,
                expected_index_version=index_version,
                expected_ai_search_connection_name=ai_search_connection_name,
                expected_ai_search_index_name=ai_search_index_name,
            )

            print(f"[test_indexes] Get Index `{index_name}` version `{index_version}`:")
            fetched_index = await client.indexes.get(name=index_name, version=index_version)
            print(fetched_index)
            TestBase.validate_index(
                fetched_index,
                expected_index_type=IndexType.AZURE_SEARCH,
                expected_index_name=index_name,
                expected_index_version=index_version,
                expected_ai_search_connection_name=ai_search_connection_name,
                expected_ai_search_index_name=ai_search_index_name,
            )

            # Every listing below must yield at least one item; a found-flag
            # guards against a silently empty (and therefore meaningless) pass.
            print("[test_indexes] List latest versions of all Indexes:")
            found_any = False
            async for listed_index in client.indexes.list():
                found_any = True
                print(listed_index)
                TestBase.validate_index(listed_index)
            assert found_any

            print(f"[test_indexes] Listing all versions of the Index named `{index_name}`:")
            found_version = False
            async for version_item in client.indexes.list_versions(name=index_name):
                found_version = True
                print(version_item)
                TestBase.validate_index(version_item)
            assert found_version

            print(f"[test_indexes] Delete Index `{index_name}` version `{index_version}`.")
            await client.indexes.delete(name=index_name, version=index_version)

            print(
                f"[test_indexes] Again delete Index `{index_name}` version `{index_version}`. Since it does not exist, the REST API should return 204 (No content). This call should NOT throw an exception."
            )
            await client.indexes.delete(name=index_name, version=index_version)
+# ------------------------------------ +import pprint + +import pytest +from azure.ai.projects import AIProjectClient +from test_base import TestBase, servicePreparer +from devtools_testutils import recorded_by_proxy, is_live_and_not_recording + + +class TestInference(TestBase): + + # To run this test, use the following command in the \sdk\ai\azure-ai-projects folder: + # cls & pytest tests\test_inference.py::TestInference::test_inference -s + @servicePreparer() + @pytest.mark.skipif( + condition=(not is_live_and_not_recording()), + reason="Skipped because we cannot record chat completions call with AOAI client", + ) + @recorded_by_proxy + def test_inference(self, **kwargs): + + endpoint = kwargs.pop("azure_ai_projects_tests_project_endpoint") + print("\n=====> Endpoint:", endpoint) + + model_deployment_name = self.test_inference_params["model_deployment_name"] + api_version = self.test_inference_params["aoai_api_version"] + + with AIProjectClient( + endpoint=endpoint, + credential=self.get_credential(AIProjectClient, is_async=False), + ) as project_client: + + print( + "[test_inference] Get an authenticated Azure OpenAI client for the parent AI Services resource, and perform a chat completion operation." 
+ ) + with project_client.inference.get_azure_openai_client(api_version=api_version) as client: + + response = client.chat.completions.create( + model=model_deployment_name, + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + }, + ], + ) + + print("Raw dump of response object: ") + pprint.pprint(response) + print("Response message: ", response.choices[0].message.content) + contains = ["5280", "5,280"] + assert any(item in response.choices[0].message.content for item in contains) + + # To run this test, use the following command in the \sdk\ai\azure-ai-projects folder: + # cls & pytest tests\test_inference.py::TestInference::test_inference_on_connection -s + @servicePreparer() + @pytest.mark.skipif( + condition=(not is_live_and_not_recording()), + reason="Skipped because we cannot record chat completions call with AOAI client", + ) + @recorded_by_proxy + def test_inference_on_connection(self, **kwargs): + + endpoint = kwargs.pop("azure_ai_projects_tests_project_endpoint") + print("\n=====> Endpoint:", endpoint) + + connection_name = self.test_inference_params["connection_name"] + model_deployment_name = self.test_inference_params["model_deployment_name"] + api_version = self.test_inference_params["aoai_api_version"] + + with AIProjectClient( + endpoint=endpoint, + credential=self.get_credential(AIProjectClient, is_async=False), + ) as project_client: + + print( + "[test_inference_on_connection] Get an authenticated Azure OpenAI client for a connection AOAI service, and perform a chat completion operation." 
+ ) + with project_client.inference.get_azure_openai_client( + api_version=api_version, connection_name=connection_name + ) as client: + + response = client.chat.completions.create( + model=model_deployment_name, + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + }, + ], + ) + + print("Raw dump of response object: ") + pprint.pprint(response) + print("Response message: ", response.choices[0].message.content) + contains = ["5280", "5,280"] + assert any(item in response.choices[0].message.content for item in contains) diff --git a/sdk/ai/azure-ai-projects/tests/test_inference_async.py b/sdk/ai/azure-ai-projects/tests/test_inference_async.py new file mode 100644 index 000000000000..3d5c602d1b3a --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/test_inference_async.py @@ -0,0 +1,100 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +import pprint +import pytest +from azure.ai.projects.aio import AIProjectClient +from test_base import TestBase, servicePreparer +from devtools_testutils import is_live_and_not_recording +from devtools_testutils.aio import recorded_by_proxy_async + + +class TestInferenceAsync(TestBase): + + # To run this test, use the following command in the \sdk\ai\azure-ai-projects folder: + # cls & pytest tests\test_inference_async.py::TestInferenceAsync::test_inference_async -s + @servicePreparer() + @pytest.mark.skipif( + not is_live_and_not_recording(), + reason="Skipped because we cannot record chat completions call with AOAI client", + ) + @recorded_by_proxy_async + async def test_inference_async(self, **kwargs): + + endpoint = kwargs.pop("azure_ai_projects_tests_project_endpoint") + print("\n=====> Endpoint:", endpoint) + + model_deployment_name = self.test_inference_params["model_deployment_name"] + api_version = self.test_inference_params["aoai_api_version"] + + async with AIProjectClient( + 
endpoint=endpoint, + credential=self.get_credential(AIProjectClient, is_async=True), + ) as project_client: + + print( + "[test_inference_async] Get an authenticated Azure OpenAI client for the parent AI Services resource, and perform a chat completion operation." + ) + async with await project_client.inference.get_azure_openai_client(api_version=api_version) as client: + + response = await client.chat.completions.create( + model=model_deployment_name, + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + }, + ], + ) + + print("Raw dump of response object: ") + pprint.pprint(response) + print("Response message: ", response.choices[0].message.content) + contains = ["5280", "5,280"] + assert any(item in response.choices[0].message.content for item in contains) + + # To run this test, use the following command in the \sdk\ai\azure-ai-projects folder: + # cls & pytest tests\test_inference_async.py::TestInferenceAsync::test_inference_on_connection_async -s + @servicePreparer() + @pytest.mark.skipif( + not is_live_and_not_recording(), + reason="Skipped because we cannot record chat completions call with AOAI client", + ) + @recorded_by_proxy_async + async def test_inference_on_connection_async(self, **kwargs): + + endpoint = kwargs.pop("azure_ai_projects_tests_project_endpoint") + print("\n=====> Endpoint:", endpoint) + + connection_name = self.test_inference_params["connection_name"] + model_deployment_name = self.test_inference_params["model_deployment_name"] + api_version = self.test_inference_params["aoai_api_version"] + + async with AIProjectClient( + endpoint=endpoint, + credential=self.get_credential(AIProjectClient, is_async=False), + ) as project_client: + + print( + "[test_inference_on_connection_async] Get an authenticated Azure OpenAI client for a connection AOAI service, and perform a chat completion operation." 
+ ) + async with await project_client.inference.get_azure_openai_client( + api_version=api_version, connection_name=connection_name + ) as client: + + response = await client.chat.completions.create( + model=model_deployment_name, + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + }, + ], + ) + + print("Raw dump of response object: ") + pprint.pprint(response) + print("Response message: ", response.choices[0].message.content) + contains = ["5280", "5,280"] + assert any(item in response.choices[0].message.content for item in contains) diff --git a/sdk/ai/azure-ai-projects/tests/test_telemetry.py b/sdk/ai/azure-ai-projects/tests/test_telemetry.py new file mode 100644 index 000000000000..5716366fc87e --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/test_telemetry.py @@ -0,0 +1,35 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +from azure.ai.projects import AIProjectClient +from test_base import TestBase, servicePreparer +from devtools_testutils import recorded_by_proxy, is_live + + +class TestTelemetry(TestBase): + + # To run this test, use the following command in the \sdk\ai\azure-ai-projects folder: + # cls & pytest tests\test_telemetry.py::TestTelemetry::test_telemetry -s + @servicePreparer() + @recorded_by_proxy + def test_telemetry(self, **kwargs): + + endpoint = kwargs.pop("azure_ai_projects_tests_project_endpoint") + print("\n=====> Endpoint:", endpoint) + + with AIProjectClient( + endpoint=endpoint, + credential=self.get_credential(AIProjectClient, is_async=False), + ) as project_client: + + print("[test_telemetry] Get the Application Insights connection string:") + connection_string = project_client.telemetry.get_connection_string() + assert connection_string + if is_live(): + assert bool(self.REGEX_APPINSIGHTS_CONNECTION_STRING.match(connection_string)) + else: + assert connection_string == "Sanitized-api-key" + 
assert connection_string == project_client.telemetry.get_connection_string() # Test cached value + print("Application Insights connection string = " + connection_string) diff --git a/sdk/ai/azure-ai-projects/tests/test_telemetry_async.py b/sdk/ai/azure-ai-projects/tests/test_telemetry_async.py new file mode 100644 index 000000000000..86a96162b97a --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/test_telemetry_async.py @@ -0,0 +1,36 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +from azure.ai.projects.aio import AIProjectClient +from test_base import TestBase, servicePreparer +from devtools_testutils.aio import recorded_by_proxy_async +from devtools_testutils import is_live + + +class TestTelemetryAsync(TestBase): + + # To run this test, use the following command in the \sdk\ai\azure-ai-projects folder: + # cls & pytest tests\test_telemetry_async.py::TestTelemetryAsync::test_telemetry_async -s + @servicePreparer() + @recorded_by_proxy_async + async def test_telemetry_async(self, **kwargs): + + endpoint = kwargs.pop("azure_ai_projects_tests_project_endpoint") + print("\n=====> Endpoint:", endpoint) + + async with AIProjectClient( + endpoint=endpoint, + credential=self.get_credential(AIProjectClient, is_async=True), + ) as project_client: + + print("[test_telemetry_async] Get the Application Insights connection string:") + connection_string = await project_client.telemetry.get_connection_string() + assert connection_string + if is_live(): + assert bool(self.REGEX_APPINSIGHTS_CONNECTION_STRING.match(connection_string)) + else: + assert connection_string == "Sanitized-api-key" + assert connection_string == await project_client.telemetry.get_connection_string() # Test cached value + print("Application Insights connection string = " + connection_string) From 3032fd8ab739ecf2d70e596f23e4367d31c8b912 Mon Sep 17 00:00:00 2001 From: howieleung Date: Tue, 10 Jun 
2025 10:23:55 -0700 Subject: [PATCH 6/8] migrate agent samples --- .../samples/agents/__init__.py | 0 .../samples/agents/agents_async/__init__.py | 0 .../sample_agents_azure_functions_async.py | 108 ++ .../sample_agents_basics_async.py | 81 + ...ics_create_thread_and_process_run_async.py | 80 + ...ents_basics_create_thread_and_run_async.py | 89 + .../sample_agents_code_interpreter_async.py | 111 ++ ...gents_code_interpreter_attachment_async.py | 95 + ...eter_attachment_enterprise_search_async.py | 85 + .../sample_agents_functions_async.py | 117 ++ .../sample_agents_image_input_base64_async.py | 118 ++ .../sample_agents_image_input_file_async.py | 100 + .../sample_agents_image_input_url_async.py | 96 + .../sample_agents_json_schema_async.py | 108 ++ .../sample_agents_run_with_toolset_async.py | 90 + ...sample_agents_stream_eventhandler_async.py | 106 ++ ...tream_eventhandler_with_functions_async.py | 140 ++ ..._stream_eventhandler_with_toolset_async.py | 119 ++ .../sample_agents_stream_iteration_async.py | 95 + ...m_with_base_override_eventhandler_async.py | 116 ++ ...tore_batch_enterprise_file_search_async.py | 125 ++ ...ts_vector_store_batch_file_search_async.py | 117 ++ ...ctor_store_enterprise_file_search_async.py | 89 + ...e_agents_vector_store_file_search_async.py | 87 + ...gents_with_file_search_attachment_async.py | 89 + .../agents/agents_async/utils/__init__.py | 0 .../utils/user_async_functions.py | 67 + .../sample_agents_image_input_base64.py | 113 ++ .../sample_agents_image_input_file.py | 93 + .../sample_agents_image_input_url.py | 91 + .../sample_agents_json_schema.py | 102 ++ ...ctor_store_batch_enterprise_file_search.py | 106 ++ ...e_agents_vector_store_batch_file_search.py | 110 ++ .../sample_agents_vector_store_file_search.py | 81 + ...s_with_code_interpreter_file_attachment.py | 112 ++ ...mple_agents_with_file_search_attachment.py | 78 + .../sample_agents_with_resources_in_thread.py | 98 + .../sample_agents_agent_team.py | 81 + 
...le_agents_agent_team_custom_team_leader.py | 120 ++ .../sample_agents_multi_agent_team.py | 137 ++ .../agents_multiagent/utils/agent_team.py | 436 +++++ .../utils/agent_team_config.yaml | 43 + .../utils/agent_trace_configurator.py | 73 + .../utils/user_functions_with_traces.py | 111 ++ ...stream_eventhandler_with_bing_grounding.py | 125 ++ ...ents_stream_eventhandler_with_functions.py | 148 ++ ...agents_stream_eventhandler_with_toolset.py | 120 ++ ...ts_stream_iteration_with_bing_grounding.py | 118 ++ ...gents_stream_iteration_with_file_search.py | 111 ++ ..._agents_stream_iteration_with_functions.py | 148 ++ ...le_agents_stream_iteration_with_toolset.py | 109 ++ ..._stream_with_base_override_eventhandler.py | 108 ++ ...basics_async_with_azure_monitor_tracing.py | 90 + ...gents_basics_async_with_console_tracing.py | 99 + ...gents_basics_with_azure_monitor_tracing.py | 81 + ...mple_agents_basics_with_console_tracing.py | 87 + ..._with_console_tracing_custom_attributes.py | 116 ++ ...eventhandler_with_azure_monitor_tracing.py | 118 ++ ...tream_eventhandler_with_console_tracing.py | 133 ++ ...ents_toolset_with_azure_monitor_tracing.py | 132 ++ ...ple_agents_toolset_with_console_tracing.py | 144 ++ .../samples/agents/agents_tools/__init__.py | 0 .../sample_agents_azure_ai_search.py | 135 ++ .../sample_agents_azure_functions.py | 99 + .../sample_agents_bing_custom_search.py | 90 + .../sample_agents_bing_grounding.py | 106 ++ .../sample_agents_code_interpreter.py | 111 ++ ...nterpreter_attachment_enterprise_search.py | 86 + .../sample_agents_connected_agent.py | 98 + .../sample_agents_enterprise_file_search.py | 83 + .../agents_tools/sample_agents_fabric.py | 88 + .../agents_tools/sample_agents_file_search.py | 104 ++ .../agents_tools/sample_agents_functions.py | 117 ++ .../agents_tools/sample_agents_logic_apps.py | 134 ++ ...sample_agents_multiple_connected_agents.py | 117 ++ .../agents_tools/sample_agents_openapi.py | 123 ++ 
.../sample_agents_openapi_connection_auth.py | 103 ++ .../sample_agents_run_with_toolset.py | 96 + .../agents_tools/sample_agents_sharepoint.py | 91 + .../agents_tools/utils/user_logic_apps.py | 80 + .../samples/agents/assets/countries.json | 46 + .../samples/agents/assets/image_file.png | Bin 0 -> 183951 bytes .../samples/agents/assets/product_info_1.md | 51 + .../synthetic_500_quarterly_results.csv | 14 + .../agents/assets/tripadvisor_openapi.json | 1606 +++++++++++++++++ .../agents/assets/weather_openapi.json | 62 + .../samples/agents/sample_agents.py | 51 - .../samples/agents/sample_agents_async.py | 56 - .../samples/agents/sample_agents_basics.py | 85 + ...ample_agents_basics_stream_eventhandler.py | 105 ++ .../sample_agents_basics_stream_iteration.py | 85 + ...le_agents_basics_thread_and_process_run.py | 69 + .../sample_agents_basics_thread_and_run.py | 79 + .../samples/agents/utils/__init__.py | 0 .../samples/agents/utils/user_functions.py | 248 +++ 95 files changed, 10711 insertions(+), 107 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/samples/agents/__init__.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/__init__.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_azure_functions_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_basics_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_basics_create_thread_and_process_run_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_basics_create_thread_and_run_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_code_interpreter_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_code_interpreter_attachment_async.py create mode 100644 
sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_code_interpreter_attachment_enterprise_search_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_functions_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_image_input_base64_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_image_input_file_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_image_input_url_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_json_schema_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_run_with_toolset_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_eventhandler_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_eventhandler_with_functions_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_eventhandler_with_toolset_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_iteration_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_with_base_override_eventhandler_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_vector_store_batch_enterprise_file_search_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_vector_store_batch_file_search_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_vector_store_enterprise_file_search_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_vector_store_file_search_async.py create mode 100644 
sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_with_file_search_attachment_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/utils/__init__.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_async/utils/user_async_functions.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_image_input_base64.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_image_input_file.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_image_input_url.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_json_schema.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_vector_store_batch_enterprise_file_search.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_vector_store_batch_file_search.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_vector_store_file_search.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_with_code_interpreter_file_attachment.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_with_file_search_attachment.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_with_resources_in_thread.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/sample_agents_agent_team.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/sample_agents_agent_team_custom_team_leader.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/sample_agents_multi_agent_team.py create mode 100644 
sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/utils/agent_team.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/utils/agent_team_config.yaml create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/utils/agent_trace_configurator.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/utils/user_functions_with_traces.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_eventhandler_with_bing_grounding.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_eventhandler_with_functions.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_eventhandler_with_toolset.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_iteration_with_bing_grounding.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_iteration_with_file_search.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_iteration_with_functions.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_iteration_with_toolset.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_with_base_override_eventhandler.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_async_with_azure_monitor_tracing.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_async_with_console_tracing.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_with_azure_monitor_tracing.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_with_console_tracing.py create mode 100644 
sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_with_console_tracing_custom_attributes.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_stream_eventhandler_with_console_tracing.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_toolset_with_azure_monitor_tracing.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_toolset_with_console_tracing.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_tools/__init__.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_azure_ai_search.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_azure_functions.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_bing_custom_search.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_bing_grounding.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_code_interpreter.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_code_interpreter_attachment_enterprise_search.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_connected_agent.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_enterprise_file_search.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_fabric.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_file_search.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_functions.py create mode 100644 
sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_logic_apps.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_multiple_connected_agents.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_openapi.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_openapi_connection_auth.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_run_with_toolset.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_sharepoint.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/agents_tools/utils/user_logic_apps.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/assets/countries.json create mode 100644 sdk/ai/azure-ai-projects/samples/agents/assets/image_file.png create mode 100644 sdk/ai/azure-ai-projects/samples/agents/assets/product_info_1.md create mode 100644 sdk/ai/azure-ai-projects/samples/agents/assets/synthetic_500_quarterly_results.csv create mode 100644 sdk/ai/azure-ai-projects/samples/agents/assets/tripadvisor_openapi.json create mode 100644 sdk/ai/azure-ai-projects/samples/agents/assets/weather_openapi.json delete mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents.py delete mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_stream_eventhandler.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_stream_iteration.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_thread_and_process_run.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_thread_and_run.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/utils/__init__.py create mode 100644 
sdk/ai/azure-ai-projects/samples/agents/utils/user_functions.py diff --git a/sdk/ai/azure-ai-projects/samples/agents/__init__.py b/sdk/ai/azure-ai-projects/samples/agents/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/__init__.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_azure_functions_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_azure_functions_async.py new file mode 100644 index 000000000000..5151dc57380f --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_azure_functions_async.py @@ -0,0 +1,108 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +import asyncio + +""" +DESCRIPTION: + This sample demonstrates how to use azure function agent operations from + the Azure Agents service using an asynchronous client. + +USAGE: + python sample_agents_azure_functions_async.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) STORAGE_SERVICE_ENDPONT - the storage service queue endpoint, triggering Azure function. 
+ + Please see Getting Started with Azure Functions page for more information on Azure Functions: + https://learn.microsoft.com/azure/azure-functions/functions-get-started +""" + +import os +from azure.ai.projects.aio import AIProjectClient +from azure.identity.aio import DefaultAzureCredential +from azure.ai.agents.models import ( + AzureFunctionStorageQueue, + AzureFunctionTool, + MessageRole, +) + + +async def main(): + + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + storage_service_endpoint = os.environ["STORAGE_SERVICE_ENDPONT"] + azure_function_tool = AzureFunctionTool( + name="foo", + description="Get answers from the foo bot.", + parameters={ + "type": "object", + "properties": { + "query": {"type": "string", "description": "The question to ask."}, + "outputqueueuri": {"type": "string", "description": "The full output queue uri."}, + }, + }, + input_queue=AzureFunctionStorageQueue( + queue_name="azure-function-foo-input", + storage_service_endpoint=storage_service_endpoint, + ), + output_queue=AzureFunctionStorageQueue( + queue_name="azure-function-tool-output", + storage_service_endpoint=storage_service_endpoint, + ), + ) + + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="azure-function-agent-foo", + instructions=f"You are a helpful support agent. Use the provided function any time the prompt contains the string 'What would foo say?'. When you invoke the function, ALWAYS specify the output queue uri parameter as '{storage_service_endpoint}/azure-function-tool-output'. 
Always responds with \"Foo says\" and then the response from the tool.", + tools=azure_function_tool.definitions, + ) + print(f"Created agent, agent ID: {agent.id}") + + # Create a thread + thread = await agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + # Create a message + message = await agents_client.messages.create( + thread_id=thread.id, + role="user", + content="What is the most prevalent element in the universe? What would foo say?", + ) + print(f"Created message, message ID: {message.id}") + + run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Get the last message from the sender + last_msg = await agents_client.messages.get_last_message_text_by_role( + thread_id=thread.id, role=MessageRole.AGENT + ) + if last_msg: + print(f"Last Message: {last_msg.text.value}") + + # Delete the agent once done + await agents_client.delete_agent(agent.id) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_basics_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_basics_async.py new file mode 100644 index 000000000000..0e4227a5f86c --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_basics_async.py @@ -0,0 +1,81 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic agent operations from + the Azure Agents service using a asynchronous client. 
+
+USAGE:
+    python sample_agents_basics_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-ai-agents azure-identity aiohttp
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
+       page of your Azure AI Foundry portal.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+import asyncio
+import time
+
+from azure.ai.projects.aio import AIProjectClient
+from azure.ai.agents.models import MessageTextContent, ListSortOrder
+from azure.identity.aio import DefaultAzureCredential
+
+import os
+
+
+async def main() -> None:
+
+    project_client = AIProjectClient(
+        endpoint=os.environ["PROJECT_ENDPOINT"],
+        credential=DefaultAzureCredential(),
+    )
+
+    async with project_client:
+        agents_client = project_client.agents
+
+        agent = await agents_client.create_agent(
+            model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are helpful agent"
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await agents_client.threads.create()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        message = await agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+        print(f"Created message, message ID: {message.id}")
+
+        run = await agents_client.runs.create(thread_id=thread.id, agent_id=agent.id)
+
+        # Poll the run as long as run status is queued or in progress
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            # Wait for a second
+            await asyncio.sleep(1)
+            run = await agents_client.runs.get(thread_id=thread.id, run_id=run.id)
+            print(f"Run status: {run.status}")
+
+        if run.status == "failed":
+            print(f"Run error: {run.last_error}")
+
+        await agents_client.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = agents_client.messages.list(
+            thread_id=thread.id,
+            
order=ListSortOrder.ASCENDING, + ) + async for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_basics_create_thread_and_process_run_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_basics_create_thread_and_process_run_async.py new file mode 100644 index 000000000000..21e5c7b2f908 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_basics_create_thread_and_process_run_async.py @@ -0,0 +1,80 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + Asynchronous variant of sample_agents_basics_thread_and_process_run.py. + This sample demonstrates how to use the new convenience method + `create_thread_and_process_run` in the Azure AI Agents service. + This single call will create a thread, start a run, poll to + completion (including any tool calls), and return the final result. + +USAGE: + python sample_agents_basics_thread_and_process_run_async.py + + Before running: + + pip install azure-ai-projects azure-ai-agents azure-identity aiohttp + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" +import asyncio +import os + +from azure.ai.projects.aio import AIProjectClient +from azure.ai.agents.models import ( + AgentThreadCreationOptions, + ThreadMessageOptions, + MessageTextContent, + ListSortOrder, +) +from azure.identity.aio import DefaultAzureCredential + + +async def main() -> None: + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="sample-agent", + instructions="You are a helpful assistant that tells jokes.", + ) + print(f"Created agent, agent ID: {agent.id}") + + run = await agents_client.create_thread_and_process_run( + agent_id=agent.id, + thread=AgentThreadCreationOptions( + messages=[ThreadMessageOptions(role="user", content="Hi! Tell me your favorite programming joke.")] + ), + ) + + if run.status == "failed": + print(f"Run error: {run.last_error}") + + # List all messages in the thread, in ascending order of creation + messages = agents_client.messages.list( + thread_id=run.thread_id, + order=ListSortOrder.ASCENDING, + ) + async for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + await agents_client.delete_agent(agent.id) + print(f"Deleted agent {agent.id!r}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_basics_create_thread_and_run_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_basics_create_thread_and_run_async.py new file mode 100644 index 000000000000..52b2341539e0 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_basics_create_thread_and_run_async.py @@ -0,0 +1,89 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. 
+# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + Asynchronous variant of sample_agents_basics_thread_and_run.py. + It creates an agent, starts a new thread, and immediately runs it + using the async Azure AI Agents client. + +USAGE: + python sample_agents_basics_thread_and_run_async.py + + Before running: + + pip install azure-ai-projects azure-ai-agents azure-identity aiohttp + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" +import asyncio +import os + +from azure.ai.projects.aio import AIProjectClient +from azure.ai.agents.models import ( + AgentThreadCreationOptions, + ThreadMessageOptions, + MessageTextContent, + ListSortOrder, +) +from azure.identity.aio import DefaultAzureCredential + + +async def main() -> None: + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="sample-agent", + instructions="You are a helpful assistant that tells jokes.", + ) + print(f"Created agent, agent ID: {agent.id}") + + # Prepare the initial user message + initial_message = ThreadMessageOptions( + role="user", + content="Hello! 
Can you tell me a joke?", + ) + + # Create a new thread and immediately start a run on it + run = await agents_client.create_thread_and_run( + agent_id=agent.id, + thread=AgentThreadCreationOptions(messages=[initial_message]), + ) + + # Poll the run as long as run status is queued or in progress + while run.status in {"queued", "in_progress", "requires_action"}: + await asyncio.sleep(1) + run = await agents_client.runs.get(thread_id=run.thread_id, run_id=run.id) + print(f"Run status: {run.status}") + + if run.status == "failed": + print(f"Run error: {run.last_error}") + + # List all messages in the thread, in ascending order of creation + messages = agents_client.messages.list( + thread_id=run.thread_id, + order=ListSortOrder.ASCENDING, + ) + async for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + await agents_client.delete_agent(agent.id) + print(f"Deleted agent {agent.id!r}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_code_interpreter_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_code_interpreter_async.py new file mode 100644 index 000000000000..56c9e6a47fe6 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_code_interpreter_async.py @@ -0,0 +1,111 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use code interpreter tool with agent from + the Azure Agents service using a asynchronous client. 
+ +USAGE: + python sample_agents_code_interpreter_async.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity aiohttp + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" +import asyncio + +from azure.ai.projects.aio import AIProjectClient +from azure.ai.agents.models import CodeInterpreterTool, FilePurpose, ListSortOrder, MessageRole +from azure.identity.aio import DefaultAzureCredential +from pathlib import Path + +import os + +asset_file_path = os.path.abspath( + os.path.join(os.path.dirname(__file__), "../assets/synthetic_500_quarterly_results.csv") +) + + +async def main() -> None: + + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + # Upload a file and wait for it to be processed + file = await agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) + print(f"Uploaded file, file ID: {file.id}") + + code_interpreter = CodeInterpreterTool(file_ids=[file.id]) + + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + tools=code_interpreter.definitions, + tool_resources=code_interpreter.resources, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = await agents_client.messages.create( + thread_id=thread.id, + role="user", + content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and 
provide file to me?", + ) + print(f"Created message, message ID: {message.id}") + + run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + + last_msg = await agents_client.messages.get_last_message_text_by_role( + thread_id=thread.id, role=MessageRole.AGENT + ) + if last_msg: + print(f"Last Message: {last_msg.text.value}") + + async for msg in messages: + # Save every image file in the message + for img in msg.image_contents: + file_id = img.image_file.file_id + file_name = f"{file_id}_image_file.png" + await agents_client.files.save(file_id=file_id, file_name=file_name) + print(f"Saved image file to: {Path.cwd() / file_name}") + + # Print details of every file-path annotation + for ann in msg.file_path_annotations: + print("File Paths:") + print(f" Type: {ann.type}") + print(f" Text: {ann.text}") + print(f" File ID: {ann.file_path.file_id}") + print(f" Start Index: {ann.start_index}") + print(f" End Index: {ann.end_index}") + + await agents_client.delete_agent(agent.id) + print("Deleted agent") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_code_interpreter_attachment_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_code_interpreter_attachment_async.py new file mode 100644 index 000000000000..c6af613314e7 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_code_interpreter_attachment_async.py @@ -0,0 +1,95 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with code interpreter from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_code_interpreter_attachment_async.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity aiohttp + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" +import asyncio +import os +from azure.ai.projects.aio import AIProjectClient +from azure.ai.agents.models import ( + CodeInterpreterTool, + FilePurpose, + MessageAttachment, + ListSortOrder, + MessageTextContent, +) +from azure.identity.aio import DefaultAzureCredential + +asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) + + +async def main(): + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + # Upload a file and wait for it to be processed + file = await agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) + print(f"Uploaded file, file ID: {file.id}") + + code_interpreter = CodeInterpreterTool() + + # Notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + tools=code_interpreter.definitions, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + 
print(f"Created thread, thread ID: {thread.id}") + + # Create a message with the attachment + attachment = MessageAttachment(file_id=file.id, tools=code_interpreter.definitions) + message = await agents_client.messages.create( + thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] + ) + print(f"Created message, message ID: {message.id}") + + run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + await agents_client.files.delete(file.id) + print("Deleted file") + + await agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + async for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_code_interpreter_attachment_enterprise_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_code_interpreter_attachment_enterprise_search_async.py new file mode 100644 index 000000000000..e25772cc8e8f --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_code_interpreter_attachment_enterprise_search_async.py @@ -0,0 +1,85 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with code interpreter from + the Azure Agents service using a synchronous client. 
+ +USAGE: + python sample_agents_code_interpreter_attachment_enterprise_search_async.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity aiohttp + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" +import asyncio +import os +from azure.ai.agents.aio import AgentsClient +from azure.ai.agents.models import ( + CodeInterpreterTool, + ListSortOrder, + MessageAttachment, + MessageTextContent, + VectorStoreDataSource, + VectorStoreDataSourceAssetType, +) +from azure.identity.aio import DefaultAzureCredential + + +async def main(): + async with DefaultAzureCredential() as credential: + async with AgentsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=credential) as agents_client: + + code_interpreter = CodeInterpreterTool() + + # Notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + tools=code_interpreter.definitions, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + # We will upload the local file to Azure and will use it for vector store creation. 
+ asset_uri = os.environ["AZURE_BLOB_URI"] + ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) + + # Create a message with the attachment + attachment = MessageAttachment(data_source=ds, tools=code_interpreter.definitions) + message = await agents_client.messages.create( + thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] + ) + print(f"Created message, message ID: {message.id}") + + run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + await agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + async for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_functions_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_functions_async.py new file mode 100644 index 000000000000..8f39ebd23247 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_functions_async.py @@ -0,0 +1,117 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_functions_async.py + +DESCRIPTION: + This sample demonstrates how to use agent operations with custom functions from + the Azure Agents service using a asynchronous client. 
+ +USAGE: + python sample_agents_functions_async.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity aiohttp + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" +import asyncio +import time +import os +from azure.ai.projects.aio import AIProjectClient +from azure.ai.agents.models import ( + AsyncFunctionTool, + AsyncToolSet, + RequiredFunctionToolCall, + SubmitToolOutputsAction, + ToolOutput, + ListSortOrder, + MessageTextContent, +) +from azure.identity.aio import DefaultAzureCredential +from utils.user_async_functions import user_async_functions + + +async def main() -> None: + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + # Initialize agent functions + functions = AsyncFunctionTool(functions=user_async_functions) + toolset = AsyncToolSet() + toolset.add(functions) + + # Create agent + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + tools=functions.definitions, + ) + print(f"Created agent, agent ID: {agent.id}") + + # Create thread for communication + thread = await agents_client.threads.create() + print(f"Created thread, ID: {thread.id}") + + # Create and send message + message = await agents_client.messages.create( + thread_id=thread.id, role="user", content="Hello, what's the time?" 
+        )
+        print(f"Created message, ID: {message.id}")
+
+        # Create and run agent task
+        run = await agents_client.runs.create(thread_id=thread.id, agent_id=agent.id)
+        print(f"Created run, ID: {run.id}")
+
+        # Polling loop for run status
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            await asyncio.sleep(4)
+            run = await agents_client.runs.get(thread_id=thread.id, run_id=run.id)
+
+            if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
+                tool_calls = run.required_action.submit_tool_outputs.tool_calls
+                if not tool_calls:
+                    print("No tool calls provided - cancelling run")
+                    await agents_client.runs.cancel(thread_id=thread.id, run_id=run.id)
+                    break
+
+                tool_outputs = await toolset.execute_tool_calls(tool_calls)
+
+                print(f"Tool outputs: {tool_outputs}")
+                if tool_outputs:
+                    await agents_client.runs.submit_tool_outputs(
+                        thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
+                    )
+
+            print(f"Current run status: {run.status}")
+
+        print(f"Run completed with status: {run.status}")
+
+        # Delete the agent when done
+        await agents_client.delete_agent(agent.id)
+        print("Deleted agent")
+
+        # Fetch and log messages
+        messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING)
+        async for msg in messages:
+            last_part = msg.content[-1]
+            if isinstance(last_part, MessageTextContent):
+                print(f"{msg.role}: {last_part.text.value}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_image_input_base64_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_image_input_base64_async.py
new file mode 100644
index 000000000000..b19fb72d01e9
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_image_input_base64_async.py
@@ -0,0 +1,118 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic agent operations using image file input for the + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_image_input_base64.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-projects azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" +import asyncio +import os, time, base64 +from typing import List +from azure.ai.projects.aio import AIProjectClient +from azure.identity.aio import DefaultAzureCredential +from azure.ai.agents.models import ( + ListSortOrder, + MessageTextContent, + MessageInputContentBlock, + MessageImageUrlParam, + MessageInputTextBlock, + MessageInputImageUrlBlock, +) + +asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/image_file.png")) + + +def image_to_base64(image_path: str) -> str: + """ + Convert an image file to a Base64-encoded string. + + :param image_path: The path to the image file (e.g. 'image_file.png') + :return: A Base64-encoded string representing the image. + :raises FileNotFoundError: If the provided file path does not exist. + :raises OSError: If there's an error reading the file. 
+    """
+    if not os.path.isfile(image_path):
+        raise FileNotFoundError(f"File not found at: {image_path}")
+
+    try:
+        with open(image_path, "rb") as image_file:
+            file_data = image_file.read()
+        return base64.b64encode(file_data).decode("utf-8")
+    except Exception as exc:
+        raise OSError(f"Error reading file '{image_path}'") from exc
+
+
+async def main():
+    project_client = AIProjectClient(
+        endpoint=os.environ["PROJECT_ENDPOINT"],
+        credential=DefaultAzureCredential(),
+    )
+
+    async with project_client:
+        agents_client = project_client.agents
+
+        agent = await agents_client.create_agent(
+            model=os.environ["MODEL_DEPLOYMENT_NAME"],
+            name="my-agent",
+            instructions="You are helpful agent",
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await agents_client.threads.create()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        input_message = "Hello, what is in the image ?"
+        image_base64 = image_to_base64(asset_file_path)
+        img_url = f"data:image/png;base64,{image_base64}"
+        url_param = MessageImageUrlParam(url=img_url, detail="high")
+        content_blocks: List[MessageInputContentBlock] = [
+            MessageInputTextBlock(text=input_message),
+            MessageInputImageUrlBlock(image_url=url_param),
+        ]
+        message = await agents_client.messages.create(thread_id=thread.id, role="user", content=content_blocks)
+        print(f"Created message, message ID: {message.id}")
+
+        run = await agents_client.runs.create(thread_id=thread.id, agent_id=agent.id)
+
+        # Poll the run as long as run status is queued or in progress
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            # Wait for a second
+            await asyncio.sleep(1)
+            run = await agents_client.runs.get(thread_id=thread.id, run_id=run.id)
+            print(f"Run status: {run.status}")
+
+        if run.status == "failed":
+            print(f"Run failed: {run.last_error}")
+
+        await agents_client.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = agents_client.messages.list(
+            thread_id=thread.id,
+            order=ListSortOrder.ASCENDING,
+        )
+
+        
async for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_image_input_file_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_image_input_file_async.py new file mode 100644 index 000000000000..4eb7335f13de --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_image_input_file_async.py @@ -0,0 +1,100 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic agent operations using image file input for the + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_image_input_file.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-projects azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" +import asyncio +import os, time +from typing import List +from azure.ai.projects.aio import AIProjectClient +from azure.identity.aio import DefaultAzureCredential +from azure.ai.agents.models import ( + ListSortOrder, + MessageTextContent, + MessageInputContentBlock, + MessageImageFileParam, + MessageInputTextBlock, + MessageInputImageFileBlock, + FilePurpose, +) + +asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/image_file.png")) + + +async def main(): + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + image_file = await agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) + print(f"Uploaded file, file ID: {image_file.id}") + + input_message = "Hello, what is in the image ?" 
+ file_param = MessageImageFileParam(file_id=image_file.id, detail="high") + content_blocks: List[MessageInputContentBlock] = [ + MessageInputTextBlock(text=input_message), + MessageInputImageFileBlock(image_file=file_param), + ] + message = await agents_client.messages.create(thread_id=thread.id, role="user", content=content_blocks) + print(f"Created message, message ID: {message.id}") + + run = await agents_client.runs.create(thread_id=thread.id, agent_id=agent.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = await agents_client.runs.get(thread_id=thread.id, run_id=run.id) + print(f"Run status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + await agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list( + thread_id=thread.id, + order=ListSortOrder.ASCENDING, + ) + + async for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_image_input_url_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_image_input_url_async.py new file mode 100644 index 000000000000..00995181f1ba --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_image_input_url_async.py @@ -0,0 +1,96 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic agent operations using image url input for the + the Azure Agents service using a synchronous client. 
+ +USAGE: + python sample_agents_image_input_url.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-projects azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" +import asyncio +import os, time +from typing import List +from azure.ai.projects.aio import AIProjectClient +from azure.identity.aio import DefaultAzureCredential +from azure.ai.agents.models import ( + ListSortOrder, + MessageTextContent, + MessageInputContentBlock, + MessageImageUrlParam, + MessageInputTextBlock, + MessageInputImageUrlBlock, +) + + +async def main(): + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + input_message = "Hello, what is in the image ?" 
+ url_param = MessageImageUrlParam(url=image_url, detail="high") + content_blocks: List[MessageInputContentBlock] = [ + MessageInputTextBlock(text=input_message), + MessageInputImageUrlBlock(image_url=url_param), + ] + message = await agents_client.messages.create(thread_id=thread.id, role="user", content=content_blocks) + print(f"Created message, message ID: {message.id}") + + run = await agents_client.runs.create(thread_id=thread.id, agent_id=agent.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = await agents_client.runs.get(thread_id=thread.id, run_id=run.id) + print(f"Run status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + await agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list( + thread_id=thread.id, + order=ListSortOrder.ASCENDING, + ) + + async for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_json_schema_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_json_schema_async.py new file mode 100644 index 000000000000..cd5b9a83977f --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_json_schema_async.py @@ -0,0 +1,108 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agents with JSON schema output format. 
+ +USAGE: + python sample_agents_json_schema_async.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity pydantic + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import asyncio +import os + +from enum import Enum +from pydantic import BaseModel, TypeAdapter +from azure.ai.projects.aio import AIProjectClient +from azure.identity.aio import DefaultAzureCredential +from azure.ai.agents.models import ( + ListSortOrder, + MessageTextContent, + MessageRole, + ResponseFormatJsonSchema, + ResponseFormatJsonSchemaType, + RunStatus, +) + + +# Create the pydantic model to represent the planet names and there masses. +class Planets(str, Enum): + Earth = "Earth" + Mars = "Mars" + Jupyter = "Jupyter" + + +class Planet(BaseModel): + planet: Planets + mass: float + + +async def main(): + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="Extract the information about planets.", + response_format=ResponseFormatJsonSchemaType( + json_schema=ResponseFormatJsonSchema( + name="planet_mass", + description="Extract planet mass.", + schema=Planet.model_json_schema(), + ) + ), + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = await agents_client.messages.create( + thread_id=thread.id, + role="user", + content=("The mass of the Mars is 6.4171E23 kg; the mass of the Earth 
is 5.972168E24 kg;"), + ) + print(f"Created message, message ID: {message.id}") + + run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + + if run.status != RunStatus.COMPLETED: + print(f"The run did not succeed: {run.status=}.") + + await agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list( + thread_id=thread.id, + order=ListSortOrder.ASCENDING, + ) + + async for msg in messages: + if msg.role == MessageRole.AGENT: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + planet = TypeAdapter(Planet).validate_json(last_part.text.value) + print(f"The mass of {planet.planet} is {planet.mass} kg.") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_run_with_toolset_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_run_with_toolset_async.py new file mode 100644 index 000000000000..c7d9addc9130 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_run_with_toolset_async.py @@ -0,0 +1,90 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with toolset from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_run_with_toolset_async.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity aiohttp + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. 
+""" + +import os, asyncio +from azure.ai.projects.aio import AIProjectClient +from azure.identity.aio import DefaultAzureCredential +from azure.ai.agents.models import AsyncFunctionTool, AsyncToolSet, ListSortOrder, MessageTextContent +from utils.user_async_functions import user_async_functions + + +async def main() -> None: + + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + # Initialize agent toolset with user functions and code interpreter + # [START create_agent_with_async_function_tool] + functions = AsyncFunctionTool(user_async_functions) + + toolset = AsyncToolSet() + toolset.add(functions) + agents_client.enable_auto_function_calls(toolset) + + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + toolset=toolset, + ) + # [END create_agent_with_async_function_tool] + print(f"Created agent, ID: {agent.id}") + + # Create thread for communication + thread = await agents_client.threads.create() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = await agents_client.messages.create( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York?", + ) + print(f"Created message, ID: {message.id}") + + # Create and process agent run in thread with tools + run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the agent when done + await agents_client.delete_agent(agent.id) + print("Deleted agent") + + # Fetch and log all messages + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + async for msg in messages: + last_part = 
msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_eventhandler_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_eventhandler_async.py new file mode 100644 index 000000000000..274b460e64c7 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_eventhandler_async.py @@ -0,0 +1,106 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with an event handler in streaming from + the Azure Agents service using a asynchronous client. + +USAGE: + python sample_agents_stream_eventhandler_async.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity aiohttp + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. +""" +import asyncio +from typing import Any, Optional + +from azure.ai.projects.aio import AIProjectClient +from azure.ai.agents.models import ( + ListSortOrder, + MessageTextContent, + MessageDeltaChunk, + RunStep, + ThreadMessage, + ThreadRun, +) +from azure.ai.agents.models import AsyncAgentEventHandler +from azure.identity.aio import DefaultAzureCredential + +import os + + +class MyEventHandler(AsyncAgentEventHandler[str]): + + async def on_message_delta(self, delta: "MessageDeltaChunk") -> Optional[str]: + return f"Text delta received: {delta.text}" + + async def on_thread_message(self, message: "ThreadMessage") -> Optional[str]: + return f"ThreadMessage created. 
ID: {message.id}, Status: {message.status}" + + async def on_thread_run(self, run: "ThreadRun") -> Optional[str]: + return f"ThreadRun status: {run.status}" + + async def on_run_step(self, step: "RunStep") -> Optional[str]: + return f"RunStep type: {step.type}, Status: {step.status}" + + async def on_error(self, data: str) -> Optional[str]: + return f"An error occurred. Data: {data}" + + async def on_done(self) -> Optional[str]: + return "Stream completed." + + async def on_unhandled_event(self, event_type: str, event_data: Any) -> Optional[str]: + return f"Unhandled Event Type: {event_type}, Data: {event_data}" + + +async def main() -> None: + + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are helpful agent" + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID {thread.id}") + + message = await agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") + + async with await agents_client.runs.stream( + thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler() + ) as stream: + async for event_type, event_data, func_return in stream: + print(f"Received data.") + print(f"Streaming receive Event Type: {event_type}") + print(f"Event Data: {str(event_data)[:100]}...") + print(f"Event Function return: {func_return}\n") + + await agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + async for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: 
{last_part.text.value}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_eventhandler_with_functions_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_eventhandler_with_functions_async.py new file mode 100644 index 000000000000..a74375965f4f --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_eventhandler_with_functions_async.py @@ -0,0 +1,140 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with an event handler and toolset from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_stream_eventhandler_with_functions_async.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity aiohttp + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. 
+""" +import asyncio +from typing import Any + +import os +from azure.ai.projects.aio import AIProjectClient +from azure.ai.agents.aio import AgentsClient +from azure.ai.agents.models import ( + AsyncAgentEventHandler, + AsyncFunctionTool, + AsyncToolSet, + ListSortOrder, + MessageTextContent, + MessageDeltaChunk, + RequiredFunctionToolCall, + RunStep, + SubmitToolOutputsAction, + ThreadMessage, + ThreadRun, + ToolOutput, +) +from azure.identity.aio import DefaultAzureCredential +from utils.user_async_functions import user_async_functions + +# Initialize function tool with user functions +functions = AsyncFunctionTool(functions=user_async_functions) +toolset = AsyncToolSet() +toolset.add(functions) + + +class MyEventHandler(AsyncAgentEventHandler[str]): + + def __init__(self, functions: AsyncFunctionTool, agents_client: AgentsClient) -> None: + super().__init__() + self.functions = functions + self.agents_client = agents_client + + async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + print(f"Text delta received: {delta.text}") + + async def on_thread_message(self, message: "ThreadMessage") -> None: + print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + + async def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + if run.status == "failed": + print(f"Run failed. Error: {run.last_error}") + + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + + tool_outputs = await toolset.execute_tool_calls(tool_calls) + + if tool_outputs: + await self.agents_client.runs.submit_tool_outputs_stream( + thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=self + ) + + async def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + async def on_error(self, data: str) -> None: + print(f"An error occurred. 
Data: {data}") + + async def on_done(self) -> None: + print("Stream completed.") + + async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +async def main() -> None: + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + tools=functions.definitions, + ) + print(f"Created agent, ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID {thread.id}") + + message = await agents_client.messages.create( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York? Also let me know the details.", + ) + print(f"Created message, message ID {message.id}") + + async with await agents_client.runs.stream( + thread_id=thread.id, + agent_id=agent.id, + event_handler=MyEventHandler(functions, agents_client), + ) as stream: + await stream.until_done() + + await agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + async for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_eventhandler_with_toolset_async.py new file mode 100644 index 000000000000..016e2f151aea --- /dev/null +++ 
b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_eventhandler_with_toolset_async.py @@ -0,0 +1,119 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with an event handler and toolset from + the Azure Agents service using a asynchronous client. + +USAGE: + python sample_agents_stream_eventhandler_with_toolset_async.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity aiohttp + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. +""" +import asyncio +from typing import Any + +from azure.ai.projects.aio import AIProjectClient +from azure.ai.agents.models import MessageDeltaChunk, RunStep, ThreadMessage, ThreadRun +from azure.ai.agents.models import ( + AsyncAgentEventHandler, + AsyncFunctionTool, + AsyncToolSet, + ListSortOrder, + MessageTextContent, +) +from azure.identity.aio import DefaultAzureCredential + +import os + +from utils.user_async_functions import user_async_functions + + +class MyEventHandler(AsyncAgentEventHandler): + + async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + print(f"Text delta received: {delta.text}") + + async def on_thread_message(self, message: "ThreadMessage") -> None: + print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + + async def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + if run.status == "failed": + print(f"Run failed. 
Error: {run.last_error}") + + async def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + async def on_error(self, data: str) -> None: + print(f"An error occurred. Data: {data}") + + async def on_done(self) -> None: + print("Stream completed.") + + async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +async def main() -> None: + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + # Initialize toolset with user functions + functions = AsyncFunctionTool(user_async_functions) + toolset = AsyncToolSet() + toolset.add(functions) + + agents_client.enable_auto_function_calls(user_async_functions) + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + toolset=toolset, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID {thread.id}") + + message = await agents_client.messages.create( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York? 
Also let me know the details", + ) + print(f"Created message, message ID {message.id}") + + async with await agents_client.runs.stream( + thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler() + ) as stream: + await stream.until_done() + + await agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + async for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_iteration_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_iteration_async.py new file mode 100644 index 000000000000..b7f9e86bb31b --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_iteration_async.py @@ -0,0 +1,95 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with interation in streaming from + the Azure Agents service using a asynchronous client. + +USAGE: + python sample_agents_stream_iteration_async.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity aiohttp + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. 
+""" +import asyncio + +from azure.ai.projects.aio import AIProjectClient +from azure.ai.agents.models import AgentStreamEvent +from azure.ai.agents.models import ( + MessageDeltaChunk, + RunStep, + ThreadMessage, + ThreadRun, + ListSortOrder, + MessageTextContent, +) +from azure.identity.aio import DefaultAzureCredential + +import os + + +async def main() -> None: + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are helpful agent" + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID {thread.id}") + + message = await agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") + + async with await agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id) as stream: + async for event_type, event_data, _ in stream: + + if isinstance(event_data, MessageDeltaChunk): + print(f"Text delta received: {event_data.text}") + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AgentStreamEvent.ERROR: + print(f"An error occurred. 
Data: {event_data}") + + elif event_type == AgentStreamEvent.DONE: + print("Stream completed.") + break + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + await agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + async for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_with_base_override_eventhandler_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_with_base_override_eventhandler_async.py new file mode 100644 index 000000000000..aa8d8a4fc929 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_stream_with_base_override_eventhandler_async.py @@ -0,0 +1,116 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to override the base event handler, parse the events, and iterate through them. + In your use case, you might not want to write the iteration code similar to sample_agents_stream_iteration_async.py. + If you have multiple places to call stream, you might find the iteration code cumbersome. + This example shows how to override the base event handler, parse the events, and iterate through them, which can be + reused in multiple stream calls to help keep the code clean. 
+ +USAGE: + python sample_agents_stream_with_base_override_eventhandler_async.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity aiohttp + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. +""" +import asyncio +import json +from typing import AsyncGenerator, Optional + +from azure.ai.projects.aio import AIProjectClient +from azure.ai.agents.models._models import ( + MessageDeltaChunk, + MessageDeltaTextContent, +) +from azure.ai.agents.models import AgentStreamEvent, BaseAsyncAgentEventHandler, ListSortOrder, MessageTextContent +from azure.identity.aio import DefaultAzureCredential + +import os + + +# Our goal is to parse the event data in a string and return the chunk in text for each iteration. +# Because we want the iteration to be a string, we define str as the generic type for BaseAsyncAgentEventHandler +# and override the _process_event method to return a string. +# The get_stream_chunks method is defined to return the chunks as strings because the iteration is a string. 
+class MyEventHandler(BaseAsyncAgentEventHandler[Optional[str]]): + + async def _process_event(self, event_data_str: str) -> Optional[str]: + + event_lines = event_data_str.strip().split("\n") + event_type: Optional[str] = None + event_data = "" + for line in event_lines: + if line.startswith("event:"): + event_type = line.split(":", 1)[1].strip() + elif line.startswith("data:"): + event_data = line.split(":", 1)[1].strip() + + if not event_type: + raise ValueError("Event type not specified in the event data.") + + if event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA.value: + + event_obj: MessageDeltaChunk = MessageDeltaChunk(**json.loads(event_data)) + + for content_part in event_obj.delta.content: + if isinstance(content_part, MessageDeltaTextContent): + if content_part.text is not None: + return content_part.text.value + return None + + async def get_stream_chunks(self) -> AsyncGenerator[str, None]: + async for chunk in self: + if chunk: + yield chunk + + +async def main() -> None: + + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are helpful agent" + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID {thread.id}") + + message = await agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") + + async with await agents_client.runs.stream( + thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler() + ) as stream: + async for chunk in stream.get_stream_chunks(): + print(chunk) + + await agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, 
order=ListSortOrder.ASCENDING)
+        async for msg in messages:
+            last_part = msg.content[-1]
+            if isinstance(last_part, MessageTextContent):
+                print(f"{msg.role}: {last_part.text.value}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_vector_store_batch_enterprise_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_vector_store_batch_enterprise_file_search_async.py
new file mode 100644
index 000000000000..e04f9955c449
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_vector_store_batch_enterprise_file_search_async.py
@@ -0,0 +1,125 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+DESCRIPTION:
+    This sample demonstrates how to use agent operations to add files to an existing vector store and perform search from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_vector_store_batch_enterprise_file_search_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-ai-agents azure-identity azure-ai-ml aiohttp
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
+       page of your Azure AI Foundry portal.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model.
+""" +import asyncio +import os + +from azure.ai.projects.aio import AIProjectClient +from azure.ai.agents.models import ( + FileSearchTool, + ListSortOrder, + MessageTextContent, + VectorStoreDataSource, + VectorStoreDataSourceAssetType, +) +from azure.identity.aio import DefaultAzureCredential + + +async def main(): + + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + # We will upload the local file to Azure and will use it for vector store creation. + asset_uri = os.environ["AZURE_BLOB_URI"] + ds = VectorStoreDataSource( + asset_identifier=asset_uri, + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + vector_store = await agents_client.vector_stores.create_and_poll(file_ids=[], name="sample_vector_store") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Add the file to the vector store or you can supply file ids in the vector store creation + vector_store_file_batch = await agents_client.vector_store_file_batches.create_and_poll( + vector_store_id=vector_store.id, data_sources=[ds] + ) + print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") + + # Create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # Notices that FileSearchTool as tool and tool_resources must be added or the agent unable to search the file + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = await agents_client.messages.create( + thread_id=thread.id, + role="user", + content="What 
feature does Smart Eyewear offer?", + ) + print(f"Created message, message ID: {message.id}") + + run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Created run, run ID: {run.id}") + + file_search_tool.remove_vector_store(vector_store.id) + print(f"Removed vector store from file search, vector store ID: {vector_store.id}") + + await agents_client.update_agent( + agent_id=agent.id, + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Updated agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = await agents_client.messages.create( + thread_id=thread.id, + role="user", + content="What feature does Smart Eyewear offer?", + ) + print(f"Created message, message ID: {message.id}") + + run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Created run, run ID: {run.id}") + + await agents_client.vector_stores.delete(vector_store.id) + print("Deleted vector store") + + await agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + async for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_vector_store_batch_file_search_async.py new file mode 100644 index 000000000000..7dada937762b --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_vector_store_batch_file_search_async.py @@ -0,0 +1,117 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# 
Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use agent operations to add files to an existing vector store and perform search from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_vector_store_batch_file_search_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-ai-agents azure-identity aiohttp
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
+       page of your Azure AI Foundry portal.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model.
+"""
+
+import asyncio
+import os
+from azure.ai.projects.aio import AIProjectClient
+from azure.ai.agents.models import FileSearchTool, FilePurpose, ListSortOrder, MessageTextContent
+from azure.identity.aio import DefaultAzureCredential
+
+asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md"))
+
+
+async def main() -> None:
+    project_client = AIProjectClient(
+        endpoint=os.environ["PROJECT_ENDPOINT"],
+        credential=DefaultAzureCredential(),
+    )
+
+    async with project_client:
+        agents_client = project_client.agents
+
+        # Upload a file and wait for it to be processed
+        file = await agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS)
+        print(f"Uploaded file, file ID: {file.id}")
+
+        # Create a vector store with no file and wait for it to be processed
+        vector_store = await agents_client.vector_stores.create_and_poll(file_ids=[], name="sample_vector_store")
+        print(f"Created vector store, vector store ID: {vector_store.id}")
+
+        # Add the file to the vector store or you can supply file ids in the vector store creation
+        vector_store_file_batch = await agents_client.vector_store_file_batches.create_and_poll(
+            vector_store_id=vector_store.id, file_ids=[file.id]
+        )
+ print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") + + # Create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # Notices that FileSearchTool as tool and tool_resources must be added or the agent unable to search the file + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = await agents_client.messages.create( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" + ) + print(f"Created message, message ID: {message.id}") + + run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Created run, run ID: {run.id}") + + file_search_tool.remove_vector_store(vector_store.id) + print(f"Removed vector store from file search, vector store ID: {vector_store.id}") + + await agents_client.update_agent( + agent_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources + ) + print(f"Updated agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = await agents_client.messages.create( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
+ ) + print(f"Created message, message ID: {message.id}") + + run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Created run, run ID: {run.id}") + + await agents_client.files.delete(file.id) + print("Deleted file") + + await agents_client.vector_stores.delete(vector_store.id) + print("Deleted vector store") + + await agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + async for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_vector_store_enterprise_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_vector_store_enterprise_file_search_async.py new file mode 100644 index 000000000000..bd5dae4f98ed --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_vector_store_enterprise_file_search_async.py @@ -0,0 +1,89 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to add files to agent during the vector store creation. + +USAGE: + python sample_agents_vector_store_enterprise_file_search_async.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity azure-ai-ml aiohttp + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. 
+""" +import asyncio +import os + +from azure.ai.projects.aio import AIProjectClient +from azure.ai.agents.models import ( + FileSearchTool, + VectorStoreDataSource, + VectorStoreDataSourceAssetType, + ListSortOrder, + MessageTextContent, +) +from azure.identity.aio import DefaultAzureCredential + + +async def main(): + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + # We will upload the local file to Azure and will use it for vector store creation. + asset_uri = os.environ["AZURE_BLOB_URI"] + ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) + vector_store = await agents_client.vector_stores.create_and_poll(data_sources=[ds], name="sample_vector_store") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # Notices that FileSearchTool as tool and tool_resources must be added or the agent unable to search the file + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = await agents_client.messages.create( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
+ ) + print(f"Created message, message ID: {message.id}") + + run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Created run, run ID: {run.id}") + + await agents_client.vector_stores.delete(vector_store.id) + print("Deleted vector store") + + await agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + async for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_vector_store_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_vector_store_file_search_async.py new file mode 100644 index 000000000000..1851246d3cd0 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_vector_store_file_search_async.py @@ -0,0 +1,87 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to add files to agent during the vector store creation. + +USAGE: + python sample_agents_vector_store_file_search_async.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity aiohttp + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. 
+""" +import asyncio +import os + +from azure.ai.projects.aio import AIProjectClient +from azure.ai.agents.models import FileSearchTool, FilePurpose, MessageTextContent, ListSortOrder +from azure.identity.aio import DefaultAzureCredential + +asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) + + +async def main(): + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + # Upload a file and wait for it to be processed + file = await agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) + print(f"Uploaded file, file ID: {file.id}") + + # Create a vector store with no file and wait for it to be processed + vector_store = await agents_client.vector_stores.create_and_poll(file_ids=[file.id], name="sample_vector_store") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # Notices that FileSearchTool as tool and tool_resources must be added or the agent unable to search the file + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = await agents_client.messages.create( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
+        )
+        print(f"Created message, message ID: {message.id}")
+
+        run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id)
+        print(f"Created run, run ID: {run.id}")
+
+        await agents_client.vector_stores.delete(vector_store.id)
+        print("Deleted vector store")
+
+        await agents_client.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING)
+        async for msg in messages:
+            last_part = msg.content[-1]
+            if isinstance(last_part, MessageTextContent):
+                print(f"{msg.role}: {last_part.text.value}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_with_file_search_attachment_async.py
new file mode 100644
index 000000000000..321c1c7c6cee
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/sample_agents_with_file_search_attachment_async.py
@@ -0,0 +1,89 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use agent operations to create messages with file search attachments from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_with_file_search_attachment_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-ai-agents azure-identity aiohttp
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
+       page of your Azure AI Foundry portal.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model.
+""" +import asyncio + +from azure.ai.projects.aio import AIProjectClient +from azure.ai.agents.models import FilePurpose +from azure.ai.agents.models import FileSearchTool, MessageAttachment, ListSortOrder, MessageTextContent +from azure.identity.aio import DefaultAzureCredential + +import os + +asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) + + +async def main() -> None: + project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + ) + + async with project_client: + agents_client = project_client.agents + + # Upload a file and wait for it to be processed + file = await agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) + + # Create agent + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + # Create a message with the file search attachment + # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days. 
+ attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions) + message = await agents_client.messages.create( + thread_id=thread.id, + role="user", + content="What feature does Smart Eyewear offer?", + attachments=[attachment], + ) + print(f"Created message, message ID: {message.id}") + + run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id, polling_interval=4) + print(f"Created run, run ID: {run.id}") + + print(f"Run completed with status: {run.status}") + + await agents_client.files.delete(file.id) + print("Deleted file") + + await agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + async for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/utils/__init__.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/utils/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_async/utils/user_async_functions.py b/sdk/ai/azure-ai-projects/samples/agents/agents_async/utils/user_async_functions.py new file mode 100644 index 000000000000..202efe8b5df9 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_async/utils/user_async_functions.py @@ -0,0 +1,67 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +import asyncio +import os +import sys +import json +import datetime +from typing import Any, Callable, Set, Optional +from azure.ai.agents.telemetry import trace_function + + +# Add parent directory to sys.path to import user_functions +current_dir = os.path.dirname(os.path.abspath(__file__)) +parent_dir = os.path.abspath(os.path.join(current_dir, "..", "..", "..")) +if parent_dir not in sys.path: + sys.path.insert(0, parent_dir) +from samples.utils.user_functions import fetch_current_datetime, fetch_weather, send_email + + +async def send_email_async(recipient: str, subject: str, body: str) -> str: + """ + Sends an email with the specified subject and body to the recipient. + + :param recipient (str): Email address of the recipient. + :param subject (str): Subject of the email. + :param body (str): Body content of the email. + :return: Confirmation message. + :rtype: str + """ + await asyncio.sleep(1) + return send_email(recipient, subject, body) + + +# The trace_func decorator will trace the function call and enable adding additional attributes +# to the span in the function implementation. Note that this will trace the function parameters and their values. +@trace_function() +async def fetch_current_datetime_async(format: Optional[str] = None) -> str: + """ + Get the current time as a JSON string, optionally formatted. + + :param format (Optional[str]): The format in which to return the current time. Defaults to None, which uses a standard format. + :return: The current time in JSON format. 
+ :rtype: str + """ + await asyncio.sleep(1) + current_time = datetime.datetime.now() + + # Use the provided format if available, else use a default format + if format: + time_format = format + else: + time_format = "%Y-%m-%d %H:%M:%S" + + time_json = json.dumps({"current_time": current_time.strftime(time_format)}) + return time_json + + +# Statically defined user functions for fast reference with send_email as async but the rest as sync +user_async_functions: Set[Callable[..., Any]] = { + fetch_current_datetime_async, + fetch_weather, + send_email_async, +} diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_image_input_base64.py b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_image_input_base64.py new file mode 100644 index 000000000000..6660d063a35e --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_image_input_base64.py @@ -0,0 +1,113 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic agent operations using image file input for the + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_image_input_base64.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-projects azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os, time, base64 +from typing import List +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.agents.models import ( + MessageTextContent, + MessageInputContentBlock, + MessageImageUrlParam, + MessageInputTextBlock, + MessageInputImageUrlBlock, +) + +asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/image_file.png")) + + +def image_to_base64(image_path: str) -> str: + """ + Convert an image file to a Base64-encoded string. + + :param image_path: The path to the image file (e.g. 'image_file.png') + :return: A Base64-encoded string representing the image. + :raises FileNotFoundError: If the provided file path does not exist. + :raises OSError: If there's an error reading the file. + """ + if not os.path.isfile(image_path): + raise FileNotFoundError(f"File not found at: {image_path}") + + try: + with open(image_path, "rb") as image_file: + file_data = image_file.read() + return base64.b64encode(file_data).decode("utf-8") + except Exception as exc: + raise OSError(f"Error reading file '{image_path}'") from exc + + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + input_message = "Hello, what is in the image ?" 
+ image_base64 = image_to_base64(asset_file_path) + img_url = f"data:image/png;base64,{image_base64}" + url_param = MessageImageUrlParam(url=img_url, detail="high") + content_blocks: List[MessageInputContentBlock] = [ + MessageInputTextBlock(text=input_message), + MessageInputImageUrlBlock(image_url=url_param), + ] + message = agents_client.messages.create(thread_id=thread.id, role="user", content=content_blocks) + print(f"Created message, message ID: {message.id}") + + run = agents_client.runs.create(thread_id=thread.id, agent_id=agent.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = agents_client.runs.get(thread_id=thread.id, run_id=run.id) + print(f"Run status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id) + + # The messages are following in the reverse order, + # we will iterate them and output only text contents. + for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_image_input_file.py b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_image_input_file.py new file mode 100644 index 000000000000..26967232c4c3 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_image_input_file.py @@ -0,0 +1,93 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic agent operations using image file input for the + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_image_input_file.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-projects azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os, time +from typing import List +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.agents.models import ( + ListSortOrder, + MessageTextContent, + MessageInputContentBlock, + MessageImageFileParam, + MessageInputTextBlock, + MessageInputImageFileBlock, + FilePurpose, +) + +asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/image_file.png")) + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + image_file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) + print(f"Uploaded file, file ID: {image_file.id}") + + input_message = "Hello, what is in the image ?" 
+ file_param = MessageImageFileParam(file_id=image_file.id, detail="high") + content_blocks: List[MessageInputContentBlock] = [ + MessageInputTextBlock(text=input_message), + MessageInputImageFileBlock(image_file=file_param), + ] + message = agents_client.messages.create(thread_id=thread.id, role="user", content=content_blocks) + print(f"Created message, message ID: {message.id}") + + run = agents_client.runs.create(thread_id=thread.id, agent_id=agent.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = agents_client.runs.get(thread_id=thread.id, run_id=run.id) + print(f"Run status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + + # The messages are following in the reverse order, + # we will iterate them and output only text contents. + for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_image_input_url.py b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_image_input_url.py new file mode 100644 index 000000000000..947f6af0dc2f --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_image_input_url.py @@ -0,0 +1,91 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use basic agent operations using image url input for
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_image_input_url.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-ai-agents azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
+       page of your Azure AI Foundry portal.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+
+import os, time
+from typing import List
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.agents.models import (
+    MessageTextContent,
+    MessageInputContentBlock,
+    MessageImageUrlParam,
+    MessageInputTextBlock,
+    MessageInputImageUrlBlock,
+)
+
+
+project_client = AIProjectClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+with project_client:
+    agents_client = project_client.agents
+
+    agent = agents_client.create_agent(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-agent",
+        instructions="You are helpful agent",
+    )
+    print(f"Created agent, agent ID: {agent.id}")
+
+    thread = agents_client.threads.create()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
+    input_message = "Hello, what is in the image ?"
+ url_param = MessageImageUrlParam(url=image_url, detail="high") + content_blocks: List[MessageInputContentBlock] = [ + MessageInputTextBlock(text=input_message), + MessageInputImageUrlBlock(image_url=url_param), + ] + message = agents_client.messages.create(thread_id=thread.id, role="user", content=content_blocks) + print(f"Created message, message ID: {message.id}") + + run = agents_client.runs.create(thread_id=thread.id, agent_id=agent.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = agents_client.runs.get(thread_id=thread.id, run_id=run.id) + print(f"Run status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id) + + # The messages are following in the reverse order, + # we will iterate them and output only text contents. + for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_json_schema.py b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_json_schema.py new file mode 100644 index 000000000000..3e4a2dae3b3f --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_json_schema.py @@ -0,0 +1,102 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agents with JSON schema output format. 
+
+USAGE:
+    python sample_agents_json_schema.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-ai-agents azure-identity pydantic
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
+       page of your Azure AI Foundry portal.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+"""
+
+import os
+
+from enum import Enum
+from pydantic import BaseModel, TypeAdapter
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.agents.models import (
+    MessageTextContent,
+    MessageRole,
+    ResponseFormatJsonSchema,
+    ResponseFormatJsonSchemaType,
+    RunStatus,
+)
+
+# [START create_agents_client]
+project_client = AIProjectClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+# [END create_agents_client]
+
+
+# Create the pydantic model to represent the planet names and their masses.
+class Planets(str, Enum): + Earth = "Earth" + Mars = "Mars" + Jupyter = "Jupyter" + + +class Planet(BaseModel): + planet: Planets + mass: float + + +with project_client: + agents_client = project_client.agents + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="Extract the information about planets.", + response_format=ResponseFormatJsonSchemaType( + json_schema=ResponseFormatJsonSchema( + name="planet_mass", + description="Extract planet mass.", + schema=Planet.model_json_schema(), + ) + ), + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = agents_client.messages.create( + thread_id=thread.id, + role="user", + content=("The mass of the Mars is 6.4171E23 kg; the mass of the Earth is 5.972168E24 kg;"), + ) + print(f"Created message, message ID: {message.id}") + + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + + if run.status != RunStatus.COMPLETED: + print(f"The run did not succeed: {run.status=}.") + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id) + + # The messages are following in the reverse order, + # we will iterate them and output only text contents. 
+ for msg in messages: + if msg.role == MessageRole.AGENT: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + planet = TypeAdapter(Planet).validate_json(last_part.text.value) + print(f"The mass of {planet.planet} is {planet.mass} kg.") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_vector_store_batch_enterprise_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_vector_store_batch_enterprise_file_search.py new file mode 100644 index 000000000000..48cb10814b33 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_vector_store_batch_enterprise_file_search.py @@ -0,0 +1,106 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to create the vector store with the list of files. + +USAGE: + python sample_agents_vector_store_batch_enterprise_file_search.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity azure-ai-ml + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType, ListSortOrder +from azure.identity import DefaultAzureCredential + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + # We will upload the local file to Azure and will use it for vector store creation. + asset_uri = os.environ["AZURE_BLOB_URI"] + + # [START attach_files_to_store] + # Create a vector store with no file and wait for it to be processed + vector_store = agents_client.vector_stores.create_and_poll(data_sources=[], name="sample_vector_store") + print(f"Created vector store, vector store ID: {vector_store.id}") + + ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) + # Add the file to the vector store or you can supply data sources in the vector store creation + vector_store_file_batch = agents_client.vector_store_file_batches.create_and_poll( + vector_store_id=vector_store.id, data_sources=[ds] + ) + print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") + + # Create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + # [END attach_files_to_store] + + # Notices that FileSearchTool as tool and tool_resources must be added or the agent unable to search the file + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = agents_client.messages.create( + thread_id=thread.id, role="user", 
content="What feature does Smart Eyewear offer?" + ) + print(f"Created message, message ID: {message.id}") + + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Created run, run ID: {run.id}") + + file_search_tool.remove_vector_store(vector_store.id) + print(f"Removed vector store from file search, vector store ID: {vector_store.id}") + + agents_client.update_agent( + agent_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources + ) + print(f"Updated agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = agents_client.messages.create( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" + ) + print(f"Created message, message ID: {message.id}") + + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Created run, run ID: {run.id}") + + agents_client.vector_stores.delete(vector_store.id) + print("Deleted vector store") + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_vector_store_batch_file_search.py new file mode 100644 index 000000000000..0c2153fba088 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_vector_store_batch_file_search.py @@ -0,0 +1,110 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to use agent operations to add files to an existing vector store and perform search from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_vector_store_batch_file_search.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import FileSearchTool, FilePurpose, ListSortOrder +from azure.identity import DefaultAzureCredential + +asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + # Upload a file and wait for it to be processed + file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) + print(f"Uploaded file, file ID: {file.id}") + + # Create a vector store with no file and wait for it to be processed + vector_store = agents_client.vector_stores.create_and_poll(data_sources=[], name="sample_vector_store") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Add the file to the vector store or you can supply file ids in the vector store creation + vector_store_file_batch = agents_client.vector_store_file_batches.create_and_poll( + vector_store_id=vector_store.id, file_ids=[file.id] + ) + print(f"Created vector store file batch, vector store file batch ID: 
{vector_store_file_batch.id}") + + # Create a file search tool + # [START create_agent_with_tools_and_tool_resources] + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # Notices that FileSearchTool as tool and tool_resources must be added or the agent unable to search the file + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + # [END create_agent_with_tools_and_tool_resources] + print(f"Created agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = agents_client.messages.create( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" + ) + print(f"Created message, message ID: {message.id}") + + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Created run, run ID: {run.id}") + + file_search_tool.remove_vector_store(vector_store.id) + print(f"Removed vector store from file search, vector store ID: {vector_store.id}") + + agents_client.update_agent( + agent_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources + ) + print(f"Updated agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = agents_client.messages.create( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
+ ) + print(f"Created message, message ID: {message.id}") + + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Created run, run ID: {run.id}") + + agents_client.vector_stores.delete(vector_store.id) + print("Deleted vector store") + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_vector_store_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_vector_store_file_search.py new file mode 100644 index 000000000000..01955f2ae315 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_vector_store_file_search.py @@ -0,0 +1,81 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to add files to agent during the vector store creation. + +USAGE: + python sample_agents_vector_store_file_search.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import FileSearchTool, FilePurpose, ListSortOrder +from azure.identity import DefaultAzureCredential + +asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + # Upload a file and wait for it to be processed + file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) + print(f"Uploaded file, file ID: {file.id}") + + # Create a vector store with no file and wait for it to be processed + vector_store = agents_client.vector_stores.create_and_poll(file_ids=[file.id], name="sample_vector_store") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # Notices that FileSearchTool as tool and tool_resources must be added or the agent unable to search the file + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = agents_client.messages.create( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
+ ) + print(f"Created message, message ID: {message.id}") + + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Created run, run ID: {run.id}") + + agents_client.vector_stores.delete(vector_store.id) + print("Deleted vector store") + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_with_code_interpreter_file_attachment.py b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_with_code_interpreter_file_attachment.py new file mode 100644 index 000000000000..9195a64fb6c2 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_with_code_interpreter_file_attachment.py @@ -0,0 +1,112 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with code interpreter through file attachment from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_with_code_interpreter_file_attachment.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import CodeInterpreterTool, MessageAttachment +from azure.ai.agents.models import FilePurpose, MessageRole +from azure.identity import DefaultAzureCredential +from pathlib import Path + +asset_file_path = os.path.abspath( + os.path.join(os.path.dirname(__file__), "../assets/synthetic_500_quarterly_results.csv") +) + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + # Upload a file and wait for it to be processed + file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) + print(f"Uploaded file, file ID: {file.id}") + + # [START create_agent_and_message_with_code_interpreter_file_attachment] + # Notice that CodeInterpreter must be enabled in the agent creation, + # otherwise the agent will not be able to see the file attachment for code interpretation + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + tools=CodeInterpreterTool().definitions, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + # Create an attachment + attachment = MessageAttachment(file_id=file.id, tools=CodeInterpreterTool().definitions) + + # Create a message + message = agents_client.messages.create( + thread_id=thread.id, + role="user", + content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", + attachments=[attachment], + ) + # [END create_agent_and_message_with_code_interpreter_file_attachment] + print(f"Created message, message ID: {message.id}") + + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run 
finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + agents_client.files.delete(file.id) + print("Deleted file") + + messages = agents_client.messages.list(thread_id=thread.id) + print(f"Messages: {messages}") + + last_msg = agents_client.messages.get_last_message_text_by_role(thread_id=thread.id, role=MessageRole.AGENT) + if last_msg: + print(f"Last Message: {last_msg.text.value}") + + for msg in messages: + # Save every image file in the message + for img in msg.image_contents: + file_id = img.image_file.file_id + file_name = f"{file_id}_image_file.png" + agents_client.files.save(file_id=file_id, file_name=file_name) + print(f"Saved image file to: {Path.cwd() / file_name}") + + # Print details of every file-path annotation + for ann in msg.file_path_annotations: + print("File Paths:") + print(f" Type: {ann.type}") + print(f" Text: {ann.text}") + print(f" File ID: {ann.file_path.file_id}") + print(f" Start Index: {ann.start_index}") + print(f" End Index: {ann.end_index}") + + agents_client.delete_agent(agent.id) + print("Deleted agent") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_with_file_search_attachment.py new file mode 100644 index 000000000000..9864cad38be8 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_with_file_search_attachment.py @@ -0,0 +1,78 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations to create messages with file search attachments from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_with_file_search_attachment.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" +import os +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import FilePurpose, FileSearchTool, MessageAttachment, ListSortOrder +from azure.identity import DefaultAzureCredential + +asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + # Upload a file and wait for it to be processed + file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) + print(f"Uploaded file, file ID: {file.id}") + + # Create agent + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + # Create a message with the file search attachment + # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days. 
+ # [START create_message_with_attachment] + attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions) + message = agents_client.messages.create( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment] + ) + # [END create_message_with_attachment] + print(f"Created message, message ID: {message.id}") + + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Created run, run ID: {run.id}") + + agents_client.files.delete(file.id) + print("Deleted file") + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_with_resources_in_thread.py b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_with_resources_in_thread.py new file mode 100644 index 000000000000..9af2da71e5b2 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_files_images_inputs/sample_agents_with_resources_in_thread.py @@ -0,0 +1,98 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with file searching from + the Azure Agents service using a synchronous client. The file is attached to thread. + +USAGE: + python sample_agents_with_resources_in_thread.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. 
+ 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import FileSearchTool, FilePurpose, ListSortOrder +from azure.identity import DefaultAzureCredential + +asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + # Upload file and create vector store + # [START create_agent_and_thread_for_file_search] + file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) + print(f"Uploaded file, file ID: {file.id}") + + vector_store = agents_client.vector_stores.create_and_poll(file_ids=[file.id], name="my_vectorstore") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create file search tool with resources followed by creating agent + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="Hello, you are helpful agent and can search information from uploaded files", + tools=file_search.definitions, + ) + + print(f"Created agent, ID: {agent.id}") + + # Create thread with file resources. + # If the agent has multiple threads, only this thread can search this file. + thread = agents_client.threads.create(tool_resources=file_search.resources) + # [END create_agent_and_thread_for_file_search] + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = agents_client.messages.create( + thread_id=thread.id, role="user", content="Hello, what Contoso products do you know?" 
+ ) + print(f"Created message, ID: {message.id}") + + # Create and process agent run in thread with tools + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + # [START teardown] + # Delete the file when done + agents_client.vector_stores.delete(vector_store.id) + print("Deleted vector store") + + agents_client.files.delete(file_id=file.id) + print("Deleted file") + + # Delete the agent when done + agents_client.delete_agent(agent.id) + print("Deleted agent") + # [END teardown] + + # Fetch and log all messages + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/sample_agents_agent_team.py b/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/sample_agents_agent_team.py new file mode 100644 index 000000000000..7c388403d435 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/sample_agents_agent_team.py @@ -0,0 +1,81 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use multiple agents using AgentTeam with traces. + + The team consists of + • one leader agent - automatically created by AgentTeam from the + configuration in `utils/agent_team_config.yaml` + • two worker agents - `Coder` and `Reviewer`, defined in the code below + + IMPORTANT - leader-agent model configuration + `utils/agent_team_config.yaml` contains the key TEAM_LEADER_MODEL. 
+ Its value must be the name of a **deployed** model in your Azure AI + project (e.g. "gpt-4o-mini"). + If this model deployment is not available, AgentTeam cannot instantiate + the leader agent and the sample will fail. + +USAGE: + python sample_agents_agent_team.py + + Before running the sample: + + 1. pip install azure-ai-projects azure-ai-agents azure-identity + 2. Ensure `utils/agent_team_config.yaml` is present and TEAM_LEADER_MODEL points + to a valid model deployment. + 3. Set these environment variables with your own values: + PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + MODEL_DEPLOYMENT_NAME - The model deployment name used for the worker agents. +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from utils.agent_team import AgentTeam, _create_task +from utils.agent_trace_configurator import AgentTraceConfigurator + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + agents_client.enable_auto_function_calls({_create_task}) + + model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME") + + if model_deployment_name is not None: + AgentTraceConfigurator(agents_client=agents_client).setup_tracing() + with agents_client: + agent_team = AgentTeam("test_team", agents_client=agents_client) + agent_team.add_agent( + model=model_deployment_name, + name="Coder", + instructions="You are software engineer who writes great code. Your name is Coder.", + ) + agent_team.add_agent( + model=model_deployment_name, + name="Reviewer", + instructions="You are software engineer who reviews code. 
Your name is Reviewer.", + ) + agent_team.assemble_team() + + print("A team of agents specialized in software engineering is available for requests.") + while True: + user_input = input("Input (type 'quit' or 'exit' to exit): ") + if user_input.lower() == "quit": + break + elif user_input.lower() == "exit": + break + agent_team.process_request(request=user_input) + + agent_team.dismantle_team() + else: + print("Error: Please define the environment variable MODEL_DEPLOYMENT_NAME.") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/sample_agents_agent_team_custom_team_leader.py b/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/sample_agents_agent_team_custom_team_leader.py new file mode 100644 index 000000000000..3a7d89494cfe --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/sample_agents_agent_team_custom_team_leader.py @@ -0,0 +1,120 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to multiple agents using AgentTeam with traces. + +USAGE: + python sample_agents_agent_team_custom_team_leader.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + MODEL_DEPLOYMENT_NAME - the name of the model deployment to use. 
+""" + +import os +from typing import Optional, Set +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from utils.agent_team import AgentTeam, AgentTask +from utils.agent_trace_configurator import AgentTraceConfigurator +from azure.ai.agents.models import FunctionTool, ToolSet + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME") + + +def create_task(team_name: str, recipient: str, request: str, requestor: str) -> str: + """ + Requests another agent in the team to complete a task. + + :param team_name (str): The name of the team. + :param recipient (str): The name of the agent that is being requested to complete the task. + :param request (str): A description of the to complete. This can also be a question. + :param requestor (str): The name of the agent who is requesting the task. + :return: True if the task was successfully received, False otherwise. 
+ :rtype: str + """ + task = AgentTask(recipient=recipient, task_description=request, requestor=requestor) + team: Optional[AgentTeam] = None + try: + team = AgentTeam.get_team(team_name) + except: + pass + if team is not None: + team.add_task(task) + return "True" + return "False" + + +# Any additional functions that might be used by the agents: +agent_team_default_functions: Set = { + create_task, +} + +default_function_tool = FunctionTool(functions=agent_team_default_functions) + +with project_client: + agents_client = project_client.agents + + agents_client.enable_auto_function_calls({create_task}) + + if model_deployment_name is not None: + AgentTraceConfigurator(agents_client=agents_client).setup_tracing() + with agents_client: + agent_team = AgentTeam("test_team", agents_client=agents_client) + toolset = ToolSet() + toolset.add(default_function_tool) + agent_team.set_team_leader( + model=model_deployment_name, + name="TeamLeader", + instructions="You are an agent named 'TeamLeader'. You are a leader of a team of agents. The name of your team is 'test_team'." + "You are an agent that is responsible for receiving requests from user and utilizing a team of agents to complete the task. " + "When you are passed a request, the only thing you will do is evaluate which team member should do which task next to complete the request. " + "You will use the provided create_task function to create a task for the agent that is best suited for handling the task next. " + "You will respond with the description of who you assigned the task and why. When you think that the original user request is " + "processed completely utilizing all the talent available in the team, you do not have to create anymore tasks. " + "Using the skills of all the team members when applicable is highly valued. " + "Do not create parallel tasks. " + "Here are the other agents in your team: " + "- Coder: You are software engineer who writes great code. Your name is Coder. 
" + "- Reviewer: You are software engineer who reviews code. Your name is Reviewer.", + toolset=toolset, + ) + agent_team.add_agent( + model=model_deployment_name, + name="Coder", + instructions="You are software engineer who writes great code. Your name is Coder.", + ) + agent_team.add_agent( + model=model_deployment_name, + name="Reviewer", + instructions="You are software engineer who reviews code. Your name is Reviewer.", + ) + agent_team.assemble_team() + + print("A team of agents specialized in software engineering is available for requests.") + while True: + user_input = input("Input (type 'quit' or 'exit' to exit): ") + if user_input.lower() == "quit": + break + elif user_input.lower() == "exit": + break + agent_team.process_request(request=user_input) + + agent_team.dismantle_team() + else: + print("Error: Please define the environment variable MODEL_DEPLOYMENT_NAME.") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/sample_agents_multi_agent_team.py b/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/sample_agents_multi_agent_team.py new file mode 100644 index 000000000000..07aa0f9d81b9 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/sample_agents_multi_agent_team.py @@ -0,0 +1,137 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use an AgentTeam to execute a multi-step + user request with automatic function calling and trace collection. 
+ + The team consists of + • one leader agent - created automatically from the configuration in + `utils/agent_team_config.yaml` + • three worker agents - `TimeWeatherAgent`, `SendEmailAgent`, and + `TemperatureAgent`, each defined in the code below with its own tools + + IMPORTANT - leader-agent model configuration + `utils/agent_team_config.yaml` contains the key TEAM_LEADER_MODEL. + Its value must be the name of a **deployed** model in your Azure AI + project (e.g. "gpt-4o-mini"). + If this deployment does not exist, AgentTeam cannot instantiate the + leader agent and the sample will fail. + +USAGE: + python sample_agents_multi_agent_team.py + + Before running the sample: + + 1. pip install azure-ai-projects azure-ai-agents azure-identity + 2. Ensure `utils/agent_team_config.yaml` is present and TEAM_LEADER_MODEL points + to a valid model deployment. + 3. Set these environment variables with your own values: + PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + MODEL_DEPLOYMENT_NAME - The model deployment name used for the worker agents. 
+""" + +import os +from typing import Set + +from utils.user_functions_with_traces import ( + fetch_current_datetime, + fetch_weather, + send_email_using_recipient_name, + convert_temperature, +) + +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import ToolSet, FunctionTool +from azure.identity import DefaultAzureCredential +from utils.agent_team import AgentTeam, _create_task +from utils.agent_trace_configurator import AgentTraceConfigurator + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +user_function_set_1: Set = {fetch_current_datetime, fetch_weather} + +user_function_set_2: Set = {send_email_using_recipient_name} + +user_function_set_3: Set = {convert_temperature} + + +with project_client: + agents_client = project_client.agents + + agents_client.enable_auto_function_calls( + { + _create_task, + fetch_current_datetime, + fetch_weather, + send_email_using_recipient_name, + convert_temperature, + } + ) + + model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME") + + if model_deployment_name is not None: + AgentTraceConfigurator(agents_client=agents_client).setup_tracing() + with agents_client: + + functions = FunctionTool(functions=user_function_set_1) + toolset1 = ToolSet() + toolset1.add(functions) + + agent_team = AgentTeam("test_team", agents_client=agents_client) + + agent_team.add_agent( + model=model_deployment_name, + name="TimeWeatherAgent", + instructions="You are a specialized agent for time and weather queries.", + toolset=toolset1, + can_delegate=True, + ) + + functions = FunctionTool(functions=user_function_set_2) + toolset2 = ToolSet() + toolset2.add(functions) + + agent_team.add_agent( + model=model_deployment_name, + name="SendEmailAgent", + instructions="You are a specialized agent for sending emails.", + toolset=toolset2, + can_delegate=False, + ) + + functions = FunctionTool(functions=user_function_set_3) + toolset3 = ToolSet() + 
toolset3.add(functions) + + agent_team.add_agent( + model=model_deployment_name, + name="TemperatureAgent", + instructions="You are a specialized agent for temperature conversion.", + toolset=toolset3, + can_delegate=False, + ) + + agent_team.assemble_team() + + user_request = ( + "Hello, Please provide me current time in '%Y-%m-%d %H:%M:%S' format, and the weather in New York. " + "Finally, convert the Celsius to Fahrenheit and send an email to Example Recipient with summary of results." + ) + + # Once process_request is called, the TeamLeader will coordinate. + # The loop in process_request will pick up tasks from the queue, assign them, and so on. + agent_team.process_request(request=user_request) + + agent_team.dismantle_team() + else: + print("Error: Please define the environment variable MODEL_DEPLOYMENT_NAME.") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/utils/agent_team.py b/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/utils/agent_team.py new file mode 100644 index 000000000000..ce304992536d --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/utils/agent_team.py @@ -0,0 +1,436 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +import os +import yaml # type: ignore + +from opentelemetry import trace +from opentelemetry.trace import Span # noqa: F401 # pylint: disable=unused-import +from typing import Any, Dict, Optional, Set, List +from azure.ai.agents import AgentsClient +from azure.ai.agents.models import FunctionTool, ToolSet, MessageRole, Agent, AgentThread + +tracer = trace.get_tracer(__name__) + + +class _AgentTeamMember: + """ + Represents an individual agent on a team. + + :param model: The model (e.g. GPT-4) used by this agent. + :param name: The agent's name. 
+ :param instructions: The agent's initial instructions or "personality". + :param toolset: An optional ToolSet with specialized tools for this agent. + :param can_delegate: Whether this agent has delegation capability (e.g., 'create_task'). + Defaults to True. + """ + + def __init__( + self, model: str, name: str, instructions: str, toolset: Optional[ToolSet] = None, can_delegate: bool = True + ) -> None: + self.model = model + self.name = name + self.instructions = instructions + self.agent_instance: Optional[Agent] = None + self.toolset: Optional[ToolSet] = toolset + self.can_delegate = can_delegate + + +class AgentTask: + """ + Encapsulates a task for an agent to perform. + + :param recipient: The name of the agent who should receive the task. + :param task_description: The description of the work to be done or question to be answered. + :param requestor: The name of the agent or user requesting the task. + """ + + def __init__(self, recipient: str, task_description: str, requestor: str) -> None: + self.recipient = recipient + self.task_description = task_description + self.requestor = requestor + + +class AgentTeam: + """ + A class that represents a team of agents. + + """ + + # Static container to store all instances of AgentTeam + _teams: Dict[str, "AgentTeam"] = {} + + _agents_client: AgentsClient + _agent_thread: Optional[AgentThread] = None + _team_leader: Optional[_AgentTeamMember] = None + _members: List[_AgentTeamMember] = [] + _tasks: List[AgentTask] = [] + _team_name: str = "" + _current_request_span: Optional[Span] = None + _current_task_span: Optional[Span] = None + + def __init__(self, team_name: str, agents_client: AgentsClient): + """ + Initialize a new AgentTeam and set it as the singleton instance. 
+ """ + # Validate that the team_name is a non-empty string + if not isinstance(team_name, str) or not team_name: + raise ValueError("Team name must be a non-empty string.") + # Check for existing team with the same name + if team_name in AgentTeam._teams: + raise ValueError(f"A team with the name '{team_name}' already exists.") + self.team_name = team_name + if agents_client is None: + raise ValueError("No AgentsClient provided.") + self._agents_client = agents_client + # Store the instance in the static container + AgentTeam._teams[team_name] = self + + # Get the directory of the current file + current_dir = os.path.dirname(os.path.abspath(__file__)) + # Construct the full path to the config file + file_path = os.path.join(current_dir, "agent_team_config.yaml") + with open(file_path, "r") as config_file: + config = yaml.safe_load(config_file) + self.TEAM_LEADER_INSTRUCTIONS = config["TEAM_LEADER_INSTRUCTIONS"] + self.TEAM_LEADER_INITIAL_REQUEST = config["TEAM_LEADER_INITIAL_REQUEST"] + self.TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS = config[ + "TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS" + ] + self.TEAM_MEMBER_CAN_DELEGATE_INSTRUCTIONS = config["TEAM_MEMBER_CAN_DELEGATE_INSTRUCTIONS"] + self.TEAM_MEMBER_NO_DELEGATE_INSTRUCTIONS = config["TEAM_MEMBER_NO_DELEGATE_INSTRUCTIONS"] + self.TEAM_LEADER_MODEL = config["TEAM_LEADER_MODEL"].strip() + + @staticmethod + def get_team(team_name: str) -> "AgentTeam": + """Static method to fetch the AgentTeam instance by name.""" + team = AgentTeam._teams.get(team_name) + if team is None: + raise ValueError(f"No team found with the name '{team_name}'.") + return team + + @staticmethod + def _remove_team(team_name: str) -> None: + """Static method to remove an AgentTeam instance by name.""" + if team_name not in AgentTeam._teams: + raise ValueError(f"No team found with the name '{team_name}'.") + del AgentTeam._teams[team_name] + + def add_agent( + self, model: str, name: str, instructions: str, toolset: 
Optional[ToolSet] = None, can_delegate: bool = True + ) -> None: + """ + Add a new agent (team member) to this AgentTeam. + + :param model: The model name (e.g. GPT-4) for the agent. + :param name: The name of the agent being added. + :param instructions: The initial instructions/personality for the agent. + :param toolset: An optional ToolSet to configure specific tools (functions, etc.) + for this agent. If None, we'll create a default set. + :param can_delegate: If True, the agent can delegate tasks (via create_task). + If False, the agent does not get 'create_task' in its ToolSet + and won't mention delegation in instructions. + """ + if toolset is None: + toolset = ToolSet() + + if can_delegate: + # If agent can delegate, ensure it has 'create_task' + try: + function_tool = toolset.get_tool(FunctionTool) + function_tool.add_functions(agent_team_default_functions) + except ValueError: + default_function_tool = FunctionTool(agent_team_default_functions) + toolset.add(default_function_tool) + + member = _AgentTeamMember( + model=model, + name=name, + instructions=instructions, + toolset=toolset, + can_delegate=can_delegate, + ) + self._members.append(member) + + def set_team_leader(self, model: str, name: str, instructions: str, toolset: Optional[ToolSet] = None) -> None: + """ + Set the team leader for this AgentTeam. + + If team leader has not been set prior to the call to assemble_team, + then a default team leader will be set. + + :param model: The model name (e.g. GPT-4) for the agent. + :param name: The name of the team leader. + :param instructions: The instructions for the team leader. These instructions + are not modified by the implementation, so all required + information about other team members and how to pass tasks + to them should be included. + :param toolset: An optional ToolSet to configure specific tools (functions, etc.) + for the team leader. 
+ """ + member = _AgentTeamMember(model=model, name=name, instructions=instructions, toolset=toolset) + self._team_leader = member + + def add_task(self, task: AgentTask) -> None: + """ + Add a new task to the team's task list. + + :param task: The task to be added. + """ + self._tasks.append(task) + + def _create_team_leader(self) -> None: + """ + Create the team leader agent. + """ + assert self._agents_client is not None, "agents_client must not be None" + assert self._team_leader is not None, "team leader has not been added" + + self._team_leader.agent_instance = self._agents_client.create_agent( + model=self._team_leader.model, + name=self._team_leader.name, + instructions=self._team_leader.instructions, + toolset=self._team_leader.toolset, + ) + + def _set_default_team_leader(self): + """ + Set the default 'TeamLeader' agent with awareness of all other agents. + """ + toolset = ToolSet() + toolset.add(default_function_tool) + instructions = self.TEAM_LEADER_INSTRUCTIONS.format(agent_name="TeamLeader", team_name=self.team_name) + "\n" + # List all agents (will be empty at this moment if you haven't added any, or you can append after they're added) + for member in self._members: + instructions += f"- {member.name}: {member.instructions}\n" + + self._team_leader = _AgentTeamMember( + model=self.TEAM_LEADER_MODEL, + name="TeamLeader", + instructions=instructions, + toolset=toolset, + can_delegate=True, + ) + + def assemble_team(self): + """ + Create the team leader agent and initialize all member agents with + their configured or default toolsets. 
+ """ + assert self._agents_client is not None, "agents_client must not be None" + + if self._team_leader is None: + self._set_default_team_leader() + + self._create_team_leader() + + for member in self._members: + if member is self._team_leader: + continue + + team_description = "" + for other_member in self._members: + if other_member != member: + team_description += f"- {other_member.name}: {other_member.instructions}\n" + + if member.can_delegate: + extended_instructions = self.TEAM_MEMBER_CAN_DELEGATE_INSTRUCTIONS.format( + name=member.name, + team_name=self._team_name, + original_instructions=member.instructions, + team_description=team_description, + ) + else: + extended_instructions = self.TEAM_MEMBER_NO_DELEGATE_INSTRUCTIONS.format( + name=member.name, + team_name=self._team_name, + original_instructions=member.instructions, + team_description=team_description, + ) + member.agent_instance = self._agents_client.create_agent( + model=member.model, name=member.name, instructions=extended_instructions, toolset=member.toolset + ) + + def dismantle_team(self) -> None: + """ + Delete all agents (including the team leader) from the project client. 
+ """ + assert self._agents_client is not None, "agents_client must not be None" + + if self._team_leader and self._team_leader.agent_instance: + print(f"Deleting team leader agent '{self._team_leader.name}'") + self._agents_client.delete_agent(self._team_leader.agent_instance.id) + for member in self._members: + if member is not self._team_leader and member.agent_instance: + print(f"Deleting agent '{member.name}'") + self._agents_client.delete_agent(member.agent_instance.id) + AgentTeam._remove_team(self.team_name) + + def _add_task_completion_event( + self, + span: Span, + result: str, + ) -> None: + + attributes: Dict[str, Any] = {} + attributes["agent_team.task.result"] = result + span.add_event(name=f"agent_team.task_completed", attributes=attributes) + + def process_request(self, request: str) -> None: + """ + Handle a user's request by creating a team and delegating tasks to + the team leader. The team leader may generate additional tasks. + + :param request: The user's request or question. 
+ """ + assert self._agents_client is not None, "project client must not be None" + assert self._team_leader is not None, "team leader must not be None" + + if self._agent_thread is None: + self._agent_thread = self._agents_client.threads.create() + print(f"Created thread with ID: {self._agent_thread.id}") + + with tracer.start_as_current_span("agent_team_request") as current_request_span: + self._current_request_span = current_request_span + if self._current_request_span is not None: + self._current_request_span.set_attribute("agent_team.name", self.team_name) + team_leader_request = self.TEAM_LEADER_INITIAL_REQUEST.format(original_request=request) + _create_task( + team_name=self.team_name, + recipient=self._team_leader.name, + request=team_leader_request, + requestor="user", + ) + while self._tasks: + task = self._tasks.pop(0) + with tracer.start_as_current_span("agent_team_task") as current_task_span: + self._current_task_span = current_task_span + if self._current_task_span is not None: + self._current_task_span.set_attribute("agent_team.name", self.team_name) + self._current_task_span.set_attribute("agent_team.task.recipient", task.recipient) + self._current_task_span.set_attribute("agent_team.task.requestor", task.requestor) + self._current_task_span.set_attribute("agent_team.task.description", task.task_description) + print( + f"Starting task for agent '{task.recipient}'. " + f"Requestor: '{task.requestor}'. " + f"Task description: '{task.task_description}'." 
+ ) + message = self._agents_client.messages.create( + thread_id=self._agent_thread.id, + role="user", + content=task.task_description, + ) + print(f"Created message with ID: {message.id} for task in thread {self._agent_thread.id}") + agent = self._get_member_by_name(task.recipient) + if agent and agent.agent_instance: + run = self._agents_client.runs.create_and_process( + thread_id=self._agent_thread.id, agent_id=agent.agent_instance.id + ) + print(f"Created and processed run for agent '{agent.name}', run ID: {run.id}") + text_message = self._agents_client.messages.get_last_message_text_by_role( + thread_id=self._agent_thread.id, role=MessageRole.AGENT + ) + if text_message and text_message.text: + print(f"Agent '{agent.name}' completed task. " f"Outcome: {text_message.text.value}") + if self._current_task_span is not None: + self._add_task_completion_event(self._current_task_span, result=text_message.text.value) + + # If no tasks remain AND the recipient is not the TeamLeader, + # let the TeamLeader see if more delegation is needed. + if not self._tasks and not task.recipient == "TeamLeader": + team_leader_request = self.TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS + _create_task( + team_name=self.team_name, + recipient=self._team_leader.name, + request=team_leader_request, + requestor="user", + ) + # self._current_task_span.end() + self._current_task_span = None + # self._current_request_span.end() + self._current_request_span = None + + def _get_member_by_name(self, name) -> Optional[_AgentTeamMember]: + """ + Retrieve a team member (agent) by name. + If no member with the specified name is found, returns None. + + :param name: The agent's name within this team. + """ + if name == "TeamLeader": + return self._team_leader + for member in self._members: + if member.name == name: + return member + return None + + """ + Requests another agent in the team to complete a task. 
+ + :param span (Span): The event will be added to this span + :param team_name (str): The name of the team. + :param recipient (str): The name of the agent that is being requested to complete the task. + :param request (str): A description of the to complete. This can also be a question. + :param requestor (str): The name of the agent who is requesting the task. + :return: True if the task was successfully received, False otherwise. + :rtype: str + """ + + +def _add_create_task_event( + span: Span, + team_name: str, + requestor: str, + recipient: str, + request: str, +) -> None: + + attributes: Dict[str, Any] = {} + attributes["agent_team.task.team_name"] = team_name + attributes["agent_team.task.requestor"] = requestor + attributes["agent_team.task.recipient"] = recipient + attributes["agent_team.task.description"] = request + span.add_event(name=f"agent_team.create_task", attributes=attributes) + + +def _create_task(team_name: str, recipient: str, request: str, requestor: str) -> str: + """ + Requests another agent in the team to complete a task. + + :param team_name (str): The name of the team. + :param recipient (str): The name of the agent that is being requested to complete the task. + :param request (str): A description of the to complete. This can also be a question. + :param requestor (str): The name of the agent who is requesting the task. + :return: True if the task was successfully received, False otherwise. 
+ :rtype: str + """ + task = AgentTask(recipient=recipient, task_description=request, requestor=requestor) + team: Optional[AgentTeam] = None + try: + team = AgentTeam.get_team(team_name) + span: Optional[Span] = None + if team._current_task_span is not None: + span = team._current_task_span + elif team._current_request_span is not None: + span = team._current_request_span + + if span is not None: + _add_create_task_event( + span=span, team_name=team_name, requestor=requestor, recipient=recipient, request=request + ) + except: + pass + if team is not None: + team.add_task(task) + return "True" + return "False" + + +# Any additional functions that might be used by the agents: +agent_team_default_functions: Set = { + _create_task, +} + +default_function_tool = FunctionTool(functions=agent_team_default_functions) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/utils/agent_team_config.yaml b/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/utils/agent_team_config.yaml new file mode 100644 index 000000000000..ad711427b5fb --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/utils/agent_team_config.yaml @@ -0,0 +1,43 @@ +TEAM_LEADER_MODEL: | + gpt-4o-mini + +TEAM_LEADER_INSTRUCTIONS: | + You are an agent named '{agent_name}'. You are a leader of a team of agents. The name of your team is '{team_name}'. + You are an agent that is responsible for receiving requests from user and utilizing a team of agents to complete the task. + When you are passed a request, the only thing you will do is evaluate which team member should do which task next to complete the request. + You will use the provided _create_task function to create a task for the agent that is best suited for handling the task next. + You will respond with the description of who you assigned the task and why. 
When you think that the original user request is + processed completely utilizing all the talent available in the team, you do not have to create anymore tasks. + Using the skills of all the team members when applicable is highly valued. + Do not create parallel tasks. + Here are the other agents in your team: + +TEAM_LEADER_INITIAL_REQUEST: | + Please create a task for agent in the team that is best suited to next process the following request. + Use the _create_task function available for you to create the task. The request is: + {original_request} + +TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS: | + Check the discussion so far and especially the most recent agent response in the thread and if you see a potential task + that could improve the final outcome, then use the _create_task function to create the task. + Do not ever ask user confirmation for creating a task. + If the request is completely processed, you do not have to create a task. + +TEAM_MEMBER_CAN_DELEGATE_INSTRUCTIONS: | + You are an agent named '{name}'. You are a member in a team of agents. The name of your team is '{team_name}'. + {original_instructions} + + - You can delegate tasks when appropriate. To delegate, call the _create_task function, using your own name as the 'requestor'. + - Provide a brief account of any tasks you assign and the outcome. + - Ask for help from other team members if you see they have the relevant expertise. + - Once you believe your assignment is complete, respond with your final answer or actions taken. + - Below are the other agents in your team: {team_description} + +TEAM_MEMBER_NO_DELEGATE_INSTRUCTIONS: | + You are an agent named '{name}'. You are a member in a team of agents. The name of your team is '{team_name}'. + {original_instructions} + + - You do not delegate tasks. Instead, focus solely on fulfilling the tasks assigned to you. 
+ - If you have suggestions for tasks better suited to another agent, simply mention it in your response, but do not call _create_task yourself. + - Once you believe your assignment is complete, respond with your final answer or actions taken. + - Below are the other agents in your team: {team_description} diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/utils/agent_trace_configurator.py b/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/utils/agent_trace_configurator.py new file mode 100644 index 000000000000..7c03cb453d58 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/utils/agent_trace_configurator.py @@ -0,0 +1,73 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +import os +import sys +from typing import cast +from opentelemetry import trace +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter +from azure.ai.agents import AgentsClient +from azure.monitor.opentelemetry import configure_azure_monitor + + +class AgentTraceConfigurator: + def __init__(self, agents_client: AgentsClient): + self.agents_client = agents_client + + def enable_azure_monitor_tracing(self): + application_insights_connection_string = os.environ.get("APPLICATIONINSIGHTS_CONNECTION_STRING") + if not application_insights_connection_string: + print("APPLICATIONINSIGHTS_CONNECTION_STRING environment variable was not set.") + print("Please create APPLICATIONINSIGHTS_CONNECTION_STRING with the Application Insights,") + print("connection string. 
It should be enabled for this project.") + print("Enable it via the 'Tracing' tab in your AI Foundry project page.") + exit() + configure_azure_monitor(connection_string=application_insights_connection_string) + + def enable_console_tracing_without_genai(self): + exporter = ConsoleSpanExporter() + trace.set_tracer_provider(TracerProvider()) + tracer = trace.get_tracer(__name__) + provider = cast(TracerProvider, trace.get_tracer_provider()) + provider.add_span_processor(SimpleSpanProcessor(exporter)) + print("Console tracing enabled without agent traces.") + + def enable_console_tracing_with_agent(self): + span_exporter = ConsoleSpanExporter() + tracer_provider = TracerProvider() + tracer_provider.add_span_processor(SimpleSpanProcessor(span_exporter)) + trace.set_tracer_provider(tracer_provider) + tracer = trace.get_tracer(__name__) + try: + from azure.ai.agents.telemetry import AIAgentsInstrumentor + + agents_instrumentor = AIAgentsInstrumentor() + if not agents_instrumentor.is_instrumented(): + agents_instrumentor.instrument() + except Exception as exc: # pylint: disable=broad-exception-caught + print(f"Could not call `AIAgentsInstrumentor().instrument()`. Exception: {exc}") + print("Console tracing enabled with agent traces.") + + def display_menu(self): + print("Select a tracing option:") + print("1. Enable Azure Monitor tracing") + print("2. Enable console tracing without enabling gen_ai agent traces") + print("3. Enable console tracing with gen_ai agent traces") + print("4. Do not enable traces") + + def setup_tracing(self): + self.display_menu() + choice = input("Enter your choice (1-4): ") + + if choice == "1": + self.enable_azure_monitor_tracing() + elif choice == "2": + self.enable_console_tracing_without_genai() + elif choice == "3": + self.enable_console_tracing_with_agent() + elif choice == "4": + print("No tracing enabled.") + else: + print("Invalid choice. 
Please select a valid option.") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/utils/user_functions_with_traces.py b/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/utils/user_functions_with_traces.py new file mode 100644 index 000000000000..2c4f2377ddaf --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_multiagent/utils/user_functions_with_traces.py @@ -0,0 +1,111 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import json +import datetime +from typing import Any, Callable, Set, Optional +from opentelemetry import trace + + +tracer = trace.get_tracer(__name__) + + +# These are the user-defined functions that can be called by the agent. +@tracer.start_as_current_span("fetch_current_datetime") # type: ignore +def fetch_current_datetime(format: Optional[str] = None) -> str: + """ + Get the current time as a JSON string, optionally formatted. + + :param format (Optional[str]): The format in which to return the current time. Defaults to None, which uses a standard format. + :return: The current time in JSON format. + :rtype: str + """ + current_time = datetime.datetime.now() + + # Use the provided format if available, else use a default format + if format: + time_format = format + else: + time_format = "%Y-%m-%d %H:%M:%S" + + time_json = json.dumps({"current_time": current_time.strftime(time_format)}) + return time_json + + +@tracer.start_as_current_span("fetch_weather") # type: ignore +def fetch_weather(location: str) -> str: + """ + Fetches the weather information for the specified location. + + :param location (str): The location to fetch weather for. + :return: Weather information as a JSON string. + :rtype: str + """ + # In a real-world scenario, you'd integrate with a weather API. + # Here, we'll mock the response. 
+ mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} + weather = mock_weather_data.get(location, "Weather data not available for this location.") + weather_json = json.dumps({"weather": weather}) + return weather_json + + +@tracer.start_as_current_span("send_email_using_recipient_name") # type: ignore +def send_email_using_recipient_name(recipient: str, subject: str, body: str) -> str: + """ + Sends an email with the specified subject and body to the recipient. + + :param recipient (str): Name of the recipient. + :param subject (str): Subject of the email. + :param body (str): Body content of the email. + :return: Confirmation message. + :rtype: str + """ + # In a real-world scenario, you'd use an SMTP server or an email service API. + # Here, we'll mock the email sending. + print(f"Sending email to {recipient}...") + print(f"Subject: {subject}") + print(f"Body:\n{body}") + + message_json = json.dumps({"message": f"Email successfully sent to {recipient}."}) + return message_json + + +@tracer.start_as_current_span("convert_temperature") # type: ignore +def convert_temperature(celsius: float) -> str: + """Converts temperature from Celsius to Fahrenheit. + + :param celsius (float): Temperature in Celsius. + :rtype: float + + :return: Temperature in Fahrenheit. + :rtype: str + """ + fahrenheit = (celsius * 9 / 5) + 32 + return json.dumps({"fahrenheit": fahrenheit}) + + +# Example User Input for Each Function +# 1. Fetch Current DateTime +# User Input: "What is the current date and time?" +# User Input: "What is the current date and time in '%Y-%m-%d %H:%M:%S' format?" + +# 2. Fetch Weather +# User Input: "Can you provide the weather information for New York?" + +# 3. Send Email Using Recipient Name +# User Input: "Send an email to John Doe with the subject 'Meeting Reminder' and body 'Don't forget our meeting at 3 PM.'" + +# 4. Convert Temperature +# User Input: "Convert 25 degrees Celsius to Fahrenheit." 
+ + +# Statically defined user functions for fast reference +user_functions: Set[Callable[..., Any]] = { + fetch_current_datetime, + fetch_weather, + send_email_using_recipient_name, + convert_temperature, +} diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_eventhandler_with_bing_grounding.py b/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_eventhandler_with_bing_grounding.py new file mode 100644 index 000000000000..89e130126a0e --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_eventhandler_with_bing_grounding.py @@ -0,0 +1,125 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use Agent operations with an event handler and + the Bing grounding tool. It uses a synchronous client. + +USAGE: + python sample_agents_stream_eventhandler_with_bing_grounding.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_BING_CONNECTION_ID - The connection id of the Bing connection, as found in the "Connected resources" tab + in your Azure AI Foundry project. 
+""" + +import os +from typing import Any +from azure.identity import DefaultAzureCredential +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import ( + MessageDeltaChunk, + RunStep, + ThreadMessage, + ThreadRun, + AgentEventHandler, + BingGroundingTool, + MessageRole, + MessageDeltaTextUrlCitationAnnotation, + MessageDeltaTextContent, +) + + +# When using FunctionTool with ToolSet in agent creation, the tool call events are handled inside the stream +# method and functions gets automatically called by default. +class MyEventHandler(AgentEventHandler): + + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + print(f"Text delta received: {delta.text}") + if delta.delta.content and isinstance(delta.delta.content[0], MessageDeltaTextContent): + delta_text_content = delta.delta.content[0] + if delta_text_content.text and delta_text_content.text.annotations: + for delta_annotation in delta_text_content.text.annotations: + if isinstance(delta_annotation, MessageDeltaTextUrlCitationAnnotation): + print( + f"URL citation delta received: [{delta_annotation.url_citation.title}]({delta_annotation.url_citation.url})" + ) + + def on_thread_message(self, message: "ThreadMessage") -> None: + print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + + def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + if run.status == "failed": + print(f"Run failed. Error: {run.last_error}") + + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + def on_error(self, data: str) -> None: + print(f"An error occurred. 
Data: {data}") + + def on_done(self) -> None: + print("Stream completed.") + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + bing_connection_id = os.environ["AZURE_BING_CONNECTION_ID"] + print(f"Bing Connection ID: {bing_connection_id}") + + # Initialize agent bing tool and add the connection id + bing = BingGroundingTool(connection_id=bing_connection_id) + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + tools=bing.definitions, + ) + print(f"Created agent, ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID {thread.id}") + + message = agents_client.messages.create( + thread_id=thread.id, + role=MessageRole.USER, + content="How does wikipedia explain Euler's Identity?", + ) + print(f"Created message, message ID {message.id}") + + with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler()) as stream: + stream.until_done() + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + response_message = agents_client.messages.get_last_message_by_role(thread_id=thread.id, role=MessageRole.AGENT) + if response_message: + for text_message in response_message.text_messages: + print(f"Agent response: {text_message.text.value}") + for annotation in response_message.url_citation_annotations: + print(f"URL Citation: [{annotation.url_citation.title}]({annotation.url_citation.url})") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_eventhandler_with_functions.py 
b/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_eventhandler_with_functions.py new file mode 100644 index 000000000000..e16a99df53a7 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_eventhandler_with_functions.py @@ -0,0 +1,148 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with an event handler and toolset from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_stream_eventhandler_with_functions.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" +from typing import Any + +import os, sys +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import ( + AgentEventHandler, + FunctionTool, + ListSortOrder, + MessageDeltaChunk, + RequiredFunctionToolCall, + RunStep, + SubmitToolOutputsAction, + ThreadMessage, + ThreadRun, + ToolOutput, +) +from azure.identity import DefaultAzureCredential + +current_path = os.path.dirname(__file__) +root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir)) +if root_path not in sys.path: + sys.path.insert(0, root_path) +from samples.utils.user_functions import user_functions + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + + +class MyEventHandler(AgentEventHandler): + + def __init__(self, functions: FunctionTool) -> None: + super().__init__() + self.functions = functions + + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + print(f"Text delta received: {delta.text}") + + def on_thread_message(self, message: "ThreadMessage") -> None: + print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + + def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + if run.status == "failed": + print(f"Run failed. Error: {run.last_error}") + + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + + tool_outputs = [] + for tool_call in tool_calls: + if isinstance(tool_call, RequiredFunctionToolCall): + try: + output = functions.execute(tool_call) + tool_outputs.append( + ToolOutput( + tool_call_id=tool_call.id, + output=output, + ) + ) + except Exception as e: + print(f"Error executing tool_call {tool_call.id}: {e}") + + print(f"Tool outputs: {tool_outputs}") + if tool_outputs: + # Once we receive 'requires_action' status, the next event will be DONE. 
+ # Here we associate our existing event handler to the next stream. + agents_client.runs.submit_tool_outputs_stream( + thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=self + ) + + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + def on_error(self, data: str) -> None: + print(f"An error occurred. Data: {data}") + + def on_done(self) -> None: + print("Stream completed.") + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +with project_client: + agents_client = project_client.agents + + # [START create_agent_with_function_tool] + functions = FunctionTool(user_functions) + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + tools=functions.definitions, + ) + # [END create_agent_with_function_tool] + print(f"Created agent, ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID {thread.id}") + + message = agents_client.messages.create( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York? 
Also let me know the details.", + ) + print(f"Created message, message ID {message.id}") + + with agents_client.runs.stream( + thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler(functions) + ) as stream: + stream.until_done() + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_eventhandler_with_toolset.py new file mode 100644 index 000000000000..0ac79409b8c4 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_eventhandler_with_toolset.py @@ -0,0 +1,120 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with an event handler and toolset from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_stream_eventhandler_with_toolset.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import ( + MessageDeltaChunk, + ListSortOrder, + RunStep, + ThreadMessage, + ThreadRun, +) +from azure.ai.agents.models import AgentEventHandler +from azure.identity import DefaultAzureCredential +from azure.ai.agents.models import FunctionTool, ToolSet + +import os, sys +from typing import Any + +current_path = os.path.dirname(__file__) +root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir)) +if root_path not in sys.path: + sys.path.insert(0, root_path) +from samples.utils.user_functions import user_functions + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + + +# When using FunctionTool with ToolSet in agent creation, the tool call events are handled inside the create_stream +# method and functions gets automatically called by default. +class MyEventHandler(AgentEventHandler): + + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + print(f"Text delta received: {delta.text}") + + def on_thread_message(self, message: "ThreadMessage") -> None: + print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + + def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + if run.status == "failed": + print(f"Run failed. Error: {run.last_error}") + + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + def on_error(self, data: str) -> None: + print(f"An error occurred. 
Data: {data}") + + def on_done(self) -> None: + print("Stream completed.") + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +with project_client: + agents_client = project_client.agents + + # [START create_agent_with_function_tool] + functions = FunctionTool(user_functions) + toolset = ToolSet() + toolset.add(functions) + agents_client.enable_auto_function_calls(toolset) + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + toolset=toolset, + ) + # [END create_agent_with_function_tool] + print(f"Created agent, ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID {thread.id}") + + message = agents_client.messages.create( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York? Also let me know the details", + ) + print(f"Created message, message ID {message.id}") + + with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler()) as stream: + stream.until_done() + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_iteration_with_bing_grounding.py b/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_iteration_with_bing_grounding.py new file mode 100644 index 000000000000..32fe5956c33e --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_iteration_with_bing_grounding.py @@ -0,0 +1,118 @@ +# pylint: 
disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use Agent operations with the Bing grounding + tool, and iteration in streaming. It uses a synchronous client. + +USAGE: + python sample_agents_stream_iteration_with_bing_grounding.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_BING_CONNECTION_ID - The ID of the Bing connection, as found in the "Connected resources" tab + in your Azure AI Foundry project. 
+""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import AgentStreamEvent, RunStepDeltaChunk +from azure.ai.agents.models import ( + MessageDeltaChunk, + RunStep, + ThreadMessage, + ThreadRun, + BingGroundingTool, + MessageRole, + MessageDeltaTextContent, + MessageDeltaTextUrlCitationAnnotation, +) +from azure.identity import DefaultAzureCredential + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + bing_connection_id = os.environ["AZURE_BING_CONNECTION_ID"] + bing = BingGroundingTool(connection_id=bing_connection_id) + print(f"Bing Connection ID: {bing_connection_id}") + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + tools=bing.definitions, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID {thread.id}") + + message = agents_client.messages.create( + thread_id=thread.id, role=MessageRole.USER, content="How does wikipedia explain Euler's Identity?" 
+ ) + print(f"Created message, message ID {message.id}") + + with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id) as stream: + + for event_type, event_data, _ in stream: + + if isinstance(event_data, MessageDeltaChunk): + print(f"Text delta received: {event_data.text}") + if event_data.delta.content and isinstance(event_data.delta.content[0], MessageDeltaTextContent): + delta_text_content = event_data.delta.content[0] + if delta_text_content.text and delta_text_content.text.annotations: + for delta_annotation in delta_text_content.text.annotations: + if isinstance(delta_annotation, MessageDeltaTextUrlCitationAnnotation): + print( + f"URL citation delta received: [{delta_annotation.url_citation.title}]({delta_annotation.url_citation.url})" + ) + + elif isinstance(event_data, RunStepDeltaChunk): + print(f"RunStepDeltaChunk received. ID: {event_data.id}.") + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + if event_data.status == "failed": + print(f"Run failed. Error: {event_data.last_error}") + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AgentStreamEvent.ERROR: + print(f"An error occurred. 
Data: {event_data}") + + elif event_type == AgentStreamEvent.DONE: + print("Stream completed.") + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + response_message = agents_client.messages.get_last_message_by_role(thread_id=thread.id, role=MessageRole.AGENT) + if response_message: + for text_message in response_message.text_messages: + print(f"Agent response: {text_message.text.value}") + for annotation in response_message.url_citation_annotations: + print(f"URL Citation: [{annotation.url_citation.title}]({annotation.url_citation.url})") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_iteration_with_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_iteration_with_file_search.py new file mode 100644 index 000000000000..dab751fc05df --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_iteration_with_file_search.py @@ -0,0 +1,111 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with file search tools and iteration in streaming from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_stream_iteration_with_file_search.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import AgentStreamEvent, FileSearchTool, RunStepDeltaChunk +from azure.ai.agents.models import MessageDeltaChunk, RunStep, ThreadMessage, ThreadRun, FilePurpose, ListSortOrder +from azure.identity import DefaultAzureCredential + +asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + # Upload file and create vector store + file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) + print(f"Uploaded file, file ID: {file.id}") + + vector_store = agents_client.vector_stores.create_and_poll(file_ids=[file.id], name="my_vectorstore") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create file search tool with resources followed by creating agent + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="Hello, you are helpful agent and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID {thread.id}") + + message = agents_client.messages.create( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
+ ) + print(f"Created message, message ID {message.id}") + + with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id) as stream: + + for event_type, event_data, _ in stream: + + if isinstance(event_data, MessageDeltaChunk): + print(f"Text delta received: {event_data.text}") + + elif isinstance(event_data, RunStepDeltaChunk): + print(f"RunStepDeltaChunk received. ID: {event_data.id}.") + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") + for annotation in event_data.file_citation_annotations: + print( + f"Citation {annotation.text} from file ID: {annotation.file_citation.file_id}, start index: {annotation.start_index}, end index: {annotation.end_index}" + ) + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + if event_data.status == "failed": + print(f"Run failed. Error: {event_data.last_error}") + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AgentStreamEvent.ERROR: + print(f"An error occurred. 
Data: {event_data}") + + elif event_type == AgentStreamEvent.DONE: + print("Stream completed.") + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_iteration_with_functions.py b/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_iteration_with_functions.py new file mode 100644 index 000000000000..bc5646270da8 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_iteration_with_functions.py @@ -0,0 +1,148 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with iteration and functions from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_stream_iteration_with_functions.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" +from typing import Any + +import os, sys +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import ( + FunctionTool, + ListSortOrder, + MessageDeltaChunk, + RequiredFunctionToolCall, + RunStep, + SubmitToolOutputsAction, + ThreadMessage, + ThreadRun, + ToolOutput, + AgentStreamEvent, + RunStepDeltaChunk, +) +from azure.identity import DefaultAzureCredential + +current_path = os.path.dirname(__file__) +root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir)) +if root_path not in sys.path: + sys.path.insert(0, root_path) +from samples.utils.user_functions import user_functions + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + # [START create_agent_with_function_tool] + functions = FunctionTool(user_functions) + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + tools=functions.definitions, + ) + # [END create_agent_with_function_tool] + print(f"Created agent, ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID {thread.id}") + + message = agents_client.messages.create( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York? Also let me know the details.", + ) + print(f"Created message, message ID {message.id}") + + with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id) as stream: + + for event_type, event_data, _ in stream: + + if isinstance(event_data, MessageDeltaChunk): + print(f"Text delta received: {event_data.text}") + + elif isinstance(event_data, RunStepDeltaChunk): + print(f"RunStepDeltaChunk received. ID: {event_data.id}.") + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. 
ID: {event_data.id}, Status: {event_data.status}") + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + if event_data.status == "failed": + print(f"Run failed. Error: {event_data.last_error}") + + if event_data.status == "requires_action" and isinstance( + event_data.required_action, SubmitToolOutputsAction + ): + tool_calls = event_data.required_action.submit_tool_outputs.tool_calls + + tool_outputs = [] + for tool_call in tool_calls: + if isinstance(tool_call, RequiredFunctionToolCall): + try: + output = functions.execute(tool_call) + tool_outputs.append( + ToolOutput( + tool_call_id=tool_call.id, + output=output, + ) + ) + except Exception as e: + print(f"Error executing tool_call {tool_call.id}: {e}") + + print(f"Tool outputs: {tool_outputs}") + if tool_outputs: + # Once we receive 'requires_action' status, the next event will be DONE. + # Here we associate our existing event handler to the next stream. + agents_client.runs.submit_tool_outputs_stream( + thread_id=event_data.thread_id, + run_id=event_data.id, + tool_outputs=tool_outputs, + event_handler=stream, + ) + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AgentStreamEvent.ERROR: + print(f"An error occurred. 
Data: {event_data}") + + elif event_type == AgentStreamEvent.DONE: + print("Stream completed.") + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_iteration_with_toolset.py new file mode 100644 index 000000000000..4562c7bfa381 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_iteration_with_toolset.py @@ -0,0 +1,109 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with toolset and iteration in streaming from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_stream_iteration_with_toolset.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os, sys +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import AgentStreamEvent, RunStepDeltaChunk +from azure.ai.agents.models import ( + MessageDeltaChunk, + ListSortOrder, + RunStep, + ThreadMessage, + ThreadRun, +) +from azure.ai.agents.models import FunctionTool, ToolSet +from azure.identity import DefaultAzureCredential + +current_path = os.path.dirname(__file__) +root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir)) +if root_path not in sys.path: + sys.path.insert(0, root_path) +from samples.utils.user_functions import user_functions + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +functions = FunctionTool(user_functions) +toolset = ToolSet() +toolset.add(functions) + +with project_client: + agents_client = project_client.agents + + agents_client.enable_auto_function_calls(toolset) + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + toolset=toolset, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID {thread.id}") + + message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, what's the time?") + print(f"Created message, message ID {message.id}") + + with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id) as stream: + + for event_type, event_data, _ in stream: + + if isinstance(event_data, MessageDeltaChunk): + print(f"Text delta received: {event_data.text}") + + elif isinstance(event_data, RunStepDeltaChunk): + print(f"RunStepDeltaChunk received. ID: {event_data.id}.") + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. 
ID: {event_data.id}, Status: {event_data.status}") + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + if event_data.status == "failed": + print(f"Run failed. Error: {event_data.last_error}") + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AgentStreamEvent.ERROR: + print(f"An error occurred. Data: {event_data}") + + elif event_type == AgentStreamEvent.DONE: + print("Stream completed.") + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_with_base_override_eventhandler.py b/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_with_base_override_eventhandler.py new file mode 100644 index 000000000000..854a5bdc7221 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_streaming/sample_agents_stream_with_base_override_eventhandler.py @@ -0,0 +1,108 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to override the base event handler and parse the events and iterate through them + In your use case, you might not want to write the iteration code similar to sample_agents_stream_iteration_async.py. + If you have multiple places to call stream, you might find the iteration code cumbersome. 
+ This example shows how to override the base event handler, parse the events, and iterate through them, which can be reused in multiple stream calls to help keep the code clean. + +USAGE: + python sample_agents_stream_with_base_override_eventhandler.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" +import json +from typing import Generator, Optional + +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import ( + MessageDeltaChunk, + MessageDeltaTextContent, + ListSortOrder, +) +from azure.ai.agents.models import AgentStreamEvent, BaseAgentEventHandler +from azure.identity import DefaultAzureCredential + +import os + + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + + +# Our goal is to parse the event data in a string and return the chunk in text for each iteration. +# Because we want the iteration to be a string, we define str as the generic type for BaseAsyncAgentEventHandler +# and override the _process_event method to return a string. +# The get_stream_chunks method is defined to return the chunks as strings because the iteration is a string. 
+class MyEventHandler(BaseAgentEventHandler[Optional[str]]): + + def _process_event(self, event_data_str: str) -> Optional[str]: # type: ignore[return] + event_lines = event_data_str.strip().split("\n") + event_type: Optional[str] = None + event_data = "" + for line in event_lines: + if line.startswith("event:"): + event_type = line.split(":", 1)[1].strip() + elif line.startswith("data:"): + event_data = line.split(":", 1)[1].strip() + + if not event_type: + raise ValueError("Event type not specified in the event data.") + + if event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA.value: + + event_obj: MessageDeltaChunk = MessageDeltaChunk(**json.loads(event_data)) + + for content_part in event_obj.delta.content: + if isinstance(content_part, MessageDeltaTextContent): + if content_part.text is not None: + return content_part.text.value + return None + + def get_stream_chunks(self) -> Generator[str, None, None]: + for chunk in self: + if chunk: + yield chunk + + +with project_client: + agents_client = project_client.agents + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are helpful agent" + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID {thread.id}") + + message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") + + with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler()) as stream: + for chunk in stream.get_stream_chunks(): + print(chunk) + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git 
a/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_async_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_async_with_azure_monitor_tracing.py new file mode 100644 index 000000000000..a10148e0a868 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_async_with_azure_monitor_tracing.py @@ -0,0 +1,90 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: +    This sample demonstrates how to use basic agent operations from +    the Azure Agents service using an asynchronous client with Azure Monitor tracing. +    View the results in the "Tracing" tab in your Azure AI Foundry project page. + +USAGE: +    python sample_agents_basics_async_with_azure_monitor_tracing.py + +    Before running the sample: + +    pip install azure-ai-projects azure-ai-agents azure-identity opentelemetry-sdk azure-monitor-opentelemetry aiohttp + +    Set these environment variables with your own values: +    * PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview +      page of your Azure AI Foundry portal. +    * MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in +      the "Models + endpoints" tab in your Azure AI Foundry project. +    * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat +      messages, which may contain personal data. False by default. +    * APPLICATIONINSIGHTS_CONNECTION_STRING - Set to the connection string of your Application Insights resource. +      This is used to send telemetry data to Azure Monitor. You can also get the connection string programmatically +      from AIProjectClient using the `telemetry.get_connection_string` method. 
A code sample showing how to do this + can be found in the `sample_telemetry_async.py` file in the azure-ai-projects telemetry samples. +""" +import asyncio +import time +from azure.ai.projects.aio import AIProjectClient +from azure.ai.agents.models import ListSortOrder, MessageTextContent +from azure.identity.aio import DefaultAzureCredential +from opentelemetry import trace +import os +from azure.monitor.opentelemetry import configure_azure_monitor + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + + +async def main() -> None: + + async with DefaultAzureCredential() as creds: + async with AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, + ) as project_client: + + async with project_client: + agents_client = project_client.agents + + # Enable Azure Monitor tracing + application_insights_connection_string = os.environ["APPLICATIONINSIGHTS_CONNECTION_STRING"] + configure_azure_monitor(connection_string=application_insights_connection_string) + + with tracer.start_as_current_span(scenario): + async with agents_client: + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = await agents_client.messages.create( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + print(f"Created message, message ID: {message.id}") + + run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run completed with status: {run.status}") + + await agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + async for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + 
print(f"{msg.role}: {last_part.text.value}") + + +if __name__ == "__main__": +    asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_async_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_async_with_console_tracing.py new file mode 100644 index 000000000000..df6c85e59be9 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_async_with_console_tracing.py @@ -0,0 +1,99 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: +    This sample demonstrates how to use basic agent operations from +    the Azure Agents service using an asynchronous client with tracing to console. + +USAGE: +    python sample_agents_basics_async_with_console_tracing.py + +    Before running the sample: + +    pip install azure-ai-projects azure-ai-agents azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry aiohttp + +    If you want to export telemetry to OTLP endpoint (such as Aspire dashboard +    https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + +    pip install azure-ai-projects opentelemetry-exporter-otlp-proto-grpc + +    Set these environment variables with your own values: +    * PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview +      page of your Azure AI Foundry portal. +    * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat +      messages, which may contain personal data. False by default. 
+""" +import asyncio +import time +import sys +from azure.core.settings import settings + +settings.tracing_implementation = "opentelemetry" +from opentelemetry import trace +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter +from azure.ai.projects.aio import AIProjectClient +from azure.ai.agents.models import ListSortOrder, MessageTextContent +from azure.identity.aio import DefaultAzureCredential +from opentelemetry import trace +import os +from azure.ai.agents.telemetry import AIAgentsInstrumentor + +# Setup tracing to console +# Requires opentelemetry-sdk +span_exporter = ConsoleSpanExporter() +tracer_provider = TracerProvider() +tracer_provider.add_span_processor(SimpleSpanProcessor(span_exporter)) +trace.set_tracer_provider(tracer_provider) +tracer = trace.get_tracer(__name__) + +AIAgentsInstrumentor().instrument() + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + + +@tracer.start_as_current_span(__file__) +async def main() -> None: + + async with DefaultAzureCredential() as creds: + async with AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=creds, + ) as project_client: + + async with project_client: + agents_client = project_client.agents + + agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are helpful agent" + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = await agents_client.messages.create( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + print(f"Created message, message ID: {message.id}") + + run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run completed with status: {run.status}") + + await agents_client.delete_agent(agent.id) + print("Deleted agent") + 
+ messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + async for msg in messages: + last_part = msg.content[-1] + if isinstance(last_part, MessageTextContent): + print(f"{msg.role}: {last_part.text.value}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_with_azure_monitor_tracing.py new file mode 100644 index 000000000000..6153b3d1902c --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_with_azure_monitor_tracing.py @@ -0,0 +1,81 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic agent operations from + the Azure Agents service using a synchronous client with Azure Monitor tracing. + View the results in the "Tracing" tab in your Azure AI Foundry project page. + +USAGE: + python sample_agents_basics_with_azure_monitor_tracing.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity azure-monitor-opentelemetry + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. + 4) APPLICATIONINSIGHTS_CONNECTION_STRING - Set to the connection string of your Application Insights resource. 
+ This is used to send telemetry data to Azure Monitor. You can also get the connection string programmatically + from AIProjectClient using the `telemetry.get_connection_string` method. A code sample showing how to do this + can be found in the `sample_telemetry.py` file in the azure-ai-projects telemetry samples. +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import ListSortOrder +from azure.identity import DefaultAzureCredential + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +# [START enable_tracing] +from opentelemetry import trace +from azure.monitor.opentelemetry import configure_azure_monitor + +# Enable Azure Monitor tracing +application_insights_connection_string = os.environ["APPLICATIONINSIGHTS_CONNECTION_STRING"] +configure_azure_monitor(connection_string=application_insights_connection_string) + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span(scenario): + with project_client: + agents_client = project_client.agents + + # [END enable_tracing] + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are helpful agent" + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = agents_client.messages.create( + thread_id=thread.id, role="user", content="Hello, tell me a hilarious joke" + ) + print(f"Created message, message ID: {message.id}") + + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run completed with status: {run.status}") + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = 
msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_with_console_tracing.py new file mode 100644 index 000000000000..0a93e44a2a0a --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_with_console_tracing.py @@ -0,0 +1,87 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic agent operations from + the Azure Agents service using a synchronous client with tracing to console. + +USAGE: + python sample_agents_basics_with_console_tracing.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry + + If you want to export telemetry to OTLP endpoint (such as Aspire dashboard + https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + + pip install azure-ai-projects opentelemetry-exporter-otlp-proto-grpc + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. +""" + +import os, time +from azure.core.settings import settings + +settings.tracing_implementation = "opentelemetry" +# Install opentelemetry with command "pip install azure-ai-projects opentelemetry-sdk". 
+from opentelemetry import trace +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import ListSortOrder +from azure.identity import DefaultAzureCredential +from azure.ai.agents.telemetry import AIAgentsInstrumentor + +# Setup tracing to console +# Requires opentelemetry-sdk +span_exporter = ConsoleSpanExporter() +tracer_provider = TracerProvider() +tracer_provider.add_span_processor(SimpleSpanProcessor(span_exporter)) +trace.set_tracer_provider(tracer_provider) +tracer = trace.get_tracer(__name__) + +AIAgentsInstrumentor().instrument() + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +scenario = os.path.basename(__file__) +with tracer.start_as_current_span(scenario): + with project_client: + agents_client = project_client.agents + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are helpful agent" + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID: {message.id}") + + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run completed with status: {run.status}") + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_with_console_tracing_custom_attributes.py 
b/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_with_console_tracing_custom_attributes.py new file mode 100644 index 000000000000..1b51abdaf41b --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_basics_with_console_tracing_custom_attributes.py @@ -0,0 +1,116 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic agent operations from + the Azure Agents service using a synchronous client with tracing to console and adding + custom attributes to the span. + +USAGE: + python sample_agents_basics_with_console_tracing_custom_attributes.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry + + If you want to export telemetry to OTLP endpoint (such as Aspire dashboard + https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + + pip install azure-ai-projects opentelemetry-exporter-otlp-proto-grpc + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. +""" + +import os, sys, time +from typing import cast +from azure.core.settings import settings + +settings.tracing_implementation = "opentelemetry" +# Install opentelemetry with command "pip install azure-ai-projects opentelemetry-sdk". 
+from opentelemetry import trace +from opentelemetry.sdk.trace import TracerProvider, SpanProcessor, ReadableSpan, Span +from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import ListSortOrder +from azure.identity import DefaultAzureCredential +from azure.ai.agents.telemetry import AIAgentsInstrumentor + + +# Define the custom span processor that is used for adding the custom +# attributes to spans when they are started. +class CustomAttributeSpanProcessor(SpanProcessor): + def __init__(self): + pass + + def on_start(self, span: Span, parent_context=None): + # Add this attribute to all spans + span.set_attribute("trace_sample.sessionid", "123") + + # Add another attribute only to create_message spans + if span.name == "create_message": + span.set_attribute("trace_sample.message.context", "abc") + + def on_end(self, span: ReadableSpan): + # Clean-up logic can be added here if necessary + pass + + +# Setup tracing to console +# Requires opentelemetry-sdk +span_exporter = ConsoleSpanExporter() +tracer_provider = TracerProvider() +tracer_provider.add_span_processor(SimpleSpanProcessor(span_exporter)) +trace.set_tracer_provider(tracer_provider) +tracer = trace.get_tracer(__name__) + +AIAgentsInstrumentor().instrument() + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +# Add the custom span processor to the global tracer provider +provider = cast(TracerProvider, trace.get_tracer_provider()) +provider.add_span_processor(CustomAttributeSpanProcessor()) + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span(scenario): + + with project_client: + agents_client = project_client.agents + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are helpful agent" + ) + 
print(f"Created agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID: {message.id}") + + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run completed with status: {run.status}") + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py new file mode 100644 index 000000000000..aeb05da1e7e0 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py @@ -0,0 +1,118 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with an event handler in streaming from + the Azure Agents service using a synchronous client with Azure Monitor tracing. + View the results in the "Tracing" tab in your Azure AI Foundry project page. 
+ +USAGE: + python sample_agents_stream_eventhandler_with_azure_monitor_tracing.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity opentelemetry-sdk azure-monitor-opentelemetry + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. + 4) APPLICATIONINSIGHTS_CONNECTION_STRING - Set to the connection string of your Application Insights resource. + This is used to send telemetry data to Azure Monitor. You can also get the connection string programmatically + from AIProjectClient using the `telemetry.get_connection_string` method. A code sample showing how to do this + can be found in the `sample_telemetry.py` file in the azure-ai-projects telemetry samples. +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.agents.models import ( + AgentEventHandler, + MessageDeltaChunk, + ListSortOrder, + ThreadMessage, + ThreadRun, + RunStep, +) +from typing import Any +from opentelemetry import trace +from azure.monitor.opentelemetry import configure_azure_monitor + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + + +class MyEventHandler(AgentEventHandler): + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + print(f"Text delta received: {delta.text}") + + def on_thread_message(self, message: "ThreadMessage") -> None: + if len(message.content): + print( + f"ThreadMessage created. 
ID: {message.id}, " + f"Status: {message.status}, Content: {message.content[0].as_dict()}" + ) + else: + print(f"ThreadMessage created. ID: {message.id}, " f"Status: {message.status}") + + def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + def on_error(self, data: str) -> None: + print(f"An error occurred. Data: {data}") + + def on_done(self) -> None: + print("Stream completed.") + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +# Enable Azure Monitor tracing +application_insights_connection_string = os.environ["APPLICATIONINSIGHTS_CONNECTION_STRING"] +configure_azure_monitor(connection_string=application_insights_connection_string) + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span(scenario): + + with project_client: + agents_client = project_client.agents + + # Create an agent and run stream with event handler + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are a helpful agent" + ) + print(f"Created agent, agent ID {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID {thread.id}") + + message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") + + with agents_client.runs.stream( + thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler() + ) as stream: + stream.until_done() + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + 
print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_stream_eventhandler_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_stream_eventhandler_with_console_tracing.py new file mode 100644 index 000000000000..2f7a660d21f9 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_stream_eventhandler_with_console_tracing.py @@ -0,0 +1,133 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with an event handler in streaming from + the Azure Agents service using a synchronous client with tracing to console. + +USAGE: + python sample_agents_stream_eventhandler_with_console_tracing.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry + + If you want to export telemetry to OTLP endpoint (such as Aspire dashboard + https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + + pip install azure-ai-projects opentelemetry-exporter-otlp-proto-grpc + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. 
+""" + +import os, sys +from azure.core.settings import settings + +settings.tracing_implementation = "opentelemetry" +from opentelemetry import trace +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.agents.models import ( + AgentEventHandler, + MessageDeltaChunk, + ListSortOrder, + ThreadMessage, + ThreadRun, + RunStep, +) +from typing import Any +from azure.ai.agents.telemetry import AIAgentsInstrumentor + +# Setup tracing to console +# Requires opentelemetry-sdk +span_exporter = ConsoleSpanExporter() +tracer_provider = TracerProvider() +tracer_provider.add_span_processor(SimpleSpanProcessor(span_exporter)) +trace.set_tracer_provider(tracer_provider) +tracer = trace.get_tracer(__name__) + +AIAgentsInstrumentor().instrument() + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + + +class MyEventHandler(AgentEventHandler): + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + print(f"Text delta received: {delta.text}") + + def on_thread_message(self, message: "ThreadMessage") -> None: + if len(message.content): + print( + f"ThreadMessage created. ID: {message.id}, " + f"Status: {message.status}, Content: {message.content[0].as_dict()}" + ) + else: + print(f"ThreadMessage created. ID: {message.id}, " f"Status: {message.status}") + + def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + def on_error(self, data: str) -> None: + print(f"An error occurred. 
Data: {data}") + + def on_done(self) -> None: + print("Stream completed.") + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span(scenario): + + with project_client: + agents_client = project_client.agents + + # Create an agent and run stream with event handler + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are a helpful agent" + ) + print(f"Created agent, agent ID {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID {thread.id}") + + message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") + + with agents_client.runs.stream( + thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler() + ) as stream: + stream.until_done() + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_toolset_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_toolset_with_azure_monitor_tracing.py new file mode 100644 index 000000000000..be5e5e9ed04f --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_toolset_with_azure_monitor_tracing.py @@ -0,0 +1,132 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with toolset from + the Azure Agents service using a synchronous client with Azure Monitor tracing. + View the results in the "Tracing" tab in your Azure AI Foundry project page. + +USAGE: + python sample_agents_toolset_with_azure_monitor_tracing.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity opentelemetry-sdk azure-monitor-opentelemetry + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. + 4) APPLICATIONINSIGHTS_CONNECTION_STRING - Set to the connection string of your Application Insights resource. + This is used to send telemetry data to Azure Monitor. You can also get the connection string programmatically + from AIProjectClient using the `telemetry.get_connection_string` method. A code sample showing how to do this + can be found in the `sample_telemetry.py` file in the azure-ai-projects telemetry samples. 
+""" +from typing import Any, Callable, Set + +import os, time, json +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.agents.models import ( + FunctionTool, + ToolSet, + ListSortOrder, +) +from opentelemetry import trace +from azure.monitor.opentelemetry import configure_azure_monitor +from azure.ai.agents.telemetry import trace_function + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +# Enable Azure Monitor tracing +application_insights_connection_string = os.environ["APPLICATIONINSIGHTS_CONNECTION_STRING"] +configure_azure_monitor(connection_string=application_insights_connection_string) + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + + +# The trace_func decorator will trace the function call and enable adding additional attributes +# to the span in the function implementation. Note that this will trace the function parameters and their values. +@trace_function() +def fetch_weather(location: str) -> str: + """ + Fetches the weather information for the specified location. + + :param location (str): The location to fetch weather for. + :return: Weather information as a JSON string. + :rtype: str + """ + # In a real-world scenario, you'd integrate with a weather API. + # Here, we'll mock the response. 
+ mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} + + # Adding attributes to the current span + span = trace.get_current_span() + span.set_attribute("requested_location", location) + + weather = mock_weather_data.get(location, "Weather data not available for this location.") + weather_json = json.dumps({"weather": weather}) + return weather_json + + +# Statically defined user functions for fast reference +user_functions: Set[Callable[..., Any]] = { + fetch_weather, +} + +# Initialize function tool with user function +functions = FunctionTool(functions=user_functions) +toolset = ToolSet() +toolset.add(functions) + + +with tracer.start_as_current_span(scenario): + + with project_client: + agents_client = project_client.agents + + # To enable tool calls executed automatically + agents_client.enable_auto_function_calls(toolset) + + # Create an agent and run user's request with function calls + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + toolset=toolset, + ) + print(f"Created agent, ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, ID: {thread.id}") + + message = agents_client.messages.create( + thread_id=thread.id, + role="user", + content="Hello, what is the weather in New York?", + ) + print(f"Created message, ID: {message.id}") + + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id, toolset=toolset) + print(f"Run completed with status: {run.status}") + + # Delete the agent when done + agents_client.delete_agent(agent.id) + print("Deleted agent") + + # Fetch and log all messages + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git 
a/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_toolset_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_toolset_with_console_tracing.py new file mode 100644 index 000000000000..f6ddd8792ebb --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_telemetry/sample_agents_toolset_with_console_tracing.py @@ -0,0 +1,144 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with toolset from + the Azure Agents service using a synchronous client with tracing to console. + +USAGE: + python sample_agents_toolset_with_console_tracing.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry + + If you want to export telemetry to OTLP endpoint (such as Aspire dashboard + https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + + pip install azure-ai-projects opentelemetry-exporter-otlp-proto-grpc + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. +""" +from typing import Any, Callable, Set + +import os, sys, time, json +from azure.core.settings import settings + +settings.tracing_implementation = "opentelemetry" +# Install opentelemetry with command "pip install azure-ai-projects opentelemetry-sdk". 
+from opentelemetry import trace +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.agents.models import ( + FunctionTool, + ToolSet, + ListSortOrder, +) +from azure.ai.agents.telemetry import trace_function +from azure.ai.agents.telemetry import AIAgentsInstrumentor + +# Setup tracing to console +# Requires opentelemetry-sdk +span_exporter = ConsoleSpanExporter() +tracer_provider = TracerProvider() +tracer_provider.add_span_processor(SimpleSpanProcessor(span_exporter)) +trace.set_tracer_provider(tracer_provider) +tracer = trace.get_tracer(__name__) + +AIAgentsInstrumentor().instrument() + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + + +# The trace_func decorator will trace the function call and enable adding additional attributes +# to the span in the function implementation. Note that this will trace the function parameters and their values. +@trace_function() +def fetch_weather(location: str) -> str: + """ + Fetches the weather information for the specified location. + + :param location (str): The location to fetch weather for. + :return: Weather information as a JSON string. + :rtype: str + """ + # In a real-world scenario, you'd integrate with a weather API. + # Here, we'll mock the response. 
+ mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} + + # Adding attributes to the current span + span = trace.get_current_span() + span.set_attribute("requested_location", location) + + weather = mock_weather_data.get(location, "Weather data not available for this location.") + weather_json = json.dumps({"weather": weather}) + return weather_json + + +# Statically defined user functions for fast reference +user_functions: Set[Callable[..., Any]] = { + fetch_weather, +} + +# Initialize function tool with user function +functions = FunctionTool(functions=user_functions) +toolset = ToolSet() +toolset.add(functions) + + +with tracer.start_as_current_span(scenario): + with project_client: + agents_client = project_client.agents + + # To enable tool calls executed automatically + agents_client.enable_auto_function_calls(toolset) + + # Create an agent and run user's request with function calls + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + toolset=toolset, + ) + print(f"Created agent, ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, ID: {thread.id}") + + message = agents_client.messages.create( + thread_id=thread.id, + role="user", + content="Hello, what is the weather in New York?", + ) + print(f"Created message, ID: {message.id}") + + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id, toolset=toolset) + print(f"Run completed with status: {run.status}") + + # Delete the agent when done + agents_client.delete_agent(agent.id) + print("Deleted agent") + + # Fetch and log all messages + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git 
a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/__init__.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_azure_ai_search.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_azure_ai_search.py
new file mode 100644
index 000000000000..600dfe42243b
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_azure_ai_search.py
@@ -0,0 +1,135 @@
+# pylint: disable=line-too-long,useless-suppression
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+
+"""
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with the
+    Azure AI Search tool from the Azure agents service using a synchronous client.
+
+PREREQUISITES:
+    You will need an Azure AI Search Resource.
+    If you already have one, you must create an agent that can use an existing Azure AI Search index:
+    https://learn.microsoft.com/azure/ai-services/agents/how-to/tools/azure-ai-search?tabs=azurecli%2Cpython&pivots=overview-azure-ai-search
+
+    If you do not already have an agent set up with an Azure AI Search resource, follow the guide for a Standard agent setup:
+    https://learn.microsoft.com/azure/ai-services/agents/quickstart?pivots=programming-language-python-azure
+
+USAGE:
+    python sample_agents_azure_ai_search.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-ai-agents azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
+       page of your Azure AI Foundry portal.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+    3) AI_AZURE_AI_CONNECTION_ID - The connection ID of the Azure AI Search connection to your Foundry project,
+       as found in the "Connected Resources" tab in your Azure AI Foundry project.
+"""
+
+import os
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.agents.models import AzureAISearchQueryType, AzureAISearchTool, ListSortOrder, MessageRole
+
+project_client = AIProjectClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+# [START create_agent_with_azure_ai_search_tool]
+conn_id = os.environ["AI_AZURE_AI_CONNECTION_ID"]
+
+print(conn_id)
+
+# Initialize agent AI search tool and add the search index connection id
+ai_search = AzureAISearchTool(
+    index_connection_id=conn_id, index_name="sample_index", query_type=AzureAISearchQueryType.SIMPLE, top_k=3, filter=""
+)
+
+# Create agent with AI search tool and process agent run
+project_client = AIProjectClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+
+with project_client:
+    agents_client = project_client.agents
+
+    agent = agents_client.create_agent(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-agent",
+        instructions="You are a helpful agent",
+        tools=ai_search.definitions,
+        tool_resources=ai_search.resources,
+    )
+    # [END create_agent_with_azure_ai_search_tool]
+    print(f"Created agent, ID: {agent.id}")
+
+    # Create thread for communication
+    thread = agents_client.threads.create()
+    print(f"Created thread, ID: {thread.id}")
+
+    # Create message to thread
+    message = agents_client.messages.create(
+        thread_id=thread.id,
+        role="user",
+        content="What is the temperature rating of the cozynights sleeping bag?",
+    )
+    print(f"Created message, ID: {message.id}")
+
+    # Create and process agent run in thread with tools
+    run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id)
+    print(f"Run finished with status: 
{run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Fetch run steps to get the details of the agent run + run_steps = agents_client.run_steps.list(thread_id=thread.id, run_id=run.id) + for step in run_steps: + print(f"Step {step['id']} status: {step['status']}") + step_details = step.get("step_details", {}) + tool_calls = step_details.get("tool_calls", []) + + if tool_calls: + print(" Tool calls:") + for call in tool_calls: + print(f" Tool Call ID: {call.get('id')}") + print(f" Type: {call.get('type')}") + + azure_ai_search_details = call.get("azure_ai_search", {}) + if azure_ai_search_details: + print(f" azure_ai_search input: {azure_ai_search_details.get('input')}") + print(f" azure_ai_search output: {azure_ai_search_details.get('output')}") + print() # add an extra newline between steps + + # Delete the agent when done + agents_client.delete_agent(agent.id) + print("Deleted agent") + + # [START populate_references_agent_with_azure_ai_search_tool] + # Fetch and log all messages + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for message in messages: + if message.role == MessageRole.AGENT and message.url_citation_annotations: + placeholder_annotations = { + annotation.text: f" [see {annotation.url_citation.title}] ({annotation.url_citation.url})" + for annotation in message.url_citation_annotations + } + for message_text in message.text_messages: + message_str = message_text.text.value + for k, v in placeholder_annotations.items(): + message_str = message_str.replace(k, v) + print(f"{message.role}: {message_str}") + else: + for message_text in message.text_messages: + print(f"{message.role}: {message_text.text.value}") + # [END populate_references_agent_with_azure_ai_search_tool] diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_azure_functions.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_azure_functions.py new file mode 
100644 index 000000000000..b7c095946368 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_azure_functions.py @@ -0,0 +1,99 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use azure function agent operations from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_azure_functions.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) STORAGE_SERVICE_ENDPONT - the storage service queue endpoint, triggering Azure function. 
+ Please see Getting Started with Azure Functions page for more information on Azure Functions: + https://learn.microsoft.com/azure/azure-functions/functions-get-started +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import AzureFunctionStorageQueue, AzureFunctionTool, MessageRole +from azure.identity import DefaultAzureCredential + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + # [START create_agent_with_azure_function_tool] + storage_service_endpoint = os.environ["STORAGE_SERVICE_ENDPONT"] + + azure_function_tool = AzureFunctionTool( + name="foo", + description="Get answers from the foo bot.", + parameters={ + "type": "object", + "properties": { + "query": {"type": "string", "description": "The question to ask."}, + "outputqueueuri": {"type": "string", "description": "The full output queue uri."}, + }, + }, + input_queue=AzureFunctionStorageQueue( + queue_name="azure-function-foo-input", + storage_service_endpoint=storage_service_endpoint, + ), + output_queue=AzureFunctionStorageQueue( + queue_name="azure-function-tool-output", + storage_service_endpoint=storage_service_endpoint, + ), + ) + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="azure-function-agent-foo", + instructions=f"You are a helpful support agent. Use the provided function any time the prompt contains the string 'What would foo say?'. When you invoke the function, ALWAYS specify the output queue uri parameter as '{storage_service_endpoint}/azure-function-tool-output'. 
Always responds with \"Foo says\" and then the response from the tool.", + tools=azure_function_tool.definitions, + ) + print(f"Created agent, agent ID: {agent.id}") + # [END create_agent_with_azure_function_tool] + + # Create a thread + thread = agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + # Create a message + message = agents_client.messages.create( + thread_id=thread.id, + role="user", + content="What is the most prevalent element in the universe? What would foo say?", + ) + print(f"Created message, message ID: {message.id}") + + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Get messages from the thread + messages = agents_client.messages.list(thread_id=thread.id) + + # Get the last message from agent + last_msg = agents_client.messages.get_last_message_text_by_role(thread_id=thread.id, role=MessageRole.AGENT) + if last_msg: + print(f"Last Message: {last_msg.text.value}") + + # Delete the agent once done + agents_client.delete_agent(agent.id) diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_bing_custom_search.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_bing_custom_search.py new file mode 100644 index 000000000000..04756402b136 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_bing_custom_search.py @@ -0,0 +1,90 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use Agent operations with the Bing Custom Search tool from + the Azure Agents service using a synchronous client. 
+    For more information on the Bing Custom Search tool, see: https://aka.ms/AgentCustomSearchDoc
+
+USAGE:
+    python sample_agents_bing_custom_search.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-ai-agents azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
+       page of your Azure AI Foundry portal.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+    3) BING_CUSTOM_CONNECTION_ID - The ID of the Bing Custom Search connection, in the format of:
+       /subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace-name}/connections/{connection-name}
+"""
+
+import os
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.agents.models import BingCustomSearchTool
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +conn_id = os.environ["BING_CUSTOM_CONNECTION_ID"] + +# Initialize Bing Custom Search tool with connection id and instance name +bing_custom_tool = BingCustomSearchTool(connection_id=conn_id, instance_name="") + +# Create Agent with the Bing Custom Search tool and process Agent run +with project_client: + agents_client = project_client.agents + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + tools=bing_custom_tool.definitions, + ) + print(f"Created agent, ID: {agent.id}") + + # Create thread for communication + thread = agents_client.threads.create() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = agents_client.messages.create( + thread_id=thread.id, + role="user", + content="How many medals did the USA win in the 2024 summer olympics?", + ) + print(f"Created message, ID: {message.id}") + + # Create and process Agent run in thread with tools + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the Agent when done + agents_client.delete_agent(agent.id) + print("Deleted agent") + + # Fetch and log all messages + messages = agents_client.messages.list(thread_id=thread.id) + for msg in messages: + if msg.text_messages: + for text_message in msg.text_messages: + print(f"Agent response: {text_message.text.value}") + for annotation in msg.url_citation_annotations: + print(f"URL Citation: [{annotation.url_citation.title}]({annotation.url_citation.url})") diff --git 
a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_bing_grounding.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_bing_grounding.py new file mode 100644 index 000000000000..f042a17da650 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_bing_grounding.py @@ -0,0 +1,106 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with the Bing grounding tool from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_bing_grounding.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+ 3) AZURE_BING_CONNECTION_ID - The ID of the Bing connection, in the format of: + /subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace-name}/connections/{connection-name} +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import MessageRole, BingGroundingTool +from azure.identity import DefaultAzureCredential + + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +# [START create_agent_with_bing_grounding_tool] +conn_id = os.environ["AZURE_BING_CONNECTION_ID"] + +# Initialize agent bing tool and add the connection id +bing = BingGroundingTool(connection_id=conn_id) + +# Create agent with the bing tool and process agent run +with project_client: + agents_client = project_client.agents + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + tools=bing.definitions, + ) + # [END create_agent_with_bing_grounding_tool] + + print(f"Created agent, ID: {agent.id}") + + # Create thread for communication + thread = agents_client.threads.create() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = agents_client.messages.create( + thread_id=thread.id, + role=MessageRole.USER, + content="How does wikipedia explain Euler's Identity?", + ) + print(f"Created message, ID: {message.id}") + + # Create and process agent run in thread with tools + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Fetch run steps to get the details of the agent run + run_steps = agents_client.run_steps.list(thread_id=thread.id, run_id=run.id) + for step in run_steps: + print(f"Step {step['id']} status: {step['status']}") + 
step_details = step.get("step_details", {}) + tool_calls = step_details.get("tool_calls", []) + + if tool_calls: + print(" Tool calls:") + for call in tool_calls: + print(f" Tool Call ID: {call.get('id')}") + print(f" Type: {call.get('type')}") + + bing_grounding_details = call.get("bing_grounding", {}) + if bing_grounding_details: + print(f" Bing Grounding ID: {bing_grounding_details.get('requesturl')}") + + print() # add an extra newline between steps + + # Delete the agent when done + agents_client.delete_agent(agent.id) + print("Deleted agent") + + # Print the Agent's response message with optional citation + response_message = agents_client.messages.get_last_message_by_role(thread_id=thread.id, role=MessageRole.AGENT) + if response_message: + for text_message in response_message.text_messages: + print(f"Agent response: {text_message.text.value}") + for annotation in response_message.url_citation_annotations: + print(f"URL Citation: [{annotation.url_citation.title}]({annotation.url_citation.url})") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_code_interpreter.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_code_interpreter.py new file mode 100644 index 000000000000..56ada75bbdf4 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_code_interpreter.py @@ -0,0 +1,111 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with code interpreter from + the Azure Agents service using a synchronous client. 
+ +USAGE: + python sample_agents_code_interpreter.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import CodeInterpreterTool +from azure.ai.agents.models import FilePurpose, MessageRole +from azure.identity import DefaultAzureCredential +from pathlib import Path + +asset_file_path = os.path.abspath( + os.path.join(os.path.dirname(__file__), "../assets/synthetic_500_quarterly_results.csv") +) + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + # Upload a file and wait for it to be processed + # [START upload_file_and_create_agent_with_code_interpreter] + file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) + print(f"Uploaded file, file ID: {file.id}") + + code_interpreter = CodeInterpreterTool(file_ids=[file.id]) + + # Create agent with code interpreter tool and tools_resources + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + tools=code_interpreter.definitions, + tool_resources=code_interpreter.resources, + ) + # [END upload_file_and_create_agent_with_code_interpreter] + print(f"Created agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + # Create a message + message = agents_client.messages.create( + thread_id=thread.id, + role="user", + 
content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", + ) + print(f"Created message, message ID: {message.id}") + + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + agents_client.files.delete(file.id) + print("Deleted file") + + # [START get_messages_and_save_files] + messages = agents_client.messages.list(thread_id=thread.id) + print(f"Messages: {messages}") + + for msg in messages: + # Save every image file in the message + for img in msg.image_contents: + file_id = img.image_file.file_id + file_name = f"{file_id}_image_file.png" + agents_client.files.save(file_id=file_id, file_name=file_name) + print(f"Saved image file to: {Path.cwd() / file_name}") + + # Print details of every file-path annotation + for ann in msg.file_path_annotations: + print("File Paths:") + print(f" Type: {ann.type}") + print(f" Text: {ann.text}") + print(f" File ID: {ann.file_path.file_id}") + print(f" Start Index: {ann.start_index}") + print(f" End Index: {ann.end_index}") + # [END get_messages_and_save_files] + + last_msg = agents_client.messages.get_last_message_text_by_role(thread_id=thread.id, role=MessageRole.AGENT) + if last_msg: + print(f"Last Message: {last_msg.text.value}") + + agents_client.delete_agent(agent.id) + print("Deleted agent") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_code_interpreter_attachment_enterprise_search.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_code_interpreter_attachment_enterprise_search.py new file mode 100644 index 000000000000..25501ca4aab8 --- /dev/null +++ 
b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_code_interpreter_attachment_enterprise_search.py @@ -0,0 +1,86 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with code interpreter from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_code_interpreter_attachment_enterprise_search.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_BLOB_URI - The URI of the blob storage where the file is uploaded. 
In the format: + azureml://subscriptions/{subscription-id}/resourcegroups/{resource-group-name}/workspaces/{workspace-name}/datastores/{datastore-name}/paths/{path-to-file} +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import ( + CodeInterpreterTool, + MessageAttachment, + MessageRole, + VectorStoreDataSource, + VectorStoreDataSourceAssetType, +) +from azure.identity import DefaultAzureCredential + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + code_interpreter = CodeInterpreterTool() + + # notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + tools=code_interpreter.definitions, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + # [START upload_file_and_create_message_with_code_interpreter] + # We will upload the local file to Azure and will use it for vector store creation. 
+ asset_uri = os.environ["AZURE_BLOB_URI"] + ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) + + # Create a message with the attachment + attachment = MessageAttachment(data_source=ds, tools=code_interpreter.definitions) + message = agents_client.messages.create( + thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] + ) + # [END upload_file_and_create_message_with_code_interpreter] + + print(f"Created message, message ID: {message.id}") + + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + last_msg = agents_client.messages.get_last_message_text_by_role(thread_id=thread.id, role=MessageRole.AGENT) + if last_msg: + print(f"Last Message: {last_msg.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_connected_agent.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_connected_agent.py new file mode 100644 index 000000000000..2b8414030745 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_connected_agent.py @@ -0,0 +1,98 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use Agent operations with the Connected Agent tool from + the Azure Agents service using a synchronous client. 
+ +USAGE: + python sample_agents_connected_agent.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import ConnectedAgentTool, MessageRole +from azure.identity import DefaultAzureCredential + + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +connected_agent_name = "stock_price_bot" + +with project_client: + agents_client = project_client.agents + + stock_price_agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name=connected_agent_name, + instructions=( + "Your job is to get the stock price of a company. If asked for the Microsoft stock price, always return $350." 
+ ), + ) + + # [START create_agent_with_connected_agent_tool] + # Initialize Connected Agent tool with the agent id, name, and description + connected_agent = ConnectedAgentTool( + id=stock_price_agent.id, name=connected_agent_name, description="Gets the stock price of a company" + ) + + # Create agent with the Connected Agent tool and process assistant run + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant, and use the connected agents to get stock prices.", + tools=connected_agent.definitions, + ) + # [END create_agent_with_connected_agent_tool] + + print(f"Created agent, ID: {agent.id}") + + # Create thread for communication + thread = agents_client.threads.create() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = agents_client.messages.create( + thread_id=thread.id, + role=MessageRole.USER, + content="What is the stock price of Microsoft?", + ) + print(f"Created message, ID: {message.id}") + + # Create and process Agent run in thread with tools + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the Agent when done + agents_client.delete_agent(agent.id) + print("Deleted agent") + + # Delete the connected Agent when done + agents_client.delete_agent(stock_price_agent.id) + print("Deleted stock price agent") + + # Fetch and log all messages + messages = agents_client.messages.list(thread_id=thread.id) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_enterprise_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_enterprise_file_search.py new file mode 100644 index 
000000000000..b183a0431ab2 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_enterprise_file_search.py @@ -0,0 +1,83 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +DESCRIPTION: + This sample demonstrates how to add files to agent during the vector store creation. + +USAGE: + python sample_agents_enterprise_file_search.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity azure-ai-ml + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_BLOB_URI - The URI of the blob storage where the file is uploaded. In the format: + azureml://subscriptions/{subscription-id}/resourcegroups/{resource-group-name}/workspaces/{workspace-name}/datastores/{datastore-name}/paths/{path-to-file} +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import FileSearchTool, ListSortOrder, VectorStoreDataSource, VectorStoreDataSourceAssetType +from azure.identity import DefaultAzureCredential + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + # [START upload_file_and_create_agent_with_file_search] + # We will upload the local file to Azure and will use it for vector store creation. 
+    asset_uri = os.environ["AZURE_BLOB_URI"] + + # Create a vector store with no file and wait for it to be processed + ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) + vector_store = agents_client.vector_stores.create_and_poll(data_sources=[ds], name="sample_vector_store") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # Note that the FileSearchTool must be added via both tools and tool_resources, or the agent will be unable to search the file + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + # [END upload_file_and_create_agent_with_file_search] + print(f"Created agent, agent ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID: {thread.id}") + + message = agents_client.messages.create( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
+ ) + print(f"Created message, message ID: {message.id}") + + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Created run, run ID: {run.id}") + + agents_client.vector_stores.delete(vector_store.id) + print("Deleted vector store") + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_fabric.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_fabric.py new file mode 100644 index 000000000000..4e2dcd0ac3b3 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_fabric.py @@ -0,0 +1,88 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_fabric.py + +DESCRIPTION: + This sample demonstrates how to use Agent operations with the Microsoft Fabric grounding tool from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_fabric.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set this environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+ 3) FABRIC_CONNECTION_ID - The ID of the Fabric connection, in the format of: + /subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace-name}/connections/{connection-name} +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.agents.models import FabricTool, ListSortOrder + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +# [START create_agent_with_fabric_tool] +conn_id = os.environ["FABRIC_CONNECTION_ID"] + +print(conn_id) + +# Initialize an Agent Fabric tool and add the connection id +fabric = FabricTool(connection_id=conn_id) + +# Create an Agent with the Fabric tool and process an Agent run +with project_client: + agents_client = project_client.agents + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + tools=fabric.definitions, + ) + # [END create_agent_with_fabric_tool] + print(f"Created Agent, ID: {agent.id}") + + # Create thread for communication + thread = agents_client.threads.create() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = agents_client.messages.create( + thread_id=thread.id, + role="user", + content="", + ) + print(f"Created message, ID: {message.id}") + + # Create and process an Agent run in thread with tools + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the Agent when done + agents_client.delete_agent(agent.id) + print("Deleted agent") + + # Fetch and log all messages + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + 
last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_file_search.py new file mode 100644 index 000000000000..00d3b7963d09 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_file_search.py @@ -0,0 +1,104 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with file searching from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_file_search.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import ( + FileSearchTool, + FilePurpose, + ListSortOrder, +) +from azure.identity import DefaultAzureCredential + +asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + # Upload file and create vector store + # [START upload_file_create_vector_store_and_agent_with_file_search_tool] + file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) + print(f"Uploaded file, file ID: {file.id}") + + vector_store = agents_client.vector_stores.create_and_poll(file_ids=[file.id], name="my_vectorstore") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create file search tool with resources followed by creating agent + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="Hello, you are helpful agent and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + # [END upload_file_create_vector_store_and_agent_with_file_search_tool] + + print(f"Created agent, ID: {agent.id}") + + # Create thread for communication + thread = agents_client.threads.create() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = agents_client.messages.create( + thread_id=thread.id, role="user", content="Hello, what Contoso products do you know?" 
+ ) + print(f"Created message, ID: {message.id}") + + # Create and process agent run in thread with tools + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + # [START teardown] + # Delete the file when done + agents_client.vector_stores.delete(vector_store.id) + print("Deleted vector store") + + agents_client.files.delete(file_id=file.id) + print("Deleted file") + + # Delete the agent when done + agents_client.delete_agent(agent.id) + print("Deleted agent") + # [END teardown] + + # Fetch and log all messages + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + + # Print last messages from the thread + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_functions.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_functions.py new file mode 100644 index 000000000000..fd712a5f5342 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_functions.py @@ -0,0 +1,117 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with custom functions from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_functions.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. 
+ 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" +import os, time, sys +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.agents.models import ( + FunctionTool, + ListSortOrder, + RequiredFunctionToolCall, + SubmitToolOutputsAction, + ToolOutput, +) + +current_path = os.path.dirname(__file__) +root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir)) +if root_path not in sys.path: + sys.path.insert(0, root_path) +from samples.utils.user_functions import user_functions + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +# Initialize function tool with user functions +functions = FunctionTool(functions=user_functions) + +with project_client: + agents_client = project_client.agents + + # Create an agent and run user's request with function calls + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + tools=functions.definitions, + ) + print(f"Created agent, ID: {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, ID: {thread.id}") + + message = agents_client.messages.create( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York?", + ) + print(f"Created message, ID: {message.id}") + + run = agents_client.runs.create(thread_id=thread.id, agent_id=agent.id) + print(f"Created run, ID: {run.id}") + + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = agents_client.runs.get(thread_id=thread.id, run_id=run.id) + + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + 
if not tool_calls: + print("No tool calls provided - cancelling run") + agents_client.runs.cancel(thread_id=thread.id, run_id=run.id) + break + + tool_outputs = [] + for tool_call in tool_calls: + if isinstance(tool_call, RequiredFunctionToolCall): + try: + print(f"Executing tool call: {tool_call}") + output = functions.execute(tool_call) + tool_outputs.append( + ToolOutput( + tool_call_id=tool_call.id, + output=output, + ) + ) + except Exception as e: + print(f"Error executing tool_call {tool_call.id}: {e}") + + print(f"Tool outputs: {tool_outputs}") + if tool_outputs: + agents_client.runs.submit_tool_outputs(thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs) + + print(f"Current run status: {run.status}") + + print(f"Run completed with status: {run.status}") + + # Delete the agent when done + agents_client.delete_agent(agent.id) + print("Deleted agent") + + # Fetch and log all messages + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_logic_apps.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_logic_apps.py new file mode 100644 index 000000000000..53b7817af440 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_logic_apps.py @@ -0,0 +1,134 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agents with Logic Apps to execute the task of sending an email. 
+ +PREREQUISITES: + 1) Create a Logic App within the same resource group as your Azure AI Project in Azure Portal + 2) To configure your Logic App to send emails, you must include an HTTP request trigger that is + configured to accept JSON with 'to', 'subject', and 'body'. The guide to creating a Logic App Workflow + can be found here: + https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/agents-logic-apps#create-logic-apps-workflows-for-function-calling + +USAGE: + python sample_agents_logic_apps.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity azure-mgmt-logic + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + + Replace the following values in the sample with your own values: + 1) <LOGIC_APP_NAME> - The name of the Logic App you created. + 2) <TRIGGER_NAME> - The name of the trigger in the Logic App you created (the default name for HTTP + triggers in the Azure Portal is "When_a_HTTP_request_is_received"). + 3) <RECIPIENT_EMAIL> - The email address of the recipient. 
+""" + + +import os +import sys +from typing import Set + +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import ToolSet, FunctionTool +from azure.identity import DefaultAzureCredential + +# Example user function +current_path = os.path.dirname(__file__) +root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir)) +if root_path not in sys.path: + sys.path.insert(0, root_path) +from samples.utils.user_functions import fetch_current_datetime + +# Import AzureLogicAppTool and the function factory from user_logic_apps +from utils.user_logic_apps import AzureLogicAppTool, create_send_email_function + +# [START register_logic_app] + +# Create the agents client +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +# Extract subscription and resource group from the project scope +subscription_id = os.environ["SUBSCRIPTION_ID"] +resource_group = os.environ["resource_group_name"] + +# Logic App details +logic_app_name = "" +trigger_name = "" + +# Create and initialize AzureLogicAppTool utility +logic_app_tool = AzureLogicAppTool(subscription_id, resource_group) +logic_app_tool.register_logic_app(logic_app_name, trigger_name) +print(f"Registered logic app '{logic_app_name}' with trigger '{trigger_name}'.") + +# Create the specialized "send_email_via_logic_app" function for your agent tools +send_email_func = create_send_email_function(logic_app_tool, logic_app_name) + +# Prepare the function tools for the agent +functions_to_use: Set = { + fetch_current_datetime, + send_email_func, # This references the AzureLogicAppTool instance via closure +} +# [END register_logic_app] + +with project_client: + agents_client = project_client.agents + + # Create an agent + functions = FunctionTool(functions=functions_to_use) + toolset = ToolSet() + toolset.add(functions) + + agents_client.enable_auto_function_calls(toolset) + + agent = agents_client.create_agent( + 
model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="SendEmailAgent", + instructions="You are a specialized agent for sending emails.", + toolset=toolset, + ) + print(f"Created agent, ID: {agent.id}") + + # Create a thread for communication + thread = agents_client.threads.create() + print(f"Created thread, ID: {thread.id}") + + # Create a message in the thread + message = agents_client.messages.create( + thread_id=thread.id, + role="user", + content="Hello, please send an email to with the date and time in '%Y-%m-%d %H:%M:%S' format.", + ) + print(f"Created message, ID: {message.id}") + + # Create and process an agent run in the thread + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the agent when done + agents_client.delete_agent(agent.id) + print("Deleted agent") + + # Fetch and log all messages + messages = agents_client.messages.list(thread_id=thread.id) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_multiple_connected_agents.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_multiple_connected_agents.py new file mode 100644 index 000000000000..95150ffa85eb --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_multiple_connected_agents.py @@ -0,0 +1,117 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use Agent operations with the Connected Agent tool from + the Azure Agents service using a synchronous client. 
+ +USAGE: + python sample_agents_multiple_connected_agents.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import ConnectedAgentTool, MessageRole +from azure.identity import DefaultAzureCredential + + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + connected_agent_name = "stock_price_bot" + weather_agent_name = "weather_bot" + + stock_price_agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name=connected_agent_name, + instructions=( + "Your job is to get the stock price of a company. If asked for the Microsoft stock price, always return $350." + ), + ) + + weather_agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name=weather_agent_name, + instructions=( + "Your job is to get the weather for a given location. If asked for the weather in Seattle, always return 60 degrees and cloudy." 
+ ), + ) + + # Initialize Connected Agent tools with the agent id, name, and description + connected_agent = ConnectedAgentTool( + id=stock_price_agent.id, name=connected_agent_name, description="Gets the stock price of a company" + ) + connected_weather_agent = ConnectedAgentTool( + id=weather_agent.id, name=weather_agent_name, description="Gets the weather for a given location" + ) + + # Create agent with the Connected Agent tool and process assistant run + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-assistant", + instructions="You are a helpful assistant, and use the connected agents to get stock prices and weather.", + tools=[ + connected_agent.definitions[0], + connected_weather_agent.definitions[0], + ], + ) + # [END create_agent_with_connected_agent_tool] + + print(f"Created agent, ID: {agent.id}") + + # Create thread for communication + thread = agents_client.threads.create() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = agents_client.messages.create( + thread_id=thread.id, + role=MessageRole.USER, + content="What is the stock price of Microsoft and the weather in Seattle?", + ) + print(f"Created message, ID: {message.id}") + + # Create and process Agent run in thread with tools + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the Agent when done + agents_client.delete_agent(agent.id) + print("Deleted agent") + + # Delete the connected Agent when done + agents_client.delete_agent(stock_price_agent.id) + print("Deleted stock price agent") + + # Delete the connected Agent when done + agents_client.delete_agent(weather_agent.id) + print("Deleted weather agent") + + # Fetch and log all messages + messages = agents_client.messages.list(thread_id=thread.id) + for msg in messages: + if msg.text_messages: + 
last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_openapi.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_openapi.py new file mode 100644 index 000000000000..e22b081ccce7 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_openapi.py @@ -0,0 +1,123 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with the + OpenAPI tool from the Azure Agents service using a synchronous client. + To learn more about OpenAPI specs, visit https://learn.microsoft.com/openapi + +USAGE: + python sample_agents_openapi.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity jsonref + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os +import jsonref +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.agents.models import OpenApiTool, OpenApiAnonymousAuthDetails + +weather_asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/weather_openapi.json")) + +countries_asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/countries.json")) + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) +# [START create_agent_with_openapi] + +with open(weather_asset_file_path, "r") as f: + openapi_weather = jsonref.loads(f.read()) + +with open(countries_asset_file_path, "r") as f: + openapi_countries = jsonref.loads(f.read()) + +# Create Auth object for the OpenApiTool (note that connection or managed identity auth setup requires additional setup in Azure) +auth = OpenApiAnonymousAuthDetails() + +# Initialize agent OpenApi tool using the read in OpenAPI spec +openapi_tool = OpenApiTool( + name="get_weather", spec=openapi_weather, description="Retrieve weather information for a location", auth=auth +) +openapi_tool.add_definition( + name="get_countries", spec=openapi_countries, description="Retrieve a list of countries", auth=auth +) + +# Create agent with OpenApi tool and process agent run +with project_client: + agents_client = project_client.agents + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + tools=openapi_tool.definitions, + ) + # [END create_agent_with_openapi] + + print(f"Created agent, ID: {agent.id}") + + # Create thread for communication + thread = agents_client.threads.create() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = agents_client.messages.create( + thread_id=thread.id, + role="user", + content="What's the weather in Seattle and What is the name and 
population of the country that uses currency with abbreviation THB?", + ) + print(f"Created message, ID: {message.id}") + + # Create and process agent run in thread with tools + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + run_steps = agents_client.run_steps.list(thread_id=thread.id, run_id=run.id) + + # Loop through each step + for step in run_steps: + print(f"Step {step['id']} status: {step['status']}") + + # Check if there are tool calls in the step details + step_details = step.get("step_details", {}) + tool_calls = step_details.get("tool_calls", []) + + if tool_calls: + print(" Tool calls:") + for call in tool_calls: + print(f" Tool Call ID: {call.get('id')}") + print(f" Type: {call.get('type')}") + + function_details = call.get("function", {}) + if function_details: + print(f" Function name: {function_details.get('name')}") + print() # add an extra newline between steps + + # Delete the agent when done + agents_client.delete_agent(agent.id) + print("Deleted agent") + + # Fetch and log all messages + messages = agents_client.messages.list(thread_id=thread.id) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_openapi_connection_auth.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_openapi_connection_auth.py new file mode 100644 index 000000000000..94602da687b8 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_openapi_connection_auth.py @@ -0,0 +1,103 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------
+
+"""
+FILE: sample_agents_openapi_connection_auth.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with the
+    OpenAPI tool from the Azure Agents service using a synchronous client, using
+    custom key authentication against the TripAdvisor API.
+    To learn more about OpenAPI specs, visit https://learn.microsoft.com/openapi
+
+USAGE:
+    python sample_agents_openapi_connection_auth.py
+
+    Before running the sample:
+
+    Set up an account at https://www.tripadvisor.com/developers and get an API key.
+
+    Set up a custom key connection and save the connection name following the steps at
+    https://aka.ms/azsdk/azure-ai-agents/custom-key-setup
+
+    Save the ID of that connection as the OPENAPI_CONNECTION_ID environment variable
+
+    pip install azure-ai-projects azure-ai-agents azure-identity jsonref
+
+    Set these environment variables with your own values:
+    PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
+          page of your Azure AI Foundry portal.
+    OPENAPI_CONNECTION_ID - the connection ID for the OpenAPI connection, taken from Azure AI Foundry.
+ MODEL_DEPLOYMENT_NAME - name of the model deployment in the project to use Agents against +""" + +import os +import jsonref +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.agents.models import OpenApiTool, OpenApiConnectionAuthDetails, OpenApiConnectionSecurityScheme + +asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/tripadvisor_openapi.json")) + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +model_name = os.environ["MODEL_DEPLOYMENT_NAME"] +connection_id = os.environ["OPENAPI_CONNECTION_ID"] + +print(connection_id) + +with open(asset_file_path, "r") as f: + openapi_spec = jsonref.loads(f.read()) + +# Create Auth object for the OpenApiTool (note that connection or managed identity auth setup requires additional setup in Azure) +auth = OpenApiConnectionAuthDetails(security_scheme=OpenApiConnectionSecurityScheme(connection_id=connection_id)) + +# Initialize an Agent OpenApi tool using the read in OpenAPI spec +openapi = OpenApiTool( + name="get_weather", spec=openapi_spec, description="Retrieve weather information for a location", auth=auth +) + +# Create an Agent with OpenApi tool and process Agent run +with project_client: + agents_client = project_client.agents + + agent = agents_client.create_agent( + model=model_name, name="my-agent", instructions="You are a helpful agent", tools=openapi.definitions + ) + print(f"Created agent, ID: {agent.id}") + + # Create thread for communication + thread = agents_client.threads.create() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = agents_client.messages.create( + thread_id=thread.id, + role="user", + content="Summarize the reviews for the top rated hotel in Paris", + ) + print(f"Created message: {message['id']}") + + # Create and process an Agent run in thread with tools + run = 
agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the Agent when done + agents_client.delete_agent(agent.id) + print("Deleted agent") + + # Fetch and log all messages + messages = agents_client.messages.list(thread_id=thread.id) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_run_with_toolset.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_run_with_toolset.py new file mode 100644 index 000000000000..b6c86a10b28d --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_run_with_toolset.py @@ -0,0 +1,96 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with toolset from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_run_with_toolset.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os, sys +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.agents.models import FunctionTool, ToolSet, CodeInterpreterTool + +current_path = os.path.dirname(__file__) +root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir)) +if root_path not in sys.path: + sys.path.insert(0, root_path) +from samples.utils.user_functions import user_functions + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +# Create agent with toolset and process agent run +with project_client: + agents_client = project_client.agents + + # Initialize agent toolset with user functions and code interpreter + # [START create_agent_toolset] + functions = FunctionTool(user_functions) + code_interpreter = CodeInterpreterTool() + + toolset = ToolSet() + toolset.add(functions) + toolset.add(code_interpreter) + + # To enable tool calls executed automatically + agents_client.enable_auto_function_calls(toolset) + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + toolset=toolset, + ) + # [END create_agent_toolset] + print(f"Created agent, ID: {agent.id}") + + # Create thread for communication + thread = agents_client.threads.create() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = agents_client.messages.create( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York?", + ) + print(f"Created message, ID: {message.id}") + + # Create and process agent run in thread with tools + # [START create_and_process_run] + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + # [END create_and_process_run] + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: 
{run.last_error}")
+
+    # Delete the agent when done
+    agents_client.delete_agent(agent.id)
+    print("Deleted agent")
+
+    # Fetch and log all messages
+    messages = agents_client.messages.list(thread_id=thread.id)
+    for msg in messages:
+        if msg.text_messages:
+            last_text = msg.text_messages[-1]
+            print(f"{msg.role}: {last_text.text.value}")
diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_sharepoint.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_sharepoint.py
new file mode 100644
index 000000000000..5d3246b56cf8
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/sample_agents_sharepoint.py
@@ -0,0 +1,91 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_sharepoint.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with the
+    Sharepoint tool from the Azure Agents service using a synchronous client.
+    The sharepoint tool is currently available only to whitelisted customers.
+    For access and onboarding instructions, please contact azureagents-preview@microsoft.com.
+
+USAGE:
+    python sample_agents_sharepoint.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-ai-agents azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview
+       page of your Azure AI Foundry portal.
+    2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in
+       the "Models + endpoints" tab in your Azure AI Foundry project.
+ 3) SHAREPOINT_CONNECTION_ID - The ID of the Sharepoint connection, in the format of: + /subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace-name}/connections/{connection-name} +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.agents.models import SharepointTool + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +conn_id = os.environ["SHAREPOINT_CONNECTION_ID"] + +# Initialize Sharepoint tool with connection id +sharepoint = SharepointTool(connection_id=conn_id) + +# Create agent with Sharepoint tool and process agent run +with project_client: + agents_client = project_client.agents + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + tools=sharepoint.definitions, + ) + print(f"Created agent, ID: {agent.id}") + + # Create thread for communication + thread = agents_client.threads.create() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = agents_client.messages.create( + thread_id=thread.id, + role="user", + content="Hello, summarize the key points of the ", + ) + print(f"Created message, ID: {message.id}") + + # Create and process agent run in thread with tools + run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the agent when done + agents_client.delete_agent(agent.id) + print("Deleted agent") + + 
# Fetch and log all messages + messages = agents_client.messages.list(thread_id=thread.id) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/agents_tools/utils/user_logic_apps.py b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/utils/user_logic_apps.py new file mode 100644 index 000000000000..979fd5eca143 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/agents_tools/utils/user_logic_apps.py @@ -0,0 +1,80 @@ +import json +import requests +from typing import Dict, Any, Callable + +from azure.identity import DefaultAzureCredential +from azure.mgmt.logic import LogicManagementClient + + +class AzureLogicAppTool: + """ + A service that manages multiple Logic Apps by retrieving and storing their callback URLs, + and then invoking them with an appropriate payload. + """ + + def __init__(self, subscription_id: str, resource_group: str, credential=None): + if credential is None: + credential = DefaultAzureCredential() + self.subscription_id = subscription_id + self.resource_group = resource_group + self.logic_client = LogicManagementClient(credential, subscription_id) + + self.callback_urls: Dict[str, str] = {} + + def register_logic_app(self, logic_app_name: str, trigger_name: str) -> None: + """ + Retrieves and stores a callback URL for a specific Logic App + trigger. + Raises a ValueError if the callback URL is missing. 
+ """ + callback = self.logic_client.workflow_triggers.list_callback_url( + resource_group_name=self.resource_group, + workflow_name=logic_app_name, + trigger_name=trigger_name, + ) + + if callback.value is None: + raise ValueError(f"No callback URL returned for Logic App '{logic_app_name}'.") + + self.callback_urls[logic_app_name] = callback.value + + def invoke_logic_app(self, logic_app_name: str, payload: Dict[str, Any]) -> Dict[str, Any]: + """ + Invokes the registered Logic App (by name) with the given JSON payload. + Returns a dictionary summarizing success/failure. + """ + if logic_app_name not in self.callback_urls: + raise ValueError(f"Logic App '{logic_app_name}' has not been registered.") + + url = self.callback_urls[logic_app_name] + response = requests.post(url=url, json=payload) + + if response.ok: + return {"result": f"Successfully invoked {logic_app_name}."} + else: + return {"error": (f"Error invoking {logic_app_name} " f"({response.status_code}): {response.text}")} + + +def create_send_email_function(service: AzureLogicAppTool, logic_app_name: str) -> Callable[[str, str, str], str]: + """ + Returns a function that sends an email by invoking the specified Logic App in LogicAppService. + This keeps the LogicAppService instance out of global scope by capturing it in a closure. + """ + + def send_email_via_logic_app(recipient: str, subject: str, body: str) -> str: + """ + Sends an email by invoking the specified Logic App with the given recipient, subject, and body. + + :param recipient: The email address of the recipient. + :param subject: The subject of the email. + :param body: The body of the email. + :return: A JSON string summarizing the result of the operation. 
+ """ + payload = { + "to": recipient, + "subject": subject, + "body": body, + } + result = service.invoke_logic_app(logic_app_name, payload) + return json.dumps(result) + + return send_email_via_logic_app diff --git a/sdk/ai/azure-ai-projects/samples/agents/assets/countries.json b/sdk/ai/azure-ai-projects/samples/agents/assets/countries.json new file mode 100644 index 000000000000..58d3df70d28d --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/assets/countries.json @@ -0,0 +1,46 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "RestCountries.NET API", + "description": "Web API version 3.1 for managing country items, based on previous implementations from restcountries.eu and restcountries.com.", + "version": "v3.1" + }, + "servers": [ + { "url": "https://restcountries.net" } + ], + "auth": [], + "paths": { + "/v3.1/currency": { + "get": { + "description": "Search by currency.", + "operationId": "LookupCountryByCurrency", + "parameters": [ + { + "name": "currency", + "in": "query", + "description": "The currency to search for.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Success", + "content": { + "text/plain": { + "schema": { + "type": "string" + } + } + } + } + } + } + } + }, + "components": { + "schemes": {} + } + } \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/samples/agents/assets/image_file.png b/sdk/ai/azure-ai-projects/samples/agents/assets/image_file.png new file mode 100644 index 0000000000000000000000000000000000000000..50ae6c65367af30a10642fc910cef97bfe765796 GIT binary patch literal 183951 zcmdS>hdbB*`#z3qX-cJ1Dnv=jDmzq0JJ}>7D?(&%Dv=o#6`^c06WL@$QTCoi_TF3H z^Y;9_kKb{;{)C_7c)ngoJjVTgzg@TMI?wYuubYRo)Mct|blWH>D5$Ppk&vaJ*qlQ_ zL3w%$C4OUlV2~C6Cukw5Y$0cI&%#>MOqb%Oro}@e6APpJT8vh@X6E-zjL&lLaGX8O zsBdBM&|HX<^TGe`1so=3dYnH)C93ctTOVGzZB9W!Uqt>}XIyP5O0kZD;;O_&d7I#o z4r}?n{mXn4hU*5mux*>;@Z7YI_eowU9dF|S-e!BPt2M{E4$x?OUGn+#ZI8Y8@i{vC 
z@kh#EpFdj9^hEY`_BccCPv4eOj*-;U=8lz|#!jj|hKcDn{gsbJ| z|9;ZFXFrVp=X03OoV#H~^FJT-qDAxnPcJFqhT1wNCN_Z|KYnc4y7i*6@`3K|Zoii= z*B?4`NZ{PL2(53Pv1&PdBO@a>)NZns1o7NwQpvZSj`lDRlbsyvI5*N&z`$?Z{5(6pT$YG5vw*7s0_hT*cPe+uLX6$a!)Eu`uAX6jLq7LV0c6XdD-^u+nd|k zB2=@?Vr3#I+dDdLs{Il>#l*D!=+UD#%X93V1sGv$%rfhh{$h0d zyUrOor2zLUW+eRBx@&7|AI=QO92PX&xVE^5x6^tTO9y{p&`t`@F6O+?b=b5>=Yw8tF znr*AlTb}qMepuLMSAS(lam5=>^NxHm*KPVW;hVCpMm_CjhxT2d@^Jqyzb*AskeSPq zcl;(=?U|-Rc}<<2^gDO%WL5aM*P!9!an<;P%!@*IzfuT0F3G1Em$>hK%cUunU#?($ z_eJ8J4C95tnD;XroUE){il0%_Cf3EgFuHe-rSkoGU*~_Tv{?HKDk>^T<8jhq>xD%` zY7%arU07NQzY%yQD?f%y)%U#P(%ye-%bZdnLat588ipN>4GsH7+cL_^7@WQ7cn$vE zcjnZ{FS*R%7+LoPKMx;1yx_6txb)|HH|rDc&>lHw^)x;{zI1+Oq=hC~J+EA@W2PbE zGWE(rkFQEbORDbn*X(y#RQkupo^RN^^J|gV^rM(12Pro$!^YEii=48k%>?7Us!;Y; zmh)7NYRds7Tt8TA3HR$c z-u*(8q!9V1Ap)lBD_*mU%gg&qwdv{T{GJrPE%Y#3E{%97_y-1RkmBk)>!f(>~d$OlYz+zyR)AHzl;qu;jO)xk(_=cKfM5f&= z+r7%*t*&C4^{Y>xKK(t?^0_GW*|W{9Umh6NI|iv-bo|mP?7VikV0G>ghem!v({fR9 z@nqHcf4}i57qKU~O}i!1j5}Jdq#t9d?ZWku9jR$M@w-rRPW9&PmoH!X`TIY`sxb=) z9N4_`@YirjuN_W_R^y!_0TTQaB&vH#ea>Dx(~JE%F+DAwZqTqp{R>M~s7Udx^Q%uk zd|=G$q@|~SJ~8ny7>|QuV!YrVUBZbp?xeD=Tw9mp*I)l$nCubKc=fHMw=ZR5@s$FAvm2D5vUd#f^F4Ps8;ezSh_K$nHIO(Blxl@oyJuQLc+V z+Z1hV1om^N#y0U9gXYh`fWF@nGW*@Ge8+*NU|mY`4x&gckXB_26~TRVX%gN zDXK}Tf-E_|l&@XeHaFJcg*~f1+5HXq^-Em5j`TLy_1pKK-X*edNJ zOCi`0-<$H0?O2C?)FT%%g^X}~uTp#!a?V=o$IIg?_V<72oDmQdJj%?x;p)|^1`O-Z z+fHq)i@qryBNf7B(kUEKk9?$o&6*Lbq>ZIne5CXwHMxO9J=YyE-=9TmEG3xSYkuZl zH^l`I{LnPNKM)Poo; z@jiHV7cFg!=^@-O5@7m!SFy0K_`+x8L3ZZa>LcdWcOJcU=FFLgL-i+B3!IK@r{$2q zHEuWRTB@zTu<`qnJ=@TaF`5%nzhmAz?YSuz`LnQa-QeKhm8(}}Y}s*ZIaZ@Z-@Ir) z*-UEHe^jVooNZ0f++a61vK58O#b0j+3fNno`+L@J*uZP7jYNt|ImW`mBqE~LV^zAw z&gN~yE+G;96%asXH}ji{$;JNnUVRC_8q@B}S&X$G6crVHXldyh5U{Vdu5LFCjSDWl z2eFxHH5xP^azZNjD}FJ#JYHZfYQn@k)R}i~s3|!)Rh{DvGaK8s4C9U>WJk$lNl8h~ za{rUfnWow=g6@nqCf?byeLKzT>j7-82*z-W!CICCg_^tNRf=KKcd_=^H_yrqu@5Ac z=SKew4{yJ9>lPCJmf}Ovk&%o%JbO_!1RR%mjJxvL?J|$h(HYES)9jWhe8;WrR@tVW zYqOh{Q+@C2kdP2=rxlx!uJOUaz+pZV<7b~ge=c7tDUp!4bg8JTi@}%S{QkVTrCCFV 
zwZ;Bs+;o0rdR$zbn6Iv-cVOl@^S&MQ!Zwc(bqAtWjF5&Fy1h8%U*?cHgsl5}bkqmY zSX^7X=Yk^B7)p4i^*H@ldzQ;*y($8ZG%Q)&hbQq5ty1rUY(GBjIezWK^*0;?)nOYx zDkXCqbKA3L&%{W}*Aa9TXxn5@BR@RM7!Xk$pskFDFb75yoGCOJH_n3F6U z<=NQ8%e!k`YPZnQqZhS(b@lZ0@`lV7CNyjGHwLgN6akP}Iu&GRR``;Q^QXv_ZMdVO zqbA)@$zq^-9bWMyFHhuAg0kJrz;3BvfyW3%MtqV@RlHI%mq9)2X^s5D>gwu?Np~zP zc&tWSU5|>p{mnV89VloXeJ#B0$B(Uf_VbtC@jZ}^xI`s0a_rbK#$(4e&X08zBWic= z+Qs*)LhOq z>)qB@8FI?Nb#Q##7q7aD?H+7+rF-h^tpoR)BCpca2tOEV&$XrbeDBwun>TkMYBz1) zPhM4;r1coPcNaUp|4#qr66dwWnB4~Q7JvIc|4PPp#U0CjU}Q9b%3f4eb<3tX`e$h= zC6c#TqDnf~!`~MG1@_`SC!|7!Z|`bgDQ`fWN}8|Ajx7Yf;FvksH}~ruf4ELYUS3|| zvmc|a>8B7JF_!O0GA7%%$Umy_TDDE z;giFnS%iPc18jrvV{xvoo1$;Nb9=mDvlz>(I3tt$_qSSGTN9ur{%q%4AxpNgj+_%e z6W`I6=F?N}*irH`@T|g~4iWpglKMEghRW%&fdTiW+2IJM)ujU>c051@ZjQ@yTdDpb z=}e4fbZMd9={Ki@J-^J>m~A<%+Aj(WREo?zIZ(r_sivjHw4Yt&X+yly_F=%MkJCCGC{7 zv=T%bx7CPpdc>R{0QA*YEc&&PTd^(3;{3*C7Y!p#ll_%)zm7#7C85g_xQuyeW>7or zMdVZu?k9YDD+M6M@Ba7BZeCtqt@8pxLPc$DhdMhu5AYcsKuUE%CD1~}+$60HR7!F0 zw7a{f-_3V?2<+|QmL%ZuH0xQi8-R0U>^GPX)*d%%P5W`z@;K8f@BP}1hxm*xAW$TS zIaM~NN}u1doA$@5x%Mpc<`hjA{g8+VX2k^M$6IMw^lwGv1RmnX0e!f)b*$0va5js(XAzYg4DXHip`Dzlcc4S*l7E_=9J@Kdbp}F~43*L2q zSM64?`xpA&naD?*2A!Mb%&rKIi|f z$VF{lKzu6LInq*=Q%QKzaz0hj}lRa=oXA8j7v$fUbLz!+85|RJ0QAr6Kn@s)A+l+?F%>k#BiW(Xel2cQ=VpjaL zv17vjn0Ung&dSRA15B@U`~BzMyez&br9(|FH>W;E zq2HM+_~k*H;@`f66HH9gePd%g&!0bElkM%jBVaJxVjmI!vXy6}BeKUpZIqXO$Wa<= zvaNwbC5N$!nkiEOA?Kad(^PY9CLO3uN4mSU>Q81)nTf2Sv@x@=TuQ5~s^Yv=w)o@3 z`@4U?zX-Q9>C7zzrb)8Cz4Xw`tSIv8EBQu^e213U^N;! 
z#gBv)e`ep;CJkpFApSu$)Ym#fb!xIt+zS00NAol)Dyn?E8->xQ^#U`embSJ6or(8h z!9CwR_f^Q*?%%&((jc3gDZI!VtUH`Z3or~2V0Yp>Axnc$&V-Kkb}q{y`I_wddikBZ zZ^&;!MV0Jbr!GUMg;MPGdUmizyfv`^x9=#dLNJ}kH#ZH7F)u?!fZL@XxZ<4dFtci zqpzkp+>)5|_zH5dOuB)Bjh&rmBW+Y?2XJ*6Dyif!!pSY)ak($+XLA11HQl`~c;|0~ zzTKOK0%zx%nXBNpuEsN8^{PVSex{F(jy?!)-L?CL=(hO)=H0so0dp?tihgvXIyA(q z@mdJHqOjoK+Nq!oE5N-Zyl2MPL*;-upFDX&YA|8Ug#UqWgNIHjBng3lNnE+I1>p zfq@$Q*#GqHJtS_BM|NZ<8;y4rl%ZwF^+A zNEC?41vxn!^p#S;^NF6Wt}Bp;`x+lJGxM~(qM6wla_QaQwv$Uoh=97vCr$5Lf5E{) z7=Iy!FEdj>`OcjPe0;3(C#IqLxDD5@R|q$p>`$?mRgf2LL%alJ4Y6URHH+O28ZqqO zzm(B+^V+o{a$iS|_+*%LZ7UEVL>{zzy4{|cpRiIa!tQzd!eh!2DkY zE2GkqlHVY|(yV(~Wx_Y22tGpsJaCM!7{m;9h@c)UHvXLQ`Lm0c*Y@()>|P%g;yvBy zcxlHw^TaP)peQOTVti8}lWSw@e^QRp=MX=~OXjB~_}t2nb2f`p$7SW@{`=!8rJX)> zyl3=%8=tuU2>;&EaRiGVE*J}NJT2cCCjLyKqUGw?P(yqKHmc3U@5d9Zg)W-`uAT~y zUJe9}Bt#6%{g!fuK?55&J>816Y?e&0;L5KdbCG=QvYj$6n<(CjI@;y$SyEI~To{T^ z<^l-SRJ(ME(rTpnG14+iRc~J@64<^|iX{N0u9hZU`Mj-bQ(|fxc5`xaQrvbvKR@0@ z`{0~$d#143t^IuE#U^)@mH(dG501+PcDk}MT~(9~7)x{}0D*@1(*X7)UrhH`(Ke2@ zXVJK9rsi8P|Bx#?f9^qfSHy^6BOoz($X9OMkN~#$Gc@FhhaAfz@jV&QyK&Q|Kd27w zUS8q`25g59AN~#L08fF|v;PG62En!J`3@e<&B~EcQH9^WZAwZ?nwXk8(>fpUzMn-x zp6fqhXZ>?}k&kB6?a3PFUHm&TOf+h!9B|v@x3i_usQKZ&Bm2itdzEvn*moWlcoZ3V zEMWe&vhoL+h)d8{o;*AoL<=I-obPxJDbx*@tlysGusAhP7eh^IVqLq!g~kn$*jn2w zMC@k1LZR3~k?H(Tv@uDI5XX221_Z0ZyAda7X-MAZRdcMUsOg2JZ8fBjN{cr$I6Ok6 zhne=2W;M=2Ymv7Vc>Q2*q$RvZ`t@|CS?x@RLG5R~Te)_#*OWyc{-z+Y2t^CEar2!o zMuqr_hWreuID`ZPn!JkHd=ZhUoMC)qC%v#cwp)4|GMU!QKuuBboMB_a?wgyr>}K@6 zkDOB+cck6ed!MrYNDm&gq{t;Ye&f^Lm1O^}fOQc%gBVrlj31321-&GXV5~DQzIICF z-RI9|RWgiyY9cNZ=}pB~@=n6~>gM6&{gXO}&ffDL;5YnFNsM2y4z5E4r!__ywWMAe zT_^kYd|Phj!#_{TC!RVk&HOM=6)hk{9=ikNN2ABVWo~Y6Vsf$&LIM_mDx{)uZrOCS zm6y=}ix)2%blnLP+jJv{=jwx77hJ_2uP)DP1Jcmhk2EA}Fc9i7)R-7Arpnt(n9gyUh2!h8B}*$qxAXJ zlhRUFCypH(P%9;LDlb&`OknI~Yt$pjAI9P*#!8W*2r8BQPXlHm$mGvaG(8%Tc(*b*OkB*L?*fMec)eZ!ZW-iYQ7W>(w6O4ETJW1@ zK9*j?u3&Lf=Y>Ddh-w2uy-tT$I@7er?P5N)oZWPv&oz@dfVnWU-tt9`9PmWtY>PO3 
zp-61yp^y>EN1b{0+JG!Y>FxmCsLX&0;tWG{Qxcai_uEzJ>Z53YVKYO6O#T$+e}hFF zDjb(lbBe<%OH53R_P@1N0Yvl@7ngMP_c>Wvg-uPgd3Lj6_)jAF;Ihg9){g8;4%N&L zll)~MwN!LGU;U1XS4ZDGl-S#=lR!3l%Flupy4}0~1Jt^ZWz!6sDj9V%0KcMi|Kby7 zIr@<=4{+V3K(Y(ttD&L@3=GV)9NvG)ht4(X`kTJE&E4RqHStQEBkI%0y zYydIYCvvRZ%W}A}u&RohmP57L&fKqVs<(n$znU>YqktaN;@7O>{rmSbZ6=urcL3m) z(QURf-%U&V40~F$@X02+S5y}oZ}vXfw4KXw$s!=^jk!4jVaEY33T)t-w*!=^bgVZ* zg!WDM{i<8EKXuywpXkh>=4a=BPCMv$wt$7p*%wD&m6zY^M$PaX^pX@1x>uhrDE7R@ zLWCn#W4DJ?F8h59Uut1{|Nb;PJ3`cVeez{7vGvhXA)cTFzp*-WH2fD7WhX#e4hY+D z?LV!|D7@p>cI+Y~D9!E?4?3C-q_RXpV4>Ht&4vn?GWGoojIh*ig+SdY-GGnEP?QVb&Jl38H&Vmx87jvIq z2+T0&e{0GGPI;BfB!AYc&Ar3oBLb;ZN1MXZ3j)0gVUzx=Jwx| zYlyl|D&JO6L@`%Q;hLcVfV;Lf|aw3jLZS#_VN|Q>?LraPj<5g zGG^CUUWM6AcB|NPvYns#{sSG0%F0T*JAWAfuaTJRf_9zq-a|t}q#&89JB;x$AIzv9 z8v(8taTctEPm7|Ucp+Odv9Vq2+o1?Iu8##z52;PKF?K!14JU-5;lBAN(b`4#c z50>s89))NS$?q9&85tdI2tA9RPlioN4#!0xe_EqBNv2goT0wiuAfpS-3P|TvjHlv+ z0c`@zU%9xrv_31x(_R?M8J__46>gYCMI=`w>5({M9+*x41VtBll!2(tA?5tN|F;Pa z9M}Tpm*~7aH1y`hi^q_3Gyz7l#(L1BAn>9z@`a1b231wnyAK{DYV>Y@4D1xg^E4$T zBlFRy~`N48$uY&BKY-^0VFRP+W)I?(8oXgZ3=Tep~@p;Ruiv97juVs*aD zP{evKWwxsCsrUAe66iDB!jn$-4s(h_L6^DzNU1tcJzeJcKjh1}`df?R1JFiXldQAz z>%RpawU6h2&?+C}I24W+RQO^aK5*O3p6u3i!+2Hk1ht&~>(;Hi)MX9XhNwm$OtFrg z_i(S1!Q&9ikRak6DZ_t&W^(J6@AawU)Nf;u=Bm?8%Uj*7O&BR^v^&+aM1%u^K{qEU5shc;y0eV`}cg{Xz zaQd2KJx-z1A&i|=SXOp9la&hc0y;LIpzPF$he4hCpqQvK(3WA6wA_s4=QC<%R8fkP zJx;PTve#wo2iMngsAmH9bEm%;wJ-@&+>nwg#gmu>UmfQC+k>6^@bCBC6t`(x!Ay$r zrS5nLM2SV*KwOqPD0?~vg)~gkYrlSL+DU`@*vBX&UdF7rAJgf+n}Lj!3JVB1ETjw+>JnxOcI#t#rjhGC$_33HZd80CYltrV-tece8n^kd z@8`h;mGo`g+}uUWz`35GBKCbtcUg}fUDxF>?NeUtiN~n@_0c;0_Dt@C#KbUZ)?IXT zo1fmqPO#=+~rFV8zgyMo+rV14m=*7o_qSPd`3{>3Tl8xHJ@uC8GZkLC7?5i z2>POq+2y~?QMb7)2Bb-k4T%745&)+KXc8&N*frH5XiQyj-L{WQt7J=nU9YzS1rTM9 zzK$EJbNmtdbSMN_^q+;mw9C+^pwQv%6eM@e+q?O$;Qj2t%J}fPmcAJ@z5>)E-9*p# zvY^>;_TJB3zfNt{SuLev5Iqc0R1CXj9YwN6K`HuApEQdpWz2xFt}Ex-aOW?Noy(hZ zT%6Kv&#|Vy&{*B<dyafMzIZ{`v$9 zOLwWyA@ZaK3a9YRq~Dq-*K}GX?MfQ8p`qc-Xd5k16%m9b>N1a=IMIWmNs23nTDE`x 
zrAM3okjA8G_r|^3HW3hvD|0}eidBf;fBN+44_N%ehrJ)PeerOkmb!ku7`11IC|l{% ztu#-oY#Tl*guA^#WHhxh-fWvy{(>ai4SAR1QTI2mp1ND-CD9C#iMqBAy(Asn7iu9X z?gK$*MMdd>7We&1|9JJ{w@CTF!Tmz-^6PlO;iWR4txwJO-FZYP)~G4Z-Z~g6mvW_4 z`+WoDsAS>@xb~AVe zr~Y1@!3{BfyuY`+D+`)FGVu<@{A|O8XVeS^c_y_b=%WF*?4we1!sd3EYtg%8zcVa; zWYF7;?u&x3KCF?#ot4h3T>$y%n%KuR|D392qwaX_)Izzl!`0=U#28pobNy545O40 zK`vwBYPQH`LS2!1=Yy9~QYL4cG~$K#5_iC*=Z8=JTl%?`L4}%`)%*m^aY$7lfE1b&oje zk<#J~AW{-0>HH4>;4dI(YI1GO6PIK0Wo+`Xk9m6O!wbi!PLLj&B># zInhk&Mo#m5*>N+7hx2>HJUT#WKBcP^c87QUB1YFUt62`niZgG)&+)I+Vm>E6PCrt4=t_uzayn^ri2hc zr-eV812)rxij;nRyn%GOK<6bF^432>O$i5N(C^4juUxK_lIKT5z-vFd>|%%&EDgYj zH7T0nBq)oD!-x(YDsso>JO24zvo`W7n|MTaPL4K!eXL?4-P(M?zpcaL=mh%t?Xt14 z8BlE+$r5EheR=|Fe(Vlj^l?0hv}fFr4Z%<(e`V_WpF$W$=-|olLBHa@WKr%(Wv%C~f)6O2qE3F&F;B`901Q3K zr@as)HA#o3`orT^yOx%gF!emUSfylo%AJSZ-@bk8aBEuYbPLr{HpK*Y(2_Ww)*qo| zW`O_T-r=!j%Fxw_p9r3yg9)P8Ci~V3ND1H2nWUD}m(!#@#~U0Ia{}KRe&$vR*f@+U zPtiQ@LCP&copEhKr_AGiUG)7U3K1W)WZBRXCAqvt2vTT3*eml_)Ak!vU+#hm!Pb@z z$Hjn04z_DI)}`pm2~y5b6S(2Qaq-v4wzxkt+(Zrhn4!Ol^q->jZ5s;6dT@-3KmanR z8xUT?!JiGrXSaDKHT9Iw595#A1Nw9@6UCwx8msx9Lr(GUH-xzSSWaR>g1lQM-#$w! 
ziTYoHk?-dtnLlq_=Zae`X_K|D!kQ!j71 zopSeL{&0{6*q`FT{h}z4m!BUd`R4b?h&R|)1RnOp%*@l67`^;M`w8l;FDwDHsR+xy=Xc2 z^H$nH5+g0EIJuai-2Ve~-CTVV68AXts3VQs=$RBdT$hmI>-^LMopd@8VS9 zcUYKEy-Z02ASdvJSE8ru%5YtzGa4|9LRFXLEok015d7Qw`E$iZNPMH7unvhCYE?ka zg$lKI_G#t!?_UX;H5c*94qt2ecbd1p?6#`*?jwR=*vxl4UUeE zlW3xPHj3CxT$p(zbwuv{w72Eh4=>~_FT1-6$X>V{`FHa2d=3i_cN`XwR+fTI256_= z_|Tp`OmteQ*i}#hhBC9WN$wFc(Wu74rQlA}Bjo<_*v;q%JX>1~>FV6u-ri1r9Ux&X zbtZ5J?k!#aU0NCsOj}ZhEt~eel6|JU6rU$+GQV)QKrclC#;Vh58I}5d<JhA1oxy>B!g z&W+s8i(c7~O80`bOK)YWg42MeqntD|0qH&L%RUT3OojZ2ywY#_Bc5#Fh^v(Wkw6nGG zq{1!HC~H_;rKpDPoo83w+mxbNEKva?(CHM%{x{HYMgOgshkSX^loT>R$%bs|rb*p%d_1y&e z4^Fp7aKge=yQSxyNq$ZI5Aj=9SL*hE1tvAoVvTr@98ef6oA*;WuoBrqn<~ z{2TQ8Y~Cg%11cLHSpC#Yz# z!Co*N##vJjknJQ|d`7oi|A?+wR@h7KJ=UOXG>%oET?>dCfXMW8UL?1$=>a+Tcm6XJ zq9XugqltEXrp4gP!l4L{d=!NyakqRWC8cQ5`5};!ze8H~!;OhVj}=`WIlJTFnT@wX zg?HyA+fMgEveC*PHrWC>O5gA+;EzZ^P1BB!E0G^qPh_v_DG4+yUf27AYNofR#|314 zYXzVvcKc7IK({k6i~M}Yf2+TW8-g|B;u}#FP&B(?Gpx_zu15SxI)4U0zl8E_nPf;b z*!lG!Qv0cjL+b#4cLuR6pT`oN=m*88@e4 zIwP`ETS`KwuRgsw4PiDQS|1RT7!{k^_v_to{n=x8AdeA*ur*Pjr>6&=n@7~53m0_n z^a`4xgTXv>Fv$y0UmI1wXhT%6IrR|fTsImnuY6QV|ERx(+x(#2AqwoRXmf{q)bHL+ z-x~qQr|WM}%s&XjSC0Cp>X=&$(DD`fN2x~fod$<2;ddrIJNUn3mmlWC3$U;>oNU<% z)b2R*@y(QYF$l%LEJZFJs&bZD)CAWtYzO>kH067zrMS*zA=n%Bv!F1jTy~5D>|#?&I=#e!epGC&`I+;?f&rQJE&y6lHYjZb zQX1v;1d_i1b0Lp#!r7I4#y^D*-M}(h4Tr}_%7o6=Hb*EO(uRJdL49yZcx$Y>#-Mrf zPMz6}4bQL*L|13zPzM_CPJiitW47T12zkE{)m75hC{AVgKXdAW!_U&_q!XXGld=LT zS_hr^j`4NN_+KyhNoW8+idPn= z&GW3KouZ?JJ>58jtjA6ZnY8CS9zvf{sbvg$O}LCR+SuAJ2S?3_*+xbLo;=H;!O%qL z4+G7gA_mT8Sq$!h)ShI1aOppd>C|DRV-gbndP8pNl_uNosFm00eecJO52P0$0T4U! 
z%!PBTtXCeS-Vx*;9UhKv%72uwSq44%Juf$4^InvdeJPbU85|NaOP{a$4ozxe_Jy2E z8lPrnsjZuZ1sT{#c!D6_GnGpku2ydGw>LUDFIy2U%_>jdJ$z_8oGF(82BvuDm7NZY zIw{CENMK10Sq>7Oz(p85JxOT8CmPXKq53cLC}$aAk~ z+Y^EF_aoZ3-|4DS0yRQ^Bm5HA(#*)*)YKlTlWz|J%E_dj)4N5h*7@#Lbl0^9Ga^dZ zGj!PUV#R4QGZq^+Zyug~nEIj8wp3{T?>BfQB`R#QjMt^QCH|B;z5aI7)pCz-*qR_- zTdB7vtL3;6$+JFI=H6M$XDkr=o8nFV%#y9donkhj7k@qZ=}XbFJAT|Jk0ad5;ANWT zaV$ggOM#C4K3$P;4Csi*o~hC+?Ns!_2mY;0`})mz`1+Q?Tw@l(BBp^rEq;0Y`b6Ka zLvZ?1qO?6NC@8pQVqgg^;u(Oi7WSasi9bH3BI>}?GP2cv;sttg`T1x1OdRG?ev~)S zM@P3`hyqSCt6sfeqx! zl#zjf*e3t8E&f#*L1VZ5s}_O0*?6cF&193D!SQCEXFwxyccLYBPoSz0Cx%%^V=)O= z@juJF%uGyqv4)@C`T5y5<2FS)`4A7M~tZAmoVQbPhEtR8I$A7bsEe>1&K z0Xw^Q`$Q@N z{n!U=_iblB>F{O9Nu0{jsrE1SARBdwh=|B6$K}OyHF^Suj?Ux$RsBoC5{gdA`NeNq z+uHht#y@2{0z%#P_p^m7H0g)2T=0Joc6)E=AgZe^-Fo#!6E#~YWky4b$FO>MI58tD z`4X;d-d{<^D&Urx$&NmCdRv{f$}XrPF->#jA}Z-eOiOOQqK}hf8NO(VQ~-^aW0#X)ImEDV?DhS*-bp9>3(K`r7LRGurtDJ zm7??GrPx`ejBHA#RyVFHc*LHw+zcXpx}t0}0Rq)hb3PyyRvmX65)lPdMNmmDjAmmn zAxS;Y6IJa{q=Pu7X^3a6CRO)3#qFwaLXF`OB>9fa`Vd;#`6ejHlX%^sZ7bP7aGoDi zlcBzM`Er?Xh)aO;W?tSMvh;yWoF21JJ~-UebYISd*B+j;_1LOavw0)|h8I01gH>l^ zj4Oj+CvX{K*1PrD2x>*+;_8y9pZq+%rKP)%&%TZW6kj3B%yzyJf}cX@*M_)?c8^x# z2Z75er?scz>q%`xqC8gjA{i>0-louaq+yP~cI)GmvZ-ur)ZHK1=Fh=oU^ zKor)Nw=tVNmrCQO`c-gQ!W}aw&qr37{RXrop%V0ql1aXC4Y>ESzl+tWxd|%_Y~({tw$A zyfei%YS3hOIJuw{?Zk+0G~>W>NZ-6k zN&VEJ;@0OGAGB+Oj|e=x(5uSyrh@p=vGE8v*0p;ET)Uo>Vq1d{+@zsLlgWPo|`d2B2z#qF&ra71XKPEmIF3h9>nUxcl)I@{9F`w;)mnwlCi zezCT;Y!X0UhxVeFwDc~R+~1F{_VwGf!@g+Id3GjG=!4x_L%;eF3dLk~A4HaWBOHCi zQA5N;;&cHA-`c zIFr%7tPP+UxKtX*0zpUdesBhWkXXQo;yoD3;1wbuyp%Sd&9VT{Fy?mLqob>5TT2cnxq4ht z_`@*n@9v%^Yi+uOVXFV+%kr%h52ns&1Wo;l%J!1g7L};8w8>gY3xZ&O#A$6&&*fOm z&)%>H>Mu`bOB-i}zIj_wwY)N*@>YHq7lFrIC$;SK+PW@;+Mf+yjJ_1yCNoEE2|%6U_ccAsv;!;_q} z{I}*PGnJD9iRS>o7U?8}0bQ~sGXSIbTl~70V9#&af7i0#-tkb~zP-Kng5n+V19rc( zy#>cTzArR?CZaST0u@>%S8m;+Z9GYR(_f>!7lPsr{F-+_bgIaYdaM7*`2+lENMYKj z_~Dk2&-B~Auw%&a2{dCetw9hoJe=ji*U&rJJ6rowsCV(*6x|`gf6v`nozXLvhJQQP zVNq6j{&(R9nnw2*_$ zA>AM0D_Vqn?K;&Kqb4a$_D( 
z*ly+u&t7XdQ876ZVMfo@cb1)rDUOWJe*Z~U28%#L6-W!>hH4S=tfG$P_;E>jK8pHe z4UOK5X?*B~v;Fwq&N_cVnqM_8+Y<5hG%+zzeff+l7S-dmVZ%pPSP+JV|K{XRf7)+Z z&i-<`bvpG!9}ORExwE$0lIkzb?~dB9%JteXvnP|f417W9I1%kJWg-R~18dgI*uD=d zi_oWoEH-Y@=C)dy44!xPec^fX;#7_;eG8?cc#VE9Joo;Y;cHX#gBO{!`)C{8X zS0Mu1?)miTlXAL2KsncMd5gDaSiC--l#Biiv;gPvuEtf+!v$Ea)7RMZ0L+05&)PU5 zOMgyH{jcRy@NX4|Aka@O3ADd{UHp^Qw;Qr27>8A^v>;RN zcs-&>dv<8<{uWgCZ;(fJ@7;SW;m#+mP2gmAQ{7~cdUqU!(-w2EMC>lC;Cqdf`O#rN zzMQm^_5G&Icg}_;0!mVzJl6*C(sZPmJ8az&lowOD4)W}VXeqIc4hTGCbXx4CtyxM* z;R?{nm`XPX2qvBGi2Cat)$Qd}lH0b!XHk>e+0$P{S{(Twas*x+_y8r%_Klbd_b5GB5InEnul?}rE9w*D9x%=wtUE||V5%LG;^)_(a zVhD;8(BwWkZ~|#725t}Vd|kBX72Ft!?BI>8DsMv4YWm>RK>wA?#xa$~+HDSPI?p~y zsav%?VIb5K3p+|&UB2q{LR%Y(a zA#IE<$@j109KoZCtKHBlZTkA<%kVSu4%krBJie)X*p`6)l1u>0VCdyK2^`>z}z4kgW% z@z~lJ6Kw+e1wW^yNq~sWuwlYedDP`ud*>@Tw2&3Pm|9u6y?b{kJUslVB|4r4{f4I? zpV~4lC6T!|C2DuHdm@CloYjNT2V>NG#Kgs2E#aiJ`^AivyEm~o_Dc=du6h6d$&z<4 zfTLH9v>L7?Nc_I+O|de?cP)98WEYKK@4(2nUX9B;lJwp?OnJ!IduC^5e#Jlr@fu`9 zofuwSWTkG29aELxOB#~c;<2h(sg>hbrKA)OZoAX*@HH0H*2adU)gm)^&9@p(JEK46 z3Rx@+ISkU_VF*zta;*^0pAS_ASSTrL*S4d-u@!)JuiBlYI~hCSo39ZNdu^r#x?@Ee zfKzGzuVo(wpHt8AgjrA-z5}aSz&Kmdp-;cJ;|9fnjnnQq4Km z$thz{jxwEAg-}LbC#PbDBCb|B14Z}J=nr-1YZ$tvgA7gE_@FEQ2=4jnh=%H?iDxM{{nT#5s_!vJTMMsC$smX?wj^m+&)sM&#yPyDpU3yl`&j*(7L^_PFAX|ft9e5D=6d~_!H`tYBenLY| z_rP@+#U=Vva&NdstvV%BgA-H%ze6a9yEv)$36xcZx<>d`Od)Q>P;8rzhA<0v1WVp@WUnP~rpt7<1t1bE7J zGv0Bbq>`vW6>1t@#Z#!7o=}zyfLwCTY#c(0T>u77PumTwk9AUt*7;aK1*UhQG1qbf z<&Is6_LJS4(0(>bar}EDvM}Dj5-p?(NuMCNy%eI8Sg!~ky13zX)6r-;^>Z<| zL>DEF-_HhLbeL7XiKKvGS~e9uP0hy$#@?(OYCgot7wqTv2=t|GU|E6a1PB}3wReF$ z_g@aopF49VRr=>*k}qP;>oqZy-7S3L5U>b`3fh2TX_bf#eV^Z|YHA8&M_?%WP%EC1 z;)REQTL~#Lz;Yj({4U;V;KZ-sQr?Q3yK zB{2;D`@5L^OXXTxTErNWo_x|dp9pyvwyMt?^eY;-ueI)9YWD+#i7E=C`UkTyRPu^7%iw28h?(ysnuf7FsvKJg$q-kux3Sr4BT^5P6;wW;5B2vO z;Lo&bADmi~e}B-_WFz}TAR%EaNC{a-Ca~2A{@YDU_A!_Xa=P@z9DSR?`x?9C#t*zq zPENjdW*YBYT)O!Wof%2<6B~#**;~l6R`0zdC3II!K_MYJGy~7Hu6bWn)4qAL;4%sU 
zq(lFhL05|!G(&6rZ-CETI+%LCph*l?i(L)y&Zs9>?m$Wvu$@AT%jEkqho2?~ne5w* z?s1{}Zf1#LC^`SR54e)D$tf2sH{O0QlRK+b;x0_}H~>CF^k;`G3hCkNfID{hPhzm6`?$Sj8>D;fGib!@wt)pl4%LbhOsW!sM1MTfUoTA=`h0aQt!7WFFjTuZVjEHUiv77DKoF zu;)-Zyc%uo>8}*2stn36_Egp?n-IE&2Q2lx6_>5Rp z^b$(cpecipv#VslHkMZocm5_K0Mysl3pZ(_q0biKkp^|Js`p?k0#n}33bQaWy`4(5 z7$5iw*+z1hR5Fl#3|ZGdGaDa+=&wnl=ZkaiMzzJMUKtE%I|Q_3nd{X|;el*(bUCg| z>_4D&62s&`gsDRjUgW3+GVNxGqI`BqeiH_i7G~;hNe=TFw<|L;Kisr&<6TULuu-Yc zPJ-aA0HgjlMq1mvOBZGwj3d#!2KRD@eVw^#xhWdg1gey%-vPEqwC~C zD>O9e7@ApFShUtw7R8c{dpB?JpG}zox!RL>4y^#l9Er1&f8-~W3Y{c~+2nCHn8y$d-qi+O;Px)P63^SHV!|^Suua4rQ3n5-h4hstl^US^G z`X-p1GF%tq8=DaW1`RjeO#Ye;4tgQxfuBLV*GGpzn7?8dK7&R#qyMX0l$=$Hy;q_M{Q`(Ad#bLZoy zacBT+fm<+FQ%H;kXAWoaA?uMzAmrjxD&qqK%CRpGA)wx#vra0X$Op{%2FrA$+=h<- z8)A*R7z)n}Mnkv5>n^T%J05ozcf(}@0vYZ1H0e@(#`QY9htt1&+5fuNEbH2Mbx{)H zYXM_TlEY+t8@>f?1eFlgW_mEx|AXo@iI~Fv3dH9lbi&~azEHkipO=Fp>B7qnT%G_- zZ%cEdUhrJ*M(SydU)KaM$7S3u5Kv`{fTrt`Bh_*_5^fS=owmDq3iR&@kj@{Jnuegg z^k_E^WL<{A_>wPuVy&M7O_nM^fmJLINBNIkO=2MHyL_al&>^@HEFfbj-i#sK)9Seg zAe`+EFaN>#!ss&e?43HkK%?&DC zeEVTk`-AA0t5lf4iA&}*U?bQ{&|3|1oW*GCJ{U{HNgtN~I${~VkWKU=c29xB|3Eq` z5)VYJ>K_!a1gUU^uSXNzucB;n?g*L}#G!_nM%U00)Dg5O_W+A|;}9l_+lfzTXlQ2b z-0R^S!(k*_scXA?dbrUYPJ7U{pNvNwy!R1ndT;Sz8>VqcEa3)REip)03ikk93>$b1 z>bF3!6~hw`6?ODP6m6wmz4nH~hx9A#j{Q9>biJJ14TMd?0E(b&HU5FY<$#a{YY@K) zM~M-h0-1y}BKh<25Tx_^%0a_xI8zAcj|8$7iePXUnU(-UH`M%rj4Bw*fc1Hs*Sx0m z;*E-)ZoLBMB|3i03D;4~kX1fmN`f2@k)NNB7Q{}N{*MapfCXGqU!N5Z)O2a)etWj% z_5Np4gnZ+wBt^cX>FEy6QVb6z@r1s5Me``3s||SJE=l&x;h3t3k)GXoHVs^i49H!% za)nIyxkDe)BKGO(59<;V68r)KJs`P{bmY(y_mkrq7&uN-cwJt+9Gpx$gFHZ_F>)#p zF%d%;r%1Icahz>XBJaoca>bL0z&ve{?UgWT5>a3WMUZq%aTbHH%>>0ZS21mI7j)hZ zpLaS8D1I6Ht0v#k9x`iTaq$MYG>S~_z=w=!(}$DYl16P`Zq^^Dgkp`);Ga4nn+6J_ z!oyn*!H;-<(~Mg7z*o2Lxa^~Z(v6$AM^gBRQ%47|cQ^ATA_DGe6hU_q2dV6e{w-sy zLpmcaB@(D(lhDSuZ4DnO3E|%JmH0RzIa(UuO(c^^#Ki?=rw~UUz!dNpN;$E&WBIoi z+<_8+hSy0l=0_aR{ocVWw?Y%Fo~z0k;JgK7TNkt^)crF_5{Jq45wZ3HXT!7@#MNX& zqD;iz4Xw0pzk+&=mPu$$YAv}eg3H`UWGHDK%>7}$a)I&mQ> 
zTVpCntU+IlmME-^^+>bda7MY{54+QchgTrAGhttl(JhioVKj){>MXGVRL zlB<%3dyX`#kn;bV$^Aws4(1q%Jr5m%OK`ipHevXB5~8MR=EHTUU&KsES_MAKTqWs# zSP4?SWdxxO5brb@A;ax$tJ?PJ<;#b?<@<2h#8q87@JfT&8yOcRBx%S2eUjcdV}-0K zWUBZyv3yJfaw)yT$sdQ09C_jA_aWVyxZH_s?o;eO+)d1tBwtyhzULo;Do@Jjk>c z@Zo>C@Xz=-HOM?t72X_QUx%f@SO*z-nr(d#v4?mIYI4cER58djTAmor{CvY|!&i77 za46O>I;Jx?ED1v>bkA=C$ry)g&uVb=;diwXMMg)!$U z2FID*cy6E9$0g^JM~3>pwcrRTSLS`*t@IDDg?RN-bKnaR#Gy_O*v`ZnNQ{U<#!P&C z`-y3@ujkbd9UN{afXav~iJTRVdLRmq{dICI2dUb>3>U#UR>qzGZSb4T2?`BOn#{$d zPW-${Qr1ay88H~@3!I3<7Jma;k-&tn8B(_2+qbb__yu3g9#N*;HK(`zo1jfx&t`fEnU_vT)pogUE2nTjZ$zS{>Oz zf#iwFg5BVY+B0_j^+=CKUHKh}uJ2(a%VM;Z4BcDzf`0{Te3GrlT*n5D%67MtGz(fq2R3e$ucU zr?=_cyVng8PDir^6YmR?Weor64#%?}C;Wf3eFt35Yxw@lF++rmsAvx&w2M%PjI^|* zQyHbCsX?KXNGS~|QluU2DYH^(&`@a@?M1Z5`@f#oq0DoBzt8Xge$MAu_4a+g-{-lX z`@XL0zV7{MaC>pi%a9^2XI>%@WdgFAPV0NIDIXjd(80T7nPgN|C@LzVLG@10mKYc@ z@O`?#L_7@8d*F&s8Ij?GGYvu2+a?Wk7Mj$p?_XvW{M(Vk<`A4AXuF+O07XTZ*w)_Q3gmr~#Q7v7mD4__V2YtJOZ3X%CJ4GT&mu(=L?D`M_v;q5L!_J7G#P>5b5|WJeDbb6 z`#WTrMf)p;4Z|PKhTU<6j~U`#>H7M(`1r?&mZSkzGhg_rSkcnb7_B8pnv}0f2x{~u zs&Cqarn^hvLJ#*J(c-1a9+six?s&WxfD>ZZTdFY0 zvXCGIdzzykQqOg@U0mGUhVtfUDIkf9gjMG{wP1;B-G`XbIE>B9WMmIufDfF2prdsX zP_anOKHj3)#sWkINmw?N=1j>Y=FBtoMl5?;UQR_7%JV@+#G~whJ^WxMr|lLI5kZpB zU~|kH@!})X;@rXZbM(xvXdY=EmVbyYBN9i!Ny;ix8m=Jveym!l0nL*6_D*_;4w2=Q z_ZO_@3{K&vYubKy z={jL)UU`z`!LzC!Ff?g9C#O7g6uAxE6R)zy@YpA{LXJ2YAfv>lO`A+U)Nh0(fl8^@ z9xv4C!8mAe^;Fz=<5PhXwmPxU0xM zZxF7?eT7eXdxs@x5;66J!>6$p-Xc+gJF8qw)*&KCK(oz91SA8yez{bBwKxu-!8&AX z8$)Kbh@bab^Z);q-4kI*L&(00-R=LpgPqLD;ZcmNC^ zvDL_)l#Bu%%#S-(@!_K>TV-T@|CPT z+9jo979u%pQK<9WA!4NwgIV#JWPK?dN)MTwO5MvjVP$jMMM#7AFl@ z0}+2hNcWZr2+-p;ZYHj>IK9z4v&Q(-M%NogC@_es3hkyDFc?G@2CGd6)N7>Z1VOSW zYQlD8&k*=Cd@%@mgz|@T`xR+4+?SwWRp@FAZpIQL!t}dHbaY9q2{yt_n6qe+3l3t4 zgK-}oLl%J(s<1C{Vv)5!P6py20^!6~fHUOdAafHH6(w{R>F^=Zu5|S(Uf$_A?&mSt zb8>UfcziV-O%vFM!D3g>Q{H8BTo%TuhA>WQca-Q(fLS;=IUmAkhxj@nHH=V+b|r$c ztNjFL18;W*@*hxV6LXmaDxW{!DXJ1YoD29B>zSZ^@YP_4X%WNllhbk>W`22((SQpklSegXDe%$Bu*EprC4qDWtlrk6M^}&qaQ!+x9_)Ms 
zD=br*gH%$TpWhq)&mQHXMhr_v$-AM)!8(ve<+N#<|4HG_e1XBx*h5ATB018!NHVDhRr2~lI= zO7yiko*y&s4&HQ;D9+gU_QIC=^Qj_4X+m|Eq#^h-MQq3S{qxeBL^JqSxNHg~FX@95 zHGMw~w<5uK304AH5eZfp+(Uo_;4gxaa$mnuMrJ@yzm<+SyDYAPa*#B^1tDEQbCeAB z*q%`N#UMo3ar%M#O|k70AZQNmK#8ppAVJU>`}W=4xeVX^<)xsL4O)hQfl&g!{*nvMO-6HN?uuHJX%)zmQEkaZxK%k9o`G0+5xD+qH zcZl}ZyhL7roccJ7y8DNQ-XSiWfQuC96_0>`i%79iA`u-bBAG0>Nn()#ZPCM0WjN4% zeM#lcr*nE#>b>miUwrxX#Z+g(abHFL^8c#``@J<5lehU3^J|#>5_2pVhzLPMIR2Ge z0C#3CvZ1l<6;;o2kkX7&-_W4{)_>08+3x~wIUL`$_kk!k?|}d>rPbwfbc>oeCSNr3 zoR}?kbuCGqzkB`49DZqbfQzdnG+2tNr*oUfzFlqt-u=wIZ|^>Tb>XvsXaC%}mXE!? zTlDleU^RZct$d}Ts`L1(b}5+o>s!fajW%AFix<~f2)SHz9lLV8w;bQ1wy#%CessZN zl;BG5;ZTcS)x6TOWo!7d#c0>`&nFH)5amU_@1IY+=FtC{%e^jbPb|I$E6eamX+85V zxAWW2ez=czxBvOsdWCubr*vAkdwf1&Ys(JQ5nv_~9G~#W&EMbqbC3OgWy9~90YuXA z+&TX=CYdcyzdXsWpNsLL1$mNRe{>@W2z=cnWG`HJsMTQ3V#a@20sm(M1IS`^ak=;$ zuTte@YsAMcimbk$p2ZsUBdxaJ+-8I3ZHC2*i8m^c$NbW=<=^eOwU2cQel^iRc0WXe z$OGlIX0e6*9Weemd(vf#f4v{%N1J7frJoPW^~!GujhT44MDW{!pxwC1x`E?&h?s8C zSbU{j$^Uvd!k0+|x{*+J{Fg<7V>&`cvMMc`Q3{a*7>@RD$YA_&Hn)H!y#Kk znD}x3{P^Jt@W6PEf<{M?KMThxWL1k`2kPSLN_Y+6`t@($PMke^HcIv&xW)5mBt?~X z16C@AaY1J59+(+urkTq%b+CewL6ut%@{jO)5O~6-kp9@QW27}4nZyf2s&skNU3B0O z6MkT3F7EEmkg|a%EQaCO=hm_rNO@nTYqBYt$hl0U$oQDAJHFH9A~(A9D7c2dt6b+S zX86ORkhkO~mb*Y;Zucf4LxGJrA%IYzk;a(SM_zEiOknbY1)(h*Sw}0^pq*h7a+Kg6 zWUY^31Af=G&lnCEOF()#goGHMCy3%~WScnzEpJ1g39&-MpLJ1S5Jnu#mhoNP?_=2u zMz0!mlFI;EkO9Q>roP?*C?9EZ0`3Y`?^Y;D3CxOczd|EvMMXDCSt8WYOopTDm0dkU zD539}x9rtH$~z0HwzD9jiCyU2xf3Y_A}0M^AYdNnZ68uS1=lf*qo6|-K%pSRB7_EE zqd@eaF9HYBX_Sbl?dG7Ef=PG!t{%?uE01l>e6DnFGWY_>u1Uul z6r>Hy1cis&lM}Y2-L~jlGWV(=@~u}@RX?8jKSf7p!6N4XGeh{_EO>RwomqlwI14wD zpqJT3`fC;s_I0^K5e{BxR0nO?d(izY9XEI zr(!u?hVV1)Ic}pP6?N3(?CdRuR|&jeb1J{V^mT^y+6o&|UyQvL)7e@!DIj;mQ(|9% zTPs!3cMCpOU~@^$fJFl($R4oWfbDLg(=3(CFJ9`@@VmgePe5bt)mUWD6bI~s^M;%BB**1*KjycB zmC?@Fv?w6oU`0b1&4|i^Lcx}P0y5n6q&=>C{lNQqiLt3EDF>iLAVVBr46x1zVs!SI zBJ|DWA+HMy>9H3dA$CyoI77X?YCv%_jK&)6`y7zxyK2>T%Xy3{m!2J)fYe@A164g` 
zzsKH^>_487>ICls4PBD96T1!Yfj!1}2xWRdj#{xp$Y=v8C=9_|(>9D}h# zQ;pkzZD7aT1bjw3N`U^%0QFBid;y0MoDqB_Wm%+i53xf;Lmh$5Zbt#DE{TKOeOJ%( z)Iih4d2KV5Lj^Fes02=&(bWVz)(-?P=McqVd-!lzRT`27N^YydjT$~)Zcr64>s(BM zC0B&7PFG_iSArhh^y#NH1yn0TK~$O?KR$s1EHy%E3Qi|%q)ixX!NDQZA#5$h)%S`T z>Gis5!tqOLJr;5?b4cM$(F;^h7AhV%#^AVOCTazJ5Y}KqB<0Y|B4iNfm@K^>Q&WHGTU=J9*iUwr>R+y8PlKDtK70iLrkD z1}=-^#+^Az^Ru{F6h|+~X$4#x5)6N=%cq&6$0KiP_@=RO3R?9j4x4UD1Ii@;D-Hql zJHR7(`d44Wd)$^dD9M$cV;U@WqQYMGa_~SPT}_ zNmF1e)COd#6wv@CDReFu@Xfc_ZrRjs9UbGF3a3Pm_VPquOJn_3q)TU>9bF@yYEk>v zD>q<~P4mLwM=pN|x?Jg>i24flU)*GF7f=Bbi<%MZuIHO!+!SC9LRnKDfZS0^;#CNC zJS+xJ^*gLLfg`-3wkEBDSNGq4$JtEFIyItxXRuL)OF`$d)COS%1s}7~9l^ol3h$pH z{tQLc2LT}4Zh{Pc)Q$N5Nh|c)wQC=!Xms$d!0-q#2`g4OJ3BY2heFeZ80XkPSo5P^ zW`!SJT&4xK`kP5u=wr&lxYyztUNr+()_xpDys-221s^?JhvTS2SZp3STc0NuV4=RD zVrdYR?S8&Ik|h(9yH$Pj7=;C0unbA@x307$J>uvM)wa=R9kFL3I(Sn@e?W$hQcJZ*Q2l(8ZWd~Dej9=i{~QU50=&7B7<>~QXC-q zA{4N0D)NJ;$Y&=|fR55Ee>y<35DUT?`9uX-Y^I@x76^zFIadq~3>S3yCamJSCqa%2T6gP3>%)+1F6oztF8Umb6zF1D zkmqj(&ASJk5u{SU;skO+@esPxk7$|bGY+andaO57-m(c6sVj#!NJ%k6SRd?oZw)|g zT>C6Ug)_Hr>&G-W1N>gGLjGMFa40gX2>pmriYK!MTlvAv6QMF`%0e20=GUM&Eqvg+ zXujL0)-Y-Tv*-l~Zch7YDAby^V~BNKY%fa)U}iUDf%N6hfY z?cl{5m))HsWQ7hhhJbI^cJ%9Ou`}P+B?Z_5f z+!6ygW84lx_1(a}Sz%%|^ z?06SI5KlD%YXA%3CYT2Dbloxd>&No_qSQAo;mZZ&{%_}j{sZr=EG(hHduh;K(Eyo$ zMQ)x≫gTEde-k|6EP{8jj}ceyw}sfmHAL@W_pNN~cesMiuZQKK#=$^?!zI{yYfv z5~2oqe*cp?36~R=yCXS4nB0Ow=vaP4`F_kYL@$alFj^zyyqmX8h=AeS1$;LP_+I+~ z1OBNVq+#Nz+0GAz))x>=JOaxKvlRXQ!}onxUjN5=vyo?BtKqD{wq-z#&)yL2 zM$mJA9I1>Bv3r9D?JV>eo<)uTyWkhEl)ZQ^lvn77s z#IG4A+SCvBa>jrFklql$06K;#g!l%h`T!Lk5@lHZ;S?f(7vW`bciw@RdNdRg9DLX0 z&88^L)Vc6Ae+a#U4v?_s&EL~a!J7($o^?W&6b6Qh4@kfW+X;bI1O)6495|^ z^j=6N3H`&0CcbZq^6P+IWLvVcX7~NF}66nQ~p~0@M+tTA!?}#m2X3 zsh92>zomvWpct6|W)1%vdLH$_vj*ntGQBP>m|Lrq{v1N2kAN8xW5M{7sfb7_)~Yj~ z4?Aose`-dKUQgCaNF$K?VZHnt2u?w zkh{&O#RDmZ0fo#PftmY!2-_W}&St5HpQkJW3Su`= zv;x!#&k%t}2l^2l@c98`&4|d8QIbjFka@(p`~~f>19E;-XOV)!IF|Is6O!x-0tX{# zV;DR-z_6fhLJJg;NFXfy5;qvZ86nj&+@hTJv4VVqn}f*}r`Xg_zo5 
zxDu)wh$Bw_QWRQ|96TCtecp7V*8*o z=v8qUdobIIhcS*cdJ}p2kNoP7b+^iaXPuC2!t)H3$@1OLIv+fczB!Ye6s*%NIlp9y zR#qba^DHsvsAmn1TMD+7ye4cFMQEw@?6o9s;zOkXQUAYT0xAa{`!%ToTc$j~#XS*P z&Ss|TeVOC{ivu+U{`H#Tn=40f?2>{Fa$=Ypi2R__aLIjg@kP8XoU)m;KlSxh4BG4h zzl37(2~boFY@3BV0^L(g5&q-i4SuT^deO5Tb8+ugr6w}cO(x>4MT`9Im;!No)70c! zRQ3I%yqvnWYIP&%ozJ^p+GtU+Z|Tu+dKz%dy@((9lY@_Vq|)!e%QdbOHlt?|N&&W# zxwVFi@exp-l8J7Fe}lD1V&>EHE~fLEnh%m2q6J9s-zpFJ(A>(7Di^qkQZTLrs3Vzt zHu>HTz9&Bt{(^>|Ms(%1CiDIMg{dB&8k*`~xn9${?pW;6rlOQdOR2_g>@&j0j z5^R-BF^1e7vQ?tX{ebC|K|nKYDh7bkD zDgOC5#KK=>Y%FA=+lLR*sMHBpN-KIzzmF<2A0m}Rd&x#FuL&feVnN;PO&s1MP*F_U z_JCf7pIiyV;EVW%EJ@g|&BYY3=`goG?c~(EtD68%XmNML>F!bvTNHMv5D|(=N3S5p zH4!i3ra=LXq?x4I9)2^2M{uN|2Xif+&k|J7*)WnrunJSWN*(EAg%SQGNEuzqk(?pz zop@*Wjcl|B5N~j$*g`Rn?=C;Ae(}1>S!nTM+a1feg=C%pMHKGaSpy9qM zE^tj}(6z>n!viOf;PH~&R}zLTU`;5F!gKaxTA7$X5{ zmdqYriV;6}hN&5yN^9ZV8eG5=%x5I*n$9)Y-|C?I7nl728O{f&L3P2GGv#ye%>*Mt!yua8ywM>c z;ZJ&uHBPA($pRd>Bd`bN;kc)fXy8N|PmeBy;}BZVz&%c&pr_d#8Ggh{Gqk=xTHRDz zTg$ikdQ&O(HXMm46e12sG>fP)Mwg49m>Cy^jHxGlCTFaf|Jp;BJWODaO$YdWI@oUS z4wxmoBO95%`3^*y%#fLv>{GS)F7)h(?elzPM_mMj%GGbNnJZoO;$^ zilXY@&>&qP$;cf!hc^fcDs%`#^*?WP0~lQ#&u$Igm{&@C417?e`5Rs(XE-DWLO#4+ z&t%TG1av)b0fGaO#Z-^7zcK*!GUZQ-yTn-yjLgY#y@Jkr5MdSlt7(@2nHBJ;}l=W3Y@)RMoIO8yhUpT@27|Mt)UeePJW!Dh+S2burfm(p_kF6+2&M`*znn zVX?l3j*eMyXo=Q`^T(d%0Wxd^4_LEp<9Ej(p|8H9gthdqq1sr6mNjXW*=<-H4eG{V zVw|V|`?hrmLw%rFh%Jrj1;ot;aJ50*?(g6Es7mN-YiL<8U5FIU*v@Uk z_pyPN+*9#K=|b=f5}dFph(Q)54465Zt8T@Opw-fm3L*2O@^aydlkb;$N+Ke{8HZt( zFS@!3)f?8O5L56y*V|Sa9&#_H>YS`!4jl3W9tU%3?z6^lnWJV40 zX`4)KpwWnHTOtBqp^vBpw$0JiWR?J#S`Cqi@_yT)-slhZ=mQGuRYUia_Zm~KbTgmu z-Kpq8VDPMgnW}nQgyvKu{ez0Cp#o_2BS>g;HChV4u;rk9 z-eta!>q|Gh0B$sDP|W^(LrweKs*_Cr-t#cj=oHCHMYN62eId>w+x5$efe zrte+y#8DSf)Z}`RnI~F6Q2^xny`7TMl{{g*C0vizP?oJ*r)2b~7LY8WB*g)R9M-6uU&n2)LL*MCLK^9D zKoxNEf8}vo)hu@Jbl7pG)yl*8h#O>S6iUwfeWF}h%9Yu3a`d>}w;BlA>ZI~4op&Ik zi+Arf=VHOQ7)i$TyZa{DPU;1}=*e@=k4jGjF3BM*ff4gm=TH-t+cm z#GSfGFsHn{TkH{$4gZk5YUc+aqnE_sdvqwkzTz?56=)nTx+>9XplWSjxT>V8O8w5s 
zgBuYP&vqT-;(n)7rQ^fV4V5lf?0I}M$k0-7Fk7?qKUt?7Td zT0zI__Xnn6l{=q3yLn@KZ4gifV)NII8Mx9ahUd_W{1C=t2{qVy#2JS|<29R$Z))c6 z+O-SWl8|Eq2u=bBJTk@cpJvr1uwsLCx6GaEuP|YDPGiL=Xd^OQ^8Ef-Ky#j6$bAs$ zWI+y?uKWbC(nEy62^7+Nh0NRZiB zk?(G2DPqGE522=ByhAajm!7Fge2Do-y2$fkwfG3hk<Ok7h!}c ze@}9hVQS^4TYt6MN16IWYC325$SLhZK^-2beJzY%#gw(n4bebUD z@w&V*55W$7?F>E$gS<9UP4_S!y17(WoQ-W$e-^~jd6LtStD0C^F7UyCXtR}y0=~pC z;E`#e&Wo}#p+n5((UECuzRUewE|T?LxmmeQTqY?s_v(pLh;+?~$iyE%Vryep0onjh zgP4ZNlP6E7#;V1KRvDRc`2akQRK5BOy7;?ryI2Dkj}%#uK1odwQOmT_x@N#7GlI7=&gkZ-s^$tX#WnnY7=# z>hf}x(#L_{Cws35@`Qmy50Ugk?DG)k>W74OF@JVM-A+E@BPV*X?A?%Qh!{}%N1zdL!I{o;nd%Ehhw@t!6nuH6h z*w+ZfCaQu76jFoWge^11bd1epXdA@?XDg;H99dxo{*qW1&{8sNXw`Hrcsj1e1T38a zWYml{#{4tTnugNHQ6H6{+YmS3OsHlq8y_(uNrg9VH5_FA3&KdjRNU1h?>zx~8RAQT z?oQ&Ffj^$ysIoZ(oM8waC>hOHE?$#mPeyD%MVjSg0g2saY)a)M3k!><81LxVKpp9_ zhc~_hZ5T^IpajjAqWn3CaYL;`1%Atisc5-lPq+jczz5JXT0(NH5kP>Gl(^=~xHRe6jVr2Bg- zBfsmsop@qvuF7|reyP`V!VJz6-hmhvQ4s6tV6`lbNK6E{zz*OE^A;cU#OQEp^GS$M zA0uiWfQ@3(YnYtbfBIEG;` zvKX>~IrHZ`*4H0+lZbqTOg&_3^XmYv+ut5vMB1VVp7{eeEo+$zxa%E&E|9@Ri7|Qq zq}+YfoZ3KVRfXCSR;qk1-Gogp9Mcx*ACWG(n{qcz>2_z(8M-sU<&24Zhi{gZ-|&^>v6o6*Q`a$f&cW15no;oQ_AWFW(MUoS~gIl ztD5QeW@T=G?qO_X^(Rfe$`xwT|kQorZs-8?+5kwed>NQZqoA?f+V zda;gMz~f2pmvSv@T0L3Kd3vsxymXmU^#WYj)J6tG>9|2ychDT~a+tvtZMXBQO;`mE~cn8VQGQk|8fx&&1*J)L;vD zLqh{EJ}~$76TyWNU6Pm^sR>8PBj%Yt`YorYvJ}HqG=_{Lfg-vXyn(WcN+|RIKxKEp z!TM!t({)=J``Kt*e}N(?7M;c0wCz4RB^lR501LoJmIM05gcJt81HoZoVeI}3O?KEl zW06~z&QQyDW6q<=ypI-(ef@}ZarE;%%={c^pZ?~G!_Pah#U>l7<`APnVq*tOqmJBR z{LD@UksC`;!TTC?HPSm`*|y>DN~HQ;&nvc`@b&Rimd#;%E%5D66p918Z2Xe3KhH5e zZ28yNA2;nPt_A#H{83iKEPc|0<52V(`%*+N{pMM0iC-f9>QnZe6s?K%DQNvp-a%Qq zl{_ZeXE+n}^$AW8+sZRTJw6>FYQoi=xD`eis=mx@>>??f6pQ_9l8o}>R#YZ2M`=xT z;NZQP%YI%WgP|7P`uzzcRU_klbM*g8-xID%gLL zX(oU>TCujT=&RWvng~J0;~e>ILs2M>XM$=gYZm$Fha-lFHwBojLzINOhAN<%muT7z zLrIXT#WHqR>qSjo3#HE{vn|P9Niu4KXte^dyhdR9xUY(PcfWF_T-In$Y#BO)y?uOI zGy7`I(wB~Xi9`OG-oj2@i|oaR4kOC@Z?L~#eVZ~(Q1*EAYk>(Snpp7FoRmxKzxk|A 
z^6_Z+RPjvE&7t?@pD@S3_3hjm@_i^!|M-1RbPG5K>dKfI7}ju(TP;iH&JBErdvFA8 zr*SKGFpHa;;A)q1g|FT!=OkwAJ3*9h?OH`p9ptyJzga_;nj5k+ZrzfB+t6K70?t;*=BTW!6S}=a$SB2(jMr3@yTP3) z@94Sn#$C6gN03lDn6nl6ahDD+sZJWn#{@8cxX__bbRv|t-(Jf~C-n$6DqpK`(^2yw z+942-&E*=$l#$#sN6h^8b(A^V&y=@c$C(+Dvt`Sc2&4)Ib@wx2qs1IWH}0!k@l~D3 zHSStYrj(s{=S92u(Axd!=1;~IQ0k{>uiNb+-M?eoHWRWhf43K=CC?c9ag_RPCGAt@ z^zNk-=DmHui|m5D+}u4|ww(LZhQPOsy@lga|C+HsIe2XTQ|zFBZant2(z)l&(i`jo0QrJmxtRhnOtl-{zeiNxh7-`O))FdsdX;fEP_%Y(W@RS41<1xH^Oi6>(EdrahR=6Vc;>>= zgMDNG8s=MTK>ZwNB;l7n?p+)i^6B?hh>3`(HU(N;w_AUM8c&7|l`8HA4jMgwab^Gg zA?%oDne)?vK@sG?`^W?Lb-0X~qz8EPmgdS_?L2v29M*G>ShCycA3C(-?V0&+ zetYUOL@`6hD(4Aj^f@>n#xC%Y5{?d_m0lc-jEs{FeaHUAVKrmI!M>*pwO3RpB?1#W zFz(Y+=7eG|vS6Mz{O4kjGd#b#mV00N)<5NA|PpM42#%$#y0hk!w4L|oh&E%OrDw70JO zrI7C>qL0T%U&tUPug{*{yr;89gT;L-FE6iT)##$%C3dn?47cL+*_C(@-j({IK8y6U={S8ojjH2TkjkOxevtf!#uBe zdcHN~bTIpxYSAltSp1WRU?)w}$2v%cBC^DtcdCQ5>#9oj^9My9Y3$#BokL(7L+u~F zYZZEx_N9+|WR9u7h*M)%`(x0VmtbXCaR_ic9}mwS1%=B9tHi$sJ0L}Cfy3%I7mOP% zr#rmE^83>fQIL4Bk670B)!ObRPBy4M4ejdC5)XJ_Hw2?EP_lMxa-uwP5?TM0L@IdG zrXjBp{Y^+wE525Ploo|^0PZ`tT%1vQIdW=i0?&*T5FFBvGRQC5-o$bRjlhxsJxgvir zwnFWxAtPWiJAL=tgu|6H!VS4zqCo2VxjO^JmC-2{gT10(odmJ-x0gdDIXt0XvLC<= zE93Old=b_GOQ2gwatpIYW9>)y>l4Q(i#xF6Q5%ME@CpejgPsrzHiC#E3L`$%l8lC$ z#(k+DKN4v0@OcP2tz~UtNwKeu(S~t)#kgZ~&Kb6|i6PjJa(m6qKPwn%r)o7w?t;-+ z323|oaD&)Qy9hvlnv;>0H5QuztTrJ@-z)*Dh613Qp?Juh~w5_(l&8CN-i2;aKfe$Wfe zRBcPwg44^yR8s>ioRCIMjM(kO31wplIaL{!r%kCz zuL{GBMQyJ_@^T2DVgO%m^91P^6&aJZo#cyq6&05xx&!eO5e-F;hMT`(_Ql0yN7`p1 zEp4sK$|BJn*bg|N?Vbq`BDyUS+(aF$$YRP^Vv8FBYe7ex$gAXBU0wG=B3%aQJbeAN z$V6?_$JlE^e?OUqvtcW)(nu-u+nS}!;Xyo{&9rVC8Purdx| zVO)Rw?GuMmG<2M1=M`Noes$dp3-Iba!H;nD};Gow9k3!RVU@;=O@Ct{3LkVK_FS5Ypm*cQIY}hh%?e4Cw?%#SlLj3b#|fT zCwZzV*oNX>(tF(oQK2V&rs8l^d|`yuutkMdllCaa8yhlvd3)bRPitw~&dp|+y|D}Z zQM1Ztjctt6b4w2etB}0q?hd0pWme{@ zN=lQAvWRJ)eJTj-S(mynxXhj;yKr+C%7%Y+Zq{qec9~Llrxx2Y&@_)X(Rf^jy34Hd zYJCqHw{N4Oz5@=V1z=AJu$=@nvUi~W29ZV)ux%O2Z;O@}Pu;qijDYTrHq6^Cc~7C5 
zO!LFZ`vS416pc`vTwJZltIMt)x~}#vu-YPgu$Bxk$b9$_*_ikn$9b4lEqQ)N-URuvT#MD<>n;*C>dgzCPGojs&n7gAkC zWGyJE5cdq3%@T61U>bnGUCHzJZX1-2ZMwYm-fbcKasVt!^L=^bw(}Jz=*b>&6T}m- zKP!4WsL_kU!JDN&)m!Ae3WAF#otXjiJ2}ZKZ(hA%nMA88nu9NpB!?J~)r?T_$MJQOHQ>sGDnFI3) zgUavqgjdza8ttk-!O+C7bAceuSG?z@V;^-3@4GX??lmqIO9EK3K`02X$nIgimwjooxN`_tWwdi z|L|~X+i>STr9crsf{y|!Hbh?Uc?S)>WCJy12f7 z?|^&ux(N)0ze~C%WX2CcE##}OHrf-hv*mG?!~)}W>1fPaXV-6HA0mMa6}^SC_l0P9 z5{pcv#d4{s&MeD`$8Y+U_;keIvhpRiTP@0tBThwrJXuzB>0sgGHiIZMkos4C3oMjN z`(%Q+6O1%0YF6wYsJSaJL8(d-AN!L42!*mC-&M9% z?L}~760#~go+vegcqWWC3=e15@V+h%7<>QvD-N}lBgLC%GnZNA#G@CZC40%S&sn8X zP~6}MEtcaBe4jgt6X~6{01r=?g*>R<1dMW&dx30*N$6NZ&0Ftk)))|LgE;1_&{no($rX`zmGQ90zG*(pYQT~DO;NZ!4ZK3A^v`?lGvJAv3a{Jaxb z{W{S*Iq$+P7h2+P3NZ}v1vcbdHYG_cO*^I_Zx`4=~HoU>5l z3Dw?YKCHatxrG#0Cfr&`RmO1yZKbmcQ<2hBO-X#rrTKhh^y`F?yNay}kSgxq=YNww{GB4CqsN>n>_9DF1p6t^KM4-CVX@v)& zj?~#c6Q^6^r6mZ3Y;5!+&7=xLTV<&PnVd9|pEdCOm;v)k9CQX&QE*p|tQCtqRflFw z)S=}PEGCN9UWXB7EYoK`P(WyOb#t>Izx@poxW>Q6IXo<^7-{KnS1e|0mclRE0=yAR znH~COj+Le$<&^sH;ID|VFc0(QntE)WjnTTa5MtAEd$b1wB=eFw)N|0*t&M(lUk;%o zFJw?AN5r4IcI;(#jQi1|~Mx-LEP zu8{`TOBq@~xoCMH>_VbYH#7GHAV0W75Vtmcb3S=n^KSk$AH8 z`^#JBGct0@si&ETpXy7m3x^c7$`91_5w!1*_CRD;K6L-%l4BVP+&ApvIF7*Kh6z)m zuwYe;3h;l-E#N_7C+FNb4Fgh61e#6EIeY8TQsl!a_zYnv9P|$!qX=j`~;$6+FyFIS34hM4}Z=w>bV$?z1C@ z$D-z!kVvl*hQ_0Or$~)b7m0FFxaPfBJ6;?k?^vnD1RjK}H5}M-6=>l)ieXZVA(@^HKI;-aK?sh!nVt4gzoWH*DhAic^)Ma3sBE{-Ylq|x)v2uT)qNtWG# zb?BUEcDEl6*>Z7FN!SRX%s^`t*^kDfaBS$Y=|N1aSR?|89;qK)-QACw6u#Ugv6I#A z!)erz>wyX=W6KQU_XZbtpq%87%?L)65{^(AyQ*oo9O^tZD$+xCCZco$&$$VZW-;=Z zD3ITVc$igL_E|{uLJ9eWW1<+uwGgOy^n@^%48z{H*t#k3&g>1tdyr2l8XL#p+RA|V z1cLM^tCj~?H(njY{euj4#Yr$eRTa7@3M)~(!+=@=tl*613}x7`?85W=wBd#19CDSYQw={imrS@e zZ|oyZB=HNC`czZ#vA?7k=vPWwEfr|4Nl(W827{ddv2b$!^dq~Y3#`&7IbQuJ<%N*+ z^*e$#G8ixX`Um{_;Bs)~tYz&0XnB(oa{nWc(=3rbq5i~`Jy*Epx;e7bHK*4Hw6`CX z+Y58-bSDb^@A$LhheXDH^0N1nJ{NYPp>2|BnmSk4Z`f5H$ ztLfh-jQtBmY2Jq%zn9J9KyT5NK~|Q>Fw@oJw-h(V;)8t%l$Tm1C?=k((ykIt zz1*gOyO}gY#=EF1bKwm`aONg+z5fegDg8_ 
zsBM6A6d~LN4fEiD?D6w7_KcI0hDrbK9s)q!pKKhsA|Fso;H?cKExR*p$I(|NrDrS+ zd(hIvA&0Y8*v*)-(Obzc1V%hRj-{e7jp=@5XECbcH{aR zMWinL{bgArxZMFf1|NyJKhrkJuT3v-@UpXShUM?82`0{!cXmZ&DU& zsB_45%jTqO${k)rW716KqlD$)MV(nDPT~IDEnqvFiUGlEq#6zh4Yk8R z9+>8_v(?o}HDTvODN3?lOStMc%^zbde^e<%S86FJ+$0ztlvK7#W0TFfBB%8NbK!_QPh9*@J_&os_awDb1-ehxY94_0M7 zV`%WwWVSPzkC6{eu^o%V(0O4oj>2y4E2LcL+b;6Q0y4XrGdHl3>4{ETTd(S{u1hN5fuQH zK3dMSSLag>5LLr(6Xlp99lFAn@!a^C-bJ72nIkrYB4;I}jSZrN`(c*+IKnqb`%GC@ zKtpgP&j)J8OfuPZ@L(thuas0x!*)v3KT;yi8;|D)y!=l05?u+|%ZmK&SXHegBZ+DN9#mMpV=Nu+wy)&=qo$=bF zceu=NFX`NyKV8SSf21sTKKNb2Ba?;$P&7J2E`fZ+3#2Oh(?_IzrePmuQc!B5b!?o> z;TM+$<;S;|gU92*n#A1yN=va{w?%gq2{{QTKK|YD>fUBg&<+})-U%F?iT^IXi*4(V^Y?>Rw1pM)!8Y z0~nM?RWel2r7XXHOWG4aSu=?)NS!!9hdlk`GAwD2`5U+g(}q(S;DFTH`u4}F;y=K( zF98jJU(+VoKib)}&YJl`5p|yXdNC*{{_3R2lYJ-`n+ev7)?M>zqvRIwFF?(eLJUS> z-rjR(!-yOzfwJ{Qg@r1XmPzZW!)8PZh@&VDFviO;F=&lrKzr_nj5fMAHRs*soOdOt z7J?uTYy-B2&g4ucA@2IneoraS{+xA=$p9+lG(Ffd{t5O!@2XTpLkMyaSmDkf47I~< zK<#d|y$`3dn%~0VDO`5g9Fq3czz#!8M zfcRr_*X^Z*gv#o8M#27E2@G6Wozw$Oa3m5O)IoMc zF=h2@O-T+McAd&_jS+H8*U)J&YFpc-;e@~-i89;fV^cAba`$w>nD z#?O!dHiHI+DB| z)te+inkJ?vbOCFr!H~bvEmjQyDBjaM?++PM4}lg8*uG;&TT~Ab2dJrCgO+@m0@Tk-A}#-I=f;RH^d^(lzV{;MhMO-;!Wc| z2KS^6%h$;0cKWT}2vLkD2lPD{B?gZ&!o0ej+M4KvX*ZE@>k~F;zwNs}0*CP+tLV4Z9{H;003a#LvV(}JeBe?SVolBQ9A=xwtA{w` z(0p5hA-8Rk0^cGYWm*Cb{yNBqv8!|AR-mt>C9z_^qPn{JUCS<)wMCLI>)H#SJqx!e zP}M%!pnV_tH=OBIo9xg7Mw0O!Y`blq_u)yVeys)$y@~WJk~}TN5>LS?M%lDwbf&ct zPFU^*DFY1SCHVPw!2BgF3Cxy?aC$|w#vL@fPN~zNPkFjfi<;hg+GNkblUd6Ge!KlA zM^}zu)ok;G$N`HqL7kO>#USE-NP73Bp2x~8pv92rJ`@}3X=^%Wxk^1D?4j5aGj5x* z5d>mB{Pa5krrQ$*mHUVa0Ct(B5{Gc7EdW#U@bl+qNj*1S8k_O4r)lxDsR3ud9p-Ij zerL?SQ^W>k#!IYPwHpFed+Ml8Nwd!_{M;em-I5=lFx7QI91WSuC^SatBBD)Lww>C& zCnQN~;wGqEm#$@Drv=2p{Lyso(zKD1LhJsN!P?2=fREFJ8^t5#41XwbZnG@R5E9dG zPNiSEPqzROE+U$UgdSSYW;N?y>TF+*MK+~L(yq3u5&pNI1fQpZpxa{fSIY96xgm~h zUn=G=@p)Xu{PR&bqe{16w|EN&&^pX7hf>SD^n0SKVv%CcKKSzN?;1+uN7(1Z%o7O4 zJUL=3G&+U=b`m1Prc5HKP0`}@8#9*uhWL=ujYlt%a09tS&!zB=80-3NPkcIQNs4!q 
z4vUzh$(Ap=7n76piYB|hYPA26q4(+gYK1~aGlJ%2afkug*v?ogT0bCd6=*+6wLb}~ zQP=b5!v&40qtwya{pdT{QpCDsN!u|?)xgc30HX{q8np)`mRYPwRAII{q2-nc(;osF z<6;J_w2;GTy|_8g#3;*XKFpiZ+#u)7GF7|hc(*sSPQ%=tSe@!idLb?uy~BVJa4JKx zXSc)nkj8vtsAZ|mOz`*sioJ`P6b~Le7_ED$*KaS~sD>A?08Aa8E_+>_$`H;I;|(1! zryL}^u2DSEg4s)Mtk5lArdy(-#zy0|qj@h-LBwe;mZ=9j2QhbuJSn99HDH+$=Msl$)Y>7}64 zyQ1*>y4!g_F!xp1I?w18I(pkgoQvCjmv==I*J$7T)*K}yDnL(}hGsPA(&gNkmt%eu z*nMB8y8Fs2?qADp2k7yS6+xgWTB)e!S@p~O31GN!+{R*WxwOR_P6!#UOym1NUd9&-G}!l z8YqYKdLd*F58KJT0Nx&s)8I9=23wYBzxykiif+V$2*v)@vZ}dFzSq+{e`Rtr(xb56 zk;w}+gFw-5^So}?Ek~HuS%qTYRY%^;w)fz$=%`W#RAO|tTB-F5zKCQfq-gz6RP8;< z?2J=1h|%%g{)QSxZ>#M=r{38LcPdX2y{|NSP`lw+%Y-*BL=+Dl8HQvrga3NA#|)(lcsfCe{xV%%^(_TneWK5*mxXr zND4o#Vtp1^q@1II&EhX?q~7!@B=FOR-_m2KS#Z=xm!nA1K$xvSL*iJGua#^Mt#%ZVotIg@s?X&Os|s^@3gu@u3@Bc)_p1DW6@TZ@`^B_yX{82U__xtbpVu4 zr3BdsxgIa*zIW>Dy~wc%u7nsij+**aVe4OGENL^m59gv`X_&%b7IkQY++N6=*1^bP zq*XP`ptKh}9PUIM9s`FQVz<#<-V7MOQBAjR-yZEQ*9Du_tjbJ`u$t2L;e=#a=c7)j z$fC_u3(QiO)t@~mk2LVL@m})qz)~B=tYCA@m%g8ce~+v zi}h6hMuguOt6^lnBl*Z)V^O)AHI^>Y_RWJE{oY@ZbSfaFQH2QY<2RAq##eBegB1Cs z-r&0-?djvwI8MU)bA5<azvtl!d>v>c#5`7B!XpXhXO60n2j`wWPMy^k1dP7 zop7_Ga&qU%G+F8(M2UObQov2|vb`;KH^%V?xtnaJ5X@!c%WWc14Cy9HlplKTuiO=& zoYe(-2P%U$=GiH{f0C*HbaelTwnKx_UMvw#IiN>2uvTbWDgi=?alwM^o;bW?A;JnP zKh7Osaz-HY^Iq!Ulk@Z1?5G29k{5#ShumJs6G3-{iTBxmdM+~L3+1r>=4&^a$u;bQr1vS7lOB@Q2|C^LIAdiItYux zwv%=JZX&f|M8D~o0MA&rrDEihMhYLq|6yKr1wh-a%~p=HS=VP2ijAR!52kOlJ!Hk> zCvIVom}7P)G?XxIr4aeP3hThM>V0iZBCk!(5@?Ucpy4uUMIlk4LMO#32dd{r0_HP$Az>UyG8w(bvuh1wS|?CvLg+v?8B`JF`6xNpz#{ff+pHiPerT6 zZ$SoM>@eXkqOHQNJxuB+ zkf@M2rqz~SHaVg4BqXKCzvY~nl1ZuDldgNbKjS8wxVSn}O2?SYUiIgwLLsX5K@H7; z#5D{|H`98%6oN9ye7c0%kn}B(F~Mko-kqA7$|oS80y7|UJb^ZU!xG+>LG`Jz_9D(C80{t%`HDq}- z;t4=M)V<+fKE|}2Xwt)VD*$@LtmHSAV#_wU*!dp^lu9TzqvI3`*xZ?f>L0JjNNnQ<+dYMLV9FzVlGBx9Hd^#u>qH!cb2XdyRwS-!2N##2nVd}YVy%Y+<}ZY7QL&@N!7+P>KQ20wxT%*`7BK&vHr$x z!c>frP(LF#lR;c&4L7VJieQ*)hmoNsNk)dqW7OmH&X0a@-Vg@-LL7P_3?mk?=zOgP zG!*MHdj7(N+%3g{Zy?pLx~J<*R2|6v)re#P5c0?nG!CBT4<|4Df2@58Sj}ns|B0~^ 
zgT_{3oGhV4(W;CgN0zjRv}m$bN@>%ceXTfJNGnN;%9heU2GbX#MotpuQ)oL#pFJO*i2SLOJJD$(- zANCs0u?0}wMvXMaF*no3i@=ANIdtF6 z_-veUmqS&rP^U?X9v>-l_X`*7)Xa&fMh6`oncQPb@=Bh_@n4n?V~iock(GvYxw|SA z#{?-5I7iZWq?mK0@c;u*$cMSO!NY(c@LXJ<7R)aI1|A7O{=LQd{x@?T^A;Z@405l$ zU0T$MVtKF$7z=|y;odwh&SuPk1Fvf%*W!%cW!kTDWw;)z!DC_S{a+#F|Ct#7w$n?| zPX`Wt=%Or(Jxi>Z(9CTxE0No&aQ-i*WBNQl-!9}y*8ecper1mP`O}6h7^YF|EDj>(z~ zjwv#!qk2X$Mljv!=;>_0{_5w{)2`q(WZEgH9nUcN9tp+K4oJwU3bxpv?toA*+W|Jj zu|_+5*bZx06`gpw9f(^OPu1swB8Ey$V#%m%H7ckcgF_}ju~vKs1Sl(1#K$>C;Jn4ipvfzu%ji)si>6SbNd{O(SU721tf(nf-q}}-ZGw_hP9f$QU>gP zZ_h0M;>rHb8i61|1qvObZZ03RbIRVJSK1fQ_N`*ClPY)rn#qF;NY2iR%&}ko3^o7y z2_6nAno;U`F?J*8;z~)q`su|*`I3lHTZT0r4ONkK@%kGqbr`!=ZP2>NVWe0rv!Bv6 zIMGK7NUITD42BajBO_=p_?P}ud*O8Ij&XrvivUAX2o6)Wd&!N1w9KERr@J$OSyBbS z|5-owO~HS|d2xJ=U4o?B)FGvw-}O3wyR&RoxKM<^!KGpUtUkWEP?f$#sA7loc9k2Rpwzvt%jf zT7PAX`|fwTbm| zB$F-A^(k*+z0GG)U8LjKzTFplp&|Tlzt0$5Bx|yHV}Lc+(q5p3Qb(tX%3q0mE!8@5 z=0M`(GF48~2~v6Amt5ce?iikAZa*^Piqzhn*2b@+jo{tQrTh~-1WZ=@d>Kv@1E50k z3w(}Z`1VKxg4>2B{$TW6Mo92~r7mSt_!VA4`0+y%&9@uOyj^f^Zl`7$ja87GZ$vD9xgvoM-4jAdhV3`rj+YABqBN@$Z!~EEdqxE=>eu1YF453dLAOEZ79( zWUZ!#h*abczmohci6^#COh1LZaqwyuUx(+F$QbHM65+N37-p?(cr_$M8T>g`QI``Q z_bVJ*AP6Hk^=el>^MH>EyBB{}4t-&*rN0<4|PCLP)$ZI zB3O`pe|CBW;Vk+a7Y690wrNucqS3H970R)2KaC4vg+S!sV6ihV#R2GBT8UbyWTQEHD;QV)r|Z1|45x&gke9pOuG)g(}bEeq@5(c`0FRhe(<9Mde{Q*2su`J1xA>^ zPtU^f&LQ*v;UQv${)r;9O)FuPP(st@@>1fVb@4;qg zhuHQ*2h)@xmw}(l&BMb(=(9x)Mm!ZPjv9V{xQpZKc$H6;vIOo z1Uin?8aWteh*3k3{A_^qB*`fT6RGdLX!`VJ(EE;D<$$?HS6@^n0rqTpJi~1^(Bs}$ zD7eL~YBzz2cm`E_5+%724} z)DU@>6uBdTYv9b!r5+XE2+)&=Ehnh4pR_EF3#Zr#LSVF!OaK6ON8ytlZ3G9yEx7@$ zeRyj-7+isZimZLrp;x-nfWE!J*Vfce2j)#|G}ZUnfyr&aobSAlsS>J!7feG>FED6% z!zllS+fQKSg}F#UDkF}aQrqg)c0v#c%mKy(D9C2NwD`1A+2gV%f^S42LHX&?yy6!l zMTD)Q9fw!7;@4#BP5^|;C>r1o#yrG#%7>4l#{<*ax$mZ7o-pnD*@4!69G20%79%J~ zz`u$ObV(dfr)DZ{Wfi#F4^eJYmPD?+6rUY>#ZR+t0xw$8y@wS07cL`pCd6eNXdRlR z@Jp|g_QpT8SWDWm{WXaq4?zK) zi%Xb*>GItMXZ}(;$*G<5YdrF6xK;Vzsf+AR*&q@U46tSC-TnC0w{GRiyHycJ)w>t_ 
z%oP5TC|&v9MhrZ{3wVG|s!h2GPeJcqJVZwJPO-G);4Z{N4QmhC3NnBVrA!?#Ms3&% z8|G7(bZkLH`ttPX&qiunpS-RAJOSGS6KL#P16oYk?PL9KLsBx#2Vf5UU*S9 zSCq&YarkBZ9a>+J*O}B95R;J68R7`o>rg9rqQC+rUbodF1Lho{NBbg5@C_=1FPLN_ z2PYf=be@zV=$c-$ckEoE72Br3Oh+{tIgWTGf=BPM+VlF>ljOpe)}BN{YI2{L2ZVOv z?5++A?@9wUNuXDLNZoJqHU$9Wd?CGF@}q4cfvT49Xl^u;6*R#z%zJSbB$I>_fT}RC zk8OvRcwkq z*RfQyaCh-6uW`UH1Z3C8(@F!xIH11w?!(m7nj2{%IS|lC5o3>y{luwLYWU(5RYlHL z%~(@BkJg*q0Fx3*hOJXuKa#W`Tije+jMYY|rwS}hed9mKFe3#6My_NK+ns8U7DHmG z()#sY79me`+C`2(Fj7)dI%`qs+L4fuFj*h{th=|jaMkn9*@i|hk$OEB5h85&^XKRI zXKLuL0Y2C(_ZtS+L{D9gmH;qLc~d+7c5}_SOLD>P2YO$Woiv%brz_BQ+zHte^CQ~L z=bSLQd@*);Zz9BLvu|9yl`?UU{SD-W^g8vtc546q8`E4Luh?tX9tNx^0!S2-^-*XL zf+ke;*GzMrlc5ok9TMErVk$ZF$sK>yl`Bo68`gF-%yD;v%C4hroJi_;SNU14RWDtu zh_%a4I+R9K;&FZH+xlVS*U{fz>2RHM`L1=LZi9fx%lvB|_p8qNbV;e}_+R)`q8cKo zYG!sFoQGkpC>M)ZewH~}{m&x{1PdLDk+ zq-VHqw%Wc#YE@%XQwu=i#T{br`=Be>@OC%Pi1+;fbw~|Vwx^k-BSR0u`)R44ZS|Z) z)WmFF-%S8eH@rLZmh80HOzU+^Fy8c1&$8MRb0a?_>vob)9ekyFT_?E9aNvjRW?%+2 zNT}FlngJbeH4YmuCZSjeV>0|6K?$SZUzaK*vN)n#9=#p*>)SG-@fZfW)|me?IlqFQ zNaYyBbIlj9O>M^Mf%(Y4uZ2sT?8S{uMm_opDk^MZpuo^?Xn%qkaW0eT z8ap4QFNC{Db@7_{XXc0Ou+Ec-GKx=2KmudW4r~2}Y?ECXDG0p`INz^BFFl5!A8is` zkg6)EH>vF+f*){e)p7H>GcA^|Xo|_ro$Kf8By3_gle%YO4hna}n)dc~7u(z2xFp?a z(Jp)P!aRDtcsZ1#?G$g&j1RrWuVVLV+bKw5Uu179>+L-x_1TU1Ex|=1;I5CULIQDT ztsy_iD<0v?X5V*G=+y(b%{*LN__-MO}Z1cGmzW29im%*oMZp-X@1F_O9s|5KTrjl%{}KBHX70K>O{ghW_p0GDkq`6vL<1h(Cq_qePzoVpM6oeL9o zsCk>>M@oGT_3N&|mL6eQ2qxmKu1>6}UbL~{BJ9zDqK{y%*>ma7sjcl5}@-j*&nC z=1CDw%2tE2oBU}RHYg+=sG?_&N6rZH%<`luh!JJ|hbVcxZI4XM=om~{0XmkJycFb$ zbld3uGiV*^@lKQ{XDvcD#O;1{V{6?DbqIYh_0pdzLYyNJFT@~jMroHt$`$gB5~`u^ zCKr6{Se%nAV)5HV5wo4zL1p#3NQ(WhCQFx7cVv9M3hDq~bJt0WR^kWUndl#?C8Tg}eWFfXonN2dCl(STo zb0r-2!&^-M2-JmTTkATGVZ!_Bscu(Z?IU=ZXM`ce)K| zkK(NA!Z2}p1(MjHRmG6WtJkAar`oN7zCz=mB`q2tB3@lJuha)Yc#GZ~C3QGaM zfkfYeQdwkV9u;&ny^-r|fZijiyLAg0^a~`3RL*b!21_Qz*7_oxBwt$Th1(wgnm0UlPGIHpoN);syY>#g zPnuRr3d;Mb5E;@v-?L|Dq}}MM@xVfa+DfgxeUyClv)grs=91enDwLC4Nh}N~s66o1 
z6HiMS<@_LibIIQ_GH98{P6+xLQ0j2X#vy9R=(phy`nNvDbP;qYnEvsLkw*Um?z~_E zZU{P5!fn3%Q8@QCaj2$TUt1h&MoCdP&w`*3tqD52P6gZi!Rtm0{uh)jJNx{#speT&j}fpd&-PR2eFfp3OvoZbpgc&*7Q zkNivrs1m%X!F>15cQzMFHs%Pw&CWK#o)1Il;N(Pv_EtnoB*7CNCg1gD^g4GQ20Q4d zrrqxVlqIixp^}cb7yJ=WGk_gDPY^YD8O;CM6i?M-x%S?h{NGVP4{N>NI zk%PL#-ox&Cnh6ynb}OB*tjfq-)KjE~?-Nw$Cdod({N*PYjQeVLFBFly2@N%7r2rh% zXm|-wF&tPF+6=G(`$f2~Z18~M+G0d2F{|8F*BRt~dTQoTz_kXSpLR~Z8IR`xT)^VR zw>P}QG`Ow;9%npOPyQ3HF(?5(iqt8(76}}z{h!g5-?qj)PN$@}zRfxgtpyrgW%a$8 z9GlviGl3w|xom3R0-Nt&>Ze116oI(?;CqNQ$xkjsMAzWdEuNPlHhQyZb^l?_I3$_s zU4)A(EG%r8yD9AklZv5mYvjQ8T<7{6<{*5)%N#f0x-_TD6mE8+4j$U<&h<)ISa-Cc z*PBo|hwHT=Dc8XS-u*}o4=2P08YP0le^|o(Bnk6d?vL~An`$C@0S3m7L_OP>RVzeSKw!Ij+qi^+WB zd<|B-AlQOvA;4*l|8Zq6eoHA3;W?&t>~ybIjW1CMF2ggqX45ufLLuFdY1KHI#A~%tWS@&7OP#+yHdo@7T{_;T@77hI;%W zfr~IcY*FC0`SO>bdh}#c9Ib~-LzurnWS|&A(4Ir*9EtV35)M;!NU9j&gT{zFy+XwnKAs^XOrk`)TESNhh#qqlfu{;uUi9; zPF0}_lCW0)rHQYt0iPol#fND?B}VuJg-^#sUtNOZh(qE+#g7vHrz9>#gyUi&{F*Yi z&`}``amI`pc^I67XeB};UR$H~q7z|v!A7X{rzvrnoRLQPqBe5(pKuQ~QaWPf?ITvG zFoan6Kbfao`7Mk*k4!Lo@f- za}u+WT=8?iA6i)C6>T0jbIo%yfkH(W0TUKxM`@rRCJKSj9+dW!j1Ttc7I1D-WKwn; zvkEyni3kGGGA>3x)aeC1U)NRvt|H;?3cS5iHY&78gaxhjk<99y*aW2op6{rXmrn=B zFjpul8UY5NlFjH;4VHk;zBhwd*`PdYl*<`^Y(~D-SnJc%5;*a@F$=!LxPDs=LXG1Y zRA3UnmXv(Zc-RoKMcwv`myyCy0#fe+QAm(3M8(mrcM$sFi{R3U$0&#$ryW_Pxng3! 
z^Zv!Pp1g9|T_~)T!8cIcBV4&0LFo+QUD;jVb6~dDF^bJCG3&1XDxFBFQ87VYOHB0P2I{V!hevt12srChZXcOE~IQD4_O;_EacK5I35cfryy zsd;I`x3L%_Pm_x5>7G~jID(`dJ65w1Q$+S(-X95)HiIi7RT`2ar={J%0t0s%MzN6~ zJ~8JFG15(!NtKVM9|ayT z{Dp{{CQ<^B;pPGIzxSp|ku=yK7MpBtNLLZeD+C{xi}D)}u>2FmV?*46y(o_m`!^4c zhND$hl&yd>^deHUcJ*|%la!B_UorsqkHVN?EXV`w>b5WAzY)1VitPK&xl52_5Uuu$MKQpTsxyjHvoND2+(8GP%FofCB)^9C+!A_T z3EN^y7ZXkTbTRo?-8p^wVRDdC3;<<&;}mf5HZM-FnJi;tTi@!-fK=Nz)0s5Ad zItpK7KxJj+Ru9uZ*MSDReC0|w25qlizdoPlc!12T2zl=NWCZ(~i)h7Odo4Pu5Jb2w;;F*A9Wsy}3%~^?tQ?}O zL5Nqvmw45jmQoR3))V{*dhupxanmD#ATmuqo%vLUz$=c&=DNl+G08HpIkRqUDT{jL(Ftl1P7Mgw4AVPFT5-XBdi zsvoUru`9k--lQ2$6Nz7gA`{^pXGl^9iAU7_WUjk8)Ca63Ql#z~K7*C?(SAW7mrkGl5556I(fR4(4_d z98lw9j=r3}GWL(lNTPU#uvzOuGes9Od@7R8ch!JD3`a9=0S4U=dH+2h@g=yMoM|DL z(qU4id@#Ooy|KSU>-yR4NKWle%h$HnPPC2!HMtIZW?PgKRzI9srlof$TL)@bei9v` zR&+ZIT|#F=wX2(3K0sRJq-F=|hmz^oX*pMr(t+tv*2vf_Y5j;D1Kg2gV^n?{Z~baO zm^t?k1J{RcHXBY)LwX0UOtfp1 zQeO8)@F*~%Bj&6??AatfzRLh?J4&`EAPg3a5$YF;?tT>h z?@hMME|m2>{Z_1q=HoAK2?iVOFkH0ye10^{hOh@<(+B|aX87jBBdI~;U4eSyNS4-*Yl*T(RWrk{;#6VJoIS@_f;C2QD@h$RU|?pk0JyULpmq zit>}Ly}eC)!agQiRGOSZ`U!;e>MDBcBJ`0l!ked~8Fc8PxBNl;MNhmmrzyH|B?aEk zv)eUx?a3K;OFmNlCI8v7!c1iQ#Jp~s+1)n&Y3le%lS1MDrt@w07YnE=FqgcGD@Gt> z`N*?=zV<4dqJ97Vl@)aR&G<;$* z=)RBilq3a7L=P2gkRLXBD4$N07yg>#9tYKm|GeLOHw*@OFFKp6R4c4gFIFeqYy6sG zH8H(F#|$}bn4!S7NX;~*5Y^V7IB{D!n~3zgQ;WK4{>i%5KTJYwT~|ZCW9{`+3v4~= zok^&2&}W=sl87L#^uUP8KIU&99=Gjo$roZx(6%^(;b^Z*7khlpK%%T}I+?BKxrs%U ze2ziVu(4iI?mIT5?J=qS1_Xa`d%e1#oz4{h%q!T1I`fUDl!U6z>c$Q)VYq8|#HhUfI+d}1{PP{w z4h`2$(q$qatVLkzc%VhlTs?(`6>T8xz}6SgY<7CZBetmK95`SjP)F<_aHN%32z#1+ zK2xMdzBjXxAz1CZo=utF2!hm%;nMM6f$Y?!8lnumnJ5xPwAe^}c{}(*5hj#>6vgnw ze=bw&XhG66Mj%~UF-L7-Kfz6{?G!S^V1BQ5uaq@HxzMLRX>E=CF(;~*h0B! 
zFN|xv%`vy_HDt$$O&g0Ee#Wx~pjXjs*w=q)hX#?=^qKc%aPkKDbc)g4#3-8|VuLnq zpK=ddc=ixMd+6oK_Jlk*NET4XiJa*E8VcG%X|=_84V(ou5z7EYa8KsUHRs@S`UgiFOuB7- zyf$3Ew>`6`gB5SdEN6QQAefMVZIJEqhLJ-G zJ(b`AE zz_O-x3y3Ug$cBi9o`h}W2DP#2?H24Hrzl9qng}xp;yQ{N9IsFp!)}J$v`Vn|)6tBj zox34}+lBcZn#qodPF(;m!Xc}SDwY~IZd^-8P1N5EEvdOkGx|@G_F?ZLO#RG4zNktA zw^#YlX(Ov4D14_NspqzIvZ#p23ji!y@0uPoT#ZDk>}3R;Y;luqJk}R8P0?boElqwm z)wXgv!G$hiO_-Ij(Zb>u&Yd9Wgl%N~CR@8GWg{}C1$*?|g*V;^w*4F98oju!zvaDa z06U9_uypg%C{$!#bETw7Qj6eT&JYR|^77ryXCZp0Pe-^BK{1p7ErJ0%w(HVvG~XRK zx)K{vK30AJMir6N8Tw3BNIz&U0G^C0D2U`PY7W70m4@VA9YKfYGl)kSL20ZIWFxDG zPy}&AsE(Sn@J}QJDRyvm>kOV>;i^GoG29#Imr~oJv1z5gUy6FsEw;VzG(Lu!-4g3* zxK5h%-;g%!=bXVcX=Q)us+)E=@GKoGKJKgTP6~UfXztYdYA4-S2oZ2#PYqefGx6eo zRit}j;7PzN%Bi217S_7C+=RC`;ri|Z2uf`M@{;L}-`vGi1Io#A6i^s%lx!rZSn+dC zo*x2DZMo)4brIlX5bg&c@`PlcIA@|Lua}cT1hYLa3(MPn$MZaq`e>zau|@5?ds`c40R>IhKFLFqFgp zdlJ=(U(9#%CQRjmX17jDEq>(?i`9;ClKnd=SmoS*kSs%g;V#jgPmjBmdMHFg?ze~8tVB9M7F_dN$ zoe4muL01RF7gXN5d2tRT1i_#{DGjFcHA<-?VEW$^G^XkoUsjp`E(N+Lwax_WlRf1W zQV~~&Z+gqVeeY(^M|-^bzZT;_e(lnlt2db}n0tv->TSZoK|#{E1|(l0CkBKWpOP~X z=10#n1vHO7fLgxi%eiqF7E#pmA9-x@BSisBjz_^4vSYz+&@bb5-m-01BnMCFi znx#cka-kbz2Z6JpSh9PFbC+V z@~cR3hQV^iTY%|M&xPGzn)14C8&hfo<-g!a!8hx_K&6s=lhSET?6h=j1GR@K%{m<= zz1_M%STOyQ#qQHDOm+OE2T#YeO_#YWSg!gATnm5IRsh9gN=c<(D%QCJslLO4DxuJ0fkB zM8HsrZl^5OFMp<`b7am~&Ux4b!hmHMRXz`#ylBTMIDzNV@GNk3LwC&i@&gR#F+{w< zS{V2`4uc9YMe z#r&x{#aZU*g+Dnr;^oUtKfe)J1E^*XkPmJqf?CG*h8+3uzNwvQrg!4QkE6fdSn)7Q zZ~gxiP|#T4IFxk;C>y}GqhC?>U+PfEJs#N9hd3_ixp0nhQX*kfL#^|UF^p&{hdS0r^Ha2L)2>6j@PsX*i8(9Rl?cpBqp4s4QuR~a$Gw8cUb z!4-b}twLinqOSjL5kRtFD?wSF1CInHG*)FR^$Oax2~1T#-{aI}{>Q2%!vdM78Nyq~ zjQAsH?j{D`hIzjXUD2&+^{c7I7(PC`<>Q~PIAh7T>GHt?gML+34q6^^V%plFhnNaO zgYug?XC$8&V(xc8zh-7dcbrI+(Nmj@!krcGi=W)dZ1Ry2k-UwqsPV(>kx)fCMVv70 zFU=Wwn>-}qA)yP0uyoRqK;*3sIs$XXZrhy zYaHc~REO&whm4lM;8L|RsjJSiNBKT<@^4(W_9)-!ifaQJ`8O)~J@8e3wXjd^HRzXL zD$z8Ewtbo#8T$twrRVT>m6twdeC%o4XXU}+pxt~)HWlg0LIOjbXC%0M+UZ~Es|vQB;b zqId@f2O$+-zFX#-nr8<-(-aC-qA*b 
z>p?pRt2X?N$t&OrJJA_X!sHiO19p_IB*!2kN7myHazUVCeGDixHC%GnD#A7 zzy#uop2Lybf+M!-$P|<3AK#_c6(oQ~mo|HQ=r&sUvKG{8y9#5?)F+8JPyT({%@6mz z#Y@IaP#hq+9;h&L&b)c)kIpbMmdhRGQ^s=0HQzKyLV(uR=g(}t@$$3!l`fMFKmghT z6TIcr>j>K42q^~a;a;=7XkU31vt+HR%A3YvXmT`^m6g?xj0iX~^s(&x<7W**UV(-c z`gm}>#FQ4S*}h-HmuvC)de8za-0U%5-vk~12x{+%4+17LD@Rl^lP7!0PE0-?=w%(ZB-Fg8C_GO$ycuZ$AnSW9uEehE2W5$#| z9mA8$n8TwN$;)FJhsy9@R*zP8?b5wv$Nr9*!Et=qq@u1a0u-tds++FmTwmg)y3I{H zx@{ngknKAsM_e@47>>Cl3_&CrdQ@VoH+q_|*{f42${9CVJrc%QG3tV!-l~HJD2&hm zG$#}d850wOFr5(8q!jT0&`Z=h9Y8@2sAUSBtwhxBQzUY3+H?-zEEi8KrTdf@&ngwZ zXE2O?ZrK6vx({z2uw#dh`{_zp%z@|7L60}sizrnMvgYpF7u(g<1%G}ZT;P;RjTAC> z18>UiCps0D%u!QiV}+MTz5vV70;sVPq~V-KTO4)RLYtB9MTm3`W?NpSkzeVZ-KbD# ztPV)*-)y>(C5CaArz!Oq&$A*7b-Zcm59WU=3*y{ryan^$)~`f-Z%2(N&8%r%X$=Oe z1@$RSeD^qdRPlP8r|n#pX2QPfn0XmNK$V}LNMZ|DXxoUe8zSWxI}6B9=)nS1mP2R^AK0_G zmUg2@kG2HguN`NRMLu6lgrp}Qer(%v4~;PT*l@%YLm9QYp#r5}%#t__+u!7ieGwr(tXeue3J#I zPSx2azp)kM>`Os%%N)^jk2A)D5ob~Lms65ORrbmHCTIa}^)_gaeXwA>fPnVfO##xY zn<6fWmq^n|7lws#_{fns>5+Fse{woqeN_~)VFoK3>H%T#;>8)a1;c?NP=-lerl`ee zlMAQ<3j_=5n;eqBv#Rg&hjM&_l~wDCR7NEG{gjJ1TsY+mT_cTkJcXlvxa*(asqUZ* zT(LqbvLKvsV6QngSKIri#GqTQdyt{9!#nwB*)&h`PaMUfcc0dl3CjG zNgr~Q8UU!O3JMCL`Y55HAC)90Bc3r^jQJ4VCu(4AUHN=E<%N*Dk$whh4Zec+$PDLv z__;Zbxca)g1uGWEXBO?WX)Op?Hqh>-3`P{M(a*s79`NbYrx7OlEthLH*!ZP6iTY_Pbcke35!AV8Q{gkXvazGl6OQ+$~sUo_%x}ZTR7L!am@P!8E z=BnA)+0_6cpc{aP&h%bp75<)2U_<+%qesJ0qQxOiJ@>|TCA!GPmD#-mEn2v+)-7#eczjS@M$u8VeH-o_zAc*Om|3E@8P0gZ`z92=2d5+2 zs)571iE1ylt>R!LX_H#JEYW-DJ zT|;L&uAd-1bBkP5y~JC4_04UDLZZNopkV%5ma5< z#wE{k4c(inrs5ahDVF!J;kbTSp_qqzMZM!1p*QEk^2MZ9V>#f_+_KHUBuCuC+na*i z3}Ap4aur*v%63O!$`ZuvT=dtQc*K=o>g_pafl4$<8W6HyPl;xN=cF`8ng>hiAkZkqEGSR1us>L5bt4`- z48Og#0`|;2z^O(~NEoo9@dB+CXu@U#_@~MY&;&aOQkV^BrV=YBUO@gQx`+_u!XifL z$)K3+lbS|MiKz<;UmnuB`{vx;i-b(|^k2KzZ5$k;vsKx}N ztX=uzHe>rU6Zf^)|M|v_{VTcM4oC2rwhI!SB@6eU+MAa%uNKhJE5RW{hJ>R8T~QpR z5#@);P8RReC$X{-cy@B0oemqj5RT-hJzakl42*Oy=PRuq!?^P9;*7s*bZT+F>OIaM z4Iijr26(JBwL(;?`0D0GHv7gq8lzHxnlHJs2MUqxPK972$Hqv-Z0<$?u!oU9vY*MR 
z?wBRk-y)P^Qfc^kNVMm|GuR_5eyg(41Wl$Pr0d=7u#StyWg>+1rJywg`wxvtpZ&%G z&d8R=*4CuTiFeiSvq#UK*X~UHDFwHXl9c}WL|=0{9EZSHJv#cAdX$1BF(>&Q)lY=C z;5@!AT4cN(s*PxQ-jO47)it!$cVEBVEFP25 z*FkgQdm8{P4r&@&L_VDn10^jTwlpIA@ z{xb&Nt-VY>8^^N8zoK1P51ff;R~S)6cOI=&!{_tGV5)WV5y}GMY`z%{Bqnru@`A0- zV91#)c$ASMtaiL0KU`xyr%BHw2RB&`SKNN`>{%Z98=<=^&_&x&Au_QBQ&Vf*k^yH@ zrH^uE{*7nY#V%o?yy~$BjyZhTusfxW{YA&!EFWo3n3^qqElD#Gk27)Qb>rQm8rV9} zH7A97cA$!q~3VBfID?ASig4)OUY>Ru6lpxd%TrU?W)if5( z0BXh}4}*pnYZS5K@f5P$*@{y#k7(qq-p&21Kj6L(RNL5lTQJ>oIX2MNnKAzU{)hk! z1yYJw2^CyT;7A%+D7cxI2=v1dOY%Y#DcdI=!T)o9l=c`5(DC6j@pz2x1{>E2@l%=k zpiD`L3Sv99G>OXp_43_nnX*l%2pV8zW8nwVL-5iHgWs~{dp^1?eDn!ydRkhV5)ok- zQv{Y(dH0K0&WL%mcSzfUFZ1WBtFkk?U<1%<2unY}R zeB~$j2q}&Wwv20;!*R5{0QN!{jij{J6*RvzWE}41aNx%Rz}YZiLE53?)|#AL=vXe& zLjw}E8kGX!S@oHlz%Y^a03EI{h9%P%$A`~F3@UUBTDQz}*tqiK@+1!y^d*$4Ma>vD<+&7CW#>&-vqNRZ4oc9}5pjJe*N3|W4A>oW z@59HBW@gNg+12fjHe{w9wXOHViQ;DKXqMa580Z%-`cE7%ea^Fbh+k{q&G$q(j&4-~ z8`t5ZN4@OiA$nHF0Zh*m@G*4UoAGoUzVzVmi08Fd{Of}6Wl5eN96Pv$-*u(j5Uqo@ zyo#6n=!z>;$?_*YKp*vTN)cKJOiXf64YOt^iu_zh<0c}-}O?fssQwh{M zRGD04ucGe$V)$dwZqd;t@fue*={a?Q&X&yEGErp$6xx3Zn=F`rE}m`xt5HF zp~dGVsZYtn@l2*F`e(i$7O&?YTqrVnarF#Sd+!q=Y9$43WD-=cfB$|qhX~!~-~T8u z+R8udum-p`@_{lko3L2xPCMZ7*#eMh;ab&HH$m&iOZztgMt$0wXZ0YTSF!Kkzt106 zf_u!$20hCkr`!Lg&YfoaZWdrQQz=vTy!(Aw*(&yf9Vh#jFr&~5pOTsnCa!IIU}tn) z6i!OsBJR}5?tRj>MNT6hTSxp(iLM{xzLLRh+UA1ttVwcSg7=_-aNdrWpZ zz}j{Bah66uukFu2*lC4o?HSfvf-JS)yss+Aoq~U3Wn(aL*=YwZE{^j#X?~Ne+=yAH z)+4|Gs;jWv9+X=l_}Z)4vT|~kNRHExUdedz_WOwVk6}07Jyu7L9yP)vi)#%^mxxT@ zCmRDDz{9Rteq`hZR4}+RSh@EfJP_a)bM_XIoWJ3o!EsKLjPtt&EyK^DVOnKERfq{w z0pozZ3$cIABo(Ki8k~P=;*O9elAOH-|5X_58(?&(Xh=l0=sp*$5Ibr8&RTKcfp9Q# z!m1FB15CIZW5_xn5`v<9MzlfBN@Pk5=jHXpc1kIg@PQ&4VHZkDX~(Wu<`eO?EERM{ zJ2HGiPoEyM^AMMC#&pp-1AxVR8d@~SxeA_|d@OggHBoK?GQq&ofC2_RG`l@GMsH$k zH`TW^Yr}_6xPRT82D^K!4~Y*w`e>VrTCH50qvPZ?MpGZpSnfItY44qBpioIb>W?x7_B4%Ku20e zmhszZjkGjce--cEy`wtr5-{n{t?&VF!qWeSTV>2``*2`vizi%NZzp?Vn(Ak;b=tIm zus?XVip@GWnFHo+8Z_u{5c+#q_x5M??B``%gs<8m)dtwn^Oh;>xG`YiS#6?YaM+R! 
z*m9z?>V`hZoFL$@m>En2R(^-}Y@@<^0}dea*IyTbgmd2D$Ez0>>;4O9@{gH;ck7;W z<)SfS1-RuljF@*-a9!Sgbt9pRgG=L~roLSIhxEoG{J3F)yYm8iw6$?d{sqpS#~q+- zDuWN2yAQzb@O4U@3qj}I(b0ld|KXyvmj$KGnpKpi$5;m$`b8*ee!lrm;tsTTF5tFI zFgPFN$5(7$95Q@NZ*veZ!0Xsqf4-5?uaeO5-g7K`Im1T;)CjFV#UJ)8DGT5C|87Ri zgLL|7E*wYLu<-WTNZ!ELGcwb5uDM`~JHkiI3I~)$I-3PCa^JQjoKNA0Lyta%D~C?F zc7O|D`D&$I=gvR)msPTyJKL_*N>*C{Qz*BrRtVw#RsAeP{T|L2xCbo!7Y>D&zruw> z;RPJ{R5#%BBUspX)jTzvx!(jZ$=}y{S>%J^+%Hc%$9-z?f>+7ir}qRyhKEgmrbX!2 zXTB4#Ev8T6^dYutG(BRG@k=~wM5i<+)NYC_ariieLsa>S{qsw4J0Tf`{|HcbE_ma| z2_y*nr7bBT7VriuTcsyPcwD^$N8arm{ndM&do?N`MP^d&OU^?0LW7DI64U(Q%%@VrZewyc+Btq*gshB zi{Q<_xy>$P%gf4$Hp(IjA6Kp#`dLH|tDs3DaXcpBt;Yb2NZ$Sf&Ur;v?Mg8W7|y?@ z_n;Z5;RG8w$jI=AOn<;0uTnN0NFd7%?)?|_>Z$GN8~(JnPv~fC*t5rC%8@{Tzy^1n zc?bo?uoWf?a7XS8H`$8-xndV`N%DSslx{L!?5~GI0UfPkBg#PB__a70(OcWjo;}-w z?#>997~0OflHML`6nE>;azzfG6dUsM6}gyaWXO{cFqY!+dTk(*4e*uOOw0qJcw*q> z+s_#P@y8!D_=(6MZ1Ni+O(4%5CN*U6O&_>@@4}GlT#x=|ZG!M)OAg0-4Er20;~T(z zZ%W~}!U&0L@~*C~A}=a$e{5=^X-{>TJ5jx$THJY*tF5;ZkLP$5^U>+lWn%cD^}RYf z%-?Zxa^lPc02W-D`^ov~(8p^w;&kSKC4CRUSD#?js#VuYI>v>7_Mk60okfF2ka8r% zmrIQ2DHyHvxaS<#li(a? 
z@v$oY+M%2)y)m*YfViZI2LmQ-O-1M!Uq>OlpNL%6HN9Ydfh%NTT2^gJua}>niBs*n zcdEf+6(-j8pBh)KUQLs*L}H%-$%I*tIJJjR=!P%I=LDH(oa;!#B2bRyKg3)v`yR1Z@BG38N*I@Cv{tC#I@y zYV7^%X=9_tY6ZOn6a|e>=JX^6wZGValaxuFmd*Q$@dY!5rm1b#;puhi9daU3l!c0H zF7z0(P3CG1YqX9RFb|2(Jta45?LC7>$p^7wr6=V=sdDx1zNoM%+`J%EZS62(<+%bIGX3-<|DmO@f@B20j6F-fknj(DfrAl%RyFCe> zDR6dC@~ka>8eW<@`+PRruiq{?Gc#dcUPhWot4+QAnv4#=-8MSfhBj}^a^KnsKM-r* z;XCK!O91XZEpl})&8}+Z?F}8?D|e?|44`D#%bjr?EIjm343|d#Gg= zb)Q@P_L%e0==lBor0LTn#zcm%0fJn|n(WqYEQ*-C&ZIF1K+gY+=$h~`B zAqF|N?4Sonb5KI*?(Tg4T;cFxK9if)4Rf|{=S#k@EGPJ0#rWs?pb?}f-&^tE!S64k2IvU0xJ^78T$#+p$n{AWE3?=&aN zR@e!5pR>0g-n$O~Nu*fnw##3xX;u29cfF}+RrFn-td7Vuh8N6F6ao@vIR6;^SCCj$H zK2?qFy(w+_q+kQ@NZtXFOOWJG(j7KUAf)t$2?964@RR+4UDnVLMes$s{{--p5OqrI zdaeLN)t%DY{YA8^M1Aj)RXmEFHvmd#nRdb`1lLGjPXY*&buL^nlo=~< z4VX?&y}ysDX>Y2~HSI*M0mR8`Ro-;R!O~ut`>FqOlxopg1rf(_hRG0YKv- zTPq+m_q=lDO3pj3GINTw`?Z?l#CQlj0uV7Rj4>xPnHZ&RF!DHF#jubdKvW@4f0nRM zo*eSpC#)Fhz80I|kB_##7PRoZo_BCn^)zJ%WH1vSiKD@t2{sAaC3!mX0BSTNVL*7% zB2{)-RaGdc^=eS{^V%x@T(M#fqTluO^iVQbR&uckA?>@YE}bKlEUe-k5LYM_1q03T zmNdH<2e>LQGk~j73s{JQ^^rI-PYKjcg1Eu~SDpfd*7?bXf^j9~ZlZ0A3V0jPD{rfn z_b7?8)rl9uD<&Z}Zjf{eQ<2zUmT3=7_J=7{XIyfYd9<=*r%_34}EuR+>L| zD}a7dD>7+|n>tnN-9hi8-z~xVo)oRi#CXP87+h|{uMo+KHPh!CKR#A^-8v+lQW$WBVJ$R0s5ABJgO8Jp3JiD>0 zFky({aKdkJMfnlE^0?{42h#~FAlyWO&Urn4UW;B&cv8idQjyO$iV->H*I%bUSWuh! 
z?(JL73vSCopm{{^bVbOSzr`eYQ=iWG!!xISLDNg^M-&c1Z+=ZSvYp4-d%rCrqY4ij zcz0PXb6M#Kc`Qodz5ICs^b+wNx5*OPpecbGDGP;EgjCVrqJO>D@LR40XpFsPNNJCM_SI&YCdoXD99$eBb=V6s-}eK61tu&-#1AI1n+NI#zub65MT+fdK7&HWqN z^VN@X8qx_jx86G--0p+G7U6dO049C3j-3;a1R`HVvfJD_8>ZdB)l08{Tvl(CQegFc z4Tj+nbY)WAi$*?p4hN@Cj}&9;cAq`_^FE*d3=t-RKaS{0w$LkZ8+Upgbu z2?4`~BtI8@u8pnA@He(#BN8eX>WB)OQUU?U@ANAKWH!p#HisOz z#1L_{b@g_oONOOY+6whSpyE`UKaqr`jsu&bE!9?Ps8!silXAYduTM`q<^fK0>^zHAYI=ik?#&-tcqC zkxLx54@dM|P}#XNOA);SoPQ?ioRCgPX794<5BpD&<%O~rQA$AIK!v~rp9QDk*C0hD z9Nk_9-XxP_pEJw=3Go7_bFVpuZr_UTcVGc#Oxpbo%t^ldeh-vVrsJf1u7x=N53 z`yr;P_@rw)?zx0v?rh*MyTEwBpS6we&*5t6&*R#pV$xwWeSYejpaJ52A&%6cKGVAcH)TfO{ zF>C<3zYgT=Y3~+Pu>2DMF+Jy01$#9K$~)ShpFD$|3w0^gqoAQ^4N#SlzF`0Ab`X=G z@FH-6x`$(6In7Dpn>;xTiExxnz3#-A5ZTZB!2p~=g|9ieJa~3ZEbz#og| zh#Od4(%gb6Y#_6cY78y8a^!U$9v<@8fO1C9-w-r(|5JQhKPI{DRKdKYN!$kG8VAgS z$wDfYwZ$&y2c-H(Eme9_G>OKxP_vKwl&q!S5Z|zr^0vYEJVykY&-m%eHZ*rfZ{qf( z(Y1GWzr$~VM!fn^B_ow~ddZRjb5WJvji6*Z$W!C9jP<7=Mt7;=kwKEV*d&>u_uD*4 zw?moe8G2$ouD9*lOt88iKYl#i#qHE7Xu4k~+Ql54F_8Z#WG_@SOj*49wBAI%q!@b; zUJyD^Kc4!0VOu{ z&vn<%F%)l>8$b_tIK>~8^kYZo>c^f}P+#y(-ZRA-+To**evzC^G%&so?mv?S*(Cj) z*ST|*`01Te@VH-2IUfvOnsF3BV*i&$m9H3VV2HI--ZPN$5iyBm)&MPiAWc!Im!B*c2^w8;&L07dpAXdGs8vjjZg-wy z3JDoJb)Qo{S;n$j4WrbaE;zNfB| z8Mojgfx9E7JaKd`2Dn1_sD}>@;cfP$H$1${KCgZy(L^;|0Fo}5)85@IrSUnvhg6WnB6(&NHuY%vWTeUYj1?>$3fcKJ=+2ldOguP>RxA?1d4Gh4h6jI>838Jr!BxtnI+=p=Ce*?qSXlJ6PJ~GKH!Tc>(9FJG4sMGMm8aQY-^H=F`o| zotJPfUwk}TEg%lEO89QQpsW&MLB>uhD@v`UL1&K|WpR>IelJEB8b(zBI;M-arL1I_ za#7#*U>FA-E`0#1_6@unKYq+2h#DJ(E@sZGxL(&*S8(O;y=tN%@PL$DSxoFS#R!A*xU`Yo#=e@*9J}zLEet_bKZk} z$AM;tp=3Kf9u8H5@P(Fht_4x0Lt#UJlI;e;=IQC_Cruh^YQk~nDJmum--Ykbp^_;G z6={aZ6=hZH#R8HBk}QnS1L0#MO;2ItsD@T14^2Xvy?;Iq_bRBfZj0U=8at5FB`B_q zF|SxhBkO11R_nbp>r7^^Tn;2+7z)`-*}>t=m3Mz78weda-ttaKY&Ls!Q6yS28a;z) z@M4D#9a7Ufq)<-x+-J|SwP<=3T-cw*xsc^_z&zm&CqHI-Ve6d>;R20+hLn-wfV|2N zm%gBB+Q2MHJDHCUUTdQReIRiZbShD7VD*Ci!)5k;2%z*C97QzW1VbsVCszPs01y*O 
zvO?(ZM3TGqPFOiyjhC|*Uz(W)Rs&yLVrK?F<=mp}d~}-xu9PQ@LLm%;Bmp*#sa8V3gZgW~z_q#`&<2_w4BP;{=$ zLh-wLa8n7f;t=*g!O@;}T_gmg0pCaTb|G2Um3Or1F~~tVnjZ_)nRo?GNN99LSv0bK z#`1yuM>t0yPr!~!47?1wZ!UEEwa!$OWiN1gpkT-LP*M4T!Px!~d**(qSQ*YwC&E&T zUwj#fq&NO}M>$P+Q{)Z$?KR!z{4O23NPO()oI}PDTMR7%q$P3=)G7#`QN)r~UVT>= zmWVFCNjP?4Ue}SPoB|6SBRGo?Ob#bQ*mRyI{$91Y)7?}Wf z*`u0-5ff9+E9khU3Xtl9t*A{`y>;bfl=g&_L$9FyA$}0UaB$!HwM?aE-cYh&w9118 zAc8-4g^m1Z$Djsfv9HWBk63df8An+d-F$I6JP8s?Ldk`jWm&cf=(f@0C? zGrLX)=rmYL*lO`s#`Nk;)Thi1nHz3?-ezQ|YRN_rix3MmpEZ8WJSaHW5K<~^lxS)q zd54&K;~`~H6-fj&r{P8+{|TrblLZl2i6ZL4IuBdK6IPIz$Fai58W9}r^G=JdFTCP#lyqYo(8ebu6EdiYG;CduhwnZUOePT z9bv+o1*nK#al+VBk%e#skxxB$cH8vO*@su-qiSeddR)5yi|}F2!r9;WBr`J~$5jAH ze4#+XlMu-}41*yQ6}>u&p+E;{nC zk!NMn2AYmTPxh$3NaK}V$E%)C9ms^Tq?Bvvl>s+=JSyNe>wx$Z{pIGWiTj`K-67Wt z5fj41{a2?BRr~F8MFp9uT5WV}tR%_@XJ-bABQFDaWTTL~20d!Gbs9yb!h|j@`By*} zzjmgv{_^F^DS30pz>ELK+L=K0+;?mLFY{cHOc_$7$QT(*LK!P%D)U?#6(yAWE)t?h zWC}$C;YI@@63LLEgi4W=$SgzT{p{T5ocBHNTJKtCt@mE*tmi!UL;e51-}k%swXc2c zYtQ%>W5&&s`o`+XmqBVNPS<-kY1(o4-q8bN)y9;1QQUu~-|Bj$bx^3++OgF7J?St7z^vbic zK9LP$u)m!c*jmS^=7wyrF^p|k2K^>(r|`M=X*|Dp{rYvld*89PC+Ca^E+xBhWjPDv z@ADm4U9stHasH~O+)yu1PeVPsh%0}pM5VZSu%S-36Z+pAiM1F7azV~B#l#U@)Bogf zEg!Te!(}Xg(4QRCjc>tdgT#_^Wy6Zl&aIqU+Wk(>-}_>i)Ge*I7?4ceP}{KX zzlRm$vKvR7MX$|v%F1BgADwye=KJGf1_;wCj{mGc!<8t=gd@1 zeC*6MoLt%1`YJeo?>~iTvyQ$|SNwUBL+ks7&!s}2`1r)>JH>qLy;;2{JaG3=R=D&b zIP`6j*1c+g(2Oh^z0N@-Ja&-_a+d2}>SOU>ki#{-KTsA_X+!!bY9;CcQ5EK~@kly6 zccy>!tQ*_n&plH!d_C)$u}5D2hoerb)~i;p<%GFe8;T#5jvi{D?mZJmw8ye0uMO7Fg8*`IImHsjy-TeO5^ zD{50J=N%ABG`7wS)`}SW1}KK#<6U$v)+c5SlN|~0gl-{0*JJP|0!=KYnKkD^`x<%iQw&kk5q6d$3DA$naA))VI6K zbfUPGcSn4hb5M~p8nBVFe;`f}+{}e00#t*kzoZ!ZcM*x6tyjM$Bbfu~{!axnNe1K| zb6G#eo|Qt~t?D{)m|2uTx9!lP`WQ=**VFa9SO@}NdR6{lc;zTxD;7*TaZ8KnCblhz zwd)0%qT4w^p*D3xUBi~Qou*mBnGisZlGR8l-3Ja#yfvRCoBLX~Q52bgfZX}e<{?rj zN=SY*fN4Nkt;bNrls731;fS|cES$)L`E_-5q0-2tGJpW7mz}(P@zf1<4EHV#mkAE7 zSC9gh;P`R*PJ9h<&Ozkm;DdkIqE!`VbF;zSx*OG^b)ZBox*VH*X%-6rEf~YndNnWi 
z1KjQ;6zun)qT*=?ih($QIX2K@otrH)7~;W0gm34RbnvR9kcDKX&~HzH_fe^-sf`S# z_{OgI`OQ^EFoeu|pMPfc z-%0ntX)Lf3+6_=cS)-b`n-z!Do@BqkeH$LL?Mv(l!7DbSl8pU@7-S8I46zG2#eNw< zrj#x5&6>IdfY=;RPm%JV67)_zPGLqxR(eC~j4J<*rO_90H^k_2E99nS=-k>RSEi3t zI5|U%iZuMspa&EhE@PrXfEd|1SeD<(^S!fwSRCT7I8nm1Hn3PBL&;jgILH#7Q(qbD zf79&-e|N!xNO8~kBd9ioGG)j=7uF#fG5FM_Z;YqQ(2kUxT%4{kCu0)XFn)Yi$_xZ$ zGH1xU&X_yx33a3PM(q)n6|8xjvrn<+s~5EI2%W@Hzm}X`kM8I=6|Ej z0Zu7jrZ8$EJ_RN7CMTVxJb7H5bS7uQY}^TC)?)cEq(O|vQK4DRnzdi-^Te$VUfo8e zuZ2?$&4w;@7<>-XFFUQ1vY#;m)e<%ngl*4cW>ioGXIa*M0J^yGp^FB_9%oJmStm|l zs_(j-U39xGh&ZEn6Y(ryPx+}6*1b`h$;Nr%9Z5r<@t(AihbR-vVg@3H{TQh&!E>As z7OcfufY*4!-;;?0reWko|#ld?ncfgbU7Ohr!s+(eaXj1{JyHx(|U^8 zmDMxnk1Pmi>HQnu&l!v?bBs_4eWv`9G33F^Uv3t~Jly;%S} z52j3Zipm%kb8$n4z@PBj6P*fV_GsVmH8W)C1@EV3+-m?G9?>&G??nz@^FuaZNZNS) zSG}>PXsuZl<_YOmXf%}r0%HnS*jKdRJU!^hphhiRlHdWJEcW+XzrI|yP8?5sT3&wA z*KLoX>+?~GgI2uRg~|W2OUAwaxUe*8-aPEjpaZqA^Y34p<^;sHedhU*+4r?IvY1Y( z5)z4QW9PSObS!VS{(kqn`b35|&&-A^?ZK>{EMoE+q14QUL6%pH!aoo%47bi&evL`Fn( zl6gFh!S36L%E@k6$XMIf+=Ci_QyCbeG79tlDKi%s%9cTp*VNvTQBmf)b|%qU3EGMJ z$uC0YO}*vq0LmK^_d5fh2 z#LFp!e*>2yV=BKj+VN`@Nml^8@6+`Phpp1=Y^6El%1+-_RBtbC+$H4f=zg;g6_eZ{I3l=Oe z)BQQ*v{u5Uk!e@1xaZhRRrCJMUW#m1p?DBnOY1f?+{WBU<)vD`OZ_bg@GPwzqgLR0KT9%-)K>+mjmZ-qX;XRe=HMssN@+sOhQ z=U4S{a8Olx`;u131H5XaL&@qROL>*n-<+>7c0+$i7QALdUk}6lPm=DYF7+BccFV4u zz4S!b7b9jXQ!ukU$H?1EPwQ0w$5SF-WyiGG;*>oNd1g&);q}C_)B|qvW!-iKR?lu- zmPaVIwrhSeVil)DVY|%7_Q@1|Lm|lAabl?YzAJ>AH`VG{N=mJjlL1l?L*p;~{76cg z5sB^w+Vrtc*irVwvK9<_@hDE~$H^AkUq>e_OZ@$r5%&+jm~P_G3f0DxY4^&Djs|UQ zAGD7SLeZUi{PDzQ)}Nn-CbXGalC0j&B8PJ-b3~}A=e?7~!LXB+iw0V3r!g$`qW~1y zTR;Yu4&fipSLPd~vF7<Y$#Yvnz)T^a ziF-eVZ&dbU8)FYk2pIB83=~p)v$`(wki<5KvC!Nk*W%38?;O*5Q!p6HCRpxyxGbS* z-tp~=CF=qkHfuJ8D+{0sOj-FhJu2##hP^{Plt|v7T1qyNTu$cf_C571ZYuje<)(wM z-ju2W_TstUWx||tUyd`(D7>S7P}^@E;dI3<13E4JX&#MmE=dxUelg@yld>?!bxGa+fjTf7!KWZ>uEJ3TW_|GJAbH}gC7zJ z$||kb$htF!2DvOf&$x_+i}D(7Ai9(huVu(KLP&97%l4z)jMi9s;M8UP#vpIaIT^&2 zg}F8wsutP=K8TtQRgTkq!O=zsWQz9U?`rSxg6ITN#X*@0 zrw2gA+wua$Wk}-nRSwtgI}aeKV(_JSVa 
zZ|;qjiT|*WIE+&8SBc>9Jetksw4C$rF2(`u@)n0F;#E6zhzE_sP+X@hnD~I@*gy>i zaHZ(DElo$^^iJIUhLg3q)y95{)|Ed$vd1Z;W)b!7#rrK6Ejpt0N_78Zf==L34x-h& zsXrN)r;L~Or%lT~MMF^z$UL+=y=U24_NT8^d1(kegBdTr6P`TqR*{{zD?G}4<5MbU znERhj!jpEcPYSI-j$^1+m1DqIxA#RO%>BQKP! z>6jZ<|7o;5VsII}yx52%k}4x_-bW!}4WCYbW+Hy790X`~pN%)u1b0JCZKu^nufOr3 z`nt}{F^ALJ7F=IdfuTea@*;MmiRHvAdJKtKPiOt{NjXw-GwddBjQuRKxmKu?741{8 zV}Enn=+5kz+R@!*4-}#6HSkF=xHu)%lqD4xk7*YyitZua4-05XW}ldCG-3bH=^VQp zu`UKS5l_mA&9uM?#rW9waDBswv@9Rto=s?#TF{bj#{194)LwSjp)DErqp7*$Q1s#5csaS;w>XtR5B;($-Jp? z-IrYv5PVUxzn(cV|dzV(f1xn+v$fec`=Tu5%H-P3jLLs>FAZT z?dxGA5yPu?|nI49|TJlbE9TN+VAY<_xJ<$Fby&(Jh zku~j0fiF}c=Lc)0K3k@@nGFyLSF2{v%J{!~$Yy3W#&GPjU@DXee?3NJs#gj-{3WBy z)*XhG`Go}>xf3CtwZ_oNj(EE2bx&yk%EqbBaAUf?dSz87E}@qww~tFC*{*V9UDzEl ziDrGoDz+l{Y`6Bj=mxsiAKKRVTtz~jBup+H>$C-;i}yW-Dc#KdORpVR{qctC^?1AF z`)g0UMgC)LlVO~U*xA?iymkjy0% zHx-#njqPn8@l&)tbbnrnj{y;-;sh4qQ<%rs`A|OYLH{q*^WcnUPq-qwo7AHPgUT8N zTvj%&i80M|f&u9mo^H>Dopzl=~Xbj2>>MVkXF+oo>hU&ByjmooUL8?Ugjk zIJkMU6Myw=+p=Y(v|Z&JELiZO?$;8uFap_NigPHwXFXpNvM!cq-Ctx)%zx;9JO?I| z1&(5+d({=)N21Y1KI`7LT`bzHUmEvFQFKIA?Ro@*|-zi zsD(}=5(gO&7tzYjV|FSmpwx@J&Yv?k^zimfZKzC%)0XTsro@r4?TuxAG=}~U* zA1k>dlW9J+T>9hj&T~C`5k0<+su#P5hckhNYeyN>(lW=x{qQXBinERN-+8erLfqWM zNDdzdqGA-g!^?oY@v$e@=>~7#K7aP}!Uqo~qrBD+YM@g9AQoeTHH@9J=3=|`XXks6 zIx6))x@tcs(N#plB1Q+hC@$I_Pl)wg;AAPI`1y*N0l~G1}G#O$RM!Qv?xVR z+uIC-ULhfVel&lp80LhyUv6Qa@Q*rxNPr-@(MGk4BEmi4Wk&MpBxiWCs?EmDw+0rj zDdkf?1btsKua}gMrPx{M0$Csrwp<|(Vy|shmh-Bx`UdTC3WmJ9a2R$={l+JDDNey* zfc@aXtJkc7JBax|qyRpRp@Z&Hp4+Jov1mm>QF&cl)EalGwYT)=rIFg*DfRK2$e|q7 z^VmACx>;S{*fdhRy&oua37$3K%&=5bKFeTSlv6v7;h2tVtfiMs)Sl{Zsie@q+J6E{ zF0gwjjDv)&Rm&VW;E3Nm_W$U%4n~*_UL1 zf{;1nc9uN zfl(A8AwT4lTdn+SkdMb`Etf+?#$@+!iWqz|Gt&xZ5*hRo>s2gAn0ixESl|DD_3fkg z@?GUy$uVV7Qn@r9k{Aw?YzYTpCVH;8v|0*lWe$uYb`YW-gh5D9u0!CQyIO&X>ml4Q zmypmab2)xTa%Xu@052R5nN~}op5XI)t^5?EoabI0+4GrdEE+yAzFA|1g}IC-0_DCC zE?%>pk+pweTt}JliHkos77!Ve?UrP0vk7S@&{`NIKIC=jbOd8H!rwfPSKuo#7GP?F zpc_M%ePM^MoKCTgSi5#DT0hr$=mk#71aeM3PgLBdct4bFp^x4?rA!mx_U6qS&9C8z 
zb1sQu=U1uGZ%bNTv3-{*QoR6mTIN#I$Uc!@)XwrX+_9G|7gYbbt3j zB}Jx4{`#M`JZwj*rY^DKea4Nfwf6{P7a>|e?Vff?+pZ0?kEF0(dTUo7GXA9D-xnlb z>wu6)s7W|z(G2haG6#)N3N=O6?`!{-wrzruent6B#vMnAUz_)WdKl~A+<=3Gm~4v8 zDD#*Zg@u=NP3kFj%%O*${&wNA@FQ?v&82cId7{n>bkWG$_>{Z=yBGelfqkSXK-&#; z%0>drjqF}SX4f&ye_MbVWn#WtP+-u-HBo_{f!zlumDPkK1P@t zAU7V&VDc8XRkEkaszC8UhP5jL?>;2#ye!|Q*bv7@5phY)j6KjBOsuW;w7j>b<{`WJ zT{Jb>qS{Z47fE|=qdk2`BJHs_ybLA`EIpM~j>+L&{zxMx1~rS`^*^Lwy!q53y8VJA z;WZIc&0eX)Ah)aL_iBaqLt07?$2yt2{z`aRPAMk&)^C2%z>OBU2`*M-n|FUTSocv% z4H=n1^L=h`%haVxwQCzJ%xpRAThZUL{28UR7`y89?tM4sV6A2kXy^-FMlh`@XCXO+ zH)5TeNwL_E%Fc95tK3C!Y-<9`~K%GXHp)I4H>)d`x$VRc?d}wXuV(a z9eboK2;@&Lcr8y3%cvXsgjVQhIwz0!bS`mpubrbo*TEdY!|1IYjtlFgn{uRE*RJ;e zQsm;eB;J((DVsLB7Eb6F277&s1S`$dXjIg{Mz%7H9~a(tVW5CyAufdjGRU<%%-6=S z@aXs|Tr^fJiZ=EyaJ$)4hL0-x&0ZH?22=`ZNkb15?EnM3`W+vD=0;Dp>@J-ac!_eKRX-l9;p=8YVc3)1OR6VHeo{g5~iqZyrzP5ar4p zqd5ioO%|v;I;j2h7j_%53C6w@SNKiG^jGv*KlkpN(Pt$}iA@cITG-peBu7bndiOa&ZvR@YRhi(A^-CfS?rad?x0V{l{3$|MvZR_hG{d z)@r{@ma%*KN*sf#k0qs_zJ3kUqJ3d#FfpQU^{;n6uPF<79;kFh+NGwU;g~gB;ak-j zesfK-srfnJwmcQN&ZtjLJFkQk6MhC_{2t7umbq}dvd)Ea-XT| zR?hbn*U!d}avVZSp4ZoX-b$ci+ zQeWG=GfTH?b?@EVf{JA$@=m+MEQ1%gM}xHN&X9N64Xu z_vHJ|KR>g*#dg8?kKBIAeE`7TJlhdNCphu?W9l1TgLOJ47CfkQvDN@fOxdk}my4B) z>ZX%e&#jkcux>!B=+-sLbNETr63F|;A8u#x@c&h2^Ef@k*0976PtY%9Q{J6{^V17~ zUFv7X^Ta|jJ^cF=-GK)>LZA?zPxTA8{kU$70L+r+*sWIUY){VIjCyo(0jtJJ+UEVM zo~K&6^ros->|u~}D~BL0Ev+T|p^_)fZ>uPxJ1o0B)Yfrv^4e@IVOXE8`8t#SojsRB z%$E$XoEG(Db{}L&50T@X`J=cN;0KbCj^6MZ>YjWpp}5B00ajAB)dL@Ls0m0Ek&Irb zdlmL>A|F#bXeMmE^bA|Cm(N<4=l+1fJ&E+d2FU{Fqy{@YX6qDt`|?qfA|VZz!m3FD zDuNR5z~R6{b#%L86ahiTuTg7TNcqex$ZWCf7wP*Z?>4o4s~R_N-plQSsznQ#3gJ~h z5K{+pp@5`pmI)1E0of%XX#= zy=v>sN0+RJsNCj-2NiJ&Zq()6x{bxC0iAtTr!^gDXfQgWuhhj7XxU#OUri=OmZD=$F|3)wAdR?PGerjQj*>t{OYMjj0d=NCJ!qe?`A;hoW9PPBMS85+NILzP@u=2a z!}ck;%*RaPF1!@AsB8i5Td3aDWeiC52Y^f_KtMdPmWM}Eu&<1{xxHJrZnATRzM_S( zvG1Z=yDZPvs$Ie!Kox!qv2;Qlx%vG zScc}!4r0O8!%X4ZNVIrg_`f*xV&dbEwZlve)gp_8O`rcoyD&w1Iyz;Cish})KNkyq z3VY66?nq{he)_I!*>1D`_zk 
zUKab@Ua__KQ@gIRhC4RXBE{y#(mi90QmmCtaYHV`_Gk|U6mAe4m85$7-*TpNKc_862+Z( z^63XPiN5e3MuotMwbkDj3wgV+gEt)(5w6Qyr^VTyZ62j{8(CkWzbW31Lh@qQF3%0q z3;n_RI57&urSjvBv-M^eP2Wq6H0vqsGK7fZQI6AHJNrjSctOZ#HaagMmdT6(r-T8i zGz1u^Yg)*SbEI<;5{V*A*5*nF9h8*=J?*{xYVXaYDRfX$(db9e4H>mRZ&Bb5MkOjs z_OB~{Nzj{0VFOHG^1JEe4)S?`W@5oWm&dR(!yWu1&04wiN0#bRx&xTA0cs~?;4bd~ zhf7p26juu_jafsG_)L3mN74l&MMODl%n*^H|N4$IUnI*Hm!N168`qfIZ=o5btqnwx z3`|nfCDuO;y9YxO5}>{os~}qEYkf z*gZx*YlAD8Ib_>})(v-*VEIxIV;xzCI`_>_n|}Ws6VlkmHQ^5a}JvK@vqp)@LpxV zqI$==ifMVA2#=dR6LpZd(sKNfzqVIHRzHKx7lfG@JE}6o$>i%n>|l8;nQ_s&9}3B9MBa z>Q=Xy&uvbUcBbHQ9g3(4K2FSqVlvU&oX&BwH?-HKSOpq#9a2n+9gA`PY2T(*k&7fA ze$7ExV;%Ityj(uJPxbesSl?%9=IyurMC+C0*7081`aF?j3BcIDKOW4|cMHDghqoVA z(p^Lwn5vb2fXoM`+?&}77X5zc)LGIPsVF#~3+@RzMI&z*lok(B$2Gb76BCiM#d=6q zn_^`lH4lxs6R~VFDrKZO_4ujH%EvV67*nsFl>24A(4ihHjAXV#j6dnO9f1OSMWR{m z7HOQ0cr;tQp45?p0Ew4yqDneAgdgEiAZA7+QZqOvVs~MSg%yxDFQr-eyD)0AC6I5D z#)<9L>A^~&rzssKU}O)B>+Uwfe)`EPtz%CE>dZopUQ*Y_E4_^K4B$#emc@j6=p5B{O2PC%a^DE!QHfp1A=}-fGkjG(hV4=OA zVIe}(*btdGq!kOw8MR5h*F)w3$7)b!eSO8)+%_$-0)h873%%M{p%YJ0DQiB^XGBZi z^;^dxW6q2rY05QUFJzaw1O{LJ)^Xp6$GX7-QvLTis;O83H((PT3%8_!YgB6LZYqqA_urAVQkqy&lqO^O8v z3|Y#Y)3PB{%!8Sv;)sBJ^>diPuPXl3>&TF=DVar@!>ln#s-=j2DfAmu3*nK)PC}vE zBa_ye>AqqPlyb|~t)rO-Vj?G)HcHZ65zBF%%tF8%HlmM0Rhv5My7WY(0{Qiw$n@@* za;>8vzq%6~vvp+ijBtrh4lsjd!iN%G?oUUb>MNcPD^!HlGxK@$XoZ1~Z`4lbG+i9? z*!TBuC44{=(ri;hmordZucW`!WTeDDdRHuu=+wy=t~{4|JZHPT6|FWz6}&xt z=Bsb`l^V&dZ(|vY{8j(eCH5$JA3lGKqEQ>>4K{7DUwwtnVGeV7{Q0;46khr!!>7EN zcp}?UfRJd_=^1kEh-vzAAVr7`uexK;9&t}a93$r}et1j8$ulIfS7Z$?qDcHi7AJ>k zWxnf%HG5=Q3K;PudHGR9hEW?jtTLnGl#oVWR$5(9S$F-%)UulfIMp&|3wZ3^Z`hNL z3lSp(8D+Eui$-2qjiHlg@^x8U8nUY*(Egdp?PXAIfB>IgNq^{yXF8wH(AK zt@GuBuC)}I&UkX8ynH|&*EW|#KZ`urIOx>AT}vo*0INlBlwlbAI6Ul8)gwm6r}=6H zbrYT0_$GaJfuN$EhD7Ko7!t2;n-;w-YQeL}+x#_foR=1Wi$})m;cL6CcJck2*}T+? 
zZ8rb{ldp3l%|jq#H8k2<=o4>`-}N@0a9KPQ?{haL@;ab*sF|9YbRZrf%?#$nTs)Q- zCqLe4a)BiuOiCuav7=$q1q5a7i*R4;lsk`H|0pcv{39Qp=BlD~_0PPHDa=8YORo(!_OIh4Y4#VWQT(;|T7Tw2)NLPL0Vt7oGgKwRkMK9Y_F`6(o_o%4lz zY!M4$WJ)vUxt=n8OEcXI6Y45bJ$TL$*UV^3i(W?br}T=WoFwY;NotxWxIbI@t0<|V zZtWTK{Oe%G$HS~YcptB2(~|~|h^_c?u_9QJjKRq2?wtjFoBXZ)!lFVNsKT-V&JAcS z$@X`7RoGcqO<74uWERDY(j>eaNTN>gq-!0Fj4Il_OfFI6yzEhCXne5!_38 z%snM={iv+4mGWo#4ehWp>2`kVTC&h~L{`EZBSoqKfHhi0sB0 zL5Ke+>w@XKjpYvGbU!i(i4FuAeb+NDupU*Iur7fjdjjI#18`}(&hDNqZZJ8NhKqgH zBGddgM$#wN-`phWBtzvbmzM9#+cEriN@fHr?K3Nfl>FT7TdKcq5Ag(0q`aVPA%xTM#N@ei{Y zK+}jQ@KDV5-=1Q8g}wq$_ZDC}vkt@Lc3gq3qYRxVlQd$?wv zQ9>(ic(v(=9y5(D{6Cza=$vUB0{OSr;O2^BF$9npObUr7#nE#? zqcsD3OONsw?Ktf_Lu5Ar{j=0aTg}leFT88%z>DL&HKOA|H?m`6oPKiT)shTG zHtTII8J-R6$rV+7`(cvShj7CN9+Wd}FJ4qgypCqsc$T@GM$M!Huf zzwKk>{B$Vw7|Z}kG+me%8iDm`7D(jd0<22D?!z9NO0^-=%$w(^354zm_95aW_8KS| zht*vsRHMCITRFgfQmM+`%Dlysq!{8u=uWMz!Z34z`}VcJzDz|F>VUGE$wnVG_;Au? z)4-qMrWS|m=yoH$aJpC#fUYk=gsrGVCIOW1Bd3)S5&9ESR3oflj$6jxC7Cev=_fUw*Of08-hb&K z@_Kh@`Rt03*gDc!lWvyu?D!^8g5dbmRxDak8{KTjOcZ;c5pL)h7}!WEm$o;&$ZYp~ z5YichuDm}O|F-L*A4;Ey6U4tD5KgJl(+|O_ktIgr{x6wAgd325wxUaC4FChmmy*5KKu?#1Z zuw#6FS4*E?_BuVUq}CQ86zj{J59DaHXfbkFeT6z!Q>n+%^yXK4(hX5x=6*mA!M>+aDKYBk^&+_MmOjYZH#DD0jvG##O5xt&lD#~5x(yaW7^gE0y3_qx$*u_xvME+b zN|8J?8yioB@7N`TbNOmO5aCiNxkDl;0_|jCSJc7jtR<6bOLkVo1<7zV5b8`})~?X7 zqKQ;@pJ|rY%o_t?g$5&#afK#vO+?OYr=e-lSv)*=_D8vtVg-XY%UzMZioAM&s6a@T za%-q^9`#y~Lc;!nv&wm0_lU8N?hM=S-MBGR>Ej=ER1{qkS2joys>jiAq7#y@gRBcg zStsH*c${vGN8b?zGC5AoA$-S*tnqoT1@f`d&&FVclA1_R_8GdGNGAh8SRn@I4=-;1 zE|>Fp&u-*0DGFtE7>_fT?;P9LJz^J+Po58!siD?}U1E`Th_qA4Bq>-#^aH_cZD9d9 zDitaElvp?x{h$s^JV|qv0-92$Ka(X@qq0LpK~qjN`{(z7ukYVm01Mfvw{Z5`$NeY3 z^ekmz=E93}+9`^L3R?nKP}BB_ntg1QlO9%g`qE$LSWVXMqok&$qGB22y+Zc9!5>XV zT*q+EK4lCzI>a-qrCVI1FvOJokkpRL;73nOTUcHQEvtp$i-Nm~rF-tO-;iNT0ebz< zJ!?hW)-s1t@A2{6(bC_oGzWjp{IMM6r_5ZV&?1Do&Dd-_9q^pB_KP>A(ODc%WFd;o zB1l_8bpF&@;UYDpDlpf*uw?$z3tsU}+PMUfO_vhyP#;?QU-S8NdmzcQ<)zsc$Dx7k 
zr1uS)KBpPi-CXWr#p%Qe52Og;gvbJ)DhC$jNZecMUfbBf^3vKhf;FUF%WC1+d8JeD z4685eL1p{|Qt;rtEp27#35oUr4@_i9L>(ky%;f9{TzCuf52$zX5&GloF+}C|$2%_S z65@o!PF`MW&&v=s0Q#F>Yp@pLUwf3}4v5D77)kP~|2?IJrOzxqo4!oG?PxyA%@Bk_ zHKXTxp+YMSw~>8#p{yV=oHVLN6_`!^sG4}bJT^|kn1tUjEm1jh)J}x7uC;hKW7af7 zL<$1}a@)%OoegU`Lg%}f`1#KNNgbsBB}G;ripvXXxMLi&C%@k|!CSv)V*Vi?b}h4J zd6bQRBZW?FB#LYg`BITL^^egoSbJV@$qyPBhoO6&uj-!Tf7Y@bEpHUv;Kn zZ4>|}baS&m_3aEOC230#_mH$Y+VNjCU>~fB5|X{%d^Na}k#X|(2esC%%bu6{1|p5S z$JgFuccHV|zE_Oc?6=xeAfQqUAU33zS;S^IBDAy8xEGOXj;4 zzkYHyPB>*`+T>^P;=xR)yT?*Hh4GhrESZzpS$wW_2HMU)7kOTN>dT;OLaB})KYpP} z?Y)6DlD&&-y!m%#*bs|HMI5Ip?$-QER95nDq13t06E~|y@0A9Y)w9jHB95`$m`dn^ zFp6)`)MXc)X&~t=euDnTA1p^&=yQSfiJD)po-wB%h=VT*z}vN^p6+>b_LI$TKZ&Fo z2$M(+TzoZJ@zv$6;|$;M)D>AsLPV(gz$mSE6ZB?xULB~d(6QcqBw@?H6TbW?%D!2m zrsh$U_?9s?Dk(&UuPUo-!5A~>1WMDd*I*}_W~1KL_Jmv=(vz8LMt21p!9uUo{%Sd2 zmcEE^M#Q2}T~!?|B?rnrR@70iMZPL01(=gHT%YC>q0xHu=z8kC4nE1ab&H|yg%?Bp zIDc!}&+$NA8dClBof3h_)zwu!B`G@chXpvpA6zK%8Gvs{ zyWA+pmDK}`J-%YSDW^n8N4%9-yYvkG!qJGXhigWU69+=z$r}=;2uZUzKh-GN81be6 z5GPYfaIN!xUmi$@wd0!4!Q-CS%hbiTkZZB0G0k!5*YHY3Zr zMtSA6YS)Vft=L7>N<2fp-{d5VqeU++EHiVK5ab;wuOLbD*Iv}-s58PNvO^h;p_}AD z^AEG}F>3AWS_~pT2;zVZI*j6bq73xWdL1r|`N#Kj;ydLRf-tqz_RnVPPTGqQ`ZcNC zCgd5I-joD2p5zek@>m)4j0scrA46aCc z&kS!WDG`?DAbyE$qON1(6}GV*PKUH+IBkqtPpzw>6g2ndm*qB<`|~itfykmvP5}(+ zX>=)SQd)YEx(knunrRrsl^;JZaB673o(Nw~=4Q0CEoB>o=8DTd@6hLz*h&~f?kqz* zY&y1S-N#}${|WuQ4-tRGiO1#MLes+p7x?`CC4J?YYMP3OCQ{E@ihUD|6E?~oNGcYb z`uaL2?kIe+gGkm{UeZ{zPNp-bRk}2wh6wzW#skdV>B`@H!4M-{uq;vrqY7gQ$`1Fg zb!f&ZgAsHe;$bH2+^Y!4OQ8Z`Oem(*3!eTNK36%Y6soGC^6dSZ0m$o|Q@-qEp7ibe zHMN62(xH$M4O#`eei`IB2QvdExqe zdvU_QCHmU;(x7_YaMZmiw}g*HW!Jc^hTbqVD3_|a$x>|`2Rj|3uCNE+DLO%Rx%R^$#)NB*2Ez` zc{D6;DCrUsMz%e}NkH_&a`sV2ga4cJ8rU;qezQVqrKYz{!b9DTIGk>f}J+Yof z-Dd}XzX==B(Qbci>GOMQN>xx``Kn9xE;cvo)o5N_ z+w3`)F}>57H?>aslI+&wZslGz-Q2yaX$ym{rw*RdZmVX{%GCJHyiIkj>x`&-esm0d zkS1m+?>b)WXB0djwMoCC>SHA{((=8Vc|UBsVaV4(3w!MBG2QBUC1plah?s```t{4d z;5E5v!;P6^R;(C+f~_fynf69&+xMD%(YpP1p9IFo)DTgH2bx(&GG;KJ+pla=1nv|e 
z!o}rMm+dw3>0r{pEX0pA-&wRDZWE|DU8;Y2?b@|#$I8{3Q70cwFm*o5AZmV!x4L4- zlP=rMS_IB$mZ;@(ZBUDCy5Vp>oqG3H=J%M5AMZE5>)#tUHtf-}CnjNf?iEkeKy(k? z$5WLZT8);^l@74ys#VXiaPbOi*g4|FiLErciWxXImRz|bB--WUyp`}1r%*+zCUyjg z>(9E-VDjV#L-jf++brn2y9vMVDsM|!Sy^Eo(z1oB=OzC$p_?`}rg~^xu=6=Tt}G`` z-B0=C$&;xFSntxYfXpo>t6rxb?Ch5cVG~FwbtqC(Ryz@Rq_(n3_mRU-pXF*G5U4Fe z@Q}597*q_+v0mlt&)72mK^cDYs((?F85C}L_LuwH{5I5i8pN(?&29wn&yM*E7SvH! zS5HbxLiknx{pZhqVPWe0olfSexJ8^qd5gPKb867*ehpjn=I`C9?0S6e{P}<5{P_%x zR}uvN#(KM-o?qFVqor&Ty~bPp(8mSy=KY0)RvN9n`}Da?>NC*T7(I=BRmvt2EXJza z;VX*?hwi8Qj|8nwQBGi8e0fIj06Uwj;scvLZ{X6NtI4`dlvIW96{de_Oqc=}e z+OgRxJz?C~u~+z0+xF~f+_}!1ii(?^dj|g~xa8B3NcjHC7n@}&-DZE>ZMFxJK}jK2 zYegT3GYSQqPElFeu2N|zKq%L@ySRF{t<|aAm#9Y ziKeEbw%u63I8{BWylvs(f&77}C$1&hu^H$nU7NU$vX zdwqCb7w#gEDtr!qpf`HUX=@M3ry`!1nVVmxMxE>I+~#(UfcpltO7~BO{*bo%gbcqxr-N14Z5*0Ai6g% zE6|}b8K@&UcB(bgyF^Goqw6_ zVLNLJ#DS@lh95PCum|+H>)iCWc(N%xZTfe+Kc&Zt7MA|=IA@ote>7$&=Q`-Wlb`Xt zd740EUjpfeMT(Tr1%2DlBAd%_O>0MU>w9(T)KMrlZ{B>8PZX6HefQqI%P7TNFj(f3 z=>6!dsd<~xn`UU38y4lbZ6sHXw6<=E+`NA0I!xyHQ!HJ*dbNlWI3qLjFJ+}`*RHkh zzeL%&!(@MzmM#4#-JcN|zBlSJc<|t|g7X81|9n4^Z?5CvSX@-(3;Fd7b}fK`0f!Zu z3nf0$`4JbilT4^`5a6N;1{tP;>a%NTak3kMF3lYMgtQ%;e65gBZN zL2gDWABBY%D+D`lzcGi5hD%Hu0h!|KnXh)N1%uMFigf#Nr~FT!?uU28W$(?_&!GF7 z5c+B>#_u26MlcWd`X_l5>2tFAW{1IQ-=7>gdbF4-hFpj?vt;jHGq0MXSyEOO04|%y zdkAc|7{T{tPWVZ-a;>{<_5(PQn; z4`ZJ8?s|MNpO)Ue&}7yX4%yKM%QgfA^uLY^^as!MK(&*nPIdpd8X&fsQqaZ4C6#;n z0rKxkPL3La4(pZHhpeh#9X_=jLk7D`ZMM0iQHRRPN=|=wkHUpZ$60rvpbF%~*$=K4 zoS9*CletR;YCpq4gAPiYZ{n!P!LWbEJPB7$BL`J-lvhyW&b^5fN_qPSdo+Ua=Lv8D zO1ABccK571!BlMqLz=GC%;Xqd{fYDE&Fjr9!7>JA(_RHwck9y!jn3v&IysVv%|jZ^ z96WJtkAVZ#hpc>yt7n(@Tn)F0fh&*%zA>A~;P=5->v)A8$ z{CEY0gY}Lb0RfHX&z-xDqHG>DOoIjuQepVhO$OSGB~y&%2)F#&q~tdb4tmun0;;!e zg7>dgk%Yq%wXo=`hNt%g=nn1Dr3)>tdYoD@{@NBA>d*W7NtjJq{k<=!=FaZ5)(6c) z{7AuqNCLH=$z|oy(o|}8G9h98sZ)J$BvfwKZW9Hlr?+>P9zB{UT&gM^$M5c2mx{3M zYBkeQycK0cc7V=^Y0WIBlSQDvzzc*1`|;+crs=`Xsm*adj@=urZipOOk+kFX3ys4W|Jn3!8?17y?yw*AN$Sryx+bzKz9q*;2;M9 
zoA$vtVuArdJ$^jRd2r$<&z{Y3H$7zap|WzcnOOrr<-lRDjgU&ME^X7UU0P;lBf|YP zc!nle^W54uM4e-mI1E-&QHfDBF8DkE z9J=oBziX3rcMLAPe?LU;wuS%YNy{~3t+~X1qpF#UwUePoc}wg!e^s>}?5;zfZL`Ra z3_DxikfmN7)Tfl`XHe%V6e1uz)UnlhbKB|Drp@82?=p1NM5eZF@7~R5S6U_a@N#$W zPbR5P9<-)Nf)Cd@<7r}QS|U&bJeG0i&Ye#e0K?B+Ie!NaM*bHD;=Z|j|G3aB zqz>(HdTDBstHB|GKZmvdY-$=gOYR;6i0`?7L$fX+nhd;-HUVvn$GQr-+Hnt8o0eC; zxY0tZYgbJJ1GOVZj+{Jm#*{9BfySn2W58q9>Dzwir0_E}Ex$T%po)#i$J=topn%;pM5Ea!`pI@?KZPLSaKLQSjU0eA)Tp;4`uhd@UNl`+XR;0+-G{!H>IbuVygD|!^zq$*)7VJ*k!%etk=kDlZ)PMhG%IDnm??JiImbLlDX=tqP(Z+A8 zrKP)f^{A#E-M7vgY-u^`cQYNojMF_%$6oAs@o_H|mC#<2m91E`Gz4}#5T37yB30pw z1H*i7`B=5l0cX~QhN{ucnmcEXMQr8A6J}`~dyBV<^Bt9#`-D!*v-UPk z+VrghNGO9aKGH=c#*SLxS!wA8T5rWbiu|Cco7;PE(R1o#rT0GSvdodCk3;>G1w45y(+vHFgS0C{)@0KT ze4$>hRiu-m%uv0u+!Gx!z_ISQ&8%*w!_u^mPr|lEcl!9c%^(K{ht2H=sq#di9%AZk zSBaU(Du;QUu0yeZ%vWX!(lrDwo0j!5SEcU|gbrW*+=+p~13Upu=yK0a6s;L`|4i;Bhen;1@pUaa z^^fQ2*_w`5b{Cj_oSaG(MHg-Dx*=Lzj}Z^<1q3pEu|yaY*xd%rn{U{+ zZ{K}KX1^r$jxsWR5;$tfs&*_sYybv2@VpeROL7m1c-_8z8hiKcdsbGK5o#E1aylX5 z8q)8-aMt6Fy&UwAxrZJz&$SfJfC{qlGE56SmOZ_&+u!NFJ#ltd--V5M!7Z~Gh;6_G zhPbtUy5O@tBt%(NRf&oE_JiE&DBgejFkg}qt**Nd9_W4jX=>`c{m`Pbv2NG&f{&;f zoCsY$E9Rf#Tf`#e%)zItJW#%0k&gjCG4y6@*E(I7p5|(#6&7~n_oRZNLZ2)jK6r3j z)EJlft#h26v!)lAn$GA%3Z$+1=#r1o{^4t4J!cKSFVz-Z^3-U{!@U{YYTc&IQeEBP zC&CEh{#Agfh|GO?;C{-z`}IqMLh&d4x60G+ z)JXwK898=rBR}P7NiLn~0Y+A*3egagl04(->lTfylS4Xm)7_RZ?-ZH7&i8*zs7YOS zZz@o13l6R|aNt0}u!^vDnibu)-vAEvkf+qOYgfsBybRiM3U^7!&O4&|?cth{X9*q3CqDI2%s(a9l#)VQ_0Pm&j;Iq2*GS<}WNg3(KI8%ep zCvg)Xm+KW&GF1nGsOqPT&24G`($eEGnBh;9|WF)`f%k4OGli$9egvrV-X zGP?NB+Ktw)zLZ=?##m8FBe_pNpg`Q6ax*R&B?Y8%FVZ=teuE6<4$ z!OPdL*XKrf1>G>p8)s`rsg|=OKdBGWtY>fDZ033HO!~z|ILUUm)~#Dhf{tx&78wa8 z|MGh>=*7Q--lp6tZ3-Bxv%2BUjCsfDdiKj^jJR&NbBpddNZ;p_1ca9>{FRTl+r4_9CZrD?i=nH(DMPTs5v9|cRy$C!@@@V-m$90M5^ZTct8mH7E3{u!z+y&a zPBYb}*`SkNW!YdIaYAWpX!@RCG$B-C+HT@_o6ficet)16ka?Sr{vN8Km}Xj3hclPw z4YX-RI=c!C^@nkri?q;?RvH0ZMa7pI_c?fjC#GXl6%f#XG76esty>q}KKh)PP@E3 z&^&Sq0tG{_k6Iji7rr%_Tq$EnTr424_3+h(B7%z9WlDWy+2o?KNhe)h-NIA<=tL8Q 
z5m*!oH3sDH+pwWN^UJfB?SW?*zt>O&Tr1TV1lo*d=3Gx5Boph79$mb4aXf9}U8>KeTnFrsma{730GZ1hOB|U>$cu0BZX_Z^|i)$=3 z8GjgLQ3?xgxR5$))~u1L_fg^HsGl1H zvYpw#!We67KV=tp_p9{X2u%^MrkU!>{-g9_>K0CbZu|t8e@lERX&!Xy*|RBKdPsyi z@5^VH&YsLx$MiJ8<#ha0c&!$VFlAHfgS^{pW&;*d&dRaF6qC zbv7#_BNg1and)Kr`j$(N^G3S#>(>Gsx62|bcx7m6uhOw&b7ZLBe*AdO{~B02`}f#I z4i56t$Yv}2df6p$XL)< zAUja>uid-bh=-NDcTnc{J6nQ+u7S2r;i7)Q^MjIn0PxG4Wy>~$^V<(!-I`cu)5Hy_ z^8n^#4K4io2fi*Ob}UVXZKg9Y2Pm_v-{OfIcMiD#xjg*$&*ezs#w@J*q#QGn0Is5; zal1`{b&N<$vfd0&=0(rtBD%P_{RI*^@*HVZ!@;7BcGhe_~O&_4ojnBw@f z_w~DX&Aq*=hWfuntS)3<)YDIUqKCpTL=XeXeZs*Vt+^m{ued*H_wEJzJH?&>rT1q2 z@8>se?!y|jj`U+a-!i{`GBNQw_4c;?`-7N_t%uz7GFnVJVq~6E3~C*- zO$@5)wEbZVlR5M**k&-OS;ygTKU5UVt`IFSUca>$( z^IQRN>I2oQYGRlBX8;OClwDKpBFoFlBB)lmK~pz}zq@;|Ha5RoxLOW3-1z45WpsQVonP6D8|4oJrl#FhcaW6ACg+!zHe*zR zMjb{7o5gV9RRT?*+eyKc)d(g;V>S|l@5*pmvJE#Sa?8c|np}IEj@iLpkkG*^E znUcQs2N2|3>F+>{Dk7|_>Wm&WDrRsiN^avti@MUJtc#3s?_dwztRAZ0OV1@I55l%j zN!}R~Ym+T0?b0c_`;cc?L;DQ*!&OM;{u&#fx^Z;V_iqmmXG*y_>Boc|Lw`_K`prRz z^m(OKv$vfbzn}q^Zw^~Wh!wwz`;NbU{(O(m86ZIz^9Ok3xGrA2*zTNrLs+TnG>2_8 z_L(iM;gVmzeH(<&Amlnn<~d3Rn{yRhquMQ?s(bPrhy>M*2L&E)xFrf=+?g@Vt`T-sCd>?b}=I-Me>eN?51hSn;j<@a=Y1LGISjx<*+s z|KOo|nJW@;`0(E}%ML?Gg$J2B_i5K|k80CHJGD0c9Uo)$816=F8qx>I^>58Y=&1@} z3P~27DK1PJ^qBrn9RZT<$-TzJQt6n|$eTy%C|}*ibW}}6c81P$C#pEQRAYbx8}nAu z?}x}MdwjZ^Op2`cGmXXKp$`wGe%1k_`tN<}j`$wdhrK8)!03qjFW1`ikScnfmSV&5 zP12n(2uO;8KprMwsaF@V*U451Hd_cm=0`6f-g;cd4d;s=7PYl81zb=LNEO6OjG{H# zE_20-6)r1QT;uCo93I_t!c{E@jP&5YhOZnq!?1e;c{=S3T{@8@ik?5e;<(=IFq}bl z^^k8~6Q2nE4Tr|~y5I93hxzW3v=|s<@_HuKt~?fgVS}bk{prOb)o59oxa_ei!TQeW zz!z}IV*L$^!lcOfJp(5EeIn%P3Y-2KzVuj0u1CN9ZQGU)CnXL|A)CFM&Aw1B^zo@) zEj4UvTsw~MqG=aA`w06XZ=rK%30J#@tkg%>v4w)*%KiIRX*V_oCQ^MWPB^Iw!33GM zfuy;9vP#S>8Y~QCZlOySHKmv5nlH@`1;%r?;6v+?`qrT`xJ(%gpB;dBcrV6(L3occ z_tPt|XU~BCQPEkm*6O!VJI;Uw#K(2$W&;iXr*4GnhwgPWx7?>&$Gw+X8i^eWNn1rl zbE2N)o|xSce)&$~Oi^B_{cTR1iM)TyR68B6#rj|hB01guM`w$$k5_6dU%f> zKYqsjiJpGCt598Vp%cbvS|uZK{9K+huFvaY80rlOL!ilAfD`F%3DjiLL*~mpd-rZD 
z=mnT6Wtb;p9EuAw_N|Zp2VgDg5`jvPmoC@zmXg5qUwr@mC944VTyw*i18asjxG4uY z|NP2H4_qmtSAlQR=vWtF&*r{IFcnrJFHPItU$ytK^kayM(wm_FX?3=j z5ItMECFjna!#2KGQF(dzVr!N$XydgYcy~SmInVDOwJ|yDv-YQFO)Jdj_pr}jz8HKP z8_=j*VQrEA_i&#ah01>m@T);*v;k6*GoC;5T%=*?mqXW?cM=t=LL|T+MRW7)>-gA{?tw=B0cPGX zp-q5A!c}bAq-U82y=RxQO3kFf1r%ZSW6dJlhG<`sZ%XTy_^px<=X8oh|~n zhd^aRVLSFHlZo53HddXsifYxN0~lWRP99pfg4XpnPVGTtQB(|`3^?@B4qnBBZAwNT z16=IQA>fqFYZNU3Y&wTm|bv>{z-|NJ>rC z>RgOT4LNuhfVKrci%#9T)yHe?3Ua|vr}NC3QXIhGQw7BJ(YAcND?EH2&RoPnUCSZ7 zW9r+3I2OqfK&KLM#LskL>GS7*QG|CgG;B@Wr`o^guB91@$VW*b61}WXC!EcL@uVth zRG&K0n5y6PZT=t;M}P$_9sU<%Zys0k`tJX)q9jwL%u^|}$UKuFp(0ePiG++r8KVp- zL?uI!d2FBonaWHeQO3xWp(4t#QKn4&o>%tgob!ErzrXMAxBob2@6X<9t@r!B@9Vl= z)9qyB7#1GBf)kwBM}xL))vk6~eufE~UzyI!IrRqCx?Y=WbAE$0FqjSj7!R!Yxj1>; z1$>#`C#Jk4oXse*5^Oup>J?#=NNoQ6{%Ln`bT=UZ1t&dj>*Q1q&)qs=859bxFy!#J zcaI3QxegFY?c%~@s!KjA>N`=kj$6X4Qn)B(9Ub*VxCq&nhbLy%ZtpoFY6L$>@p943 z?c25~n-N@AulK_kgawK~(6|Is!-zKo(M{mb2uj5k)W3DXJ>TcyVMB^F`ihM+b+niT zolrHb(6QNF@v5G_+34eb}&fK)Iw^^)y%0kdkNDIEiFf@x4ojTyf0;Ga&tGzS}1kUvM{;@!6C{$24gAG=b|2KFn z?4oUkuElr`POlMGs)aM_9bWCgYeT_Xn>8gov|rPaqc7&>rdU?#VKeA&)5I70&dI9N zpaGMS@JGtEC2!xxaNR{Q%ndDY9jJy{txW8jfh4F3h{@MQevnbwvku_NiyCKAUP%8u4HbKDnXsO;HH-GLC2h+#+FfbbjNa5L(9?`)*tYv zqA>#t9mwEm)7r4H40BGYf`NIt2m0mW2F29vsk@Lq7|#_L&+7(vuY zAJGS)%P6D;51Ys%l>wTeWVI}M)+XAQjl0jB>7&>~pXFR^ww763YJ?nrtzpLFIY#NxRA>d#@_QQl=b*Zv>@&B<*LHHFg`G`)`l9U$_?za zM=*jk*s02jI~DC7qXN2$X!c&*8`MUP5G&8}^>v)h;3(s-@E8qtIMX-Ef5pl~i5kem z$XOx|{Waj4At8Yr(dpd3KKs4L^y#bVqeX%q@0{H1Uv(mtylS;-mr&b~f}sBKO~2JK zF)iixm;&d4s-7~Q*^_NK7)FCpy4Us<$9Jt+Q)`8euo3mzySWtm`}p{TEwf^HJ@T80 z>f)tK%Lfn0@v7h6ZItV{v!25rzu|#G2zNvJAk$htJ{-Fyi~juT1p|*3Vob3%tXSo7 zFOg_%JKLSIMqe|I!G3w#y)Ff$0 zRIM8KK6e+j>Hht#1?xE2Q4R+HJ`X@Nme92aPFg|e5(-GLGD|*bSyTv=CfX7T0VSt) zV4nT@uQh7bB1^~BbpmM>?dIJbW8G?lX{-9PlEI1Ii!IXc-K$qC#h95lL@S6S!U2)S zQZ*ETNTQA+{H+AR_&(8dau5?*taC++$XR#s;>Dgz|MaCP+sl;2q#r}@K>62CtrFw^ zd48b|d>u8qMf>)(2mA?IOw(}Bva3;C-=%+=SZK|Adw)48R)V?^FP5=$gm0qoD5SZ+q}TjRJXYvu1VdYi>S)do7yTg=W12 
zjGwh&K_VhY-ujh4kcfa4!JVq>q7> zs6-cyt<#~mX1~2}mN9icXOLhV0#pHLb?n}K2#P>}4_z2w0FsP1O?d)W@Jhq^@4x6g zFm?wkYdkFvI#koo+lqXp!x9#d(MA+M;4e@t?bsS79;mFofA{+ZTovb40hCtBWrLRH zEF`T}$6`Qa?Be=cN+(XX8+Cz~)Qff|1G&maNC}Huy?7W>ZSW$c-?&i=7_?313d^`I zTqiHDNtBR)unf{bGFFoj`4zl)QH6DfH=*u;Kk5rWWN3b>a%g^jpSSrzzG?!(*aG0B zi?m3*8TEEtHFY;4M0Jz-l)0CWGje1dfy@{t)_>?bapFXF@Lq5a(H#_&l!UUvjKKe$ z54l~WSCxlW>#Vv_5u92+)1|{N4E}Z5XSFaeW>8#ec=wocy}|dNKcPEnFm4q7+;Myp zvMdPeaC#rviFAR^r@bdlGG?%Eu_6#{Mk65YHES!)0WtAKM~$tOcl)!)n)R>v)rF32 zC6CDTOT#~OViRyux^-Wx3}+A%NV@p`K>>$Fi^2pst#>`Ugh`Vpi#AnGXXvyLjT-et z#_E6Q7|J>ssc}RYwP~Y;!YxB;Qo&2h$uUVut3_R#nwkpnTSpQKTrvXVkAD30$*ynv z1GmSnr)cEM*>||Pk$2tZ&DY@uP*Z?;ih}ePv$miKtWxJmdHt6Bka1gaakT4ekj)`*7&Ci@S2NkHclXy3 zw*k75jLxBSWLn5K5Ee&7MG=c5|D7DU9MjtZ8d?T-=9fHiVaCPQprx&yPBj&`jX>%& zL(3_QD>vvFd=SMqjeEq79W`5g$XAUWJC^RM1~M!m;u)K}8f=KaheUq0`F~sF}NgE zQ&SV&O$J+f7JISVK4ax}ikb&}m0#)0>X@=Rahr!6fB@R7){gj7ewZncdBNrQG~H%> z{S<~(iLSRRyr5xOBucQHg@esLT`TGz8lqgq7$@u7uDBUrgOsBBXz$P5?oQ~rR7G$C zrb4F!E^0%E4kZ=uM3W75O+U~~e*E@r6`?>Lhcy+{@O(C6&xN1ch``T=daV2uF^1SX>sAsfY5+CD69_Rq(`aE znP|h<|MM%zzpnFP!-HRWo)|8GfB`=@pIm~rd=^4IOKvL^{Z@mxVG zq>Tcnpn7kgI%go)DIt$szh#S7zGJ}|nB6OMe#wBjQZ29;=Y@U16=f(ew8Tt4+8b`SjZt}Ka zHqUNmW!Xd>2~m;op+{L}6RUP#d-dv-V$bKJw=K8jTv#e%)#l%c<7cGeEpf8V=D%9} z`h0*ng4$(Za?zCiqJ~IOD{F90XX?Xp@}VL!i^6&*?*~lTAc|@ zR2=E|W-W}aXt^Sdv#S#qKge~NosOO1-)~zxMAho-u01ElGS%zUt-G8;xiRrKfFsNU z$i2XhzV2uJ)bevv{5_eS)D?-U30s4YKWEmuO&cZ*3m_c7dC1->UtU&8VHb*uX3hv3 z)f@|D$zccj4Eq}F`;;61u%j)sK;#qRmPQ}tA|^8)&>fx{4c@8C@AnVix{RSgt|UMa zpV$aEdmVryB)kmOLY4IGQ1Nq=$m!YUv^=>gOCv=Z%dj?5S&*KW{_cO=z3KKvgd>YS zJ>6*BqH^DnV6ulHMdn-^ZfTj-XYu!rLeUCWD1_6oV-BaifkHtct17;upzAd0s*Z1l zv9fOYx7+-b2`B>n*V2U05yxCz#LY$RLHt(GJYzF+X%-QxGC~hVe+MQx)WgGw_E_j$ zcBBZ_X|PlW+yf6`cX;b zp%#~vc=qh2t7|$D!!APeKzuZ!bl}|z0+Err8fqHpA&OsIRj$RY4o-pl6=4k}z{F=e z@I4eg-5L3=-C|qXHwPS}9M|Uv$epKe6g6H1OKbk6muUR{)#9ut|4}u@u&VT$JJ!cf zqcLeW{O*{fzo@`v7!jM?Kb-uEw1`ZVCqEu7ig1cNjPLkYXLDmG13}h)N~4VD;(sSN 
zNQZyS09Mr>`~eJ>!Yqn3wjMl01De3DJGSAcJF@VL*Y$1PYH$m#fLwgp+Eb^#R^NPl z4TupNg>>ma!0ws|=FqFOtGI1xavNrgZnuDw{^Yz8J;`yJmj|KK5`X6Grz2FZy?XYP z!V57>tPilQ_T|afabhe2U=1J@QU+a?hKbLuqMe~3U1wqk6uch=mxYA|zJ=&{dF)+L zJ)-~S)~Sminz>sx#{a_Fl7>Wzk81a>_0_la61C7`FenMJB~6$A+H&lrLSaCEMe67e%CdQ+$V3;t;$(PaEYaXGdw-d}%Zhzna;MUeiT* zy=-hico1iy%#YhNm*L?(36e7zvMpejkvzF@>35FzQj#sfpe(e4XdmJ`7~A!du@&xv z6Kem%V?V!4Zz46C{3VA^AsfXEBuWF$a%l!w&4QrgyKMXHR_U^9bYZfJr9y%0#N(1~ z6;n6iI%{B7Ne*U!Zizk(!&OiW&yTEkFg zrIRBs_)00{;la!_Q*9}jpT zgx_S_>gP?pmVEc;=aS_jsu@+y(Tk4HKus{Sz1KFMGqA{~Wx9-ZG`a-qr^*JlKb_&l zpoccp!yP1BGNz!H`p=l+;USC=7>3;Y)M?^N^ut#q3l_K{cE`(?lVsO}5aP)=TDbDj z+i|x@7O6we!-|Lj%GnlX6FWy_860iZkg(uKx0rs9b<>o-=W}BZ6QjHex9qADHGV~wX zbL-;+u+~NwD*p80`(&H6jMyhe#=qY`W!fFFlQIRE$)!4|CoEM%2(YLkalS4`Vz}=w zdEAu%x&ub$3I3MG$`p<1VmWyk-~9w#U}OyYGWdBCt|H} zyG&;LcUg1%A~bp@q$ey-azfBh?z_#&JXhFdz{noA=G@|3g#V`&dF+bYwR6Xg!~EPi z{!JOvcAz$*q}A%l-ltBLHto@|;{cwBFxz|ica5y9RDWFF?F$QBBz`^A_cavL@=^~@ zS+1iSVI$OE@sm?sBv6Zj|EjTh#XgXXiI`P{5AJ1RtUM=@M{q<%X}y!RYSz5X$Cmd+ z(*vx27w&crLtQn>-1Y4}>hK+x;dsWr7s_^H16yzpOW`Mq^6I`1GBaC`-1`&qJOj%g zlTbCq01ynYSarN6_!ubpf*3+DIXth?!YEp=gipr#9nQ{>IdWa|{#Td?DaAE0+Y)M` z2Nbw%EykOfm6fY2yY&f z#-j+dLdM0Vx>wdjyzQa$lgvsA_Ntq_quppBb(0s5zV^L%{`s+6Kz`FR9` zg~4bcMcC9&PR1#Tm1eV~VQN{K?j zjEK*;&F8aJr>YtnrkeTOW!Q@c?#prFSo4+Nl2<+HR}fs(uH4}K+lIiqnVbdUoRoZ2 z=5PMemgL5pwGE9D2Z);6uqqM;)k}ZcAutBDk6vl+cEffd2906XlRG?@hFO;Iq~e>ir;;1awQ*lklnt;d{K9Ru9bw0)TGxPf~WxF z2|ut+b2*@uoA@4zHuIB0QH5r_;4|z2=^ni6IlT+NBA;ns@)|2udw5;plv^s5iMQv< zM*t$Y2&*U}drX9OYY|;^=+&zsbEo*2P_l=CT@7lr;Uc^WowX?L?!bMX!$uh1XP;c@3eK9dr6qJv4rDrjMs`+(G8nZRXw)8_n=dB0AtB2leOEHIMm+n8% zc~MUN)~JVpM|#Ot0wPiVX|C(J9mrM25h#`e_8+{_*`H+y6Dkf7+8O-$s?L&|QqluO zr`d141U>m0rXtFrYJ4Y9MyB!^Pfi}Od>^b65+1H{=xt=Qa3@Y;7>25bRVjU+Bz7io zH!r3T5JIe4Y6>(?E0!-`u8cuFeP@hB4WrX*^Yx4ev2KO4p=iV2JNKLDHg4R78ET>T z-pQW>kuh8mXl%`j9Du9u4x639wmyn_t6{?i&6v@SUTPNlMoJ)PD|l`?>MAk|?tIQ} zkwp(w<CP=XBOdC)R;-<$d-c04kE>VXRlkvQOyCW(uLt<}v}gJj zISzS6roa;%9#t{@GxDLp9y6nl;flfvR&T;BF*<^wxxvMPBCgoGcf$F(5~#rcmT`;p 
zI`<>LTS;gEGkW6b!nw)`_wL*o$iOmT$`m=bL?F&-OhYh;Frvb~KrIOF`!4*f)o1>@ zHebHE8u{9NSu(rc?F6IBL-9DGR79uX%gZ!;M9OJwTos0x5nSjs-qo0Wau4}RKnQV2 z@GTj7UZ1}j`$ZJG{H9Cvv;YWkZ;A$++RBV_MRY{GQETxwuSz>*0$SLa9$YXT|o*!tG)tNt;FjVugdVrsYm8SOOONo zj6Qrrk;qaqCDlJD_$?n_*maVV(_~Rq4&iuDMTyps>nO(fU#J;vk?`VA&kv^@2B(Qj zZ5qKxJvi%GfGF2l2+J`h?LpEKKRQLpx!bL2MErFyBsB%FNd{-fxWca_Uk%0$sYuJs zb&PuyVv|LF`6pnXYs!+j{NJ!8Rmo$#LX1WVoDAi!B{8=N+ch=J_Fs+oB8EY5EM`7U z#V^QXuoq1d2MjX5y?eBr-(lKsbG2HHRQc~dJYZ6*Lu0RFvC;o_^N8MrSE(r==0CwM zZHhmLpj7;ck&(-o>XTZ6X%o4sx#;UVJGXH}Db}I?vVa2!n=y6Lq!kD|_E6tN7$cVv zBc0PG-vQ)bl=mQYPW?J8i`QZXxF^z`2{PIpm!QjL#1}NeJ@H_CBcm@a=NI>?B#i76 z!k&snnm$sa(uFshZdXIWPOAbS6?YSl)m zhagXrv(>rX$oA0BO`ERa;~65pO-piZ&3+!-zNroR^~zG#r_Uu8n|PkU9O0_8;A=q< z3Ya3p)T~7Z!(EDD@8@!Mgk@%3S8tuOkujDrM3j1IX?rZar4``<2{HRX$vQe%L`Od2 z4y3|Q1XE)2{-O5`uSyV4zW{(QgBeZj?6#0gyum7T3ARI9Ld z!*;tiG4!6|5VSMl3E@{xE1|9S-`znO`aP-+_V0GO0%-+*5Y z056^FMpMB|o7z@uVWg~qfL}4n|IUi|&Jnk|7aTniTMJHzNAc*OP3a$Y_@Nx}Ab~70 zU^c5@TIZX6Ka$BiXRPcXI5qMX@wKBriaSjOxKlCJ$EP;PKHId-q5VY<9-wxCGZjrJ zqH5O}4LHyLYVexIX%uqmXS!`E1g;H&ft!MafyBcx%R5Rp$VXj(ZKn% z0FH^xX)onwV}-l-d`JFTQt_(u%ig^!PUPR`M3{hyI@vpB$eP<22}P!gTBF^3hhPD+1?!i> zMQP;)b3g2I#@Uy>pjtB&_mz%`uqh+!&JGQOgb3!S-*kEj0G#8Nl~(6CAgdsoLc>-^ zyaRH*kIUHPHhe_rY{HKg2D!WsDg@})cXL#Iqt5c&l1E&j~}Wh=V`M>=v4 zac0_mHb2k#m$h10F29ChMppeeE#4Z*=GBclXuud2%uc z+)bn>qOYI3n;AlaPgu}TPp4?x>m{rLfRc(#)6@6X7#Ei|&H7_XyX|pe@fdB#)9<4VWiniT=+FJzcx^v`&{=xF;_}>fP^y;@i8m`v7BElNmxA-<0X>aiFCafrx^bwBh2o zLp%*Qw^l-^DhBk2H&u_lL5&b+OJRa#5H?P=haK6r zu)1Z2+!Euqy@F{e%;LmRVx5D44y2%?(1bEz`b@=tM7wJmTzNKw+x61hqBJl6`Rj}M z*ihwB1R7p?crxjdUWuRfeFw8yit&7(0ivKKL z35xE2$lgv9R~9}5{9Pxg7^H$=7?J_;E(T#h1Fll_AKXfLBrRU7{nxy zOHXJKzLpr2M4Bq??_X)4Bw#Rd8zIjTY2crk26d=-GoGF{U{2h&*wr$FH++c?^NGtO zpep;^IzrQth7O72kYh;|^!^MR92UGTL~xPr=6Dq<82ase5i$0OYHH`P&i<|Z1DL)QVvR#N}25!#^s@wX=e;xOse6wiQwP}f*{4(14S)o zIwm)DHBK#oH5B^8He9fFFVOR z6YMnv9%s(HO_oK?roHt6z#0uFsZN9Z-Rf)=f&%Od-#gPRvb5DI6~#&C*YkZpJ0X4| zghI#DZ0qoq>@jq`rO#SAx`?DSj`Y(&(YQ39K5W=9P-X`#RW=8}Tf`>t&SAK!VZ%m^ 
zgsX?P4SP+Vm%;7=@LAZ@Jm2w*9oMM5IY##4QuKrpfL%MWJ7X_shZpc?WE6^%nrhmc8$4W7i?&J z=;=VqC@W&)fFxHU$ZvsA)M%Y zz<`D%MtV4y8~O=YR7?8(@slRSmiSqkl)ih^W!{HpO$GJkaF;MP4t<5<$&)Af1qD3J z=x7A{)Xq$VPjIQ^2a97=NV!529o=)?BzhvRTJ`@KHer+#4l3Mk^l>dd;b7@@mu>#K zdFfYH_IR$j;j*^oCzmwwG_Tb1fz78gL(L*oLoxWn#g-Mz-%~d^kB`o>8vOyOo=9{5 zd3J6!(4+@-fY!>=a`HxcJ%nl=6as+^5OtL96x|4)T>^##d5%)LZCS0J(+}mIsM)xZCB5oXfX_ZuoZMx&6Bxf}gPWk1}iDJ1% z%tOY4Avn*iPKH&5um=G;nr}Or1+nNmIZt#QtC_U&*$5W@!3Av!;I9Ph5}Z_#RV(Kh zVht2u5=hv-VQ3a{!TzKY%{}iLRQhevgj;7$pDrY#Sh<_!KeLy!^Mk(q&zw0^f8l|4 z?jLB$v!2uXNeG@{Rb%UAp!@<9PzLvcRfhe1jUo!(WFvc>eyRdxDj~eI1Rgk zN(56fkOYP+Tqn2FuM;6EC;Z`~N9}iP#%D*jF7*{_d~BiS7$$BJkK1OnR&M%rWdP4Z zt{L6(`vD*KxX;r@GEyv@7pI)*qC*qKJ-g#^Lihn4?J_!naDd({=HxXoZvg(N2NpKU z1iA$65dlY6gWd6#Jnl8HXhy)C1D>gW+kV1GG$OQ9mohTq+`8yRIZ`0Sj1H8_lr7C0 z&7w_;!GCnutfQI7poRr3x6pz{PebFy_c5E3qz%9X%hb;0u!4F+3ygda{=mCjqqS14 zvL=a6j#pC+uVt-TLfF=9((~}c`FJp7h!+v9VO8Pgc(74}Jfb7pPD}1u-g&o4M$ro- zA+7l^OMZO5NQM) zN06HcK_dEhh}4W0Abb@;91iKvo;BkKt^#1t+@Kfrghl+{;RoYxmHpVkL41!S#9G|3 zg(Z-sLsZGO9B<&%nWFvVd&&o)QZqBgf3AGx-gs{1w~tT&F9E_HgM9zEH)hfSR0+VYcJzg z_UC}p(1}T|4Gpqr{k{GTJqZ1JZgJh@TVTjmPx%_F7-wTRnpm9e?Z#L~O0X?T>=?Qg z6qOsd+3Q8w8dQ${s~K4p1Ca38sERcMfxyO70sOvy+#8KltSbWwAc&EX0g9sd!cY9< zC?$7SP~VUEJnfy4a+A)+_1!-0Y(5dF?Y6Z8RdPw{sG6rW0|A$)ak^AQcyo^4 zV{Lff=1e@yLjVsh(WxS3WvS`btGAN>II(pLy0yvDwq^=;`EUHdMHVjwQ0RLF;_X-g zOGkcXsF5<}@BgzSkge72Mo;K=t0+TYmZ#$okv=6;wI; zO=BO+ZS@*8(pIpah46GO-gm-+!&}Z?@IQ9prs9Q@81B7>ql+6%%ApWh==(3wW7J7a zJ$!i71f@}Kr9HAMofRNiAx{OD@yw}A&qR~(S2>MU<2udzsr1hcvk7oGh3;N*q?|C0 zlStSau?4j-;`MLX{r`g?I7?L8a74%qC4RE$DyxH3QL$4C$qJ`%8Hit=s^|Lu|02hz zU;>QTWgscwr^D`bH!@PEeIs5A{d#5JZ28%Sj--Wkq{oo)*4L89*c^M%$j*PmwMjy& zit^$BLo|OxL|(?$VM|4MhI*J*@(!X9k-5l4bMxj!dt95^eYUT9;yc0gnRO&m63WQG z-=$%t>o^wnvrH6$5Pyr2ciD`axj!yWvfr>~Ybqo{&#a}L>+C6l_L39_|q zfjLB5_i#Q)f>^*f+jZ1Mt-9(s&-`=u#{C2OA{mB}93+NhbO<&lzLb>-Vo?n~Wib_* zC_mMFKw1d_B`9a?jEF%2Zjr{K3!ylY>HIQlSZuDiD?sutbN8GC9nhwZr*ZZFI_)4% zM8*v@`a?qe^0E+4UZg%A;qLCf=#hCi5?C5Qv?SkS(9qTui(T-hA%m7K;V&`|T$YCH 
zbx+V|%Pif$CiG1^cdns0icA$nKDZj-jO1jZ9ozKb7u-5p^E7Oc5X)5*qJ{&^EvVlu zu_Mog`c7XZ_Tmh!MUzQkOYGGY1IT2~E9BCv=#dCv52dN7m-mXXLA*{Mfs{md!y#n4 zD%LWtrL`Uho`{g8Tw45psFXPtOmB(>9&)bhZ~GAKB2+MVi9|q1@5CUIC;8@$zTlX4?AMwG?hYZr%a))s3F)EiN_=_qfbVMb z`S<57jyB?c+WK_ytUHDy7w!VHKQ@5hCn`OJITl)K3l=O0Z`3VO^o?$?T8N!yku3Ia z1)-^nokKr&g;8kl{{50oh`?GU)NFiBAv=i(7=VFcS6z#jeLj4N+)INXQPZ5)n}-dF z&Z#_ff@Ri~taFN>!7YPtnOs`)*aa2Dw{x-D1*i?gTEJ1baa)Gj?uK7p&22Y-Ln>D? zD;*ZG36P2i7$7JY1CZlJ^nys=Hou-)-r`(gn?=n&zB$&fbLXR~@(|U8zim)EYU1g6 zo+O%34J0@T4nY`ER$gK!Q@ck3P4iJQIrk-=iq2$i`I1DH>>*xLY1x*Lcs+kasIYKB z4B5op#@sGL#oQ75AnR4~ZHu<7kDn)X#HJ-}4$bOiPSn*qzHm>6t?XJ^EStB-XitPX zLrSOk_{O|16DCZZ?U9%JlakspwUDf`26|SMMp|k#p~JUztqyFW2;_-OFP*GokVSu6 z;<2kU`4Ou@Nkx%K-sYh!qJ>sRKYOjQ#H_7PzWczQheNv(?=oL>!AMxvp{piW3nc;5 z%?fbI7F?cEi$MOHY)A#6nEx-y1irOSGz8A#UMR_tq0hYpM0ZG(Cb1U6L)NZ6^5kXl)6n#2^c#0usgbq*`S|Wc&I9EDB^Gxs z0(Vb;_B!!0M9OGBam3cG)e-OcsE-8~z1JteUS;%UyX**(CYJ)b5^1 zjKC7UD*Tk(L?U0*(lR(Px&+O@8_^`zOG(K2SXSoH*B#0oi)L8gahS>giUv}2#L>)a z7iJgUG}&hAO%U?44T!{91DhpLKzPs)kO6iLqD}lakB9TQfZr+ZY4EED4>H#q4noE& zNb0`>Pg+t#7VH-TBtV@|-`JkFG(zx9C~3rFnq)%|=Oc;3t*_M8QxC0%$U=FegJ?Z&atrPc_}pEFWYNNbHH#=kQhQ{Gv{bd= z?39oh4zU(1t~wV7ENNv07!=fn!`e&c;CyO0j#d(OjyrK^iR=^_SMr7_ZbKt}4*C}% zpd15hHG$u+MDsAjz~%{LA^I28cr9@Wv$&;^#zEnFM{|6AZ0Z&+qohGvAqnzd@R@9GP%f^=(iN=Bj-#Vw%w@ABwy(W_YIhm%?Ofaz<#I>}ST(I~Y zvH>&Ei%Svw+l?Bb_ee;sj0_oi1HDTod>qf1Wp%Y|F@%`GI&ObeO*=B?S$h&LE4{jq z`U;sH;=+uQ0m_<*%u3wCPtQ2pz0(XN$VS#MIlm0JANgpQ34% zY6ux78NeLZ69^A4YQ8O`*z6wh+Hp1R14&ZSd*WOV4Zvv3ABYPDJyIU#PMo;@Z==NR zE`N?D3Jxzyj@%mZZS7qy1entsYiu3vRG1oRobb6$zlg*~4SM`sZ8!aeKVbm{ z4`4E+T-QQYu(-TvGZ#`c#5yjnHvDdFel+YlZ9Rdwc1KXb>A(@a?H&j1(@1%-$1!I}B_bKHWbkr{#5>y7 zg#0znL+6m^d2B_ThKX|>+l*$UiU>udS*7Ku|F{(&F;byq7y#0$QUXcxV}|nn(L#i< z#!S~fJvFx)>(+uqn7!{!tS$Ka{0|>?&wSM*B2_G8z&SvY!do)>n?4om&K1!Y8CGSM zuU4ndy1tpKFwXFTBq0SvMLA-`#?5GS3XkZhgKlY%#);2SITJT*4>@1S10YS#zR7MF zNOD@ZU?E%COy_e7^IqTW{L!kqq z)S42$R84NiEBp>F9Zpdc?>*%0)PDo!5^r-D{7v1{Bml-X)6jl9D;9JSF^U*H1n`26 
zXvwnXW5$zJA!!oSzd^|4n%o7G5}gX}K+y_fH(*ZaB&arRd<->}uCqOMH_1$c8*n={ zM?)Mr0M#C@uTlR?uA~1CbH9&R7&^l#(jImF{HEC3{@y!p#~V~Bqc3f}{Aaf}hZ?XX zSo`-+@M<>O1W!*w=kT!#ZXj1h_3wY7Z^l2=uZuTt))q#Dsl&19_IS%m#O6kNx;h5j z9Y<_tnnMhTkdVafd-L?8?en-Ki3505rJqE!K9o|^+*D?Uhuqe3{`Do)T!Ve^Tk!$1 z<}6syA-7ZX30qiIMl2baxEm-4R|}vW`-|P2J;cN&ST@htJ!_t2BFa1l zeVIr8D@hLk7oMQzvuDp-PR%h9RtPDfe%=)vj$H9x9S**R+r|7ZCO1R7mM}Mq!%eOs znKET1+arb@_P23RdGPEX9`~#(>yD-~)K_b%B<;jy@j^v*WcZs88h_vbCD&9;Kt}*n*K-f?D_`1gb29q?q1Y z&+ZgX@~}rimB8U3ux0*H&Me$jTV4-2tYh^27L(!1RjaCzThL86%F0{9);zG^H1rMP30caxIBJy13(Ls2q?7~EP=8n&B*`*<+_9Vizpha+&iHguT-X0 zmArW)spskZv?JUK!_4l%Aq5neGq0;r6u{6$MniF|$hO3h0w(3u($OmR2lb-U7%~mk$w72(-w3}O4<)Jy6v~!P2WbQvX0CW*PkiFq841b^dri{D_SZXriKIXJ z8)x*U`p8)0o0Snx%(7`o{nG%<*?8P+(T^>>Li$9g`%fkZdZKZ8gS*}XZ>7r-4-FAA z;soAPygj4z+iCuDLQ^E2yB_tmt6Y&o$lLDdEn_bPED;)2pk7Kh#@}#oYd=3fPBy)? zq+1_>4Px*VH5xHeCFXd!EXSat8Zd)IBVyRB{@?x+`V*X?;WXiG5BZ{MBsPuEvr6~L zSw5tQYwSLs)%H;+ZoIU%9H_Fh1B`S$yE{r$GTmCWR6gU_mshT3sKb=S_tRha&)Z|C zedo>Nu-1gu=y@#(N)RLzA(IU6TjT4{)?fT=gTOZHD&q=7eYjyQ7S z8~@!WcBp~hKmUzO;8itkrNo6Y1+jz}#d{)x-nD2|u>bF7FXdw(9(tAcb`jwQq9;^9 zLG8J^wb1g-l}f)}+jxab&g{NKwbxC^M)ydzYXkPGa@Pjq>LLL^fhiY<37AC}3$_fQ zrXkL`+qZA`UEUpPLSoCQcQHtVxA~b`PB!|hT17JZ()IL(o`l)hO;kn~!pAB6Qf^%4 zXxDBxuS)WeyUEB`6D6I@ zij0w)VZm~Gk(&MI+l2}u1AkW zvIJxIjkM%5_+y^3utfjX`w^EKA83-RKenQ0P@jLYN5^mwFs-ch@3)!^8 z?tPUUZH~_M*iiYUWutc=cj7w>k|0>^F0Q8ipTN1dYWCHsUSmd&uIImNamVHgj=4QJ zCvboexFKKxL5!2@VE$2df-I735LB~*#|qXBI80^{!G39oFo2jXX?{M@ybh8?tBBBz z=}9xuV^%&7q^^a0C*B+u1j)hdJug1zJ#_sV7L^BdCrG=zJg9&*%mEVg;5B@NYCf5V zlBq{_iee8z13I2H%57g@Q#SPt0CkY(Ft|^2SUYSSm3c1*Z#&k~7RNXkp+wL%IcYii zJjC(_FfXK4mJbKvCr=io+b$k zn`&hjya6+1gX6I^QKZRd9&!Hi!LwKGBzCu&qV}H_2j`QV2iEQ=X9A#&S5IBL5=33X zH}fM0@2cOZQKBD3V|v`r*9s@1QyLL-ppfUu!Z(|LSkG?oZ_EN&)~_*zQ+ z=7j!SaR>J=sXcPZ2X~V#qckM7F^ll}O{%Kj*LTEdp~=mVz)1NMeol~po=#FKpxMFC zu(q?9)D(avLZ8`Fe&DNEDDCDs1UtjLid2xg8N90ueFyfiF`a&n+4>=>bl?FLEQ!^T zq{q#9!A0ESQ-SES88-B;S|ytayw#@JNMbxqmWVt$aQVXEoL?e 
zoGjvR9^UE_@wjqPU|ilG7;s7>;31Q^-8if0&PTb*gpG2tekVt50`w$~Nim+bj3APM zd_w(9d>i1LZ~7iPy}IrA{jst6oV0v<8hNj5hLDUThyAr#cPm++(}XXR>J~MG?T--y z6~>sCyEcnVlMESJ3Jp$_Im4|~$B8abDE`I2qCR`#1EtCtaZ~vnF2~KfaYIA#tY(bO zWT1Bh56_!RtR>b(lE|Z6A*G~}^!59w;9(7BofH%ns$(MnTbKZj zmB#cc)IVXi<#cE897TjiCA13!CrdP=|uO(Vx(CL-u;G>Oou4(|>}u?NpFAc6?CY}MJH**N$?p@L^^P9Kl4%jQuC`%S zreukMYreFW%GH*e!cvp@=B7@25~OT#1Nf%voXb0ioX8i^v)p(k6W`DkOw zTR)|ucJ$H51gOY(km0+)PM9m479RNJ8MEU3unuPI;$jb1W-miRl1KsipCGk!LB3Km z)>c4J`q~0oWs&hq9YT*?ldDnD25LN;>VKEzA?6u%zZAmDCetx1pVh%?cB_Us&^{c%%V~S$^3XYFnryxC; zlQ;55mAVGv^|a7RF*t*xk(;SpFIXmmqjZ<7Lq@h`98|a($=L!hu!=MmixT7SzquX& zMF?U!x_!aO_EM`PfNXPd>w`uYx;qc?Jh6p7s9S>-RD<~A&(7{E@U7uE#O(?<>>aO! zqVSRls~@LLYX9V&OzO+gtjSdg2xY{3XLX50Yb^=71~wD~jfv8rODx1z*QRJ3Y-q#z z1ACY#*u*Mj#tmIvi#9$D=Ju=3dEwdCKVtg>GgZS%4c%i7UvM7KDQ@k&q?h7-FJ4z5 zdjLf0?l5yTERLqqb(bCEPB!l5!^Ytj9ndpJ5Iohz$B(xq)uJ!53(lP0Xkp{Z(c&?s zL2*{;ws{EdafvIJo96`oKjIanq-eTLUZR_@dhv=++gq+F%H+p{WZebi=-QNtG9>=G zpY5QTRRra==+d>T%nsP^6CP*hHKZ5l%$Nc)!L4$RcRPh-?K~TQqp&;Q>F-~rVkAtw zjZ-?9rC`5i6b*ajW!HOXUA?S^s^+vSy_2Wx-Fq~*^APOMy5ycc$yKPlXOACZm%)wJ zAVRdsmqgqE08EdUkBQFd^m~XL-TL*baU%xExHxlMk9Y+Cm3kW_v75Y}e;X+`0$HW} zsSP5-Uy0vWTsED=<{ZmF50?l%0mQ>40cyNIml;Qe4)lSTka{B^ErC3eJZ**}}t zZqiecpKE)B%9fsyp}{YcAew){KOiYdfP_V(-2_%cUSkG%Zh`6Ye8%~#P;^QnfFq&}Gva(ruLs9IDMP8@!ENB%^{oUn}F zQe8;YBDtpW#YF4%0!mc?6h>PH`zQn}tL%yBX&n6#CmBUA`$gL;H@ypeCtVT26A3?y zlPg;0I8_F(TN&^qD=)GW8z7%U7b1uS+&AW~Uj-?85f5Ny3pRCg|Au{I9A(c&Lq{&)!cSw+{c*;7`Y3L00Je zZJ$tU^EV_U9*vGSTM-C{1f-3vKWegM^D$&hGt5#6!IM-WMt-UcP-cf zn1DKpG;ljL1x*4DF|i2X0Y4J^lVOxXqy|zKDc&f!d?!qVfrcW z9IZFsDx5P~o_RsfnwWE+Pq zsu?mOL?h!8VGVpgLfe3f-wRHjIkS1LS9GNGv;3v3UmB5aVm?IWins?M5Xh|D$!p?s zVOD_!KL*7hr7+RZFk)d}BIlWLo!&g5VD4d&dyRncraEwanmQiHVY%+l4$%lJhYyaG zn{StM_Dig2;#XR1(Sl(SIPiEppVp}K;4?8tkr^15x28cSbs84YMj(T#UaeX+k|pfN zb_&f}!8uj=5rLQ_0k2|JQiJu~xw5kV@6UB%qtq0XVhgkrr8~cY=}lxFu=(| zv3e_RFmbbS|1AG0Y+~%ZJO(nZtZ!(cHe?OBO2q*iE3+!=86r31orZtj*!uX8W&iLOoFcE-KQB#2CE<6$Ugl 
zssr&ydo8_ryCYF-!-#(_-dj4uGLd+vE^;+FGq}x(hz?@|$oWwOf+wHmdZGwD@t7(A zrLLEq1^edS@NZXwb!6s{5lLO03smF7D^I}pau=7C)d1E%U7$mBDtR@Mjp^pLqi|RQ z&bNrv$Pe!mkzp4YTPAZ;Ol^f+X<;&Kll~%Z6tnx>zPS0v#Qbx7os?Itlu3dqP3eE? z%$XO+lx0t&GeqSC-#WYiY&zV|6X83WmgG94$L2JuW8I|NO&X@ZckD zGnTbcdJK$RU}?EcXJPBE5s^ApEmo*<<_0YNnC)g%-Sqk-z0!b#Q7Jcf9Q$^&%k&95 zdhI^;?dxnGodG!n_4$&o)o|iH^p&|t+MItQQ{M1?Br}nTHKfr!x~3)#o1Zjfntw2; zj=6F6(T&@-B~IPtKLq9S0m*vhARO)un3Mt?W&ZQsF%(Q2@$XCuzXNRN`}W|8-X%>H zy%upj0v&4y*G+L?vTq}XHaqoCqF=?1J3H-)nFQw!985~#UW;Ju$U_I#FHA`~;XuPT zAoVv7%FLR>Y2JX|Y5g7^Ydj#Lp1!{E#Bju4+ltN`J$Uiz)jmK4Wyg-A=mKinXeq8H zlWOG!{;jR8ox{;(P``d_GPKHn{7B?@pw{HSY8V!bVj9rIX6c{u{o$SbpY^CaYXvLr z;o9c@FLlp^moviVO}UB`)QPs}SoX=lmS;a*Y215Oejq5Enf2(SlR`NyjW`}14~}+( z3FhLK2Y_?l8tLH%HJLdt?%Leub9kp8XxQa2hJEs64q8b5#uvh(-R4%9n3>taxN?QV z7iVi{?WZ__M$w?{&~2)i6$KKj?Ai0-*N^j7k_7#5EpzVyP^NKrztI}srw&aHw~Moh zYSPr;YLBp|im>D1MGmO@N^|?asGfTEU4IS5tOKm_Vy;#n#Rf_B?(Xv>Ug%V0Ckr9O zbD5jehikn&fK};{`}Y1z1NK=Id_h9q1{RjF@o2kYR~q!d;=#1uQshTp2skHd0a6NNYGVb1WpoHAz@y7#|PsfU6V`FprRhMu)B;0_!hcV4wVHxqF zq$EMysl0_;v;xNwvjB^a5T*a7;D7jkBk1Sm&GAwUSP3mu%p-NC89;#*>Dl%+r@a$y zsR-jGEUIn&mYn9ajzLr0)O_K+RP$k>dM2MQ*B#ljWxw@pE1v>%p(7X5+}z!Nck&-T zY-DWb$ypD()XN{m=#tAwn1abvF8`N(qeI%oH@>XlUJ(3J-m8jc@yWkvuDFxrp)7Hf3tP!u838uR+$&7^)O})Myif@3_i1VQMTC$t4O~4 zqzALrhQ4`n>KJ3o0rbj5UzG5X*>#)wOX+kV9wS8mLk}V_mYcUXIrTo~= z?C<$obKec*iJbeKGpzU0KWF7GTl}W=U7T7ZOAZ(zB#)em1M-8abw$|3=bn<+n(sdI z+?P#6k9NpDDn1KxqCT9?9$H0XGCy^z`0LX?UY_dz>}WgxMT@2`YT8DL zuWn;xWHiF{CVA|L&JQ%w!cG2^ow3zbq|FALNamz;uhZ}Q9?ZUzT{p3Q;~TkMYGy^%#ZXp&#$j%{myy zfw0%UR_$RnS^fr%8Y#1nqBt_Q-Pzlb48QEz&+jvmKU~{m#>4#LEd$PbqyLY5`Qk;) z1NTK{BbQ{ZX*@M=t!2tLd~_K&Wy2@21G~01WQS zeFJ2eIUaH=-}V(aY+J9n``}9sv40v)oX3^EuNS%wPHV{A9EZG}H3gSDmhN9<~P{IEWKbjNGt5i^4S zdE&(*lp}*~ypK_{@%4!T3=0Xl`fd36@Bse)Yq*Bu>J{%3PsI$-wpFVkz)Sb|$N85g z{`2t2Ab^I>R~}C8yQc;IbQ|QFsYqmYdT*y2c_Y`mQ8&pJ?D^F9!B~wgI#=DVT(UX! zRQcAcVVzz@`#UHUf#yfTdj%TZ;;<6(o?tBFvE|V$T|6!coC4F)L`u~|x;5cUtFf*N zr_-vX#?1^$S59SaVvso&PcQGvwI5b4F^4Lz0k? 
z`hsAw%k3uykjvt_wvN8j%=6{@cKH=n-`Q`;E?zNAz8O<$; zNRnfye{Vsq9tD&nqJ>A)mcSah#)T|_8`S+bA89w2MOK#Ef1-6U%ks=*^>^>-iXX6W z3;>nb5s8E}$PJSjW!v>_dGlo6mF5TA;Z#9SkkgPYrqQxQ< z+>sY9T;Ku4u*s4Cxh^c;yQZcl}9tHWIW-&*wIu?kw0D<2qX}WT`1J>!RMB3QzLykoc8+)OKWF|qJq$(NTBY-$9p1aiDPmoZ zfbdOfm(5YO;U3fgt#FEBLls3};VYi=dSeFXBL8Irm?dQ4$L-tqhMl$l;Tnt8ldr2i z*+|1W51jSwk<4ay=iV~kpV@5DaQ=h-9BI}KySOozm*!S5h%pfaD)gO@j{K0jmB3rQ zA08eEw{!>4>rmcQmj0W(6UiVG`x$NDa)?E7-LsmBX6b7VjniZo5S_Zu&CGv$P@IKb z19P%`m;%-t%LiK1NJEx$IF^F@N%_M zxq+4E6=@91GY-7}y>E@DIZ?L?WnpO6Y|z459wwKQ6@EORQ0(!8-`;JUz*yYGJT8ZN4_$fWuQ*O9@^ob;wLj}74An8rt=ja29$;#?{r8qUuXYP2L{42;@ zTq9*n`%N3w+wbEZV2oIzdEoV(O3TVN&^$r!>w}-lp9Np1hlh`T!#$7~e73##&=C3} z=DGIzgBv&Mn}51oH*C$CDC4KIGn;Zr$aLuL8WRI`j<@6g=I6~mABqupS_wbAzUn* zI<n8FY1;QPBw_i+0k8nH61q(jU?RT@Vr?d9K>$Nd9w!G3n-YmiN z5yp%=H}3DFH&1VTW^{BgK0{n=(e~B)fI&?K?p;qRhrPLBu~ecB@|!8ka|# zd`Iw!;4o8Yt=s+lJvVL{$zvt_Ylp2U=8U?}8r1pErUk{f2l8zF-@Gj^N;!B}9G_O}nrHc|C+^vvy{4tPayGUs z$#Tzh?jJtSoay5}I;32dS;mzs(v;f`A0FOvqPoA+zTpjjGsa2J;&bkcQ(pN2X2~)) zK=f|@rITb)>YuioG-+{%gXp)YIWrtCKcA~V1=N~K6xFDQ&wsRLmu=^xzj!yij_X#jPIwI>U4Y-l3 zQ|4@BABQ-9vl_&ZGyG&%gxVQXyXX~?U~T^S_8_)yE;S;B%R!!UB5&QKGa3s%l06x7 zezQJUG#lsrKYYCjT#kAB{(sp=wi&Vw*`|oHRH$TW%vf$~wvv=3*;<5TlE}=Mg*3S( zJ0X!MqFallnIREHNm40_wAsrN`oGUR&-eFy|F8f5cfVfGGiKC%U-xx=KIi8=kK;Ix z)1`kgq3WLf%MG*VHJz;61jr-UWHfy+LDA96qH5yf;-rSYMGRjBwMFv&S9)K=VA*<{ zVoRv&becE617jrn4gmkk)vKwgspH8jwsZUdA4bB-9)s{o3v5wr&$IWULU6<&YR)IL z`})1pYZlgTw1kXX%7XxtZ-@Tmx&K4UZwHP=12mO8PeWj(m7t6V>NEfli*of4lwX^##z0as*hvBS!k5qM=!3< zTmHMG^Qe(e&bhRIzPVyCg#<^eVmJQw+ftH2pIPsh!z2yCbu+-k?dZmlTR0;W!ydTn zo+Oz(2Ciw}Fa2YBHlRT^P1RXgdn`)EF>yU7v)XH-iIuaxW$E?7x;fVjH_g6qyot>{ z`lj!%^WQTA(WJf#Fm+AQ7V}qkRuk4W!}`CTW!+F`s*Oz$JLCS@h;hLwxodmVUo(hq z$q<*`hK4EM6M*8ElO^W{V#(y{`;}EM5XEn;yxl^Il#MMU0Bs6lVaUywrRB8ly-U^8 z+u{g24XZ=E55HT*@Zs9`>FcGiXYEWTi(BozKuN0Iak}eEVx8H(^m~aj)MRv9-D?9} zmf@yU>`xL!abm+;OvRLAdq~KW3Jd=qhH`2lgx;Y~Yu=djp|1lfmw9>9Tu?CrC6=jq(Ldo}8bX8G`zvq*Zd%5A@F54n* 
zj_K+bMpi_IN;dR!K#O7(xwwF2x#Joq-7J%IjmFg~FdS|%Dp~HLIHF&V@k_S-K|PQ< zO>R%4wrx{(aC7?f=rIyqc)DFJ@2PrJ+eESXioWbQpoF)kO&)fHnh7M8FTb$Q(m8#L zR*r|WsUp=oj)byN$Mz{^b>D9~0z(Y$_4_hD2E9s-2LBoGxC;Z3tfB`ythA~BzRlNP zha!+L0XKCbe&xyG*IE~zFUsXH* z&cu#sTvaR<|q#AAz|l`Y9i-=tvc&o&i}6~+3X z3BxHbRNr?UH+F2w%ioschS<({W+;K<>B(~s<`<0DU4x+Hbl+Cz5Jw0cqj9YTbQJTI za#AG$Z#nkP=!t-?QE-QpMBiueV`yp0swRy;adev|A0x47 zIiabNvYD@0Mrx3W-OOcLqHy=kWjR;`xyO#dt zMl+5FOO1wQi)+XmV+YL%F`g7~A9N-mJlbS5EbuMg=NIg3mTy=qT~il7lo@T@xUm$q zPNu`M3y&+t`v1$4XG9cp3A1vhD2b({;uqgNYNpEI(7uBfKi@mo-Lg9OwDa!|*44P@ zx^~HE;Q!Cj6_2lfi-bP%w};{1pYEjikMuyV@4c%Yc^N5?Uh8_*t1%_L0O+n6DdWH1 zkaU)jCI$n>4yqKo0c=sSM@cd%K{)EOz-d^DXZ$l8#52n%)h+rxAQ#iL1i_?3L$)Ue z`Y}+Fgx6_S(K%N;83rIUed@%HY$hb0VrokJ}772j~2nhiD(ad+avhMoVeLq{^VO&%VBcH_QIj$jq^PMO7C_)xEsr%cc5l5DqJ zQs~03lII`q;NK&upkPFHs>Py@_Nn82odf7CezmWa;RGoYID%v;OV%wp9o6T7LpnOK zLch@a$g;~I``KIaxz(^YL6XuC2Sixfa_m=maRKROklm+;71qmh* zQo})1|NXG|eL)R$2QKt(V7MacZ9t$&vTbqN%`-!*?=|UY=ms1r^qNcuMPYddM(r^p z6>Jg5E zO!kMau5K83=qp8cQ)kYeP4_uWmmlbKMe^rS|6mmbgkz`{`UKc;3GYRQ1mLebi_|)s z(k~VvBM3|JlAKS}&YXRtk~`%4l<1170_@j+JMEirBZ~S|dgR#lk7t0_zaNw9K3Mp4sgcy=aDRbe~ zb))!aH6WHoPy?Wl5+w9?wvICq0EDk9_z^j@sgVx#a6o#xAg>?_6{ZV85of@n04^;C zz4h+cMp1N^(N(zGyJ1p<1SZdIsWX$GqT8J`ss4g!QuhzV+a|xcGi~w`_K=*3sD7Am zy3hRp2K5&(xB7rh4}g1LK*jRfwr${DQ;AiGBb|9ih%FI|h{iJ6zojP9`ntNY$jdP8LS8;ZQH#3DuCXa{&Kg z8SCFyb~W?G`|xs z1o1=%Bl$bg48SXaG_f50j82OdS)i9k_(Bg}?8Jl34&J5?F4CJ?KTy1=wS?YbSlGp^ zNo@*HBln6fu{-FAj&sln8$uC!7nPi&bws_;pAxqIX&Qg!uBYLRFBy>LmgAuxn?l}2 zl4>l?i8&&$c&=s2HyAiT~RA4 zqr$o7L#8w~EUQ}pOi4)2`Gz~a!i6xT4x?~Uu+qbcEu?|F8Oj7wDOzkm1cDM{*CyXi^Kply0P2MF6w8ebeCcKZ_IgiIix z^0XPrxCle9!xo3{wcxWArQ#7K2$2Es=zZDq%F4bfLh!jo#@*&nZ&neR%MgxCU$Z7* zzt5A7ei#P7UlUX$ps8TzXu~=>OOlj(X?5rTS!PF21Rw9wqaDC==a$x$Noamr8$fn=mQrS|Rm+8yN=!J%*S9SG@^=A8P94bqg> z0w=ZTY;h}RiodZoudv>5J?`b6Y}cflJ)nx<%%kL7M9#Bz3&qvO8Q@59v1_}I}!-k0>1zsRBiVUfdyq?i-NUGF7+Fq=rN^X8uOU!{Tw zQ&BZDHuL;WGG;oRWh|Oh0U%Z3?VWyln22{b&cdA!{YWa{v2zqDOetD|7I`w?w}dy70m*LqgBW#q+g4%B`e 
zGkI85Flg#QU^qm{?nJ#AP(3FdffR~aUu0-ffk~x@QsD9BV(SO6ACsxs|B$MXz2$y? z;LQUg=15Bu$Yt^K=l#EEU63tGfhi0M$yIKb`aZ>5$4rMASahkaV%=pgED8TTn>^Ib z=-z_2o?UHbcGz5MrYIKsE?rt5(x`>6X)?^i<#|O7l&JI=VRZRgINBi;bno!X2>4teXyOSPX9D{IQC zHtE@|KTrvuB*p|3(z-uRSCU%b@tg%7JBY%RAUi%I*62CXfO6Cq-lK&ed%(}SN zGWFr1TjM8B&K4@Q=KQ!_I|^${PJV@~V;I_49@K;lPZ}!4lYk!ufMJc6!(48{{=W`h zDh7~52WZ%>o0K%ps~RYVm+?F}PV4a!awZ`U!dyq}F{k8dHS~o)PhIHkMVA}C`gOj! z|H!{OMpF$h#Kb5R|2kxj6TtRQ?Msii*Vi$}{3IC-darB&F!2x5o$gln8;vfbh0+vG z6^LN=$_pHYWe+YS?qB;#1#tLpQ(@=)_c#z18BBbF3n}G_)lq#t7Y*Hsuwx~NeLXU+~fZl?{xNzn>McG>{-K02_FI~Dcs^-+?b~=#(!)|f9 znC^S~=FM5XF?7s>QrTBW11N(7hO-ZHmv^t%L>O?6rILycTtU)ASF91_UWYjZHh5^G?N8n4Z&t;Da z(6Gex^~Q+{bf)?8&;i7&?Ou-ye4&tD<&7ZQf>%@j^ixwsXAiX*@|g&)(u`eQ?&4j= zh`UVEv!QZ2s-qmk&!crOLklMBdHABD@Vd&%8iWzq)FB*LIyz5Bc-V^|&)!qZU zWfOO`(Vnwj9rbB`W8GCCMm88*qJ9f1UCSDDzlvdmEhu4B>7M=jJ#roTM!KYJ_~#Ij z$uxPfOi>sxZ{+@(^+Y3}u_3Mc*Yn>-m0BkG3it9XL%Jyd7tV6WfM+5nw+&FZj0(sn znK?c^LfxiK1M8R7q*5szwv{@Kqr^e7+Eu7oHplAuLy}HwbGNS;5J<^ov;I@n4#s+& zpj0}}B^*ak7VZrLz2@~zbFo=>4e=SAjE&AZ$*kE;4*sPZ&W~rGvS_1wy9M`|G0x?B zFk{L0Y*W{(&ZJ!vj^2(lH`VJRc?6Y_2rtETRhH9KkbTqG_r#vIoja4=L!jFPo!(IY z>c5ke!l5FWfC6A@(?7;+%gv=T{CMNlXmQTx8tsAY<$Dd~y;*GOAC z_#majD2+RSS?5;&QEnv+E-J5a3Ud zZ&E^VLe=j6r>V`(wR~~SNGB12LfX2f1a&{oz+4bb?@V$~fuqR@GSTMDI6S(AN)6ii zjPY~uo@1dw?!ZJGM&fgf14x1yr=o=STno~xOJ^RCHO2iYoWJOP5Lmkxn5!j0#S|9_e4FeJxL^^xA7>x()us~4Ifzza#?Og4hDigO;b?AU zAEL$(`gKUrNyYj!oX!Ygf^h3mM}2J3s(JJ2VkKRw{j>~pJ7?ia z2GUI?FJ2IJ@dupJ=3t?@y-q&mejX9Rs~4>m-5|;F;BAqt?m`b6Em=>`Wc{8!ml8c^ zC3r==5ie)U#osUJAL$Yo<@C9uXNV}`xpRCQ za=Z^99%P+Hxm6O5BaZIR0Gt_!7X(;mBm_H4V^sLC+rnB4hYv8Yru4{;H*k^T)g014 zl2_X%|1yrE-Frprq|>a%{ch310ny!ii{k;!QBQ=EkbTsX$G8;|0qXT^q%#S~mnBA< zkZB#1m=!&zAm_9adhZ?dQWfi$-vQ@b!sI}9o?TRpIVZLxe3gl zRuwI1zwE6Vtf(>|52DaRO-$|}L4%6u+I95pZEnfeh#9G`QloSM`(? 
zN0Bko)E*pQBFGoxRd+r={UmK)B(naL4u?UwT+tGEPtZ|P{z5OHW$4-IY)JPKv*Bkt zIS23GPo2QRilDk0V$>(7zwwP$p<^8`u8rqmQw2VYE7FQYBsrbIU31tYL0hmw*XRr3 zoRgx)6s@XkKEd-jCD6YZE*N@t3jC^ZvwZa(oOuDEld2OSxwG{$VJ_I|Y8QLC@cfwi z)hLOq0FI!OEUa{tZaYkMazk8l)%X}5YozET_tD^m0jZS&+nWi-Uu>6-b6b?``kq@0!E(WQ`zNR1d*O| zacDvA#`=x6kQ(A2e0+v_!Jbc34*i&xXoima#$eZK7V4U^h@-(fcRqc3%sLR2L0wH7 zl^ey0pmAQmzP9t#n>$wl(64l^SFedc(vKfKI)wESqVs~B{<`uA;8o^e$_-roEN{sUfM4?{kWB&cq0o-Vqhs=c=tje8 zPo9#-BSBiEYlc+nAjRhQlwyo5Dg2=A4ANi*2^mTLGrR34Q(T}H%NDvH8!5Rm2bCOy zEhp#{SCIIrqi!T$+)oyk8#rv}&=sA{4^L$DNsF)sozcoJ)E+L(r$mG?4w!2?*bTt1 z;;Dy8ar~3Lmo8mmRZ}=|niu{$UGYDUHLt(EKSjej7ePZ55pe;sGP? zrbug)5VFWImU5JA+O#Pq_D||3ZulUSy2Rr@o9wh%ECfNAUgO-MY=-zjk z062tvb0$i**e$IT!}H|2QKa*(=xA`y-ohV`g1+}QFt}2kGkoaKylcHTRA|3}OGT$g zj~{O@{g`FG?(m^QB$lP>ifSqV%gU@YZXKqslCM<(R7=MOI}_>O z)r-j;8Z~bGVDa>B1w@cTt$8bocDeR1=@ z^6`tbQ)r6VaW1}R`Z~8N3h4MqQLb`uUR!mr|KWWu_Sj~mW$l06G4eZQTGB?pDut5>)Xdw?G# zp-Ky^w?KOz`X&wwr93mJqYKMPOYdCzT-ATZF`Mqw_6YdHOasnSf@7nOo#LX9p30uf zaVnrOr1IUG%9y3CEHWv~_UYC0jAO|@E}ylI#D6J(ghX!rCFBl#vSJ;@v1{eEDhwd= z_bmXT-DC+}UWG&wGE4MJUJP!b^86uV`{S7Zd#>tU({x-0@{{K$&MC8HcqeM0f>pT- z@&Zx}A^bM5d#V4-_2hPUG#gFUm&HlNM|J0RyUA=Fi518^U>c|JaM;Bwv)&%?eb9hH zP6=uYVzmi(iG+v+*VN-ws@o*BZ*G{S--{Bijs)l=9WlD?+JzI4tx`=u9*)a!gy{>q zjWNy!jfe!kTFkqXnso6z3xQH9+92R}w~}{jwIAWxQ2P-CCs(jJS`chFY|I=Z=`!NJ z9Y#kvl7!N}dKvl;8QzP|y6um%3ln1QJ9JU}^O&!5k_ODZdJSxVxPEJ?cO0%~YGGsl z9t-H&l0BQByqVCGJCm=I?QDF}uYTM$$Y43rL{bl+NxT;c8xuKJJ^b#IJ`wi(;xAPoWjPQeyCx3UIXiSkHB_L4NL5sO39tuR0{rTYk(eP&JWNc z4#ZeddSb|grKIBQ$s!jYh9)fg#It&eXCPUdpmUUvL>Fn6+^5%+sEg9HrQQKlgNo-s z!3z~b8%Y@5=RD-jUQbVdo>0kz=&;_?!c$^SOc?)Y;SKATZ{COy%Zoz9vfseDP57)c zsTRbNCQWi(jFTR)`VIjh=dUS0DFo%2BH;f4#Ngj+16j=3#dJXmf^u%p8hYYP+s2o}NyCN--6xPB zU7Ut0S#l7wqJO!1v0h>0$477N%lf5@^*jc=8PEke`|Wcpd2gi}B*pq5?pnJ`tFlt_ zKfk`eC1puRMpZb#OR9!cUzUMSP1GrrjX`p4aMtsg7 z52O)z%`{oQ**r%ZAmX}Up`gS>*p6s9$&7Zr~4Q8h$nKwQ1#nu zg!&#tTHew54bs6fZextEN76279OB`lwi>^N!cJ&&E34K)C&_%JFry_ji!C)B(K(x1 
zE+FsnOuOdRTj$x^YEoA(oSnF}tKwh8?IK?UTAO{&QMiiVgSybj;g%~B+y@UHsH1cn zA$H|?4I~*LSEFCr%(>mhStz??UjdK`fJcq&>8&pxk~A>@*{bdup*+$aTzoJ{v3|G` zuTND*-*b5@^1YF~1$jN5*AwJV1Oz#nRcr%z`dEUwbl7o?t;+QA0>JxMQ zM^@5oGlkApIc#OR2ZMmFa!;eLkdK$sNT9T;yACr* zCua{ku{7W|$%wYtH(>5r^d{@&{WpJ3j=_t%`0-;*zF7_XAdvc2=+dBxuIcN0^_Bl;ZgAe@PI_74i4O{cZ)7!Y4L%ff*_PZ;-WOSipH-k%}Tb4qJ zf$>x&cK&NSS$NKk$_+nqL{LeoPB`)0OZ@!&2JtpyE^F79Vz@#U zScY&Y&>57d$RkQ3o=fxW(yTthl#wJ0x&F)4`ikjKK0c+f*-n`>z8N|PAe{=Oc3ZI2 zIRJJW5znSBes6AfzPfkB`iqtC036QwE=*!-N6!4*&BM<{J-8G{{riDxc!tK!-m2G^ z3)nq-ub!fK{5X0;7$6IIz3cuD59nVJOhSJNpd^hUN=h2D6GYysT?xOq*b96Qy0O3L zcqdvQXT`5L5@5n~*D%y+&kxi-Tlv8x`51?}w6;dVWS?21ea1@CjGZ|=htzwArFp2& zlpf30XNbm%IU}T_eZJ_!?S1HY*9BJxuN4adG`5-^tN-dUY#7a# z09Obb`nD^K4ppuZZU%amD=tfijOA(L@#M{>wW`a5Zssbku9x1~UQF zaaJE!2Xq;B|KTO=t+Y97B1Q@!YIP5l&;G1^-4D+}Fq?0R+>r)LildT9%)$2T%j8US z#__`E!7sz3dHU85zoyNBynHEoB2ns7P;cM8dv|1Ajy}YqJ8?cuyG*W!^o;~~*oQ&h zoKOG$`>%lteD(Aq90`@e@RF2`k2*V!ZK4z#17%3}p|m2d{dDu3@#ae@B{MG!4lpQRL0&}&zjsFQG4n`d6cJ8U$je0@sHR)A<#Uiq$v_Re} z6kAXl+*fIKwC1*H|OR#Yl8@KA> zW+eGgzHUN2#otj%6A4_x8d640AkbacoOH2||Bypt?m9a2>aT6jRmMSoBWL7D6HSvO zO3DIh;M4W#y7t8e`n$ZCJ|^iSxvDU}FhxhG9#il@phL$NX(h6h?kdfvaQPeQw%PTv z9w+5fG#h91IBu`@neynvoC^Ua@%LN3Mg%>T{o`TLm32@>3&%LM$E8o+@o@v@=s=@5 z37_bLj7Jz3A)~;dGrbSLquL#U7%*I8SMZ|;T3IrdaO@3q7d!F%RyIT9LJTCU1=6Sc z|Ml!D6x6PoY^OD)2JU%rQ|QwZ*$XJn!f2k1l6?#%BfoK!IAjSpqa>|*IkSuZo{^kl zXXz;N1M!eEI_$)bhTFq;Q)^0Fiei3{Lnb$nDFC-;zkU7oZF+SHJ(X}c zuQ_mCEA}%~xNs>s_7^+|H6uM=?P+_;8%W0lsIyHAy&=m+#rRhQrdjV#t6?2kNdUysCrd|0+c^eXY$q}^)QA%rR z9x|wtLn=wV1@=-2phe4crD?dyXyUa%(LCP@)1ItyZ($*%zXcjBQSKWHX>~CZp6 z>SKTKtT4rw>GWlDK8fYRF^|T0qdFDE%{Q&udrMh=-CW@3N34iev8?>osiOyxO zEH-Mv9B$l8Zr~!+Vu}(o-`+MV&2h5F!u161ysxAzJkM`S5rNxH{O1hmnjsAa{$3(B zDLln6+IK81dIPcjZ^rR){LIAx*IrL(lk`A3n1zz!V4;POq>AGM#M@hs?Ou3*Y`9oU z)y7Nxu=TAp7)e_ky>by`Acm08(uD#Nai%hxu3wnjiE8TMAzWSI0t(YL6{oLyF&<3i zb6`+?-FDzr8Ps*6;FC5R3dZ<{COAh)b2lAhlp6QD8?Z>Ksf+*ngEi#DbWwxhh!W${ z$5)%N`7l(W0FC!3pkXNz-4GP{9qwGS9^Beab%F22pffQTUL0Zfr`)`GQ+3V%d^7ZV 
z)UF09l#UzeP1xM2%SwWkricvh!Hb_{?XmzGXp8QEU6>5`(I8FRfd{;X%-l*+^SNC%#v&Mamhvyro+`;is2}zVr}B0F=M)vJ6QRDu8$r z52UUL9V*+m(|%^?S1Xr$!=`;h&AZKHG}|;a+q9TUVY7bk9zFhfa9dG)ULZ}$3FJ6g z3Dpod(tMLKXw9(q#oN?T6+eu?gA8hC$Q)-Oc*K#(-M&^dTQp01rxLe#*#v7(dvA_X z)l{-1n(d1gF6T7T9{A7xhu%p*H${&brp5&qHbQ{KIi4_$hz503 z@@N~3!H%fZ#5}GG6}h^&veNzB1R&M%(q;|baPOK*sWF856d{f z63IrFXpHXXyDy>~us%4#OLF$-ey-|hmSDu1r_1RWm2}!#2RVGjyF{GLNavp18_cQ( z_nbj3)~p+`SrKXYYzk0m`xO1roKIMD8JDh4-PJ(Zdl>#mnv8!sZQ(Huh;6l zhQ9n&;;87n=u_6r_qH|F$xH5_no6eq20C8aa$X)}V@H{SgwSdKJ+6A1bi?d3kVP1d z*hOEc{?;giP+DJ0WZeahlm1AS41&BHnQlj=^Pl3Mdk*2CZ68v>xOw5oCmEsi-$)0r zP>kaCB%V^R_WC&?J!_BRS-yilWqGgS#*(@Y13T$011{8UGJWlg49~f36Abx(s{|UFW29u#ktJMa ztP$oNz;#kqfduu42IV5L(D?l>{i2My2-*dBh3V- zdhkLkA$s>#rjsD-E6=9*c_p`?aj9b%*p~D*W0PfZC_3Pm8UCgG3cfPCrteA zg8*M_Us1m=!PcTt(AOx$?M8U%cIhHcGr0vTF$AE_8Dh7-V2cU;(q1zs@xh@M7_i2Q zu!pc&Mku|pvKE7znTY%|0ZR@c;J515op{m~FrYAe0Hr9Vh&F6eG+;;FO3ZY=t6dv) z6rM=1G8zB#a{%3Rqec5ZzdCFErNf60Lr`ri>JZ{`+JM!n%x%J&QHQOKjtTxBQLGZ9 zJ;D!z?tg&V)iwl@qTb?~lMRUaAD3WCmmnb^UCQ~TT->1Y~uWHwMzDW=HGKCbp zz5Dd(Q*fmWR590r|5leAT)y=+Ew;4$hW~7{79*<(01!5aiuFrn6E^FAz8fP1 z-D>_z!D*a2Rb(WzN=8yAH);!-tNVj2gR`iCLOBEa)HVNbwq^0)9#zbwD#5J@h=447 zDWGy%pp>1uzC^d(b!tj|m9(kQw<}E6dw%vyv4HV}SPBYJ&yu5us(P zWN7I*Te%>4>8?0o&>(bd4YUSl-ui1Dz9Rrgf0rl9 z@W9mNijrnRz>~O-AwmY=X#~TZR3rsyff5OLg$B*2jii{AqGR^g6oAW0 zDoql8lukMeVo*186b6Q+*Q;BDK1_#GK`9=_-AILiq#(R5T2^-E=vlKf-74B)`2s`0I66HUvRGQ>t^3baA*`sxc2r5x^v~q zDDo^@o+0v*5f~9%X-T7n^7(f;Q%-VV4nccQaff9j{X5AW5P0R$Ponskp!5)$^&k_u6DwDQ0wfj?Oc%tn5`9QlY~Kw|PJ!}cda1z?el0_3$M|(d{<_ua*@)v zi$riF+3ym@$O~Z3XYG5(vXPh04jSgmNCiY|_y#glR0)>Vc%u4u!J$-o`f&1=lg}-i zjs-JuTlpB`uqbei~gV&2Gqmbn2?K3ffF!e!RCsU5y^4xk0si&U#TkdiqZP^ghJyr_WV z=v@v{p^A$;IR;)8X?GZqJ4?VneNvZEJn0pqXLEUl;ijmcs2cLh=AqCNZ^Dl zzx*TPg}})CcGU(TIDJz-^z~b&z)KG zPNKIM{z!_#W_M*PH3|{lBhFsb1Z)|&&^ezN8W|?Sm;A)HyImzX!&U2c?CA3gS+61( zCO}9K`uMU{XzbNr3Y3;Bl!T!{2oJ zw%5?1A9t@LUaD#62{*z*f}f_rT7O`Gnwos&d+*`$D@Xia;({1 z&N;jN&83bH_+j&(wdCqv5As`enB=Tv()MXe{C=QJHnSMW2L2&$t84W3fp4__wHRYD 
zdj-g{!;KM<6VE*6!ZRXKcsYl<=$k08U%x&xXD%m%n--w)&vdy_M%EH5DSF9ggvLSo z2y1RjWlic^f(@|{-OxN7oFb4G4-Vmjk=dHXZu#@i+g!{Wnp+}qB9~l^&LO7@38YaE zCunW09cm$Nb??>itj+sZS2XsVX2o@gu{ni|(a@o@q0gk(2#7Eof&Cp#MPEAe!SnQc zr@FLU`7f@z>fpV5-KcoE>C3^jd;6h*kB_ljULZy7u4MSbgp>J{7>?G@iFkR^d1q)Uh7F+KW7Y7LyNg!XQHhEN3YAf1sx7EL z@m3y}coWE9ap0D`s8IaNS1yl_DI_smbMXU0c$J#EucECmREBp&A&5ev90vliK5~0& zt6xe+LeJ%jkWT{EN&^#Ua>A4Q903)l&P&xLD|RfSP0u5lqInE8Qkb?`sB#haiWU_W zw-)=vi^29G2F?MZs;fAYu1&mAGyGf7KMooAwQb#69sQY!G(q~@u50^GYaiuF z7OGedN)v!26aa}gSCn@npWzssBq$BjsP_g*4Pz%MP*ap+F?y{!nqD5ZcZb7rim3mP5O1Gd z#e|02z#tyJe%X$zE&E8RqNu0?ZOJb8>5)}ftAjt?8LJrnD1E9b>1^O6TAQyyS$4Xu z2919d{-cAYVR0^&Iz-kSC*+Oy)Bdcd_}oU3FNz$swvQT^>gMTm)`eJ(%Jw8cqFV(! z@%EZ7pUZ1wLphH|)-{-Bgk4*e3%a2-y8b{Zovx|ma0mbCs}cdS-_P}xS4U_E63d|J zaAkG;dpUJxmRLn#ZkT&tU@vA#97fAv$+Jn*^yL`*U1X4pu1M_QRoKwjxn=0sIO&Nj z4M7_1AtO1)Cd5oW7a1A(?ELFAx7-9=jOk&K#+C4qI!cnI6cgl<(@6znmoQ}`lw@$M z+I?eSWY_r=Cc-@*q3F~a?sWaO?xXxO1rX6kqC76abmj9kX#IMXj++uj1k%Emz5qq4 zso*4faHJm7IqZZZ6u}iW-wm&u9h{9k7cHi4e}Gd8XZodX&QdB~xehuYRv6HZW9uL_ zJ9cK^rvQ`ewNZS{Q@v#QG-OfH2?FH&B%#uDQ@(md9=2+bmij94ee-r-D|5{Si-y3L z!(3Wr!H%ZleoGL*izGrYc5o8u^Fd$vutJy+`ddJsq?Zg>Z=ThTm&(%Qvhj8YQiEQG zmSwr~6P-6mO8GarFv$?f*i4N(Zrx|s*RIQL9|5}i#KJ?{FF(GpK4}f;wD=(+FymKs z$1|N=D<}gGU+8e-bBSf?xQ8_dvgK{BT*(2vFgIPeq^sqMAJLiFYjRG_`vu<+_c_PJ zYN{v>HRe|R87F4mo}`kuV;^bAuSIDo`5k@KfKIW}EQu`T?n00S5kF}$D)y8d1}^qO zrURHBqAp(wL^MfDd09xK(AcNY`;|;7dYL`@obhwLFc@?LhHQ72bB|m>v>Mrn?w%pE}BTkMu0m z@%--5&w%qS`VCdOjS;9o`ci;()>aj8C*7ONEi+%phpP2qrg5$wV&@0K_6GjZ6)1?CMsNFKN>dk+PGLrK0E> z(Gd`Wq*eMoFhZWa`oyVI15RQTpDZn5Q!9|R^((4v(y`TWSq3SWAW~Fn=+OP8=h@+^RIIMXcNqUG?C$NE7QqJJ)UD3IOr)Woe z3zxxeaCfe!YzpUsRo7q6>m^?-zU0Z15L8ya-b9~8xzW&u>M+oOs||^hM_W9D z-6lOw#4##tz{IH8@k4Ju@uel{&3)Hsm3mxo;pFa$t_zu%NTT$m{cO@CEq3C^zRW^~ zhVR&c(zT>v?pLMfNlYDGYJn+KYBCnRkZ*|S@}<5qOFjk@@xH7ky%)1I8ZDxn$#Jq; z_wn7T=sA6%(K(7f@MP*vt2@lG@x8(WRyLg^y&`S-xO-O)bc04O8UhA{C@y>%jWOh{ z-)UVaoPU9Ooc~vyI_?fjSDd(o(uGaW(!k93 zM1 z<;Umu;vSNj3A!$#dVaw{<4S*R0xl^8*Mr7(ZtAF^mq|*kC+;5_AKC*vcRqM7SH3rm 
zea{W*I(#;)p{F-!Hap7-uP#XpNbwNvxFodG(>p0WNi5|H3s?0N&Zp|mv~Tfn1#;0b zH6;=IjRcPXT?!{^s}28J3lm@K>IhOn!{jt6h2N zS>7Lkfwo-O#Tu@txLxW_?S-)~^lO`G$ongAoW)ZhhIu}Qd;LCmYr9zOTN?nXu5)Q}ZyUVFJ zcVmR4k77%Ny2UA5g?9)r;mjAq8L$wi(0Ew`duAlx@K6b_E=CZfWmYC^zoghNPsP+@ zH$e3mWH0egJ^(wQ2*wOr(qjo++WUZXR~8Opoy#RR_t<@TTYJbr@$FGbLz37kljGdK z7Qy!r@J1OcEsUZ>+Z0QM41o#}wZO-ZC8{Yx)ZQ<1-3D>1LV#nW5@R0BTlv*1I6k?`ClK_I8yIWguLX1|96d zu3RgXYG;hF=fVa_B)~bK5mqWA`=5XDGl+&+EEA2j_99?amc0#>Za)Z0eJ$pMYn0SL zTF?q<)@Teuvd_lT5v*UD(8@xVZAw>mi?)iQ-J&uNb_+x7uTh;3F=3S(KH~w!EuqXn zdx*S_a{V})OrVrtkRH%`F^J|I-*<*Pr;lUFc!2=c-;9;94{UoinDw6e3=bu>TrV|& zxT$MS-Xhtf7VXbZ-C*`IUk~9vU+?9kE7E$!eK!M3Yw6-}tHsEg4# z2(55s7KjRMdkmU6NxS2OY;p`2D-Tvhh}JY#h59A1ncQM)x)3Fsn{jX?)Gpft*74t8 zdEAA%g3kx@cn^zhw{Rv15Jb3wyYeia5+Pf9CiP`3hqT7y-c(t9S+O}U#gZEMB7)Yz z+_VE0%SX~i7UXp#D4z=2xTY%-r(_Yf<-eS#je}7D!$NJ-0^6*ds}EQVPO zjT0&3Op&8YK}?C5n}(4Bs0n;{>Yv;r^}wHA%*}Pm6UPc|^$o>bmY;Zz@{j$_!T@jf zo>;)pBtvXL)#P=D>)#Y$O4klKl1Pu)vi($3!6+ZBDewDC4Auc%POn%EZWE94Cs^gFe`=R}fYY51B-fN|n z8?HTcRBDcQ_fGN6MU4P(RdkukCe%W!*o1eo;qbdd?=ZG-2mw{N)z_x9z> z(|%2xwQ7|OkMbNM(*L|R?_lnS&~noEg+BBMSmc6P`@TcAC2$>{ep_~oRL7Y2D+lkou@B`vQ&hDKdqIvhOZQ3~Rd08p>+V^r+SwC<& zkr3>FyPJ5_%8VJTL;v|cg1<5~vvnE~tN6D$|Kgu!yUi8_3O#_`vcMWwhm)<}roa>J z4P!G9B8lmgGk?p>^;+~0!b+l+6$HWpKbReqK7(bvJeHFddXPsyL=a&_WSz63A&k#W? 
zf~6>{1~uFmNwM8FmZNxaB>{tFyQxX1&^J(3Jf)&e{r@cuNym--f91`sU&@4+xVX64 z1>>)I)F(dp57S?Lr7P;{)!UoHX&Rd(M$ z8Yntxpp_zV-Dk1bDX>KrD@4>u!?2Rt6}9NblQVQy;ws6Y8Y!6EuJZ44URSPy*XSIgrv#n(Qe|rP zET#$fl&}b#)l1Qfs>3KGg%Fj+4mc)-g(Ocx|8WlDec+&#P6%*~ql~zDp+axE(nrXt0EQaOOkVu+_>V(5>3K$j6OdhDrNcx z_op`u7D|#sB5MJ>=ykCf_p|Vc=zzt7FJpU;x6oT&%#iz^8e6a%nUzS(=l4tpIZDkW zZAj3i+?xBgr9630U2(BAmDVW2j3D3yzwM-p1}e_UTS#52FdOQPa1}lKC5*?y+Rs@# zMd(8@w83|F24@&QwHwPHTB-FNeIEV9>1p|_vIT?BC_ToiQ8N(Up3bg%`QpVbVs12z zWuE|YC4&}>5FF^r)+yRN?g2ng7*rECg@Za0FQ*($Rduy3fT1{33h62)-fv{m3`*|o z1C{*RtJIlpXX}UlAz~zmB+{)tr5~Y*q!A@|;a8tCv&QeIW%&Z`7i$fYRD$BD}@JVSQ1 z<;Sx05(1cgwZMc#hF&y+NLtC}?UzDW2;DhP*v$~8APw5+p;~(T&_?R6aLFVRi{rW8 z(YWwaJ+<{@wwNt9Sikgoh_2nrRjXq4diCtNFejUM3xd6agNQP*bNkj@hha}zC}{MN ze*DJ`j;iU$-TnBz>^X7lU`*wi{P`@=Bqut3k|stFY2}1QF?Ja4t_;swi0!GAu32#Y z5)?Tmm!ktq8bKSX%NoC`{nGd()*T_AfQPxF2S4+vipPZHyUT0yddRV-DT1@24EwMf z$+@^1%C#`8xZ++YU;3U_`FR)PJec4791}&cG{h`W2pq;lO(;P)v)Jl0cqrB?2dq|4 z%Bc0Y&ZAUPM&J@B<3G$IYswx~lM%)kdymH&ZHGQ<@CKeQigMZX2|sDr5G_{U)R?GU zyZov?uS4~8E#`k!#ziBw+4hfYTYYJ<*_BpPQAH1 zP{5AN@)!7W*ys$DGS$;8p;D8}*6J{*ckgT77YPQM8M_2&BH#TH!bMZg3Y1YY5Trd+ zOx*k7@5U_GD}kqtQ&)J)5um#aE>zYYUqI_8Bh6cm}eF~rpy(-WyeG^L)2eV~lDoAxs9$YaiX9@yF;nUgHr1CsJWTxXlH&)WqhW*cEM<-H^8dN63A2$vj{gHaq-bC}%S9ro z5+AXZo!>$=WDs%tI|rP6`sG&(pNy{S-%P{Z^&qF*^$y!2=bzKJ4|!ZDuzu1_7hunp zwR-WNhT;tx)RoFZ`8;rV13af(tl7f$elnVeM#a|vMUXpnvEGLzroq%>3F!K5S&)eMpmk+Y|Jy~FQP+KsiUK#rGsWdHdVwO#*=`X5<#s78_Gvvl2lSzc~Ves z(k_8Z1wg?5JyUbzJ;m#w<&S<8OIgH?QiuRt?3F4?*IBy`M3Xyf*8M@*LVuE@!`Pu8 zU6gZ){y+8Q_Ga2o@Q*~r=1N_`n%E{%V?y9Hq7NnfuwjnW%B4ESj=g{c4r68GF# zH+8yZyg(tD_P!tCo}VDYFx0`7`==5Rzm#r#L{h9&=xn$-*uLHtP7As0Gyo0G01`NH zmJ_iDs(Gv)MU|$-f4uWp#l|&r$T!u1%a&IgH&Pau zl7S+l@0nW;wyG%tr6{p@kR>UMVlf(sO)v|p$MNF(B(obh{e}ph(IAk}{ z57w9MDyN&42AKDL1C&g_JtDvYjp_qjLden^a>sT+{C#r3 zBfJ6HLb4t!Joa@b=Y1uW;--)~6>`0_Ru>t1P()t#0+e`yQ#xb*U;VOir(=wB1k~S^ndV}9{^nBsb0(ujkW1@^JUAa8A4{`7 zT$tc@JE%&eawmyk$Zo2}5hh7&M^$7~!6ZudHs92RW}WSD7n0V>y@J5T6vsmkUc*7~ 
zB>#F1FvKi5v6FYr4*lXy%Liy(z*tMtHpV!C5Uyo4l#EPpldn#HT&$Tujdi{e?vQQj zFdktfOTy<_S4ItRlgFunOBbatmRm`**sb+Z(bPXjv!NL7NTDrc!?WW)^4E3ZrQ{tk ztQJ?CDt(@fnDR^7(`eE?$Iiu7ZxJSwJ=WyDeD}UaBG{^BO3nNZj zQ!9&#cA;wFWs@gCowMw8{?#M$?K=OdMdju1be#ite-&in3d)f*MOOb^l5u?%5)SOg z>E`%N_donuIY-)WAKWrP zxA#APY${6n`Sdrx9cuow|5sC6hkf-&(ygWwkBn`5MBjR(e^8f^*L`YJ_Eu^evuf`JN$hy}x1PK6N^JLLimnNKQfJ>`E}wN0^M{2_$PAv> zL8gR)DSKTVRQH|v`OpQ7b@b1_R@rf%m5)csb~~r;Uqp5Mm`lWzT`B=clN1U-~6)maw$~2`>tH_nu9?qK}|oY zdFoyUm;b)oNJ-lFXqTRHYCX88Xp9m4FN>@NXO=NNJn>6^7)b!&WiyKjno6SB0I8eo zW8Ui9Qxu1)N9(2Z<>l}D)jV@0(UwUYZnce+yghP4_I5r(_M?5hZTydi;w=U;GSbYY zXq=*Gg7C)WXkS8-ZQA??XkAUeFBfIqbY40?UP)n|En*We7$kw$008b9)KheBfnE=d zMjR^gGutiOK&TPIGK?T);KYu<9c1X8=Gb*@XyWT%?-;&hx27M`0z2>%Ia|=L%di}X z+B2(Ebd^vBr3?sgD>1EAtto&1^MX%DD}HD-EQ|n|dgaRFw9Wr{r!wu0v}Ni766@>N zu4%2dg*YeOcG%HKG4x-uJLhxps*9l`TDEMt=EdJlgpTHz`7KnSN?vil%aU;vrwRR1 zHi?|~n372UX#|j(sNA1E ztqJP>g@3hbspcG4Xz~w$or%S29 zE9vf6k}umVr4`;u4p@F?#?y=8ORf(Hn18IUflIMo@*^#cQNmUTILq77>c$@1MujQ^ z7a{l@b~I5^u0WRyPl;_u>cE@za~Czvy?a)83SORXyQ5Ca9?KL14kS^YO1m;;oaKFs zAwy^y8m%>*r?bFv$dLP?Q7o+o4XMy| zPB8KLVu!mFNfjn98+G$-5d)NG^`)YsHD6|7sIIRMne${q5^&Q~QYe{>JO0p*=$NH% zNe0UPHt!g)_XptozN!y}zqAtOg4KTN)G0SVv~%LpC#)TzJ{w7}l$JT6YwA8v(G<=t zudftNLRlodj^)oDU?a2`L}Kgm>h0Sdk&&*Nasi6jiz|QarTDAQ(P*f`5ne0GVyeID+2JyI#p;0kO*iYM1Drsv&e{U|=B28O~d{h%LbFo=m>u8=;1|h8c0$I(Cr^-Sou zIy;QFu=x1=K%@{8)hDrF0NCxBIS@i9VSeP72xwh8l3fuca~?yAq+5=R*S5-)y$XRd z&u5}VeMO_K_AwZ{=KUZ)KYztq!s*bOnhzhw2}?GodDfS^WVnTeeJ_^0?)re{h(8^u z&-+X-0D+3&6HO;hqg%l^T&JFL_r;4BLHd|=EwqwcE%hZe=BtYJRb?|&8GMD!2miBU zk9;Fx*ZcAHwyLSLc6dbA5}*5&uJ2BeA1v(AuJ8!n%dG^#lDp*t&YCDcgg6JGq{d$# z-bSbkUP<{|PIXFh{CnU;haE(z27JU0{ZD`(IcergGA_5CyPo{)18#;Z-h4uVDBbxT ze_Nj0YQ&fCb!vn>{aHxw7wirv5B7HWWL5)-5^tDVU(uaP&wg0I_Q5fY9{>`_D6k2c z;^8d)(pzAT>6Q_8RfZS(0kVcvQ-K@3y#M9B@BOyzWHZIz>NcbHa87U%5SsL?@4P<; zQ7U=iC0e&++RoWZEEl*Mo0}QP)3~cg(2LKC5e|$!?j|Asu+qHx6iX?RGWb zhwz1BA)bHUXPR2Zr^|XXa2%`@4*m#2D~!4*A(}L4B7-}n_YJr8W#t)lAEE>gkn{C9 
z4ZT{Nx@}v7%z-ZdPkCn=R%N-qeafuuRNI|XirTPHNE~tur%W?ll7$K=BF;GCgo-(! zSZQyX(-N2BkY?h*pa?RF1A7x{qEI5B;!q))GMEFP5b*zsilp-`9Oz=XIXv)oiuJeGl|F!V!6ItHUZLBQLxJ+z^RH%OM;s6Q>ke?*R$TpRCU7 zjb#ZFdvl3=6wmqf?ef%I(RA)qb)-(wnx9guCjYte79Y829ch3l)v|Hc2BSG^JIl6Z zOBC(3#Nor>Gm1{hq%Vh9pC{Y=fIdft((am>mk4+?LLR7Z^xgTas$bi}}qS$qI{9Jw&z1HG=@ zD>kDh(bzZNdWf#*5DQ|$!f}HJ4HDsF|8%j8{vN&i5C~Thur4 zM*ofE7`6~pl*~lR$%N8A*z1T z&pgGk*yH+k{>$T=QnwhK-XB#m zGlk?{lFS~uD8_r zyXrzj!uuWxlRMb#dAg4oZtO^vcO>GtWIm|$th|R@^`M%r(1YInrDyd))JjL~#y9+$ z-WlmW<^736TPJnm?e;W^M=VW)@dx(qeFCu4;Vk&yvK=6mGvFoC;=l6RE2}8}8<$-u zWHB|#dukJqJ-@tkAw-;aK`DT_WCVrId`4cRbq!r<#^>7D_!M%on|Pw{PlNp>(%P(g z)sgVN)79hp?eXC8q*OH3?5a!A@r_^cY^t9Hb{xkFL#o|kr1j4d$65kt;+@VVzcK^n z=)CHDKEJm06TfNu-NxzLbsB~{Tkpd7w$?|D6nU#b)6NkcM9k}^kKmR6Le-xhrjI63 z3`-T~tp$bGojkW%>!>|}T|POG6gz@X(yvc-9jwqapo4%}-HqCmW z=hjE4fa=abiuoxIb&NClE2)z~_DP#qi#vDis%Lw)Q+L?EnH-NF(9FZ<(h}A`m0*GtiO-7ZUgle|L-du%OB~_ z^}5oM@KN&$xG^ts;R`k{8_9R3EG=M!+}-G2n--3-2`E5|V&Av#&5#%{0kW6&ch*?{ zDzMG!>D%{*ueAyI%TlG*H<~Op+{L16yW;rMeh<~{h5c=3RPq9e^LyJ_@*EULz zpWLn{)7M$wZ%7X%IxU{{*^xBAm(BPODj>4SrH6M7LAC^t8Qwd4bXl1{^lJgUazotv zu84@d`Z)}Ox^($*adGPWV!!2Cgp_nhaE@m|7UOsBd{o)?(1d30t8Rj6wNVJstMDxX z;EQmeydar5=c-})svBYEw~96E3&blYvCIbjB$ z@ws^-9xtCLJbNv#Ukmw4dgu9MB9;tV5y>#A`ReaMdw>(jWy1!G=B<}YX+ulK@#C12 zOWcT1^ydfmoDEKFXC%fAl&gaS#^I-v(?NkTYIRdO}{p; zeEsdJRY(fz`j7*7;!%(pG7qPo$p89giG`S5O$Ug?25O(-r>;X=jC_gNx{N%dAy=>5 zW%!4k&pv+g#PQNR>xT!9wIuRnMbHE`O>&Qkii(2!?#i6|7H;^+4drwbRYHBHfjtYE zbTzrzY6LQ4_T^L`1t%3^qXEp7P_3SD4xEJ6r7QVGt~FtepN3vmxgs@*y1dkDH(PF4 zOz}sQAIg1&1TfI0Iv6cY+`rl;{~8$dWT)>4(^}nM%(k&l#>LF>EUG5&P2WDF!!&BW zCv~DhXscfK<(BIZ8L)>7i62oHWrST7`meL?(%vE4nHUtRH{;NZo(-LpnD#?EVUoM_P3MM7NGwrhbwO zr(U=5i2)N2TWhv{(f;u7v`BnhvOUGv!ASpAk! 
zB>epI#NblMW9;#QoZ?fgRK-mvKC{y!@mR`ipv^JeAH z5^sE?%v%6qxO4UYX%o<(irroPONH-Wu^ji+zfgCr<$SdMbNnL930?g|zpwxMhgF47 zFu_N_E1j%uM+Udn{fkxU0>VRMZ}Dn=Z*%7_9(W$H-eSUX-)3!$ui5l6)2AaDx<_y` z?SmleMW5qmsDv_;O6qiMYFaCK?c!x2VH5Ocl{px7Xa>tJ z;$NdXXlEqwr6Y(Bzn=q-frN7u>6<&93(a3SckbNpj@Cnxdob`@Jwr`(W!uswL(^=Y zx#NW2%%{+xG=LZYn!_*b{S@oD!U@;#9a8qJ&L#JmLsk~GYY}~6+gow--pM*eqlLb8 zJc*Q~cgqIWeBC|g*pzYY(5=$O3|s0qj*(hv%K>V38}5NYCXZ-Vy##F5D50}}6~^I! zCNYoUo*uQOqU@Pg0lmnykC9V*JHA@;`^l^I4MLFz^K{?qQic~#wC$XmC!DfQNzMkB z&G1uF(hpD9JpTflEMI_DyTQoG=^FExU}y%41?qu=&4`R01;?jvUjkClm=1kXCn&)= zrg!P6TVDecf3kEnreP_=nh2&DC)Ng1u#E;MCnt+v^ZH@VU`mt0G!SIQirRL3ykE|l2Ff*s$xLV#U)*Ep2tsT+ zWvX+?yaYS1k z<#njIuAN*O`#1Oo>_9bCxU)4TvL$A{S)DTJ&#$!1ET~=cKL0C`1hxMA4=ssq?Vz3u zXMA%ajYaBw`+1<5d$)c%uwK&_Y@PwPQz|A`^o5j@T+yoL9qgOqIXJ#)-N$2S5Il=X zD_COu>fZC8_T)+BFf0E9o1^7N6e=Q(cW5@<;&04OqzzrXvu2bi4LX=wVfg$;?W8fr~#PC0Hg zksd-43O(Cu=_e=&0lPwB|)0Hd2FC295jsjscov3>iY(nrv# z2CuaHG1WP9+)}`tNE&Mng9e>7*~?v`>NOC|Khf(Qys8B-LC|y^pH0M(MuhkGtm9g< zMgsOyG5mUX;`aSy+55=>klDGiBt}4xL;EY9J&*A-Ia5HeZV#&JSq%+xj%3qWs{Ty1 zvp$*m(%du>is3FU8^cp-e$wwPx_OCx+TkZnYeEJI%J`c!nUZI!`JM50md$B*5BoML zC|lNI^@N=BHNWW6OtE4K2&C9Yc+kj8WbpZa#6<}%Za?I2k%HwvcO*%s zz(1}?jV4f!lxJ#qKok|?ic))g=n>v0HE;a=@u$fltI z^{ILga6YFc-LH95mrL5*E$JGnB_YefrE_bp@cIvfzp=T}IH}VD4h?@AyHdZ&%@X3R zI=|Lez`rbz-3dr#zJ7jY;Oe0xzw1}0(}l1WzrNVd z!Au?^ia0ljoM&82c|wDvje$oul*j{*1%D9m(ws-npZb{tHj{SMkP{Y?qvaABwbzys zMO+IKS7o|Sy9-87`Lh5XJJ0$)B!yv!LjRT^7FIRxq;`OS0fhA_OVIo{==<0S&1lZH zeSXr!t>s?E%>;ZJAaWu3STq`2pRz?QR=<_r)r}HS_%KL%re-gr(!ClnvHH%~T<^e@ zEeeM11ShLMU#}-lOZzG>K1sb@Sa_HWD-RN=8v%FLur@V+H2d`lJpvML!gR4*8?FW% z;8zcOzx=DR9C@9!aIFcK0ofn2<-gkmq6Dbf4k;X%sDw|s+ez8$)r*?&{9v|M=qb@y zVYM!_FR3o-RC6@J59;&5OK+leAQcURf|z~$`SKgZ@~gQ-oAHZm)k%2%T15~2`M)B1 zu`iy#5(fM~`Jtt@;8W*5ShIF*Iwbe%neS|8<||1xE~_JUwrmHaiO@jA?Bipu=aL@6 z?~nDli$q>fxktt=0)y`fCM8ojHubigZvmdotfUHk$Q&cJ7G<7B(PEwCnQ?$UEJ*>0 zpdqQ#n%I8VH~ZF0Af4O~Xqa;KYJ6fv)m?FkgvF`5V?PY0ohwsm7D9IMxqI^DNzE4{ zOEuEoDiFZ@Pq2%Ker=QFHl;fEwxI{#6e_~Z@O+u6igv#G#sK-bFh 
z6z{ke8PHigqU3pgjFgX_w}u#K_A;|}7$SFQU&@(2g@VSR#CL!~?YY&Gai9B-ST}kV zHs~?urX05%KaA!4(}Xp;vv-;=aDHRL=OD(?<4g2SgzWH>Y2Ir9EUq;3Ef|qlmsZ&J zcE75nVBZi7BN%On&*DBL&LrqYq!>mv`?%#h z9AKorn+=c_$FMa``(#`OrNknJSIrd{Crn0@=rBnQ@|h&moJ?ahZnF|X174p8QsA%X z`h6+pL9S;K#`ExZ4Mr4Ku6UQj`U%EPYZ{VVjhP11kmDw(+;ADF>RKLb3tA|9T!DZn zi$Vx816Z$`)C6q`@aPB&`99`(R23sv=6IJo!t%E_sx2+%h9=O+%JR!EzodUx#O$GI zn9?s^pADugc*pdF51fG#zV@lp27hd@EQ7f3$*+g%A^iTxjZ&7=Eu#WB&-3(b*6}m1|G_1YPsnf z5&gC}jQAK7LylJ#GelZ5Ir%qFdI|uialW5lp~BUWYu#;fE6;;v*^H;g2`dyLqPk@y z+Kg&&rTQtN2Q7P6@-BzVq9!6(OzJu`J#(P`+oo_f4^tMf?T;|U+->|c|6Yi6_rn90 zZfpC6Qo?&%tHuOORPvdsv%7yz69LR&{e(a?J7V?FMzpjjq zthg?K3cO0QEi@U;&U;xKn8L%zGUgZTE{YQW99`r5WAV5OssGC)>!zbNfO*otPFXgx zOI)}7S2>Zts`L7z27fKZAV?FEI@Q-U!mLldu}40Yv8T(2>g%m68q3HzsN^xo3^&h7 zU_ag?F>~b*t!ry3`5z_GXXMrIXf{P&?v?E9gi&7<3R}>u$rFCC{&@ZrOK$4%u+^X` z9jCL}=Ckj@aJ(pgyu0$)jQk6GvH?ojL2iT!RkcLQJUWS{il&2OVGe-pU4qPTJ3l{+ z>R7^0vUOFJT8Z2DpV`p*2q;P(G}MlNma{Jo!m}rnRg`2)RtK2%(JAC~=OE5o}k1b>QeCEQF*fI+%mf zUhDgG4|$Y5gf%f>3rMJ$Pcm)J!c70X?|=boSR@ zeWjsyCT2Z40v3O8>%qU-CK-(S@;bH5WB8d`E%{PeumZ)+lg}VW6&~Ir5p00y2D3ig zDMDk*JuGJh^%bu@S2Xw0mk^TkcIy>9)~jF0cTc3cROU2CLdey&_g?H9Q^u5!OrE=@f*3DR?P?^{n51FUKmt|>a&Cln z(zzhpS!_}f+5Is)*Dd6mJc0hxByMMnf$Z~BR@Ehhj=o=|B5QD?f@L1V0r=K=Jd8|m zh^Xj!3l<3ZM>lD2gt>2AIiUpe1IM!feH*|0a=^-kO>`;p*|yA&-G@7gm=v+y%$%$* z*Kf858mXc~d}a&Yn!r4RZKz8jZXKhk^x}g8s(4Kof(>(o7TDX_k;R~n;thD8DOpkR zxHS?sc^GLHZ&$>nV$Yb1;d?cA^4+H+)@X4$BKF=HRWZgy1*cJqqrWJmaGXIhBI(`w zGVWZYpa|+LP60*jn86MtEO znBsn@*JdIcm~=JjdUNPC7Z>^!ZT_t9A$;Vs9xVaUh%bI{HR^!fWiR-3s6-wuJ3_9U z(&`o!Cg+WNmPa%8bae0DbE$`9hHFbb!`Nl9Y3LM*_PM2} z2pySh20QQkpbGr`ZvW$Wmj~?N(yiWyo@&u?__Y7?ly=^m9jnsIfQi=rTT0&E?~>z5 zVc|axIscFOfJ@u*@<%wl^Uqy82)jS};Dd>~=CEIvp1@U+0CTZuJAMJ{`|PcJxM9WZ zHzt=peLBkAp(i5f|CG1&cA$4tge89qvu=R{u6k{m5mp+Ir z>*C`j;10RB1CO4&PM0KQFmH>tWK8z0(i^Gm^VP}^3vmI% z(!Q;o+E3)Y_A3U_r3;$lsrz&8HH#Mw`VcpsT1-}^yWT36+cCvkTcg=O@bP2L`MW(w6lXT) z4Wv>Taz3sjV{vF57ca+@i@PC;KmFNv$|o!&Tn(u-5TBv3h#TQ7=hEb<2>I# 
zCapudCJGaq2cO^O@SRiAGj~>Lv~39%86nN*XgD0cKcA_AAv-Q0aFXsPy_$DH76XOy zG8LOBL-b~#je8uPTAnbeAZ-uJ=-F6h*11>U@o51c_rEj1&O@AEXj5AZNTY6#y7*=f zHo#_77;21dZu=R$40G@W?egnQC>we8J1W+6anHBj`hM`)wEBzeJf?iobRQeg98m^s zBqeqhyv|2s9`)J`$eVj*yZ5y&TUVO{A9TJEgfG(37qchFr2N%8a(M?2g42BkOR6WF z)n{kKk?^`lr!@cePBON*zRU8y{AbX2{>2_V@u6f{a&X0QZqqxBJ^+IU-E%O~ue^jo z`FOW=izYG>&iqCuWs*1-Y^K*5FWHFz=;F5b1~7#>JY#oww*rkh0W-Uu-6n5Q#`Xt( znGa^n{@@5DNV2aj3_Ho~`o!7gUK!D=N09Sk=kxQsFLckmIWp5P{>Dg%Vx{>0 za@2cv8@vBR|9hSuk3GB_j70L|J1Ym;O{+djBsVneQ?bBNdG(s3N92#b*8z_-G(DvG zgO5OSC^@~Gkcl0l@#O-hB)a#Mjy*V@PFTmiY_pXp%n{!SSXPk1&^s8MvesMvimuo@z zrmXMI$G-c=DWZA%55Y$6qam&!cbi6*ukJbr)d7edqbnwN9-1Cf(((M#2Y9x4Ke)@3 zshg2KVgujnvP3z+HFF@s*-?d%eu@^S?nz7GL+8{v&IuKYP=2Old!+lSs)$vi`&QCs z8eK|h+VuUH_NPKK!#;8O=gd}5b;$wOfgklBF8T5dkl)$)QOl3pdpGzz>g8L{e!5ne z>7D8P0}CMZTJEB8xE;z-D7T3E20naj@3WD9-51THDIn)LQMA=E1KQKfBs-&Qo$Mz2 zGr;5taeV72F9JdaIY$^MH@pack}P==yZJN6EP6@N45JD!4M+(H2S|=MdN(ftdlhbkm1`KbvNIt zj4a#nZ073q9@!*;r_arB=pb#ZTY=_Wq|D1q8)$-P?|4>~{oJrH=V{sJ#fL{aFvKK> zexCn*qL@Y}8jNJ#OP=L!bw1Hy1*9gd#vhe(#2yV9msuXp1k(}Y%Py2TUc^|`$BANe zclg*xmYibEi`Cg9Rkwti<}3;rO~S3)=Tn54>pgz+n0gj};DLm!i;KRcU<7LImQ@j) zLGITGUDcc&Ae{GzLs2JYL!pex3o0AuT)X31xJ8=bd+X!ifhp7);%@~GOUK(-s>PHd zLBNa#Y7>or%@7f*4A7~jady=%cDRuCCfO&|BpxJn*q(>rgc!UX(@!y>MU)cY;<1qA zEswe3oRdZFF&>u%>T@!M7p`Cq%SlQve76S0Jf?|xtnhT@auD$0l{u{|&&{Vl)eXlV zQWxEU*yPjw~T$| zOdIcXQpnnmNq)t9R)K`mfI1u)*?qsFcfGn!9i9+^We_ za&n0&86)2${YInpRB#9z5n;TRcftRAP0Iz#yAs(|=6b2AsirW`8GTXV)3r41BWlQL zond1?Wwt@nB3CRiA7?CQb~OwaNo|d9v}=n5Ypg;7N$dPst-*32PaL9F0DyKknkC8k zwL{KiILUgIyvp;DlPcd(w_yt_kBnBcFwv^HF&Ro~4Ya7-`+-E8l=mKYiS-VpAUsG; zFQvB0B6uaw7^d18^GsNKa*90PB%m_45r|MbT0)9kWXh0{R8|*& zsSwDoUh50>+vyCm(SN2`5*ynnp{~W~g{o=mh7B7Ik2|dq-Kv&NPpJMy;j!~wQJY+I z1G_q?U28xMX z@?5!~a*vZge`R12Y9OSLBUKaO7Q=<=J+Qjb2mx51@1}R9Js!>+crWto=$C#gjeWk^Ld`zvm+LNd5P|*Dssk|o!;M?Ch@lyMb% zIk#@PM+-lrn3m0r?D0ghkOMIu9ZWXWK;`>XWW1sIK~n4BO=Q#xM5L#zH#aRdmcRmt z#PEYPwvMuO|NX2Zf=qQ0X768>=KSjSH{9N4a*v5W{==fcCaDqD(PHG}UdRmj%0RSR 
z43_C#eTo9tg<7`tmF-oeO5W#O8c~Tfs4CJ- uT23K)^IqEYkJ>=yOYiNBp2(@Hb zflaKPkUf{>xWI7Jwnh^!5S07g)o-v$QXk9f9P{MqCcu^IY(ji@jPdZZxE5hi zKlPh#J#uV@tOKg@$1>L`wa2V$t`PE$erSO}11_zx_G$u$PzrCsJVDZZ`UesmSmmZJ zYyN&yET10m9}APH>QeN-jha*~ysG~vv5-#K^B?BF`v1ufQ!8IcxKP`FTk4rD`u4_w MpMG?>|M)Nd2Y2> None: - - endpoint = os.environ["PROJECT_ENDPOINT"] - model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] - - async with DefaultAzureCredential() as credential: - - async with AIProjectClient(endpoint=endpoint, credential=credential) as project_client: - - agent = await project_client.agents.create_agent( - model=model_deployment_name, - name="my-agent", - instructions="You are helpful agent", - ) - print(f"Created agent, agent ID: {agent.id}") - - # Do something with your Agent! - # See samples here https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-agents/samples - - await project_client.agents.delete_agent(agent.id) - print("Deleted agent") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py new file mode 100644 index 000000000000..f5cdc8e56262 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py @@ -0,0 +1,85 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic agent operations from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_basics.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. 
+ 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os, time +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.agents.models import ListSortOrder + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + # [START create_agent] + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + ) + # [END create_agent] + print(f"Created agent, agent ID: {agent.id}") + + # [START create_thread] + thread = agents_client.threads.create() + # [END create_thread] + print(f"Created thread, thread ID: {thread.id}") + + # List all threads for the agent + # [START list_threads] + threads = agents_client.threads.list() + # [END list_threads] + + # [START create_message] + message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") + # [END create_message] + print(f"Created message, message ID: {message.id}") + + # [START create_run] + run = agents_client.runs.create(thread_id=thread.id, agent_id=agent.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = agents_client.runs.get(thread_id=thread.id, run_id=run.id) + # [END create_run] + print(f"Run status: {run.status}") + + if run.status == "failed": + print(f"Run error: {run.last_error}") + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + # [START list_messages] + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + 
print(f"{msg.role}: {last_text.text.value}") + # [END list_messages] diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_stream_eventhandler.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_stream_eventhandler.py new file mode 100644 index 000000000000..3839bf205306 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_stream_eventhandler.py @@ -0,0 +1,105 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations with an event handler in streaming from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_stream_eventhandler.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential + +from azure.ai.agents.models import ( + AgentEventHandler, + MessageDeltaChunk, + ThreadMessage, + ThreadRun, + RunStep, +) + +from typing import Any, Optional + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + + +# [START stream_event_handler] +# With AgentEventHandler[str], the return type for each event functions is optional string. 
+class MyEventHandler(AgentEventHandler[str]): + + def on_message_delta(self, delta: "MessageDeltaChunk") -> Optional[str]: + return f"Text delta received: {delta.text}" + + def on_thread_message(self, message: "ThreadMessage") -> Optional[str]: + return f"ThreadMessage created. ID: {message.id}, Status: {message.status}" + + def on_thread_run(self, run: "ThreadRun") -> Optional[str]: + return f"ThreadRun status: {run.status}" + + def on_run_step(self, step: "RunStep") -> Optional[str]: + return f"RunStep type: {step.type}, Status: {step.status}" + + def on_error(self, data: str) -> Optional[str]: + return f"An error occurred. Data: {data}" + + def on_done(self) -> Optional[str]: + return "Stream completed." + + def on_unhandled_event(self, event_type: str, event_data: Any) -> Optional[str]: + return f"Unhandled Event Type: {event_type}, Data: {event_data}" + + +# [END stream_event_handler] + + +with project_client: + agents_client = project_client.agents + + # Create an agent and run stream with event handler + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are a helpful agent" + ) + print(f"Created agent, agent ID {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID {thread.id}") + + message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") + + # [START create_stream] + with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler()) as stream: + for event_type, event_data, func_return in stream: + print(f"Received data.") + print(f"Streaming receive Event Type: {event_type}") + print(f"Event Data: {str(event_data)[:100]}...") + print(f"Event Function return: {func_return}\n") + # [END create_stream] + + agents_client.delete_agent(agent.id) + print("Deleted agent") + + messages = 
agents_client.messages.list(thread_id=thread.id) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_stream_iteration.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_stream_iteration.py new file mode 100644 index 000000000000..8e5bcffb2ccf --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_stream_iteration.py @@ -0,0 +1,85 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agent operations in streaming from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_basics_stream_iteration.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. 
+""" + +import os +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.agents.models import ( + AgentStreamEvent, + MessageDeltaChunk, + ThreadMessage, + ThreadRun, + RunStep, +) + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + # Create an agent and run stream with iteration + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are a helpful agent" + ) + print(f"Created agent, ID {agent.id}") + + thread = agents_client.threads.create() + print(f"Created thread, thread ID {thread.id}") + + message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") + + # [START iterate_stream] + with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id) as stream: + + for event_type, event_data, _ in stream: + + if isinstance(event_data, MessageDeltaChunk): + print(f"Text delta received: {event_data.text}") + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AgentStreamEvent.ERROR: + print(f"An error occurred. 
Data: {event_data}") + + elif event_type == AgentStreamEvent.DONE: + print("Stream completed.") + break + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + # [END iterate_stream] + + agents_client.delete_agent(agent.id) + print("Deleted agent") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_thread_and_process_run.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_thread_and_process_run.py new file mode 100644 index 000000000000..3f8ef58de884 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_thread_and_process_run.py @@ -0,0 +1,69 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use the new convenience method + `create_thread_and_process_run` in the Azure AI Agents service. + This single call will create a thread, start a run, poll to + completion (including any tool calls), and return the final result. + +USAGE: + python sample_agents_create_thread_and_process_run.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under + "Models + endpoints" in your Azure AI Foundry project. 
+""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import AgentThreadCreationOptions, ThreadMessageOptions, ListSortOrder +from azure.identity import DefaultAzureCredential + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="process-run-sample-agent", + instructions="You are a friendly assistant that generates jokes.", + ) + print(f"Created agent: {agent.id}") + + # [START create_thread_and_process_run] + run = agents_client.create_thread_and_process_run( + agent_id=agent.id, + thread=AgentThreadCreationOptions( + messages=[ThreadMessageOptions(role="user", content="Hi! Tell me your favorite programming joke.")] + ), + ) + # [END create_thread_and_process_run] + print(f"Run completed with status: {run.status!r}") + + if run.status == "failed": + print("Run failed:", run.last_error) + + # List out all messages in the thread + messages = agents_client.messages.list(thread_id=run.thread_id, order=ListSortOrder.ASCENDING) + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") + + # clean up + agents_client.delete_agent(agent.id) + print(f"Deleted agent {agent.id}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_thread_and_run.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_thread_and_run.py new file mode 100644 index 000000000000..90b4141f09ad --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_thread_and_run.py @@ -0,0 +1,79 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to create a new thread and immediately run it + in one call using the Azure AI Agents service. + +USAGE: + python sample_agents_create_thread_and_run.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-agents azure-identity + + Set these environment variables with your own values: + 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview + page of your Azure AI Foundry portal. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under + the "Name" column in the "Models + endpoints" tab in + your Azure AI Foundry project. +""" + +import os +import time + +from azure.ai.projects import AIProjectClient +from azure.ai.agents.models import AgentThreadCreationOptions, ThreadMessageOptions, ListSortOrder +from azure.identity import DefaultAzureCredential + +project_client = AIProjectClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +with project_client: + agents_client = project_client.agents + + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="sample-agent", + instructions="You are a helpful assistant that tells jokes.", + ) + print(f"Created agent, agent ID: {agent.id}") + + # [START create_thread_and_run] + # Prepare the initial user message + initial_message = ThreadMessageOptions(role="user", content="Hello! 
Can you tell me a joke?") + + # Create a new thread and immediately start a run on it + run = agents_client.create_thread_and_run( + agent_id=agent.id, + thread=AgentThreadCreationOptions(messages=[initial_message]), + ) + # [END create_thread_and_run] + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = agents_client.runs.get(thread_id=run.thread_id, run_id=run.id) + print(f"Run status: {run.status}") + + if run.status == "failed": + print(f"Run error: {run.last_error}") + + # List all messages in the thread, in ascending order of creation + messages = agents_client.messages.list(thread_id=run.thread_id, order=ListSortOrder.ASCENDING) + + for msg in messages: + if msg.text_messages: + last_text = msg.text_messages[-1] + print(f"{msg.role}: {last_text.text.value}") + + # clean up + agents_client.delete_agent(agent.id) + print(f"Deleted agent {agent.id!r}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/utils/__init__.py b/sdk/ai/azure-ai-projects/samples/agents/utils/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/sdk/ai/azure-ai-projects/samples/agents/utils/user_functions.py b/sdk/ai/azure-ai-projects/samples/agents/utils/user_functions.py new file mode 100644 index 000000000000..cb1e3d9cf43d --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/utils/user_functions.py @@ -0,0 +1,248 @@ +# pylint: disable=line-too-long,useless-suppression +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import json +import datetime +from typing import Any, Callable, Set, Dict, List, Optional + +# These are the user-defined functions that can be called by the agent. + + +def fetch_current_datetime(format: Optional[str] = None) -> str: + """ + Get the current time as a JSON string, optionally formatted. 
+ + :param format (Optional[str]): The format in which to return the current time. Defaults to None, which uses a standard format. + :return: The current time in JSON format. + :rtype: str + """ + current_time = datetime.datetime.now() + + # Use the provided format if available, else use a default format + if format: + time_format = format + else: + time_format = "%Y-%m-%d %H:%M:%S" + + time_json = json.dumps({"current_time": current_time.strftime(time_format)}) + return time_json + + +def fetch_weather(location: str) -> str: + """ + Fetches the weather information for the specified location. + + :param location (str): The location to fetch weather for. + :return: Weather information as a JSON string. + :rtype: str + """ + # In a real-world scenario, you'd integrate with a weather API. + # Here, we'll mock the response. + mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} + weather = mock_weather_data.get(location, "Weather data not available for this location.") + weather_json = json.dumps({"weather": weather}) + return weather_json + + +def send_email(recipient: str, subject: str, body: str) -> str: + """ + Sends an email with the specified subject and body to the recipient. + + :param recipient (str): Email address of the recipient. + :param subject (str): Subject of the email. + :param body (str): Body content of the email. + :return: Confirmation message. + :rtype: str + """ + # In a real-world scenario, you'd use an SMTP server or an email service API. + # Here, we'll mock the email sending. + print(f"Sending email to {recipient}...") + print(f"Subject: {subject}") + print(f"Body:\n{body}") + + message_json = json.dumps({"message": f"Email successfully sent to {recipient}."}) + return message_json + + +def send_email_using_recipient_name(recipient: str, subject: str, body: str) -> str: + """ + Sends an email with the specified subject and body to the recipient. + + :param recipient (str): Name of the recipient. 
+ :param subject (str): Subject of the email. + :param body (str): Body content of the email. + :return: Confirmation message. + :rtype: str + """ + # In a real-world scenario, you'd use an SMTP server or an email service API. + # Here, we'll mock the email sending. + print(f"Sending email to {recipient}...") + print(f"Subject: {subject}") + print(f"Body:\n{body}") + + message_json = json.dumps({"message": f"Email successfully sent to {recipient}."}) + return message_json + + +def calculate_sum(a: int, b: int) -> str: + """Calculates the sum of two integers. + + :param a (int): First integer. + :rtype: int + :param b (int): Second integer. + :rtype: int + + :return: The sum of the two integers. + :rtype: str + """ + result = a + b + return json.dumps({"result": result}) + + +def convert_temperature(celsius: float) -> str: + """Converts temperature from Celsius to Fahrenheit. + + :param celsius (float): Temperature in Celsius. + :rtype: float + + :return: Temperature in Fahrenheit. + :rtype: str + """ + fahrenheit = (celsius * 9 / 5) + 32 + return json.dumps({"fahrenheit": fahrenheit}) + + +def toggle_flag(flag: bool) -> str: + """Toggles a boolean flag. + + :param flag (bool): The flag to toggle. + :rtype: bool + + :return: The toggled flag. + :rtype: str + """ + toggled = not flag + return json.dumps({"toggled_flag": toggled}) + + +def merge_dicts(dict1: Dict[str, Any], dict2: Dict[str, Any]) -> str: + """Merges two dictionaries. + + :param dict1 (Dict[str, Any]): First dictionary. + :rtype: dict + :param dict2 (Dict[str, Any]): Second dictionary. + :rtype: dict + + :return: The merged dictionary. + :rtype: str + """ + merged = dict1.copy() + merged.update(dict2) + return json.dumps({"merged_dict": merged}) + + +def get_user_info(user_id: int) -> str: + """Retrieves user information based on user ID. + + :param user_id (int): ID of the user. + :rtype: int + + :return: User information as a JSON string. 
+ :rtype: str + """ + mock_users = { + 1: {"name": "Alice", "email": "alice@example.com"}, + 2: {"name": "Bob", "email": "bob@example.com"}, + 3: {"name": "Charlie", "email": "charlie@example.com"}, + } + user_info = mock_users.get(user_id, {"error": "User not found."}) + return json.dumps({"user_info": user_info}) + + +def longest_word_in_sentences(sentences: List[str]) -> str: + """Finds the longest word in each sentence. + + :param sentences (List[str]): A list of sentences. + :return: A JSON string mapping each sentence to its longest word. + :rtype: str + """ + if not sentences: + return json.dumps({"error": "The list of sentences is empty"}) + + longest_words = {} + for sentence in sentences: + # Split sentence into words + words = sentence.split() + if words: + # Find the longest word + longest_word = max(words, key=len) + longest_words[sentence] = longest_word + else: + longest_words[sentence] = "" + + return json.dumps({"longest_words": longest_words}) + + +def process_records(records: List[Dict[str, int]]) -> str: + """ + Process a list of records, where each record is a dictionary with string keys and integer values. + + :param records: A list containing dictionaries that map strings to integers. + :return: A list of sums of the integer values in each record. + """ + sums = [] + for record in records: + # Sum up all the values in each dictionary and append the result to the sums list + total = sum(record.values()) + sums.append(total) + return json.dumps({"sums": sums}) + + +# Example User Input for Each Function +# 1. Fetch Current DateTime +# User Input: "What is the current date and time?" +# User Input: "What is the current date and time in '%Y-%m-%d %H:%M:%S' format?" + +# 2. Fetch Weather +# User Input: "Can you provide the weather information for New York?" + +# 3. Send Email +# User Input: "Send an email to john.doe@example.com with the subject 'Meeting Reminder' and body 'Don't forget our meeting at 3 PM.'" + +# 4. 
Calculate Sum +# User Input: "What is the sum of 45 and 55?" + +# 5. Convert Temperature +# User Input: "Convert 25 degrees Celsius to Fahrenheit." + +# 6. Toggle Flag +# User Input: "Toggle the flag True." + +# 7. Merge Dictionaries +# User Input: "Merge these two dictionaries: {'name': 'Alice'} and {'age': 30}." + +# 8. Get User Info +# User Input: "Retrieve user information for user ID 1." + +# 9. Longest Word in Sentences +# User Input: "Find the longest word in each of these sentences: ['The quick brown fox jumps over the lazy dog', 'Python is an amazing programming language', 'Azure AI capabilities are impressive']." + +# 10. Process Records +# User Input: "Process the following records: [{'a': 10, 'b': 20}, {'x': 5, 'y': 15, 'z': 25}, {'m': 30}]." + +# Statically defined user functions for fast reference +user_functions: Set[Callable[..., Any]] = { + fetch_current_datetime, + fetch_weather, + send_email, + calculate_sum, + convert_temperature, + toggle_flag, + merge_dicts, + get_user_info, + longest_word_in_sentences, + process_records, +} From cbe20dd0f06d4465dacf4d214a4db3848b79a87c Mon Sep 17 00:00:00 2001 From: howieleung Date: Tue, 10 Jun 2025 11:48:33 -0700 Subject: [PATCH 7/8] test readme --- sdk/ai/azure-ai-projects/README.md | 523 +---------------------------- 1 file changed, 1 insertion(+), 522 deletions(-) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 0b5be408b78a..70c379b63ffa 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -1,522 +1 @@ -# Azure AI Projects client library for Python - -The AI Projects client library (in preview) is part of the Azure AI Foundry SDK, and provides easy access to -resources in your Azure AI Foundry Project. Use it to: - -* **Create and run Agents** using the `.agents` property on the client. -* **Get an AzureOpenAI client** using the `.inference.get_azure_openai_client` method. 
-* **Enumerate AI Models** deployed to your Foundry Project using the `.deployments` operations. -* **Enumerate connected Azure resources** in your Foundry project using the `.connections` operations. -* **Upload documents and create Datasets** to reference them using the `.datasets` operations. -* **Create and enumerate Search Indexes** using the `.indexes` operations. -* **Get an Azure AI Inference client** for chat completions, text or image embeddings using the `.inference` operations. -* **Read a Prompty file or string** and render messages for inference clients, using the `PromptTemplate` class. -* **Run Evaluations** to assess the performance of generative AI applications, using the `evaluations` operations. -* **Enable OpenTelemetry tracing** using the `enable_telemetry` function. - -The client library uses version `2025-05-15-preview` of the AI Foundry [data plane REST APIs](https://aka.ms/azsdk/azure-ai-projects/rest-api-reference). - -> **Note:** There have been significant updates with the release of version 1.0.0b11, including breaking changes. -please see new code snippets below and the samples folder. Agents are now implemented in a separate package `azure-ai-agents` -which will get installed automatically when you install `azure-ai-projects`. You can continue using ".agents" -operations on the `AIProjectsClient` to create, run and delete agents, as before. -See [full set of Agents samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-agents/samples) -in their new location. Also see the [change log for the 1.0.0b11 release](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/CHANGELOG.md). 
- -[Product documentation](https://aka.ms/azsdk/azure-ai-projects/product-doc) -| [Samples][samples] -| [API reference documentation](https://aka.ms/azsdk/azure-ai-projects/python/reference) -| [Package (PyPI)](https://aka.ms/azsdk/azure-ai-projects/python/package) -| [SDK source code](https://aka.ms/azsdk/azure-ai-projects/python/code) - -## Reporting issues - -To report an issue with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues). Mention the package name "azure-ai-projects" in the title or content. - -## Getting started - -### Prerequisite - -- Python 3.9 or later. -- An [Azure subscription][azure_sub]. -- A [project in Azure AI Foundry](https://learn.microsoft.com/azure/ai-studio/how-to/create-projects). -- The project endpoint URL of the form `https://.services.ai.azure.com/api/projects/`. It can be found in your Azure AI Foundry Project overview page. Below we will assume the environment variable `PROJECT_ENDPOINT` was defined to hold this value. -- An Entra ID token for authentication. Your application needs an object that implements the [TokenCredential](https://learn.microsoft.com/python/api/azure-core/azure.core.credentials.tokencredential) interface. Code samples here use [DefaultAzureCredential](https://learn.microsoft.com/python/api/azure-identity/azure.identity.defaultazurecredential). To get that working, you will need: - * An appropriate role assignment. see [Role-based access control in Azure AI Foundry portal](https://learn.microsoft.com/azure/ai-foundry/concepts/rbac-ai-foundry). Role assigned can be done via the "Access Control (IAM)" tab of your Azure AI Project resource in the Azure portal. - * [Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli) installed. - * You are logged into your Azure account by running `az login`. 
- -### Install the package - -```bash -pip install azure-ai-projects -``` - -## Key concepts - -### Create and authenticate the client with Entra ID - -Entra ID is the only authentication method supported at the moment by the client. - -To construct a synchronous client: - -```python -import os -from azure.ai.projects import AIProjectClient -from azure.identity import DefaultAzureCredential - -project_client = AIProjectClient( - credential=DefaultAzureCredential(), - endpoint=os.environ["PROJECT_ENDPOINT"], -) -``` - -To construct an asynchronous client, Install the additional package [aiohttp](https://pypi.org/project/aiohttp/): - -```bash -pip install aiohttp -``` - -and update the code above to import `asyncio`, and import `AIProjectClient` from the `azure.ai.projects.aio` namespace: - -```python -import os -import asyncio -from azure.ai.projects.aio import AIProjectClient -from azure.core.credentials import AzureKeyCredential - -project_client = AIProjectClient.from_connection_string( - credential=DefaultAzureCredential(), - endpoint=os.environ["PROJECT_ENDPOINT"], -) -``` - -**Note:** Support for project connection string and hub-based projects has been discontinued. We recommend creating a new Azure AI Foundry resource utilizing project endpoint. If this is not possible, please pin the version of or pin the version of `azure-ai-projects` to `1.0.0b10` or earlier. - -## Examples - -### Performing Agent operations - -The `.agents` property on the `AIProjectsClient` gives you access to an authenticated `AgentsClient` from the `azure-ai-agents` package. Below we show how to create an Agent and delete it. To see what you can do with the `agent` you created, see the [many samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-agents/samples) associated with the `azure-ai-agents` package. - -The code below assumes `model_deployment_name` (a string) is defined. 
It's the deployment name of an AI model in your Foundry Project, as shown in the "Models + endpoints" tab, under the "Name" column. - - - -```python -agent = project_client.agents.create_agent( - model=model_deployment_name, - name="my-agent", - instructions="You are helpful agent", -) -print(f"Created agent, agent ID: {agent.id}") - -# Do something with your Agent! -# See samples here https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-agents/samples - -project_client.agents.delete_agent(agent.id) -print("Deleted agent") -``` - - - -### Get an authenticated AzureOpenAI client - -Your Azure AI Foundry project may have one or more OpenAI models deployed that support chat completions. Use the code below to get an authenticated [AzureOpenAI](https://github.com/openai/openai-python?tab=readme-ov-file#microsoft-azure-openai) from the [openai](https://pypi.org/project/openai/) package, and execute a chat completions call. - -The code below assumes `model_deployment_name` (a string) is defined. It's the deployment name of an AI model in your Foundry Project, or a connected Azure OpenAI resource. As shown in the "Models + endpoints" tab, under the "Name" column. - -Update the `api_version` value with one found in the "Data plane - inference" row [in this table](https://learn.microsoft.com/azure/ai-services/openai/reference#api-specs). 
- - - -```python -print( - "Get an authenticated Azure OpenAI client for the parent AI Services resource, and perform a chat completion operation:" -) -with project_client.inference.get_azure_openai_client(api_version="2024-10-21") as client: - - response = client.chat.completions.create( - model=model_deployment_name, - messages=[ - { - "role": "user", - "content": "How many feet are in a mile?", - }, - ], - ) - - print(response.choices[0].message.content) - -print( - "Get an authenticated Azure OpenAI client for a connected Azure OpenAI service, and perform a chat completion operation:" -) -with project_client.inference.get_azure_openai_client( - api_version="2024-10-21", connection_name=connection_name -) as client: - - response = client.chat.completions.create( - model=model_deployment_name, - messages=[ - { - "role": "user", - "content": "How many feet are in a mile?", - }, - ], - ) - - print(response.choices[0].message.content) -``` - - - -See the "inference" folder in the [package samples][samples] for additional samples. - -### Get an authenticated ChatCompletionsClient - -Your Azure AI Foundry project may have one or more AI models deployed that support chat completions. These could be OpenAI models, Microsoft models, or models from other providers. Use the code below to get an authenticated [ChatCompletionsClient](https://learn.microsoft.com/python/api/azure-ai-inference/azure.ai.inference.chatcompletionsclient) from the [azure-ai-inference](https://pypi.org/project/azure-ai-inference/) package, and execute a chat completions call. - -First, install the package: - -```bash -pip install azure-ai-inference -``` - -Then run the code below. Here we assume `model_deployment_name` (a string) is defined. It's the deployment name of an AI model in your Foundry Project, as shown in the "Models + endpoints" tab, under the "Name" column. 
- - - -```python -with project_client.inference.get_chat_completions_client() as client: - - response = client.complete( - model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] - ) - - print(response.choices[0].message.content) -``` - - - -See the "inference" folder in the [package samples][samples] for additional samples, including getting an authenticated [EmbeddingsClient](https://learn.microsoft.com/python/api/azure-ai-inference/azure.ai.inference.embeddingsclient) and [ImageEmbeddingsClient](https://learn.microsoft.com/python/api/azure-ai-inference/azure.ai.inference.imageembeddingsclient). - -### Deployments operations - -The code below shows some Deployments operations, which allow you to enumerate the AI models deployed to your AI Foundry Projects. These models can be seen in the "Models + endpoints" tab in your AI Foundry Project. Full samples can be found under the "deployment" folder in the [package samples][samples]. - - - -```python -print("List all deployments:") -for deployment in project_client.deployments.list(): - print(deployment) - -print(f"List all deployments by the model publisher `{model_publisher}`:") -for deployment in project_client.deployments.list(model_publisher=model_publisher): - print(deployment) - -print(f"List all deployments of model `{model_name}`:") -for deployment in project_client.deployments.list(model_name=model_name): - print(deployment) - -print(f"Get a single deployment named `{model_deployment_name}`:") -deployment = project_client.deployments.get(model_deployment_name) -print(deployment) -``` - - - -### Connections operations - -The code below shows some Connection operations, which allow you to enumerate the Azure Resources connected to your AI Foundry Projects. These connections can be seen in the "Management Center", in the "Connected resources" tab in your AI Foundry Project. Full samples can be found under the "connections" folder in the [package samples][samples]. 
- - - -```python -print("List all connections:") -for connection in project_client.connections.list(): - print(connection) - -print("List all connections of a particular type:") -for connection in project_client.connections.list( - connection_type=ConnectionType.AZURE_OPEN_AI, -): - print(connection) - -print("Get the default connection of a particular type, without its credentials:") -connection = project_client.connections.get_default(connection_type=ConnectionType.AZURE_OPEN_AI) -print(connection) - -print("Get the default connection of a particular type, with its credentials:") -connection = project_client.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, include_credentials=True -) -print(connection) - -print(f"Get the connection named `{connection_name}`, without its credentials:") -connection = project_client.connections.get(connection_name) -print(connection) - -print(f"Get the connection named `{connection_name}`, with its credentials:") -connection = project_client.connections.get(connection_name, include_credentials=True) -print(connection) -``` - - - -### Dataset operations - -The code below shows some Dataset operations. Full samples can be found under the "datasets" -folder in the [package samples][samples]. - - - -```python -print( - f"Upload a single file and create a new Dataset `{dataset_name}`, version `{dataset_version_1}`, to reference the file." -) -dataset: DatasetVersion = project_client.datasets.upload_file( - name=dataset_name, - version=dataset_version_1, - file_path=data_file, - connection_name=connection_name, -) -print(dataset) - -print( - f"Upload files in a folder (including sub-folders) and create a new version `{dataset_version_2}` in the same Dataset, to reference the files." 
-) -dataset = project_client.datasets.upload_folder( - name=dataset_name, - version=dataset_version_2, - folder=data_folder, - connection_name=connection_name, - file_pattern=re.compile(r"\.(txt|csv|md)$", re.IGNORECASE), -) -print(dataset) - -print(f"Get an existing Dataset version `{dataset_version_1}`:") -dataset = project_client.datasets.get(name=dataset_name, version=dataset_version_1) -print(dataset) - -print(f"Get credentials of an existing Dataset version `{dataset_version_1}`:") -asset_credential = project_client.datasets.get_credentials(name=dataset_name, version=dataset_version_1) -print(asset_credential) - -print("List latest versions of all Datasets:") -for dataset in project_client.datasets.list(): - print(dataset) - -print(f"Listing all versions of the Dataset named `{dataset_name}`:") -for dataset in project_client.datasets.list_versions(name=dataset_name): - print(dataset) - -print("Delete all Dataset versions created above:") -project_client.datasets.delete(name=dataset_name, version=dataset_version_1) -project_client.datasets.delete(name=dataset_name, version=dataset_version_2) -``` - - - -### Indexes operations - -The code below shows some Indexes operations. Full samples can be found under the "indexes" -folder in the [package samples][samples]. 
- - - -```python -print( - f"Create Index `{index_name}` with version `{index_version}`, referencing an existing AI Search resource:" -) -index = project_client.indexes.create_or_update( - name=index_name, - version=index_version, - body=AzureAISearchIndex(connection_name=ai_search_connection_name, index_name=ai_search_index_name), -) -print(index) - -print(f"Get Index `{index_name}` version `{index_version}`:") -index = project_client.indexes.get(name=index_name, version=index_version) -print(index) - -print("List latest versions of all Indexes:") -for index in project_client.indexes.list(): - print(index) - -print(f"Listing all versions of the Index named `{index_name}`:") -for index in project_client.indexes.list_versions(name=index_name): - print(index) - -print(f"Delete Index `{index_name}` version `{index_version}`:") -project_client.indexes.delete(name=index_name, version=index_version) -``` - - - -### Evaluation - -Evaluation in Azure AI Project client library provides quantitive, AI-assisted quality and safety metrics to asses performance and Evaluate LLM Models, GenAI Application and Agents. Metrics are defined as evaluators. Built-in or custom evaluators can provide comprehensive evaluation insights. - -The code below shows some evaluation operations. 
Full list of sample can be found under "evaluation" folder in the [package samples][samples] - - - -```python -print("Upload a single file and create a new Dataset to reference the file.") -dataset: DatasetVersion = project_client.datasets.upload_file( - name=dataset_name, - version=dataset_version, - file_path=data_file, -) -print(dataset) - -print("Create an evaluation") -evaluation: Evaluation = Evaluation( - display_name="Sample Evaluation Test", - description="Sample evaluation for testing", - # Sample Dataset Id : azureai://accounts//projects//data//versions/ - data=InputDataset(id=dataset.id if dataset.id else ""), - evaluators={ - "relevance": EvaluatorConfiguration( - id=EvaluatorIds.RELEVANCE.value, - init_params={ - "deployment_name": model_deployment_name, - }, - data_mapping={ - "query": "${data.query}", - "response": "${data.response}", - }, - ), - "violence": EvaluatorConfiguration( - id=EvaluatorIds.VIOLENCE.value, - init_params={ - "azure_ai_project": endpoint, - }, - ), - "bleu_score": EvaluatorConfiguration( - id=EvaluatorIds.BLEU_SCORE.value, - ), - }, -) - -evaluation_response: Evaluation = project_client.evaluations.create( - evaluation, - headers={ - "model-endpoint": model_endpoint, - "api-key": model_api_key, - }, -) -print(evaluation_response) - -print("Get evaluation") -get_evaluation_response: Evaluation = project_client.evaluations.get(evaluation_response.name) - -print(get_evaluation_response) - -print("List evaluations") -for evaluation in project_client.evaluations.list(): - print(evaluation) -``` - - - -## Troubleshooting - -### Exceptions - -Client methods that make service calls raise an [HttpResponseError](https://learn.microsoft.com/python/api/azure-core/azure.core.exceptions.httpresponseerror) exception for a non-success HTTP status code response from the service. The exception's `status_code` will hold the HTTP response status code (with `reason` showing the friendly name). 
The exception's `error.message` contains a detailed message that may be helpful in diagnosing the issue: - -```python -from azure.core.exceptions import HttpResponseError - -... - -try: - result = project_client.connections.list() -except HttpResponseError as e: - print(f"Status code: {e.status_code} ({e.reason})") - print(e.message) -``` - -For example, when you provide wrong credentials: - -```text -Status code: 401 (Unauthorized) -Operation returned an invalid status 'Unauthorized' -``` - -### Logging - -The client uses the standard [Python logging library](https://docs.python.org/3/library/logging.html). The SDK logs HTTP request and response details, which may be useful in troubleshooting. To log to stdout, add the following at the top of your Python script: - -```python -import sys -import logging - -# Acquire the logger for this client library. Use 'azure' to affect both -# 'azure.core` and `azure.ai.inference' libraries. -logger = logging.getLogger("azure") - -# Set the desired logging level. logging.INFO or logging.DEBUG are good options. -logger.setLevel(logging.DEBUG) - -# Direct logging output to stdout: -handler = logging.StreamHandler(stream=sys.stdout) -# Or direct logging output to a file: -# handler = logging.FileHandler(filename="sample.log") -logger.addHandler(handler) - -# Optional: change the default logging format. Here we add a timestamp. -#formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s") -#handler.setFormatter(formatter) -``` - -By default logs redact the values of URL query strings, the values of some HTTP request and response headers (including `Authorization` which holds the key or token), and the request and response payloads. 
To create logs without redaction, add `logging_enable=True` to the client constructor: - -```python -project_client = AIProjectClient( - credential=DefaultAzureCredential(), - endpoint=os.environ["PROJECT_ENDPOINT"], - logging_enable = True -) -``` - -Note that the log level must be set to `logging.DEBUG` (see above code). Logs will be redacted with any other log level. - -Be sure to protect non redacted logs to avoid compromising security. - -For more information, see [Configure logging in the Azure libraries for Python](https://aka.ms/azsdk/python/logging) - -### Reporting issues - -To report an issue with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues). Mention the package name "azure-ai-projects" in the title or content. - -## Next steps - -Have a look at the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-projects/samples) folder, containing fully runnable Python code for synchronous and asynchronous clients. - -## Contributing - -This project welcomes contributions and suggestions. Most contributions require -you to agree to a Contributor License Agreement (CLA) declaring that you have -the right to, and actually do, grant us the rights to use your contribution. -For details, visit https://cla.microsoft.com. - -When you submit a pull request, a CLA-bot will automatically determine whether -you need to provide a CLA and decorate the PR appropriately (e.g., label, -comment). Simply follow the instructions provided by the bot. You will only -need to do this once across all repos using our CLA. - -This project has adopted the -[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, -see the Code of Conduct FAQ or contact opencode@microsoft.com with any -additional questions or comments. 
- - -[samples]: https://aka.ms/azsdk/azure-ai-projects/python/samples/ -[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ -[azure_sub]: https://azure.microsoft.com/free/ -[evaluators]: https://learn.microsoft.com/azure/ai-studio/how-to/develop/evaluate-sdk -[azure_ai_evaluation]: https://learn.microsoft.com/python/api/overview/azure/ai-evaluation-readme -[evaluator_library]: https://learn.microsoft.com/azure/ai-studio/how-to/evaluate-generative-ai-app#view-and-manage-the-evaluators-in-the-evaluator-library \ No newline at end of file +Hello world \ No newline at end of file From a96bca5c2eb0ba8599fb21ca0346f0a90e3d2dbb Mon Sep 17 00:00:00 2001 From: howieleung Date: Tue, 10 Jun 2025 14:13:22 -0700 Subject: [PATCH 8/8] use symbolic links --- sdk/ai/azure-ai-agents/README.md | 1285 +------------ sdk/ai/azure-ai-agents/samples | 1 + sdk/ai/azure-ai-agents/samples/__init__.py | 0 .../samples/agents_async/__init__.py | 0 .../sample_agents_azure_functions_async.py | 108 -- .../sample_agents_basics_async.py | 82 - ...ics_create_thread_and_process_run_async.py | 79 - ...ents_basics_create_thread_and_run_async.py | 88 - .../sample_agents_code_interpreter_async.py | 106 -- ...gents_code_interpreter_attachment_async.py | 89 - ...eter_attachment_enterprise_search_async.py | 85 - .../sample_agents_functions_async.py | 120 -- .../sample_agents_image_input_base64_async.py | 113 -- .../sample_agents_image_input_file_async.py | 97 - .../sample_agents_image_input_url_async.py | 91 - .../sample_agents_json_schema_async.py | 106 -- .../sample_agents_run_with_toolset_async.py | 88 - ...sample_agents_stream_eventhandler_async.py | 105 -- ...tream_eventhandler_with_functions_async.py | 148 -- ..._stream_eventhandler_with_toolset_async.py | 117 -- .../sample_agents_stream_iteration_async.py | 94 - ...m_with_base_override_eventhandler_async.py | 115 -- ...tore_batch_enterprise_file_search_async.py | 122 -- ...ts_vector_store_batch_file_search_async.py | 114 -- 
...ctor_store_enterprise_file_search_async.py | 88 - ...e_agents_vector_store_file_search_async.py | 86 - ...gents_with_file_search_attachment_async.py | 88 - .../samples/agents_async/utils/__init__.py | 0 .../utils/user_async_functions.py | 67 - .../sample_agents_image_input_base64.py | 112 -- .../sample_agents_image_input_file.py | 92 - .../sample_agents_image_input_url.py | 90 - .../sample_agents_json_schema.py | 101 -- ...ctor_store_batch_enterprise_file_search.py | 105 -- ...e_agents_vector_store_batch_file_search.py | 109 -- .../sample_agents_vector_store_file_search.py | 80 - ...s_with_code_interpreter_file_attachment.py | 111 -- ...mple_agents_with_file_search_attachment.py | 77 - .../sample_agents_with_resources_in_thread.py | 97 - .../sample_agents_agent_team.py | 78 - ...le_agents_agent_team_custom_team_leader.py | 117 -- .../sample_agents_multi_agent_team.py | 133 -- .../agents_multiagent/utils/agent_team.py | 436 ----- .../utils/agent_team_config.yaml | 43 - .../utils/agent_trace_configurator.py | 73 - .../utils/user_functions_with_traces.py | 111 -- ...stream_eventhandler_with_bing_grounding.py | 124 -- ...ents_stream_eventhandler_with_functions.py | 147 -- ...agents_stream_eventhandler_with_toolset.py | 118 -- ...ts_stream_iteration_with_bing_grounding.py | 116 -- ...gents_stream_iteration_with_file_search.py | 110 -- ...le_agents_stream_iteration_with_toolset.py | 107 -- ..._stream_with_base_override_eventhandler.py | 105 -- ...basics_async_with_azure_monitor_tracing.py | 87 - ...gents_basics_async_with_console_tracing.py | 93 - ...gents_basics_with_azure_monitor_tracing.py | 79 - ...mple_agents_basics_with_console_tracing.py | 85 - ..._with_console_tracing_custom_attributes.py | 113 -- ...eventhandler_with_azure_monitor_tracing.py | 115 -- ...tream_eventhandler_with_console_tracing.py | 130 -- ...ents_toolset_with_azure_monitor_tracing.py | 128 -- ...ple_agents_toolset_with_console_tracing.py | 141 -- .../samples/agents_tools/__init__.py | 0 
.../sample_agents_azure_ai_search.py | 128 -- .../sample_agents_azure_functions.py | 98 - .../sample_agents_bing_custom_search.py | 88 - .../sample_agents_bing_grounding.py | 105 -- .../sample_agents_code_interpreter.py | 110 -- ...nterpreter_attachment_enterprise_search.py | 85 - .../sample_agents_connected_agent.py | 96 - .../sample_agents_enterprise_file_search.py | 82 - .../agents_tools/sample_agents_fabric.py | 86 - .../agents_tools/sample_agents_file_search.py | 103 -- .../agents_tools/sample_agents_functions.py | 115 -- .../agents_tools/sample_agents_logic_apps.py | 132 -- ...sample_agents_multiple_connected_agents.py | 115 -- .../agents_tools/sample_agents_openapi.py | 121 -- .../sample_agents_openapi_connection_auth.py | 101 -- .../sample_agents_run_with_toolset.py | 94 - .../agents_tools/sample_agents_sharepoint.py | 89 - .../agents_tools/utils/user_logic_apps.py | 80 - .../samples/assets/countries.json | 46 - .../samples/assets/image_file.png | Bin 183951 -> 0 bytes .../samples/assets/product_info_1.md | 51 - .../synthetic_500_quarterly_results.csv | 14 - .../samples/assets/tripadvisor_openapi.json | 1606 ----------------- .../samples/assets/weather_openapi.json | 62 - .../samples/sample_agents_basics.py | 87 - ...ample_agents_basics_stream_eventhandler.py | 103 -- .../sample_agents_basics_stream_iteration.py | 83 - ...le_agents_basics_thread_and_process_run.py | 64 - .../sample_agents_basics_thread_and_run.py | 74 - .../azure-ai-agents/samples/utils/__init__.py | 0 .../samples/utils/user_functions.py | 248 --- sdk/ai/azure-ai-projects/README_AGENTS.md | 1284 +++++++++++++ 95 files changed, 1286 insertions(+), 11709 deletions(-) mode change 100644 => 120000 sdk/ai/azure-ai-agents/README.md create mode 120000 sdk/ai/azure-ai-agents/samples delete mode 100644 sdk/ai/azure-ai-agents/samples/__init__.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/__init__.py delete mode 100644 
sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_azure_functions_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_basics_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_basics_create_thread_and_process_run_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_basics_create_thread_and_run_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_code_interpreter_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_code_interpreter_attachment_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_code_interpreter_attachment_enterprise_search_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_functions_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_image_input_base64_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_image_input_file_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_image_input_url_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_json_schema_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_run_with_toolset_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_eventhandler_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_eventhandler_with_functions_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_eventhandler_with_toolset_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_iteration_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_with_base_override_eventhandler_async.py delete mode 100644 
sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_batch_enterprise_file_search_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_batch_file_search_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_enterprise_file_search_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_file_search_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_with_file_search_attachment_async.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/utils/__init__.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_async/utils/user_async_functions.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_image_input_base64.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_image_input_file.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_image_input_url.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_json_schema.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_vector_store_batch_enterprise_file_search.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_vector_store_batch_file_search.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_vector_store_file_search.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_with_code_interpreter_file_attachment.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_with_file_search_attachment.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_with_resources_in_thread.py delete mode 100644 
sdk/ai/azure-ai-agents/samples/agents_multiagent/sample_agents_agent_team.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_multiagent/sample_agents_agent_team_custom_team_leader.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_multiagent/sample_agents_multi_agent_team.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/agent_team.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/agent_team_config.yaml delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/agent_trace_configurator.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/user_functions_with_traces.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_eventhandler_with_bing_grounding.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_eventhandler_with_functions.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_eventhandler_with_toolset.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_bing_grounding.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_file_search.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_toolset.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_with_base_override_eventhandler.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_async_with_azure_monitor_tracing.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_async_with_console_tracing.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_with_azure_monitor_tracing.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_with_console_tracing.py delete 
mode 100644 sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_with_console_tracing_custom_attributes.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_stream_eventhandler_with_console_tracing.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_toolset_with_azure_monitor_tracing.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_toolset_with_console_tracing.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_tools/__init__.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_azure_ai_search.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_azure_functions.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_bing_custom_search.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_bing_grounding.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_code_interpreter.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_code_interpreter_attachment_enterprise_search.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_connected_agent.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_enterprise_file_search.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_fabric.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_file_search.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_functions.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_logic_apps.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_multiple_connected_agents.py delete mode 100644 
sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_openapi.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_openapi_connection_auth.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_run_with_toolset.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_sharepoint.py delete mode 100644 sdk/ai/azure-ai-agents/samples/agents_tools/utils/user_logic_apps.py delete mode 100644 sdk/ai/azure-ai-agents/samples/assets/countries.json delete mode 100644 sdk/ai/azure-ai-agents/samples/assets/image_file.png delete mode 100644 sdk/ai/azure-ai-agents/samples/assets/product_info_1.md delete mode 100644 sdk/ai/azure-ai-agents/samples/assets/synthetic_500_quarterly_results.csv delete mode 100644 sdk/ai/azure-ai-agents/samples/assets/tripadvisor_openapi.json delete mode 100644 sdk/ai/azure-ai-agents/samples/assets/weather_openapi.json delete mode 100644 sdk/ai/azure-ai-agents/samples/sample_agents_basics.py delete mode 100644 sdk/ai/azure-ai-agents/samples/sample_agents_basics_stream_eventhandler.py delete mode 100644 sdk/ai/azure-ai-agents/samples/sample_agents_basics_stream_iteration.py delete mode 100644 sdk/ai/azure-ai-agents/samples/sample_agents_basics_thread_and_process_run.py delete mode 100644 sdk/ai/azure-ai-agents/samples/sample_agents_basics_thread_and_run.py delete mode 100644 sdk/ai/azure-ai-agents/samples/utils/__init__.py delete mode 100644 sdk/ai/azure-ai-agents/samples/utils/user_functions.py create mode 100644 sdk/ai/azure-ai-projects/README_AGENTS.md diff --git a/sdk/ai/azure-ai-agents/README.md b/sdk/ai/azure-ai-agents/README.md deleted file mode 100644 index a71db514276b..000000000000 --- a/sdk/ai/azure-ai-agents/README.md +++ /dev/null @@ -1,1284 +0,0 @@ - -# Azure AI Agents client library for Python - -Use the AI Agents client library to: - -* **Develop Agents using the Azure AI Agents Service**, leveraging an extensive ecosystem of models, tools, and capabilities from 
OpenAI, Microsoft, and other LLM providers. The Azure AI Agents Service enables the building of Agents for a wide range of generative AI use cases. -* **Note:** While this package can be used independently, we recommend using the Azure AI Projects client library (azure-ai-projects) for an enhanced experience. -The Projects library provides simplified access to advanced functionality, such as creating and managing agents, enumerating AI models, working with datasets and -managing search indexes, evaluating generative AI performance, and enabling OpenTelemetry tracing. - -[Product documentation](https://aka.ms/azsdk/azure-ai-agents/product-doc) -| [Samples][samples] -| [API reference documentation](https://aka.ms/azsdk/azure-ai-agents/python/reference) -| [Package (PyPI)](https://aka.ms/azsdk/azure-ai-agents/python/package) -| [SDK source code](https://aka.ms/azsdk/azure-ai-agents/python/code) -| [AI Starter Template](https://aka.ms/azsdk/azure-ai-agents/python/ai-starter-template) - -## Reporting issues - -To report an issue with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues). Mention the package name "azure-ai-agents" in the title or content. 
- -## Table of contents - -- [Getting started](#getting-started) - - [Prerequisite](#prerequisite) - - [Install the package](#install-the-package) -- [Key concepts](#key-concepts) - - [Create and authenticate the client](#create-and-authenticate-the-client) -- [Examples](#examples) - - [Create an Agent](#create-agent) with: - - [File Search](#create-agent-with-file-search) - - [Enterprise File Search](#create-agent-with-enterprise-file-search) - - [Code interpreter](#create-agent-with-code-interpreter) - - [Bing grounding](#create-agent-with-bing-grounding) - - [Azure AI Search](#create-agent-with-azure-ai-search) - - [Function call](#create-agent-with-function-call) - - [Azure Function Call](#create-agent-with-azure-function-call) - - [OpenAPI](#create-agent-with-openapi) - - [Fabric data](#create-an-agent-with-fabric) - - [Create thread](#create-thread) with - - [Tool resource](#create-thread-with-tool-resource) - - [Create message](#create-message) with: - - [File search attachment](#create-message-with-file-search-attachment) - - [Code interpreter attachment](#create-message-with-code-interpreter-attachment) - - [Create Message with Image Inputs](#create-message-with-image-inputs) - - [Execute Run, Run_and_Process, or Stream](#execute-run-run_and_process-or-stream) - - [Retrieve message](#retrieve-message) - - [Retrieve file](#retrieve-file) - - [Tear down by deleting resource](#teardown) - - [Tracing](#tracing) - - [Installation](#installation) - - [How to enable tracing](#how-to-enable-tracing) - - [How to trace your own functions](#how-to-trace-your-own-functions) -- [Troubleshooting](#troubleshooting) - - [Logging](#logging) - - [Reporting issues](#reporting-issues) -- [Next steps](#next-steps) -- [Contributing](#contributing) - -## Getting started - -### Prerequisite - -- Python 3.9 or later. -- An [Azure subscription][azure_sub]. -- A [project in Azure AI Foundry](https://learn.microsoft.com/azure/ai-studio/how-to/create-projects). 
-- The project endpoint string. It can be found in your Azure AI Foundry project overview page, under "Project details". Below we will assume the environment variable `PROJECT_ENDPOINT_STRING` was defined to hold this value. -- Entra ID is needed to authenticate the client. Your application needs an object that implements the [TokenCredential](https://learn.microsoft.com/python/api/azure-core/azure.core.credentials.tokencredential) interface. Code samples here use [DefaultAzureCredential](https://learn.microsoft.com/python/api/azure-identity/azure.identity.defaultazurecredential). To get that working, you will need: - * An appropriate role assignment. see [Role-based access control in Azure AI Foundry portal](https://learn.microsoft.com/azure/ai-foundry/concepts/rbac-ai-foundry). Role assigned can be done via the "Access Control (IAM)" tab of your Azure AI Project resource in the Azure portal. - * [Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli) installed. - * You are logged into your Azure account by running `az login`. - * Note that if you have multiple Azure subscriptions, the subscription that contains your Azure AI Project resource must be your default subscription. Run `az account list --output table` to list all your subscription and see which one is the default. Run `az account set --subscription "Your Subscription ID or Name"` to change your default subscription. 
- -### Install the package - -```bash -pip install azure-ai-agents -``` - -## Key concepts - -### Create and authenticate the client - -To construct a synchronous client: - -```python -import os -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) -``` - -To construct an asynchronous client, Install the additional package [aiohttp](https://pypi.org/project/aiohttp/): - -```bash -pip install aiohttp -``` - -and update the code above to import `asyncio`, and import `AgentsClient` from the `azure.ai.agents.aio` namespace: - -```python -import os -import asyncio -from azure.ai.agents.aio import AgentsClient -from azure.core.credentials import AzureKeyCredential - -agent_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) -``` - -## Examples - -### Create Agent - -Before creating an Agent, you need to set up Azure resources to deploy your model. [Create a New Agent Quickstart](https://learn.microsoft.com/azure/ai-services/agents/quickstart?pivots=programming-language-python-azure) details selecting and deploying your Agent Setup. - -Here is an example of how to create an Agent: - - -```python - - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - ) -``` - - - -To allow Agents to access your resources or custom functions, you need tools. You can pass tools to `create_agent` by either `toolset` or combination of `tools` and `tool_resources`. 
- -Here is an example of `toolset`: - - -```python -functions = FunctionTool(user_functions) -code_interpreter = CodeInterpreterTool() - -toolset = ToolSet() -toolset.add(functions) -toolset.add(code_interpreter) - -# To enable tool calls executed automatically -agents_client.enable_auto_function_calls(toolset) - -agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - toolset=toolset, -) -``` - - - -Also notices that if you use asynchronous client, you use `AsyncToolSet` instead. Additional information related to `AsyncFunctionTool` be discussed in the later sections. - -Here is an example to use `tools` and `tool_resources`: - - -```python -file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) - -# Notices that FileSearchTool as tool and tool_resources must be added or the agent unable to search the file -agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=file_search_tool.definitions, - tool_resources=file_search_tool.resources, -) -``` - - - -In the following sections, we show you sample code in either `toolset` or combination of `tools` and `tool_resources`. - -### Create Agent with File Search - -To perform file search by an Agent, we first need to upload a file, create a vector store, and associate the file to the vector store. 
Here is an example: - - - -```python -file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) -print(f"Uploaded file, file ID: {file.id}") - -vector_store = agents_client.vector_stores.create_and_poll(file_ids=[file.id], name="my_vectorstore") -print(f"Created vector store, vector store ID: {vector_store.id}") - -# Create file search tool with resources followed by creating agent -file_search = FileSearchTool(vector_store_ids=[vector_store.id]) - -agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="Hello, you are helpful agent and can search information from uploaded files", - tools=file_search.definitions, - tool_resources=file_search.resources, -) -``` - - - -### Create Agent with Enterprise File Search - -We can upload file to Azure as it is shown in the example, or use the existing Azure blob storage. In the code below we demonstrate how this can be achieved. First we upload file to azure and create `VectorStoreDataSource`, which then is used to create vector store. This vector store is then given to the `FileSearchTool` constructor. - - - -```python -# We will upload the local file to Azure and will use it for vector store creation. 
-asset_uri = os.environ["AZURE_BLOB_URI"] - -# Create a vector store with no file and wait for it to be processed -ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) -vector_store = agents_client.vector_stores.create_and_poll(data_sources=[ds], name="sample_vector_store") -print(f"Created vector store, vector store ID: {vector_store.id}") - -# Create a file search tool -file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) - -# Notices that FileSearchTool as tool and tool_resources must be added or the agent unable to search the file -agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=file_search_tool.definitions, - tool_resources=file_search_tool.resources, -) -``` - - - -We also can attach files to the existing vector store. In the code snippet below, we first create an empty vector store and add file to it. - - - -```python -# Create a vector store with no file and wait for it to be processed -vector_store = agents_client.vector_stores.create_and_poll(data_sources=[], name="sample_vector_store") -print(f"Created vector store, vector store ID: {vector_store.id}") - -ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) -# Add the file to the vector store or you can supply data sources in the vector store creation -vector_store_file_batch = agents_client.vector_store_file_batches.create_and_poll( - vector_store_id=vector_store.id, data_sources=[ds] -) -print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") - -# Create a file search tool -file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) -``` - - - -### Create Agent with Code Interpreter - -Here is an example to upload a file and use it for code interpreter by an Agent: - - - -```python -file = 
agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) -print(f"Uploaded file, file ID: {file.id}") - -code_interpreter = CodeInterpreterTool(file_ids=[file.id]) - -# Create agent with code interpreter tool and tools_resources -agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=code_interpreter.definitions, - tool_resources=code_interpreter.resources, -) -``` - - - -### Create Agent with Bing Grounding - -To enable your Agent to perform search through Bing search API, you use `BingGroundingTool` along with a connection. - -Here is an example: - - - -```python -conn_id = os.environ["AZURE_BING_CONNECTION_ID"] - -# Initialize agent bing tool and add the connection id -bing = BingGroundingTool(connection_id=conn_id) - -# Create agent with the bing tool and process agent run -with agents_client: - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - tools=bing.definitions, - ) -``` - - - -### Create Agent with Azure AI Search - -Azure AI Search is an enterprise search system for high-performance applications. It integrates with Azure OpenAI Service and Azure Machine Learning, offering advanced search technologies like vector search and full-text search. Ideal for knowledge base insights, information discovery, and automation. Creating an Agent with Azure AI Search requires an existing Azure AI Search Index. For more information and setup guides, see [Azure AI Search Tool Guide](https://learn.microsoft.com/azure/ai-services/agents/how-to/tools/azure-ai-search?tabs=azurecli%2Cpython&pivots=overview-azure-ai-search). 
- -Here is an example to integrate Azure AI Search: - - - -```python -conn_id = os.environ["AI_AZURE_AI_CONNECTION_ID"] - -print(conn_id) - -# Initialize agent AI search tool and add the search index connection id -ai_search = AzureAISearchTool( - index_connection_id=conn_id, index_name="sample_index", query_type=AzureAISearchQueryType.SIMPLE, top_k=3, filter="" -) - -# Create agent with AI search tool and process agent run -with agents_client: - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - tools=ai_search.definitions, - tool_resources=ai_search.resources, - ) -``` - - - -If the agent has found the relevant information in the index, the reference -and annotation will be provided in the message response. In the example above, we replace -the reference placeholder by the actual reference and url. Please note, that to -get sensible result, the index needs to have "embedding", "token", "category" and "title" fields. - - - -```python -# Fetch and log all messages -messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) -for message in messages: - if message.role == MessageRole.AGENT and message.url_citation_annotations: - placeholder_annotations = { - annotation.text: f" [see {annotation.url_citation.title}] ({annotation.url_citation.url})" - for annotation in message.url_citation_annotations - } - for message_text in message.text_messages: - message_str = message_text.text.value - for k, v in placeholder_annotations.items(): - message_str = message_str.replace(k, v) - print(f"{message.role}: {message_str}") - else: - for message_text in message.text_messages: - print(f"{message.role}: {message_text.text.value}") -``` - - - -### Create Agent with Function Call - -You can enhance your Agents by defining callback functions as function tools. 
These can be provided to `create_agent` via either the `toolset` parameter or the combination of `tools` and `tool_resources`. Here are the distinctions: - -For more details about requirements and specification of functions, refer to [Function Tool Specifications](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-agents/FunctionTool.md) - -Here is an example to use [user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-agents/samples/utils/user_functions.py) in `toolset`: - - -```python -functions = FunctionTool(user_functions) -toolset = ToolSet() -toolset.add(functions) -agents_client.enable_auto_function_calls(toolset) - -agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - toolset=toolset, -) -``` - - - -For asynchronous functions, you must import `AgentsClient` from `azure.ai.agents.aio` and use `AsyncFunctionTool`. Here is an example using [asynchronous user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_functions_async.py): - -```python -from azure.ai.agents.aio import AgentsClient -``` - - - -```python -functions = AsyncFunctionTool(user_async_functions) - -toolset = AsyncToolSet() -toolset.add(functions) -agents_client.enable_auto_function_calls(toolset) - -agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - toolset=toolset, -) -``` - - - -Notice that if `enable_auto_function_calls` is called, the SDK will invoke the functions automatically during `create_and_process` or streaming. 
If you prefer to execute them manually, refer to [`sample_agents_stream_eventhandler_with_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_eventhandler_with_functions.py) or -[`sample_agents_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_functions.py) - -### Create Agent With Azure Function Call - -The AI agent leverages Azure Functions triggered asynchronously via Azure Storage Queues. To enable the agent to perform Azure Function calls, you must set up the corresponding `AzureFunctionTool`, specifying input and output queues as well as parameter definitions. - -Example Python snippet illustrating how you create an agent utilizing the Azure Function Tool: - -```python -azure_function_tool = AzureFunctionTool( - name="foo", - description="Get answers from the foo bot.", - parameters={ - "type": "object", - "properties": { - "query": {"type": "string", "description": "The question to ask."}, - "outputqueueuri": {"type": "string", "description": "The full output queue uri."}, - }, - }, - input_queue=AzureFunctionStorageQueue( - queue_name="azure-function-foo-input", - storage_service_endpoint=storage_service_endpoint, - ), - output_queue=AzureFunctionStorageQueue( - queue_name="azure-function-tool-output", - storage_service_endpoint=storage_service_endpoint, - ), -) - -agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="azure-function-agent-foo", - instructions=f"You are a helpful support agent. Use the provided function any time the prompt contains the string 'What would foo say?'. When you invoke the function, ALWAYS specify the output queue uri parameter as '{storage_service_endpoint}/azure-function-tool-output'. 
Always responds with \"Foo says\" and then the response from the tool.", - tools=azure_function_tool.definitions, -) -print(f"Created agent, agent ID: {agent.id}") -``` - ---- - -**Limitations** - -Currently, the Azure Function integration for the AI Agent has the following limitations: - -- Supported trigger for Azure Function is currently limited to **Queue triggers** only. - HTTP or other trigger types and streaming responses are not supported at this time. - ---- - -**Create and Deploy Azure Function** - -Before you can use the agent with AzureFunctionTool, you need to create and deploy Azure Function. - -Below is an example Python Azure Function responding to queue-triggered messages and placing responses on the output queue: - -```python -import azure.functions as func -import logging -import json - -app = func.FunctionApp() - - -@app.function_name(name="Foo") -@app.queue_trigger( - arg_name="arguments", - queue_name="azure-function-foo-input", - connection="AzureWebJobsStorage") -@app.queue_output( - arg_name="outputQueue", - queue_name="azure-function-tool-output", - connection="AzureWebJobsStorage") -def foo(arguments: func.QueueMessage, outputQueue: func.Out[str]) -> None: - """ - The function, answering question. - - :param arguments: The arguments, containing json serialized request. - :param outputQueue: The output queue to write messages to. - """ - - parsed_args = json.loads(arguments.get_body().decode('utf-8')) - try: - response = { - "Value": "Bar", - "CorrelationId": parsed_args['CorrelationId'] - } - outputQueue.set(json.dumps(response)) - logging.info(f'The function returns the following message: {json.dumps(response)}') - except Exception as e: - logging.error(f"Error processing message: {e}") - raise -``` - -> **Important:** Both input and output payloads must contain the `CorrelationId`, which must match in request and response. 
- ---- - -**Azure Function Project Creation and Deployment** - -To deploy your function to Azure properly, follow Microsoft's official documentation step by step: - -[Azure Functions Python Developer Guide](https://learn.microsoft.com/azure/azure-functions/create-first-function-cli-python?tabs=windows%2Cbash%2Cazure-cli%2Cbrowser) - -**Summary of required steps:** - -- Use the Azure CLI or Azure Portal to create an Azure Function App. -- Create input and output queues in Azure Storage. -- Deploy your Function code. - ---- - -**Verification and Testing Azure Function** - -To ensure that your Azure Function deployment functions correctly: - -1. Place the following style message manually into the input queue (`input`): - -{ - "CorrelationId": "42" -} - -Check the output queue (`output`) and validate the structured message response: - -{ - "Value": "Bar", - "CorrelationId": "42" -} - ---- - -**Required Role Assignments (IAM Configuration)** - -Ensure your Azure AI Project identity has the following storage account permissions: -- `Storage Account Contributor` -- `Storage Blob Data Contributor` -- `Storage File Data Privileged Contributor` -- `Storage Queue Data Contributor` -- `Storage Table Data Contributor` - ---- - -**Additional Important Configuration Notes** - -- The Azure Function configured above uses the `AzureWebJobsStorage` connection string for queue connectivity. You may alternatively use managed identity-based connections as described in the official Azure Functions Managed Identity documentation. -- Storage queues you specify (`input` & `output`) should already exist in the storage account before the Function deployment or invocation, created manually via Azure portal or CLI. -- When using Azure storage account connection strings, make sure the account has enabled storage account key access (`Storage Account > Settings > Configuration`). - ---- - -With the above steps complete, your Azure Function integration with your AI Agent is ready for use. 
- - -### Create Agent With Logic Apps - -Logic Apps allow HTTP requests to trigger actions. For more information, refer to the guide [Logic App Workflows for Function Calling](https://learn.microsoft.com/azure/ai-services/openai/how-to/assistants-logic-apps). - -Your Logic App must be in the same resource group as your Azure AI Project, shown in the Azure Portal. Agents SDK accesses Logic Apps through Workflow URLs, which are fetched and called as requests in functions. - -Below is an example of how to create an Azure Logic App utility tool and register a function with it. - - - -```python - -# Create the agents client -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -# Extract subscription and resource group from the project scope -subscription_id = os.environ["SUBSCRIPTION_ID"] -resource_group = os.environ["resource_group_name"] - -# Logic App details -logic_app_name = "" -trigger_name = "" - -# Create and initialize AzureLogicAppTool utility -logic_app_tool = AzureLogicAppTool(subscription_id, resource_group) -logic_app_tool.register_logic_app(logic_app_name, trigger_name) -print(f"Registered logic app '{logic_app_name}' with trigger '{trigger_name}'.") - -# Create the specialized "send_email_via_logic_app" function for your agent tools -send_email_func = create_send_email_function(logic_app_tool, logic_app_name) - -# Prepare the function tools for the agent -functions_to_use: Set = { - fetch_current_datetime, - send_email_func, # This references the AzureLogicAppTool instance via closure -} -``` - - - -After this the functions can be incorporated normally into code using `FunctionTool`. - - -### Create Agent With OpenAPI - -OpenAPI specifications describe REST operations against a specific endpoint. Agents SDK can read an OpenAPI spec, create a function from it, and call that function against the REST endpoint without additional client-side execution. 
- -Here is an example creating an OpenAPI tool (using anonymous authentication): - - - -```python - -with open(weather_asset_file_path, "r") as f: - openapi_weather = jsonref.loads(f.read()) - -with open(countries_asset_file_path, "r") as f: - openapi_countries = jsonref.loads(f.read()) - -# Create Auth object for the OpenApiTool (note that connection or managed identity auth setup requires additional setup in Azure) -auth = OpenApiAnonymousAuthDetails() - -# Initialize agent OpenApi tool using the read in OpenAPI spec -openapi_tool = OpenApiTool( - name="get_weather", spec=openapi_weather, description="Retrieve weather information for a location", auth=auth -) -openapi_tool.add_definition( - name="get_countries", spec=openapi_countries, description="Retrieve a list of countries", auth=auth -) - -# Create agent with OpenApi tool and process agent run -with agents_client: - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - tools=openapi_tool.definitions, - ) -``` - - - - -### Create an Agent with Fabric - -To enable your Agent to answer queries using Fabric data, use `FabricTool` along with a connection to the Fabric resource. - -Here is an example: - - - -```python -conn_id = os.environ["FABRIC_CONNECTION_ID"] - -print(conn_id) - -# Initialize an Agent Fabric tool and add the connection id -fabric = FabricTool(connection_id=conn_id) - -# Create an Agent with the Fabric tool and process an Agent run -with agents_client: - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - tools=fabric.definitions, - ) -``` - - - - -### Create Thread - -For each session or conversation, a thread is required. 
Here is an example: - - - -```python -thread = agents_client.threads.create() -``` - - - -### Create Thread with Tool Resource - -In some scenarios, you might need to assign specific resources to individual threads. To achieve this, you provide the `tool_resources` argument to `create_thread`. In the following example, you create a vector store and upload a file, enable an Agent for file search using the `tools` argument, and then associate the file with the thread using the `tool_resources` argument. - - - -```python -file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) -print(f"Uploaded file, file ID: {file.id}") - -vector_store = agents_client.vector_stores.create_and_poll(file_ids=[file.id], name="my_vectorstore") -print(f"Created vector store, vector store ID: {vector_store.id}") - -# Create file search tool with resources followed by creating agent -file_search = FileSearchTool(vector_store_ids=[vector_store.id]) - -agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="Hello, you are helpful agent and can search information from uploaded files", - tools=file_search.definitions, -) - -print(f"Created agent, ID: {agent.id}") - -# Create thread with file resources. -# If the agent has multiple threads, only this thread can search this file. 
-thread = agents_client.threads.create(tool_resources=file_search.resources) -``` - - - -#### List Threads - -To list all threads attached to a given agent, use the list_threads API: - -```python -threads = agents_client.threads.list() -``` - -### Create Message - -To create a message for agent to process, you pass `user` as `role` and a question as `content`: - - - -```python -message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") -``` - - - -### Create Message with File Search Attachment - -To attach a file to a message for content searching, you use `MessageAttachment` and `FileSearchTool`: - - - -```python -attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions) -message = agents_client.messages.create( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment] -) -``` - - - -### Create Message with Code Interpreter Attachment - -To attach a file to a message for data analysis, use `MessageAttachment` and `CodeInterpreterTool` classes. You must pass `CodeInterpreterTool` as `tools` or `toolset` in `create_agent` call or the file attachment cannot be opened for code interpreter. 
- -Here is an example to pass `CodeInterpreterTool` as tool: - - - -```python -# Notice that CodeInterpreter must be enabled in the agent creation, -# otherwise the agent will not be able to see the file attachment for code interpretation -agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=CodeInterpreterTool().definitions, -) -print(f"Created agent, agent ID: {agent.id}") - -thread = agents_client.threads.create() -print(f"Created thread, thread ID: {thread.id}") - -# Create an attachment -attachment = MessageAttachment(file_id=file.id, tools=CodeInterpreterTool().definitions) - -# Create a message -message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", - attachments=[attachment], -) -``` - - - -Azure blob storage can be used as a message attachment. In this case, use `VectorStoreDataSource` as a data source: - - - -```python -# We will upload the local file to Azure and will use it for vector store creation. 
-asset_uri = os.environ["AZURE_BLOB_URI"] -ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) - -# Create a message with the attachment -attachment = MessageAttachment(data_source=ds, tools=code_interpreter.definitions) -message = agents_client.messages.create( - thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] -) -``` - - - -### Create Message with Image Inputs - -You can send messages to Azure agents with image inputs in following ways: - -- **Using an image stored as a uploaded file** -- **Using a public image accessible via URL** -- **Using a base64 encoded image string** - -The following examples demonstrate each method: - -#### Create message using uploaded image file - -```python -# Upload the local image file -image_file = agents_client.files.upload_and_poll(file_path="image_file.png", purpose="assistants") - -# Construct content using uploaded image -file_param = MessageImageFileParam(file_id=image_file.id, detail="high") -content_blocks = [ - MessageInputTextBlock(text="Hello, what is in the image?"), - MessageInputImageFileBlock(image_file=file_param), -] - -# Create the message -message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content=content_blocks -) -``` - -#### Create message with an image URL input - -```python -# Specify the public image URL -image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" - -# Create content directly referencing image URL -url_param = MessageImageUrlParam(url=image_url, detail="high") -content_blocks = [ - MessageInputTextBlock(text="Hello, what is in the image?"), - MessageInputImageUrlBlock(image_url=url_param), -] - -# Create the message -message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content=content_blocks -) -``` - -#### Create 
message with base64-encoded image input - -```python -import base64 - -def image_file_to_base64(path: str) -> str: - with open(path, "rb") as f: - return base64.b64encode(f.read()).decode("utf-8") - -# Convert your image file to base64 format -image_base64 = image_file_to_base64("image_file.png") - -# Prepare the data URL -img_data_url = f"data:image/png;base64,{image_base64}" - -# Use base64 encoded string as image URL parameter -url_param = MessageImageUrlParam(url=img_data_url, detail="high") -content_blocks = [ - MessageInputTextBlock(text="Hello, what is in the image?"), - MessageInputImageUrlBlock(image_url=url_param), -] - -# Create the message -message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content=content_blocks -) -``` - -### Execute Run, Run_and_Process, or Stream - -To process your message, you can use `runs.create`, `runs.create_and_process`, or `runs.stream`. - -`create_run` requests the Agent to process the message without polling for the result. If you are using `function tools` regardless as `toolset` or not, your code is responsible for polling for the result and acknowledging the status of `Run`. When the status is `requires_action`, your code is responsible for calling the function tools. For a code sample, visit [`sample_agents_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_functions.py). - -Here is an example of `runs.create` and poll until the run is completed: - - - -```python -run = agents_client.runs.create(thread_id=thread.id, agent_id=agent.id) - -# Poll the run as long as run status is queued or in progress -while run.status in ["queued", "in_progress", "requires_action"]: - # Wait for a second - time.sleep(1) - run = agents_client.runs.get(thread_id=thread.id, run_id=run.id) -``` - - - -To have the SDK poll on your behalf and call `function tools`, use the `create_and_process` method. 
Note that `function tools` will only be invoked if they are provided as `toolset` during the `create_agent` call. - -Here is an example: - - - -```python -run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) -``` - - - -With streaming, polling need not be considered. If `function tools` are provided as `toolset` during the `create_agent` call, they will be invoked by the SDK. - -Here is an example of streaming: - - - -```python -with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id) as stream: - - for event_type, event_data, _ in stream: - - if isinstance(event_data, MessageDeltaChunk): - print(f"Text delta received: {event_data.text}") - - elif isinstance(event_data, ThreadMessage): - print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") - - elif isinstance(event_data, ThreadRun): - print(f"ThreadRun status: {event_data.status}") - - elif isinstance(event_data, RunStep): - print(f"RunStep type: {event_data.type}, Status: {event_data.status}") - - elif event_type == AgentStreamEvent.ERROR: - print(f"An error occurred. Data: {event_data}") - - elif event_type == AgentStreamEvent.DONE: - print("Stream completed.") - break - - else: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") -``` - - - -In the code above, because an `event_handler` object is not passed to the `stream` function, the SDK will instantiate `AgentEventHandler` or `AsyncAgentEventHandler` as the default event handler and produce an iterable object with `event_type` and `event_data`. `AgentEventHandler` and `AsyncAgentEventHandler` are overridable. Here is an example: - - - -```python -# With AgentEventHandler[str], the return type for each event functions is optional string. 
-class MyEventHandler(AgentEventHandler[str]): - - def on_message_delta(self, delta: "MessageDeltaChunk") -> Optional[str]: - return f"Text delta received: {delta.text}" - - def on_thread_message(self, message: "ThreadMessage") -> Optional[str]: - return f"ThreadMessage created. ID: {message.id}, Status: {message.status}" - - def on_thread_run(self, run: "ThreadRun") -> Optional[str]: - return f"ThreadRun status: {run.status}" - - def on_run_step(self, step: "RunStep") -> Optional[str]: - return f"RunStep type: {step.type}, Status: {step.status}" - - def on_error(self, data: str) -> Optional[str]: - return f"An error occurred. Data: {data}" - - def on_done(self) -> Optional[str]: - return "Stream completed." - - def on_unhandled_event(self, event_type: str, event_data: Any) -> Optional[str]: - return f"Unhandled Event Type: {event_type}, Data: {event_data}" -``` - - - - - - -```python -with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler()) as stream: - for event_type, event_data, func_return in stream: - print(f"Received data.") - print(f"Streaming receive Event Type: {event_type}") - print(f"Event Data: {str(event_data)[:100]}...") - print(f"Event Function return: {func_return}\n") -``` - - - -As you can see, this SDK parses the events and produces various event types similar to OpenAI agents. In your use case, you might not be interested in handling all these types and may decide to parse the events on your own. To achieve this, please refer to [override base event handler](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_with_base_override_eventhandler.py). - -``` -Note: Multiple streaming processes may be chained behind the scenes. - -When the SDK receives a `ThreadRun` event with the status `requires_action`, the next event will be `Done`, followed by termination. The SDK will submit the tool calls using the same event handler. 
 The event handler will then chain the main stream with the tool stream. - -Consequently, when you iterate over the streaming using a for loop similar to the example above, the for loop will receive events from the main stream followed by events from the tool stream. -``` - - -### Retrieve Message - -To retrieve messages from agents, use the following example: - - - -```python -messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) -for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") -``` - - - -In addition, `messages` and `messages.data[]` offer helper properties such as `text_messages`, `image_contents`, `file_citation_annotations`, and `file_path_annotations` to quickly retrieve content from one message or all messages. - -### Retrieve File - -Files uploaded by Agents cannot be retrieved back. If your use case needs to access the file content uploaded by the Agents, you are advised to keep an additional copy accessible by your application. However, files generated by Agents are retrievable by `save_file` or `get_file_content`. 
- -Here is an example retrieving file ids from messages and save to the local drive: - - - -```python -messages = agents_client.messages.list(thread_id=thread.id) -print(f"Messages: {messages}") - -for msg in messages: - # Save every image file in the message - for img in msg.image_contents: - file_id = img.image_file.file_id - file_name = f"{file_id}_image_file.png" - agents_client.files.save(file_id=file_id, file_name=file_name) - print(f"Saved image file to: {Path.cwd() / file_name}") - - # Print details of every file-path annotation - for ann in msg.file_path_annotations: - print("File Paths:") - print(f" Type: {ann.type}") - print(f" Text: {ann.text}") - print(f" File ID: {ann.file_path.file_id}") - print(f" Start Index: {ann.start_index}") - print(f" End Index: {ann.end_index}") -``` - - - -Here is an example to use `get_file_content`: - -```python -from pathlib import Path - -async def save_file_content(client, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None): - # Determine the target directory - path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd() - path.mkdir(parents=True, exist_ok=True) - - # Retrieve the file content - file_content_stream = await client.files.get_content(file_id) - if not file_content_stream: - raise RuntimeError(f"No content retrievable for file ID '{file_id}'.") - - # Collect all chunks asynchronously - chunks = [] - async for chunk in file_content_stream: - if isinstance(chunk, (bytes, bytearray)): - chunks.append(chunk) - else: - raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}") - - target_file_path = path / file_name - - # Write the collected content to the file synchronously - with open(target_file_path, "wb") as file: - for chunk in chunks: - file.write(chunk) -``` - -### Teardown - -To remove resources after completing tasks, use the following functions: - - - -```python -# Delete the file when done -agents_client.vector_stores.delete(vector_store.id) 
-print("Deleted vector store") - -agents_client.files.delete(file_id=file.id) -print("Deleted file") - -# Delete the agent when done -agents_client.delete_agent(agent.id) -print("Deleted agent") -``` - - - -## Tracing - -You can add an Application Insights Azure resource to your Azure AI Foundry project. See the Tracing tab in your AI Foundry project. If one was enabled, you can get the Application Insights connection string, configure your Agents, and observe the full execution path through Azure Monitor. Typically, you might want to start tracing before you create an Agent. - -### Installation - -Make sure to install OpenTelemetry and the Azure SDK tracing plugin via - -```bash -pip install opentelemetry -pip install azure-ai-agents azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry -``` - -You will also need an exporter to send telemetry to your observability backend. You can print traces to the console or use a local viewer such as [Aspire Dashboard](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash). - -To connect to Aspire Dashboard or another OpenTelemetry compatible backend, install OTLP exporter: - -```bash -pip install opentelemetry-exporter-otlp -``` - -### How to enable tracing - -Here is a code sample that shows how to enable Azure Monitor tracing: - - - -```python -from opentelemetry import trace -from azure.monitor.opentelemetry import configure_azure_monitor - -# Enable Azure Monitor tracing -application_insights_connection_string = os.environ["APPLICATIONINSIGHTS_CONNECTION_STRING"] -configure_azure_monitor(connection_string=application_insights_connection_string) - -scenario = os.path.basename(__file__) -tracer = trace.get_tracer(__name__) - -with tracer.start_as_current_span(scenario): - with agents_client: -``` - - - -In addition, you might find it helpful to see the tracing logs in the console. 
You can achieve this with the following code: - -```python -from azure.ai.agents.telemetry import enable_telemetry - -enable_telemetry(destination=sys.stdout) -``` -### How to trace your own functions - -The decorator `trace_function` is provided for tracing your own function calls using OpenTelemetry. By default the function name is used as the name for the span. Alternatively you can provide the name for the span as a parameter to the decorator. - -This decorator handles various data types for function parameters and return values, and records them as attributes in the trace span. The supported data types include: -* Basic data types: str, int, float, bool -* Collections: list, dict, tuple, set - * Special handling for collections: - - If a collection (list, dict, tuple, set) contains nested collections, the entire collection is converted to a string before being recorded as an attribute. - - Sets and dictionaries are always converted to strings to ensure compatibility with span attributes. - -Object types are omitted, and the corresponding parameter is not traced. - -The parameters are recorded in attributes `code.function.parameter.` and the return value is recorded in the attribute `code.function.return.value`. - -## Troubleshooting - -### Logging - -The client uses the standard [Python logging library](https://docs.python.org/3/library/logging.html). The SDK logs HTTP request and response details, which may be useful in troubleshooting. To log to stdout, add the following: - -```python -import sys -import logging - -# Acquire the logger for this client library. Use 'azure' to affect both -# `azure.core` and `azure.ai.inference` libraries. -logger = logging.getLogger("azure") - -# Set the desired logging level. logging.INFO or logging.DEBUG are good options. 
-logger.setLevel(logging.DEBUG) - -# Direct logging output to stdout: -handler = logging.StreamHandler(stream=sys.stdout) -# Or direct logging output to a file: -# handler = logging.FileHandler(filename="sample.log") -logger.addHandler(handler) - -# Optional: change the default logging format. Here we add a timestamp. -#formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s") -#handler.setFormatter(formatter) -``` - -By default logs redact the values of URL query strings, the values of some HTTP request and response headers (including `Authorization` which holds the key or token), and the request and response payloads. To create logs without redaction, add `logging_enable = True` to the client constructor: - -```python -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), - logging_enable = True -) -``` - -Note that the log level must be set to `logging.DEBUG` (see above code). Logs will be redacted with any other log level. - -Be sure to protect non redacted logs to avoid compromising security. - -For more information, see [Configure logging in the Azure libraries for Python](https://aka.ms/azsdk/python/logging) - -### Reporting issues - -To report an issue with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues). Mention the package name "azure-ai-agents" in the title or content. - - -## Next steps - -Have a look at the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-agents/samples) folder, containing fully runnable Python code for synchronous and asynchronous clients. - -Explore the [AI Starter Template](https://aka.ms/azsdk/azure-ai-agents/python/ai-starter-template). This template creates an Azure AI Foundry hub, project and connected resources including Azure OpenAI Service, AI Search and more. 
It also deploys a simple chat application to Azure Container Apps. - -## Contributing - -This project welcomes contributions and suggestions. Most contributions require -you to agree to a Contributor License Agreement (CLA) declaring that you have -the right to, and actually do, grant us the rights to use your contribution. -For details, visit https://cla.microsoft.com. - -When you submit a pull request, a CLA-bot will automatically determine whether -you need to provide a CLA and decorate the PR appropriately (e.g., label, -comment). Simply follow the instructions provided by the bot. You will only -need to do this once across all repos using our CLA. - -This project has adopted the -[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, -see the Code of Conduct FAQ or contact opencode@microsoft.com with any -additional questions or comments. - - -[samples]: https://aka.ms/azsdk/azure-ai-projects/python/samples/ -[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ -[entra_id]: https://learn.microsoft.com/azure/ai-services/authentication?tabs=powershell#authenticate-with-microsoft-entra-id -[azure_identity_credentials]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#credentials -[azure_identity_pip]: https://pypi.org/project/azure-identity/ -[default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential -[pip]: https://pypi.org/project/pip/ -[azure_sub]: https://azure.microsoft.com/free/ -[evaluators]: https://learn.microsoft.com/azure/ai-studio/how-to/develop/evaluate-sdk -[azure_ai_evaluation]: https://learn.microsoft.com/python/api/overview/azure/ai-evaluation-readme -[evaluator_library]: https://learn.microsoft.com/azure/ai-studio/how-to/evaluate-generative-ai-app#view-and-manage-the-evaluators-in-the-evaluator-library \ No newline at end of file diff --git a/sdk/ai/azure-ai-agents/README.md 
b/sdk/ai/azure-ai-agents/README.md new file mode 120000 index 000000000000..6687ec1c88d9 --- /dev/null +++ b/sdk/ai/azure-ai-agents/README.md @@ -0,0 +1 @@ +../azure-ai-projects/README_AGENTS.md \ No newline at end of file diff --git a/sdk/ai/azure-ai-agents/samples b/sdk/ai/azure-ai-agents/samples new file mode 120000 index 000000000000..8e198d66109e --- /dev/null +++ b/sdk/ai/azure-ai-agents/samples @@ -0,0 +1 @@ +../azure-ai-projects/samples/agents \ No newline at end of file diff --git a/sdk/ai/azure-ai-agents/samples/__init__.py b/sdk/ai/azure-ai-agents/samples/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/__init__.py b/sdk/ai/azure-ai-agents/samples/agents_async/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_azure_functions_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_azure_functions_async.py deleted file mode 100644 index 797be02b32cf..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_azure_functions_async.py +++ /dev/null @@ -1,108 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -import asyncio - -""" -DESCRIPTION: - This sample demonstrates how to use azure function agent operations from - the Azure Agents service using a asynchronous client. - -USAGE: - python sample_agents_azure_functions_async.py - - Before running the sample: - - pip install azure-ai-projects azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. 
- 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. - 3) STORAGE_SERVICE_ENDPONT - the storage service queue endpoint, triggering Azure function. - - Please see Getting Started with Azure Functions page for more information on Azure Functions: - https://learn.microsoft.com/azure/azure-functions/functions-get-started -""" - -import os -from azure.ai.agents.aio import AgentsClient -from azure.identity.aio import DefaultAzureCredential -from azure.ai.agents.models import ( - AzureFunctionStorageQueue, - AzureFunctionTool, - MessageRole, -) - - -async def main(): - - async with DefaultAzureCredential( - exclude_managed_identity_credential=True, exclude_environment_credential=True - ) as creds: - async with AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=creds, - ) as agents_client: - - storage_service_endpoint = os.environ["STORAGE_SERVICE_ENDPONT"] - azure_function_tool = AzureFunctionTool( - name="foo", - description="Get answers from the foo bot.", - parameters={ - "type": "object", - "properties": { - "query": {"type": "string", "description": "The question to ask."}, - "outputqueueuri": {"type": "string", "description": "The full output queue uri."}, - }, - }, - input_queue=AzureFunctionStorageQueue( - queue_name="azure-function-foo-input", - storage_service_endpoint=storage_service_endpoint, - ), - output_queue=AzureFunctionStorageQueue( - queue_name="azure-function-tool-output", - storage_service_endpoint=storage_service_endpoint, - ), - ) - - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="azure-function-agent-foo", - instructions=f"You are a helpful support agent. Use the provided function any time the prompt contains the string 'What would foo say?'. 
When you invoke the function, ALWAYS specify the output queue uri parameter as '{storage_service_endpoint}/azure-function-tool-output'. Always responds with \"Foo says\" and then the response from the tool.", - tools=azure_function_tool.definitions, - ) - print(f"Created agent, agent ID: {agent.id}") - - # Create a thread - thread = await agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - # Create a message - message = await agents_client.messages.create( - thread_id=thread.id, - role="user", - content="What is the most prevalent element in the universe? What would foo say?", - ) - print(f"Created message, message ID: {message.id}") - - run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - # Get the last message from the sender - last_msg = await agents_client.messages.get_last_message_text_by_role( - thread_id=thread.id, role=MessageRole.AGENT - ) - if last_msg: - print(f"Last Message: {last_msg.text.value}") - - # Delete the agent once done - await agents_client.delete_agent(agent.id) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_basics_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_basics_async.py deleted file mode 100644 index 04ff45f31ec5..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_basics_async.py +++ /dev/null @@ -1,82 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use basic agent operations from - the Azure Agents service using a asynchronous client. 
- -USAGE: - python sample_agents_basics_async.py - - Before running the sample: - - pip install azure-ai-agents azure-identity aiohttp - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" -import asyncio -import time - -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import MessageTextContent, ListSortOrder -from azure.identity.aio import DefaultAzureCredential - -import os - - -async def main() -> None: - - async with DefaultAzureCredential() as creds: - agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=creds, - ) - - async with agents_client: - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are helpful agent" - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = await agents_client.messages.create( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) - print(f"Created message, message ID: {message.id}") - - run = await agents_client.runs.create(thread_id=thread.id, agent_id=agent.id) - - # Poll the run as long as run status is queued or in progress - while run.status in ["queued", "in_progress", "requires_action"]: - # Wait for a second - time.sleep(1) - run = await agents_client.runs.get(thread_id=thread.id, run_id=run.id) - print(f"Run status: {run.status}") - - if run.status == "failed": - print(f"Run error: {run.last_error}") - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list( - thread_id=thread.id, - order=ListSortOrder.ASCENDING, - ) - 
async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_basics_create_thread_and_process_run_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_basics_create_thread_and_process_run_async.py deleted file mode 100644 index 8512d64525aa..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_basics_create_thread_and_process_run_async.py +++ /dev/null @@ -1,79 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -""" -DESCRIPTION: - Asynchronous variant of sample_agents_basics_thread_and_process_run.py. - This sample demonstrates how to use the new convenience method - `create_thread_and_process_run` in the Azure AI Agents service. - This single call will create a thread, start a run, poll to - completion (including any tool calls), and return the final result. - -USAGE: - python sample_agents_basics_thread_and_process_run_async.py - - Before running: - - pip install azure-ai-agents azure-identity aiohttp - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. 
-""" -import asyncio -import os - -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import ( - AgentThreadCreationOptions, - ThreadMessageOptions, - MessageTextContent, - ListSortOrder, -) -from azure.identity.aio import DefaultAzureCredential - - -async def main() -> None: - async with DefaultAzureCredential() as credential: - agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=credential, - ) - - async with agents_client: - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="sample-agent", - instructions="You are a helpful assistant that tells jokes.", - ) - print(f"Created agent, agent ID: {agent.id}") - - run = await agents_client.create_thread_and_process_run( - agent_id=agent.id, - thread=AgentThreadCreationOptions( - messages=[ThreadMessageOptions(role="user", content="Hi! Tell me your favorite programming joke.")] - ), - ) - - if run.status == "failed": - print(f"Run error: {run.last_error}") - - # List all messages in the thread, in ascending order of creation - messages = agents_client.messages.list( - thread_id=run.thread_id, - order=ListSortOrder.ASCENDING, - ) - async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - await agents_client.delete_agent(agent.id) - print(f"Deleted agent {agent.id!r}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_basics_create_thread_and_run_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_basics_create_thread_and_run_async.py deleted file mode 100644 index aa7f203b9c25..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_basics_create_thread_and_run_async.py +++ /dev/null @@ -1,88 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------ -""" -DESCRIPTION: - Asynchronous variant of sample_agents_basics_thread_and_run.py. - It creates an agent, starts a new thread, and immediately runs it - using the async Azure AI Agents client. - -USAGE: - python sample_agents_basics_thread_and_run_async.py - - Before running: - - pip install azure-ai-agents azure-identity aiohttp - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" -import asyncio -import os - -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import ( - AgentThreadCreationOptions, - ThreadMessageOptions, - MessageTextContent, - ListSortOrder, -) -from azure.identity.aio import DefaultAzureCredential - - -async def main() -> None: - async with DefaultAzureCredential() as credential: - agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=credential, - ) - - async with agents_client: - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="sample-agent", - instructions="You are a helpful assistant that tells jokes.", - ) - print(f"Created agent, agent ID: {agent.id}") - - # Prepare the initial user message - initial_message = ThreadMessageOptions( - role="user", - content="Hello! 
Can you tell me a joke?", - ) - - # Create a new thread and immediately start a run on it - run = await agents_client.create_thread_and_run( - agent_id=agent.id, - thread=AgentThreadCreationOptions(messages=[initial_message]), - ) - - # Poll the run as long as run status is queued or in progress - while run.status in {"queued", "in_progress", "requires_action"}: - await asyncio.sleep(1) - run = await agents_client.runs.get(thread_id=run.thread_id, run_id=run.id) - print(f"Run status: {run.status}") - - if run.status == "failed": - print(f"Run error: {run.last_error}") - - # List all messages in the thread, in ascending order of creation - messages = agents_client.messages.list( - thread_id=run.thread_id, - order=ListSortOrder.ASCENDING, - ) - async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - await agents_client.delete_agent(agent.id) - print(f"Deleted agent {agent.id!r}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_code_interpreter_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_code_interpreter_async.py deleted file mode 100644 index 7d01c36f73b7..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_code_interpreter_async.py +++ /dev/null @@ -1,106 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use code interpreter tool with agent from - the Azure Agents service using a asynchronous client. 
- -USAGE: - python sample_agents_code_interpreter_async.py - - Before running the sample: - - pip install azure-ai-agents azure-identity aiohttp - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" -import asyncio - -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import CodeInterpreterTool, FilePurpose, ListSortOrder, MessageRole -from azure.identity.aio import DefaultAzureCredential -from pathlib import Path - -import os - -asset_file_path = os.path.abspath( - os.path.join(os.path.dirname(__file__), "../assets/synthetic_500_quarterly_results.csv") -) - - -async def main() -> None: - - async with DefaultAzureCredential() as creds: - - async with AgentsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as agents_client: - # Upload a file and wait for it to be processed - file = await agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) - print(f"Uploaded file, file ID: {file.id}") - - code_interpreter = CodeInterpreterTool(file_ids=[file.id]) - - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=code_interpreter.definitions, - tool_resources=code_interpreter.resources, - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = await agents_client.messages.create( - thread_id=thread.id, - role="user", - content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", - ) - print(f"Created message, message ID: 
{message.id}") - - run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - # Check if you got "Rate limit is exceeded.", then you want to get more quota - print(f"Run failed: {run.last_error}") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - - last_msg = await agents_client.messages.get_last_message_text_by_role( - thread_id=thread.id, role=MessageRole.AGENT - ) - if last_msg: - print(f"Last Message: {last_msg.text.value}") - - async for msg in messages: - # Save every image file in the message - for img in msg.image_contents: - file_id = img.image_file.file_id - file_name = f"{file_id}_image_file.png" - await agents_client.files.save(file_id=file_id, file_name=file_name) - print(f"Saved image file to: {Path.cwd() / file_name}") - - # Print details of every file-path annotation - for ann in msg.file_path_annotations: - print("File Paths:") - print(f" Type: {ann.type}") - print(f" Text: {ann.text}") - print(f" File ID: {ann.file_path.file_id}") - print(f" Start Index: {ann.start_index}") - print(f" End Index: {ann.end_index}") - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_code_interpreter_attachment_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_code_interpreter_attachment_async.py deleted file mode 100644 index 2474a7242e81..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_code_interpreter_attachment_async.py +++ /dev/null @@ -1,89 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with code interpreter from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_code_interpreter_attachment_async.py - - Before running the sample: - - pip install azure-ai-agents azure-identity aiohttp - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" -import asyncio -import os -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import ( - CodeInterpreterTool, - FilePurpose, - MessageAttachment, - ListSortOrder, - MessageTextContent, -) -from azure.identity.aio import DefaultAzureCredential - -asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) - - -async def main(): - async with DefaultAzureCredential() as creds: - async with AgentsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as agents_client: - # Upload a file and wait for it to be processed - file = await agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) - print(f"Uploaded file, file ID: {file.id}") - - code_interpreter = CodeInterpreterTool() - - # Notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=code_interpreter.definitions, - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - # Create a message 
with the attachment - attachment = MessageAttachment(file_id=file.id, tools=code_interpreter.definitions) - message = await agents_client.messages.create( - thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] - ) - print(f"Created message, message ID: {message.id}") - - run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - # Check if you got "Rate limit is exceeded.", then you want to get more quota - print(f"Run failed: {run.last_error}") - - await agents_client.files.delete(file.id) - print("Deleted file") - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_code_interpreter_attachment_enterprise_search_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_code_interpreter_attachment_enterprise_search_async.py deleted file mode 100644 index e24340ba347f..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_code_interpreter_attachment_enterprise_search_async.py +++ /dev/null @@ -1,85 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with code interpreter from - the Azure Agents service using a synchronous client. 
- -USAGE: - python sample_agents_code_interpreter_attachment_enterprise_search_async.py - - Before running the sample: - - pip install azure-ai-agents azure-identity aiohttp - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" -import asyncio -import os -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import ( - CodeInterpreterTool, - ListSortOrder, - MessageAttachment, - MessageTextContent, - VectorStoreDataSource, - VectorStoreDataSourceAssetType, -) -from azure.identity.aio import DefaultAzureCredential - - -async def main(): - async with DefaultAzureCredential() as credential: - async with AgentsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=credential) as agents_client: - - code_interpreter = CodeInterpreterTool() - - # Notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=code_interpreter.definitions, - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - # We will upload the local file to Azure and will use it for vector store creation. 
- asset_uri = os.environ["AZURE_BLOB_URI"] - ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) - - # Create a message with the attachment - attachment = MessageAttachment(data_source=ds, tools=code_interpreter.definitions) - message = await agents_client.messages.create( - thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] - ) - print(f"Created message, message ID: {message.id}") - - run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - # Check if you got "Rate limit is exceeded.", then you want to get more quota - print(f"Run failed: {run.last_error}") - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_functions_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_functions_async.py deleted file mode 100644 index 678e1b717fd7..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_functions_async.py +++ /dev/null @@ -1,120 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_functions_async.py - -DESCRIPTION: - This sample demonstrates how to use agent operations with custom functions from - the Azure Agents service using a asynchronous client. 
- -USAGE: - python sample_agents_functions_async.py - - Before running the sample: - - pip install azure-ai-agents azure-identity aiohttp - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" -import asyncio -import time -import os -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import ( - AsyncFunctionTool, - RequiredFunctionToolCall, - SubmitToolOutputsAction, - ToolOutput, - ListSortOrder, - MessageTextContent, -) -from azure.identity.aio import DefaultAzureCredential -from utils.user_async_functions import user_async_functions - - -async def main() -> None: - async with DefaultAzureCredential() as creds: - async with AgentsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as agents_client: - # Initialize agent functions - functions = AsyncFunctionTool(functions=user_async_functions) - - # Create agent - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=functions.definitions, - ) - print(f"Created agent, agent ID: {agent.id}") - - # Create thread for communication - thread = await agents_client.threads.create() - print(f"Created thread, ID: {thread.id}") - - # Create and send message - message = await agents_client.messages.create( - thread_id=thread.id, role="user", content="Hello, what's the time?" 
- ) - print(f"Created message, ID: {message.id}") - - # Create and run agent task - run = await agents_client.runs.create(thread_id=thread.id, agent_id=agent.id) - print(f"Created run, ID: {run.id}") - - # Polling loop for run status - while run.status in ["queued", "in_progress", "requires_action"]: - time.sleep(4) - run = await agents_client.runs.get(thread_id=thread.id, run_id=run.id) - - if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): - tool_calls = run.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - print("No tool calls provided - cancelling run") - await agents_client.runs.cancel(thread_id=thread.id, run_id=run.id) - break - - tool_outputs = [] - for tool_call in tool_calls: - if isinstance(tool_call, RequiredFunctionToolCall): - try: - output = await functions.execute(tool_call) - tool_outputs.append( - ToolOutput( - tool_call_id=tool_call.id, - output=output, - ) - ) - except Exception as e: - print(f"Error executing tool_call {tool_call.id}: {e}") - - print(f"Tool outputs: {tool_outputs}") - if tool_outputs: - await agents_client.runs.submit_tool_outputs( - thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs - ) - - print(f"Current run status: {run.status}") - - print(f"Run completed with status: {run.status}") - - # Delete the agent when done - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - # Fetch and log messages - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_image_input_base64_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_image_input_base64_async.py deleted file mode 100644 index 6c2aa34302ec..000000000000 
--- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_image_input_base64_async.py +++ /dev/null @@ -1,113 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use basic agent operations using image file input for the - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_image_input_base64.py - - Before running the sample: - - pip install azure-ai-projects azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" -import asyncio -import os, time, base64 -from typing import List -from azure.ai.agents.aio import AgentsClient -from azure.identity.aio import DefaultAzureCredential -from azure.ai.agents.models import ( - ListSortOrder, - MessageTextContent, - MessageInputContentBlock, - MessageImageUrlParam, - MessageInputTextBlock, - MessageInputImageUrlBlock, -) - -asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/image_file.png")) - - -def image_to_base64(image_path: str) -> str: - """ - Convert an image file to a Base64-encoded string. - - :param image_path: The path to the image file (e.g. 'image_file.png') - :return: A Base64-encoded string representing the image. - :raises FileNotFoundError: If the provided file path does not exist. - :raises OSError: If there's an error reading the file. 
- """ - if not os.path.isfile(image_path): - raise FileNotFoundError(f"File not found at: {image_path}") - - try: - with open(image_path, "rb") as image_file: - file_data = image_file.read() - return base64.b64encode(file_data).decode("utf-8") - except Exception as exc: - raise OSError(f"Error reading file '{image_path}'") from exc - - -async def main(): - async with DefaultAzureCredential() as creds: - async with AgentsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as agents_client: - - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - input_message = "Hello, what is in the image ?" - image_base64 = image_to_base64(asset_file_path) - img_url = f"data:image/png;base64,{image_base64}" - url_param = MessageImageUrlParam(url=img_url, detail="high") - content_blocks: List[MessageInputContentBlock] = [ - MessageInputTextBlock(text=input_message), - MessageInputImageUrlBlock(image_url=url_param), - ] - message = await agents_client.messages.create(thread_id=thread.id, role="user", content=content_blocks) - print(f"Created message, message ID: {message.id}") - - run = await agents_client.runs.create(thread_id=thread.id, agent_id=agent.id) - - # Poll the run as long as run status is queued or in progress - while run.status in ["queued", "in_progress", "requires_action"]: - # Wait for a second - time.sleep(1) - run = await agents_client.runs.get(thread_id=thread.id, run_id=run.id) - print(f"Run status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list( - thread_id=thread.id, - order=ListSortOrder.ASCENDING, - ) - - async for msg in messages: - last_part = 
msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_image_input_file_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_image_input_file_async.py deleted file mode 100644 index 081a0ae3f111..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_image_input_file_async.py +++ /dev/null @@ -1,97 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use basic agent operations using image file input for the - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_image_input_file.py - - Before running the sample: - - pip install azure-ai-projects azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. 
-""" -import asyncio -import os, time -from typing import List -from azure.ai.agents.aio import AgentsClient -from azure.identity.aio import DefaultAzureCredential -from azure.ai.agents.models import ( - ListSortOrder, - MessageTextContent, - MessageInputContentBlock, - MessageImageFileParam, - MessageInputTextBlock, - MessageInputImageFileBlock, - FilePurpose, -) - -asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/image_file.png")) - - -async def main(): - async with DefaultAzureCredential() as creds: - async with AgentsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as agents_client: - - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - image_file = await agents_client.files.upload_and_poll( - file_path=asset_file_path, purpose=FilePurpose.AGENTS - ) - print(f"Uploaded file, file ID: {image_file.id}") - - input_message = "Hello, what is in the image ?" 
- file_param = MessageImageFileParam(file_id=image_file.id, detail="high") - content_blocks: List[MessageInputContentBlock] = [ - MessageInputTextBlock(text=input_message), - MessageInputImageFileBlock(image_file=file_param), - ] - message = await agents_client.messages.create(thread_id=thread.id, role="user", content=content_blocks) - print(f"Created message, message ID: {message.id}") - - run = await agents_client.runs.create(thread_id=thread.id, agent_id=agent.id) - - # Poll the run as long as run status is queued or in progress - while run.status in ["queued", "in_progress", "requires_action"]: - # Wait for a second - time.sleep(1) - run = await agents_client.runs.get(thread_id=thread.id, run_id=run.id) - print(f"Run status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list( - thread_id=thread.id, - order=ListSortOrder.ASCENDING, - ) - - async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_image_input_url_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_image_input_url_async.py deleted file mode 100644 index 7efed0084ade..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_image_input_url_async.py +++ /dev/null @@ -1,91 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use basic agent operations using image url input for the - the Azure Agents service using a synchronous client. 
- -USAGE: - python sample_agents_image_input_url.py - - Before running the sample: - - pip install azure-ai-projects azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" -import asyncio -import os, time -from typing import List -from azure.ai.agents.aio import AgentsClient -from azure.identity.aio import DefaultAzureCredential -from azure.ai.agents.models import ( - ListSortOrder, - MessageTextContent, - MessageInputContentBlock, - MessageImageUrlParam, - MessageInputTextBlock, - MessageInputImageUrlBlock, -) - - -async def main(): - async with DefaultAzureCredential() as creds: - async with AgentsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as agents_client: - - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" - input_message = "Hello, what is in the image ?" 
- url_param = MessageImageUrlParam(url=image_url, detail="high") - content_blocks: List[MessageInputContentBlock] = [ - MessageInputTextBlock(text=input_message), - MessageInputImageUrlBlock(image_url=url_param), - ] - message = await agents_client.messages.create(thread_id=thread.id, role="user", content=content_blocks) - print(f"Created message, message ID: {message.id}") - - run = await agents_client.runs.create(thread_id=thread.id, agent_id=agent.id) - - # Poll the run as long as run status is queued or in progress - while run.status in ["queued", "in_progress", "requires_action"]: - # Wait for a second - time.sleep(1) - run = await agents_client.runs.get(thread_id=thread.id, run_id=run.id) - print(f"Run status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list( - thread_id=thread.id, - order=ListSortOrder.ASCENDING, - ) - - async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_json_schema_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_json_schema_async.py deleted file mode 100644 index 81ecc816d5e9..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_json_schema_async.py +++ /dev/null @@ -1,106 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agents with JSON schema output format. 
- -USAGE: - python sample_agents_json_schema_async.py - - Before running the sample: - - pip install azure-ai-agents azure-identity pydantic - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" - -import asyncio -import os - -from enum import Enum -from pydantic import BaseModel, TypeAdapter -from azure.ai.agents.aio import AgentsClient -from azure.identity.aio import DefaultAzureCredential -from azure.ai.agents.models import ( - ListSortOrder, - MessageTextContent, - MessageRole, - ResponseFormatJsonSchema, - ResponseFormatJsonSchemaType, - RunStatus, -) - - -# Create the pydantic model to represent the planet names and there masses. -class Planets(str, Enum): - Earth = "Earth" - Mars = "Mars" - Jupyter = "Jupyter" - - -class Planet(BaseModel): - planet: Planets - mass: float - - -async def main(): - async with DefaultAzureCredential() as creds: - async with AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=creds, - ) as agents_client: - - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="Extract the information about planets.", - response_format=ResponseFormatJsonSchemaType( - json_schema=ResponseFormatJsonSchema( - name="planet_mass", - description="Extract planet mass.", - schema=Planet.model_json_schema(), - ) - ), - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = await agents_client.messages.create( - thread_id=thread.id, - role="user", - content=("The mass of the Mars is 6.4171E23 kg; the mass of the Earth is 5.972168E24 kg;"), - ) - print(f"Created message, 
message ID: {message.id}") - - run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - - if run.status != RunStatus.COMPLETED: - print(f"The run did not succeed: {run.status=}.") - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list( - thread_id=thread.id, - order=ListSortOrder.ASCENDING, - ) - - async for msg in messages: - if msg.role == MessageRole.AGENT: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - planet = TypeAdapter(Planet).validate_json(last_part.text.value) - print(f"The mass of {planet.planet} is {planet.mass} kg.") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_run_with_toolset_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_run_with_toolset_async.py deleted file mode 100644 index 4985e790668a..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_run_with_toolset_async.py +++ /dev/null @@ -1,88 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with toolset from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_run_with_toolset_async.py - - Before running the sample: - - pip install azure-ai-agents azure-identity aiohttp - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. 
-""" - -import os, asyncio -from azure.ai.agents.aio import AgentsClient -from azure.identity.aio import DefaultAzureCredential -from azure.ai.agents.models import AsyncFunctionTool, AsyncToolSet, ListSortOrder, MessageTextContent -from utils.user_async_functions import user_async_functions - - -async def main() -> None: - - async with DefaultAzureCredential() as creds: - async with AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=creds, - ) as agents_client: - - # Initialize agent toolset with user functions and code interpreter - # [START create_agent_with_async_function_tool] - functions = AsyncFunctionTool(user_async_functions) - - toolset = AsyncToolSet() - toolset.add(functions) - agents_client.enable_auto_function_calls(toolset) - - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - toolset=toolset, - ) - # [END create_agent_with_async_function_tool] - print(f"Created agent, ID: {agent.id}") - - # Create thread for communication - thread = await agents_client.threads.create() - print(f"Created thread, ID: {thread.id}") - - # Create message to thread - message = await agents_client.messages.create( - thread_id=thread.id, - role="user", - content="Hello, send an email with the datetime and weather information in New York?", - ) - print(f"Created message, ID: {message.id}") - - # Create and process agent run in thread with tools - run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - # Delete the agent when done - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - # Fetch and log all messages - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - async for msg in messages: - last_part = msg.content[-1] - if 
isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_eventhandler_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_eventhandler_async.py deleted file mode 100644 index db5539cb641a..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_eventhandler_async.py +++ /dev/null @@ -1,105 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with an event handler in streaming from - the Azure Agents service using a asynchronous client. - -USAGE: - python sample_agents_stream_eventhandler_async.py - - Before running the sample: - - pip install azure-ai-agents azure-identity aiohttp - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. -""" -import asyncio -from typing import Any, Optional - -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import ( - ListSortOrder, - MessageTextContent, - MessageDeltaChunk, - RunStep, - ThreadMessage, - ThreadRun, -) -from azure.ai.agents.models import AsyncAgentEventHandler -from azure.identity.aio import DefaultAzureCredential - -import os - - -class MyEventHandler(AsyncAgentEventHandler[str]): - - async def on_message_delta(self, delta: "MessageDeltaChunk") -> Optional[str]: - return f"Text delta received: {delta.text}" - - async def on_thread_message(self, message: "ThreadMessage") -> Optional[str]: - return f"ThreadMessage created. 
ID: {message.id}, Status: {message.status}" - - async def on_thread_run(self, run: "ThreadRun") -> Optional[str]: - return f"ThreadRun status: {run.status}" - - async def on_run_step(self, step: "RunStep") -> Optional[str]: - return f"RunStep type: {step.type}, Status: {step.status}" - - async def on_error(self, data: str) -> Optional[str]: - return f"An error occurred. Data: {data}" - - async def on_done(self) -> Optional[str]: - return "Stream completed." - - async def on_unhandled_event(self, event_type: str, event_data: Any) -> Optional[str]: - return f"Unhandled Event Type: {event_type}, Data: {event_data}" - - -async def main() -> None: - - async with DefaultAzureCredential() as creds: - async with AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=creds, - ) as agents_client: - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are helpful agent" - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID {thread.id}") - - message = await agents_client.messages.create( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) - print(f"Created message, message ID {message.id}") - - async with await agents_client.runs.stream( - thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler() - ) as stream: - async for event_type, event_data, func_return in stream: - print(f"Received data.") - print(f"Streaming receive Event Type: {event_type}") - print(f"Event Data: {str(event_data)[:100]}...") - print(f"Event Function return: {func_return}\n") - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if 
__name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_eventhandler_with_functions_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_eventhandler_with_functions_async.py deleted file mode 100644 index d81f5ad91543..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_eventhandler_with_functions_async.py +++ /dev/null @@ -1,148 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with an event handler and toolset from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_stream_eventhandler_with_functions_async.py - - Before running the sample: - - pip install azure-ai-agents azure-identity aiohttp - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. 
-""" -import asyncio -from typing import Any - -import os -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import ( - AsyncAgentEventHandler, - AsyncFunctionTool, - ListSortOrder, - MessageTextContent, - MessageDeltaChunk, - RequiredFunctionToolCall, - RunStep, - SubmitToolOutputsAction, - ThreadMessage, - ThreadRun, - ToolOutput, -) -from azure.identity.aio import DefaultAzureCredential -from utils.user_async_functions import user_async_functions - - -class MyEventHandler(AsyncAgentEventHandler[str]): - - def __init__(self, functions: AsyncFunctionTool, agents_client: AgentsClient) -> None: - super().__init__() - self.functions = functions - self.agents_client = agents_client - - async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - print(f"Text delta received: {delta.text}") - - async def on_thread_message(self, message: "ThreadMessage") -> None: - print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") - - async def on_thread_run(self, run: "ThreadRun") -> None: - print(f"ThreadRun status: {run.status}") - - if run.status == "failed": - print(f"Run failed. 
Error: {run.last_error}") - - if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): - tool_calls = run.required_action.submit_tool_outputs.tool_calls - - tool_outputs = [] - for tool_call in tool_calls: - if isinstance(tool_call, RequiredFunctionToolCall): - try: - output = await self.functions.execute(tool_call) - tool_outputs.append( - ToolOutput( - tool_call_id=tool_call.id, - output=output, - ) - ) - except Exception as e: - print(f"Error executing tool_call {tool_call.id}: {e}") - - print(f"Tool outputs: {tool_outputs}") - if tool_outputs: - await self.agents_client.runs.submit_tool_outputs_stream( - thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=self - ) - - async def on_run_step(self, step: "RunStep") -> None: - print(f"RunStep type: {step.type}, Status: {step.status}") - - async def on_error(self, data: str) -> None: - print(f"An error occurred. Data: {data}") - - async def on_done(self) -> None: - print("Stream completed.") - - async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - -async def main() -> None: - async with DefaultAzureCredential() as creds: - async with AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=creds, - ) as agents_client: - - # [START create_agent_with_function_tool] - functions = AsyncFunctionTool(functions=user_async_functions) - - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - tools=functions.definitions, - ) - # [END create_agent_with_function_tool] - print(f"Created agent, ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID {thread.id}") - - message = await agents_client.messages.create( - thread_id=thread.id, - role="user", - content="Hello, send an email with the datetime and weather 
information in New York? Also let me know the details.", - ) - print(f"Created message, message ID {message.id}") - - async with await agents_client.runs.stream( - thread_id=thread.id, - agent_id=agent.id, - event_handler=MyEventHandler(functions, agents_client), - ) as stream: - await stream.until_done() - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_eventhandler_with_toolset_async.py deleted file mode 100644 index 21ad7c3c1b82..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_eventhandler_with_toolset_async.py +++ /dev/null @@ -1,117 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with an event handler and toolset from - the Azure Agents service using a asynchronous client. - -USAGE: - python sample_agents_stream_eventhandler_with_toolset_async.py - - Before running the sample: - - pip install azure-ai-agents azure-identity aiohttp - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. 
-""" -import asyncio -from typing import Any - -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import MessageDeltaChunk, RunStep, ThreadMessage, ThreadRun -from azure.ai.agents.models import ( - AsyncAgentEventHandler, - AsyncFunctionTool, - AsyncToolSet, - ListSortOrder, - MessageTextContent, -) -from azure.identity.aio import DefaultAzureCredential - -import os - -from utils.user_async_functions import user_async_functions - - -class MyEventHandler(AsyncAgentEventHandler): - - async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - print(f"Text delta received: {delta.text}") - - async def on_thread_message(self, message: "ThreadMessage") -> None: - print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") - - async def on_thread_run(self, run: "ThreadRun") -> None: - print(f"ThreadRun status: {run.status}") - - if run.status == "failed": - print(f"Run failed. Error: {run.last_error}") - - async def on_run_step(self, step: "RunStep") -> None: - print(f"RunStep type: {step.type}, Status: {step.status}") - - async def on_error(self, data: str) -> None: - print(f"An error occurred. 
Data: {data}") - - async def on_done(self) -> None: - print("Stream completed.") - - async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - -async def main() -> None: - async with DefaultAzureCredential() as creds: - async with AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=creds, - ) as agents_client: - - # Initialize toolset with user functions - functions = AsyncFunctionTool(user_async_functions) - toolset = AsyncToolSet() - toolset.add(functions) - - agents_client.enable_auto_function_calls(user_async_functions) - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - toolset=toolset, - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID {thread.id}") - - message = await agents_client.messages.create( - thread_id=thread.id, - role="user", - content="Hello, send an email with the datetime and weather information in New York? 
Also let me know the details", - ) - print(f"Created message, message ID {message.id}") - - async with await agents_client.runs.stream( - thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler() - ) as stream: - await stream.until_done() - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_iteration_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_iteration_async.py deleted file mode 100644 index f3ce35dd591d..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_iteration_async.py +++ /dev/null @@ -1,94 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with interation in streaming from - the Azure Agents service using a asynchronous client. - -USAGE: - python sample_agents_stream_iteration_async.py - - Before running the sample: - - pip install azure-ai-agents azure-identity aiohttp - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. 
-""" -import asyncio - -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import AgentStreamEvent -from azure.ai.agents.models import ( - MessageDeltaChunk, - RunStep, - ThreadMessage, - ThreadRun, - ListSortOrder, - MessageTextContent, -) -from azure.identity.aio import DefaultAzureCredential - -import os - - -async def main() -> None: - async with DefaultAzureCredential() as creds: - async with AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=creds, - ) as agents_client: - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are helpful agent" - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID {thread.id}") - - message = await agents_client.messages.create( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) - print(f"Created message, message ID {message.id}") - - async with await agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id) as stream: - async for event_type, event_data, _ in stream: - - if isinstance(event_data, MessageDeltaChunk): - print(f"Text delta received: {event_data.text}") - - elif isinstance(event_data, ThreadMessage): - print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") - - elif isinstance(event_data, ThreadRun): - print(f"ThreadRun status: {event_data.status}") - elif isinstance(event_data, RunStep): - print(f"RunStep type: {event_data.type}, Status: {event_data.status}") - - elif event_type == AgentStreamEvent.ERROR: - print(f"An error occurred. 
Data: {event_data}") - - elif event_type == AgentStreamEvent.DONE: - print("Stream completed.") - break - - else: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_with_base_override_eventhandler_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_with_base_override_eventhandler_async.py deleted file mode 100644 index 751bfbbb5806..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_stream_with_base_override_eventhandler_async.py +++ /dev/null @@ -1,115 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to override the base event handler, parse the events, and iterate through them. - In your use case, you might not want to write the iteration code similar to sample_agents_stream_iteration_async.py. - If you have multiple places to call stream, you might find the iteration code cumbersome. - This example shows how to override the base event handler, parse the events, and iterate through them, which can be - reused in multiple stream calls to help keep the code clean. 
- -USAGE: - python sample_agents_stream_with_base_override_eventhandler_async.py - - Before running the sample: - - pip install azure-ai-agents azure-identity aiohttp - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. -""" -import asyncio -import json -from typing import AsyncGenerator, Optional - -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models._models import ( - MessageDeltaChunk, - MessageDeltaTextContent, -) -from azure.ai.agents.models import AgentStreamEvent, BaseAsyncAgentEventHandler, ListSortOrder, MessageTextContent -from azure.identity.aio import DefaultAzureCredential - -import os - - -# Our goal is to parse the event data in a string and return the chunk in text for each iteration. -# Because we want the iteration to be a string, we define str as the generic type for BaseAsyncAgentEventHandler -# and override the _process_event method to return a string. -# The get_stream_chunks method is defined to return the chunks as strings because the iteration is a string. 
-class MyEventHandler(BaseAsyncAgentEventHandler[Optional[str]]): - - async def _process_event(self, event_data_str: str) -> Optional[str]: - - event_lines = event_data_str.strip().split("\n") - event_type: Optional[str] = None - event_data = "" - for line in event_lines: - if line.startswith("event:"): - event_type = line.split(":", 1)[1].strip() - elif line.startswith("data:"): - event_data = line.split(":", 1)[1].strip() - - if not event_type: - raise ValueError("Event type not specified in the event data.") - - if event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA.value: - - event_obj: MessageDeltaChunk = MessageDeltaChunk(**json.loads(event_data)) - - for content_part in event_obj.delta.content: - if isinstance(content_part, MessageDeltaTextContent): - if content_part.text is not None: - return content_part.text.value - return None - - async def get_stream_chunks(self) -> AsyncGenerator[str, None]: - async for chunk in self: - if chunk: - yield chunk - - -async def main() -> None: - - async with DefaultAzureCredential() as creds: - async with AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=creds, - ) as agents_client: - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are helpful agent" - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID {thread.id}") - - message = await agents_client.messages.create( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) - print(f"Created message, message ID {message.id}") - - async with await agents_client.runs.stream( - thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler() - ) as stream: - async for chunk in stream.get_stream_chunks(): - print(chunk) - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, 
order=ListSortOrder.ASCENDING) - async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_batch_enterprise_file_search_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_batch_enterprise_file_search_async.py deleted file mode 100644 index a6d8a4b7e444..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_batch_enterprise_file_search_async.py +++ /dev/null @@ -1,122 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -""" -DESCRIPTION: - This sample demonstrates how to use agent operations to add files to an existing vector store and perform search from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_vector_store_batch_enterprise_file_search_async.py - - Before running the sample: - - pip install azure-ai-agents azure-identity azure-ai-ml aiohttp - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. 
-""" -import asyncio -import os - -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import ( - FileSearchTool, - ListSortOrder, - MessageTextContent, - VectorStoreDataSource, - VectorStoreDataSourceAssetType, -) -from azure.identity.aio import DefaultAzureCredential - - -async def main(): - - async with DefaultAzureCredential() as creds: - async with AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=creds, - ) as agents_client: - # We will upload the local file to Azure and will use it for vector store creation. - asset_uri = os.environ["AZURE_BLOB_URI"] - ds = VectorStoreDataSource( - asset_identifier=asset_uri, - asset_type=VectorStoreDataSourceAssetType.URI_ASSET, - ) - vector_store = await agents_client.vector_stores.create_and_poll(file_ids=[], name="sample_vector_store") - print(f"Created vector store, vector store ID: {vector_store.id}") - - # Add the file to the vector store or you can supply file ids in the vector store creation - vector_store_file_batch = await agents_client.vector_store_file_batches.create_and_poll( - vector_store_id=vector_store.id, data_sources=[ds] - ) - print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") - - # Create a file search tool - file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) - - # Notices that FileSearchTool as tool and tool_resources must be added or the agent unable to search the file - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=file_search_tool.definitions, - tool_resources=file_search_tool.resources, - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = await agents_client.messages.create( - thread_id=thread.id, - role="user", - content="What feature does Smart Eyewear offer?", - ) - 
print(f"Created message, message ID: {message.id}") - - run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Created run, run ID: {run.id}") - - file_search_tool.remove_vector_store(vector_store.id) - print(f"Removed vector store from file search, vector store ID: {vector_store.id}") - - await agents_client.update_agent( - agent_id=agent.id, - tools=file_search_tool.definitions, - tool_resources=file_search_tool.resources, - ) - print(f"Updated agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = await agents_client.messages.create( - thread_id=thread.id, - role="user", - content="What feature does Smart Eyewear offer?", - ) - print(f"Created message, message ID: {message.id}") - - run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Created run, run ID: {run.id}") - - await agents_client.vector_stores.delete(vector_store.id) - print("Deleted vector store") - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_batch_file_search_async.py deleted file mode 100644 index 753d25ca56c9..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_batch_file_search_async.py +++ /dev/null @@ -1,114 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. 
-# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations to add files to an existing vector store and perform search from - the Azure Agents service using a asynchronous client. - -USAGE: - python sample_agents_vector_store_batch_file_search_async.py - - Before running the sample: - - pip install azure-ai-agents azure-identity aiohttp - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. -""" - -import asyncio -import os -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import FileSearchTool, FilePurpose, ListSortOrder, MessageTextContent -from azure.identity.aio import DefaultAzureCredential - -asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) - - -async def main() -> None: - async with DefaultAzureCredential() as creds: - async with AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=creds, - ) as agents_client: - # Upload a file and wait for it to be processed - file = await agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) - print(f"Uploaded file, file ID: {file.id}") - - # Create a vector store with no file and wait for it to be processed - vector_store = await agents_client.vector_stores.create_and_poll(file_ids=[], name="sample_vector_store") - print(f"Created vector store, vector store ID: {vector_store.id}") - - # Add the file to the vector store or you can supply file ids in the vector store creation - vector_store_file_batch = await agents_client.vector_store_file_batches.create_and_poll( - vector_store_id=vector_store.id, file_ids=[file.id] - ) - print(f"Created vector store file batch, vector store file batch ID: 
{vector_store_file_batch.id}") - - # Create a file search tool - file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) - - # Notices that FileSearchTool as tool and tool_resources must be added or the agent unable to search the file - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=file_search_tool.definitions, - tool_resources=file_search_tool.resources, - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = await agents_client.messages.create( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" - ) - print(f"Created message, message ID: {message.id}") - - run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Created run, run ID: {run.id}") - - file_search_tool.remove_vector_store(vector_store.id) - print(f"Removed vector store from file search, vector store ID: {vector_store.id}") - - await agents_client.update_agent( - agent_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources - ) - print(f"Updated agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = await agents_client.messages.create( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
- ) - print(f"Created message, message ID: {message.id}") - - run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Created run, run ID: {run.id}") - - await agents_client.files.delete(file.id) - print("Deleted file") - - await agents_client.vector_stores.delete(vector_store.id) - print("Deleted vector store") - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_enterprise_file_search_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_enterprise_file_search_async.py deleted file mode 100644 index 54c595c6d5e6..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_enterprise_file_search_async.py +++ /dev/null @@ -1,88 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -""" -DESCRIPTION: - This sample demonstrates how to add files to agent during the vector store creation. - -USAGE: - python sample_agents_vector_store_enterprise_file_search_async.py - - Before running the sample: - - pip install azure-ai-agents azure-identity azure-ai-ml aiohttp - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. 
-""" -import asyncio -import os - -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import ( - FileSearchTool, - VectorStoreDataSource, - VectorStoreDataSourceAssetType, - ListSortOrder, - MessageTextContent, -) -from azure.identity.aio import DefaultAzureCredential - - -async def main(): - async with DefaultAzureCredential() as credential: - async with AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=credential, - ) as agents_client: - # We will upload the local file to Azure and will use it for vector store creation. - asset_uri = os.environ["AZURE_BLOB_URI"] - ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) - vector_store = await agents_client.vector_stores.create_and_poll( - data_sources=[ds], name="sample_vector_store" - ) - print(f"Created vector store, vector store ID: {vector_store.id}") - - # Create a file search tool - file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) - - # Notices that FileSearchTool as tool and tool_resources must be added or the agent unable to search the file - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=file_search_tool.definitions, - tool_resources=file_search_tool.resources, - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = await agents_client.messages.create( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
- ) - print(f"Created message, message ID: {message.id}") - - run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Created run, run ID: {run.id}") - - await agents_client.vector_stores.delete(vector_store.id) - print("Deleted vector store") - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_file_search_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_file_search_async.py deleted file mode 100644 index ab2efdebbd03..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_vector_store_file_search_async.py +++ /dev/null @@ -1,86 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -""" -DESCRIPTION: - This sample demonstrates how to add files to agent during the vector store creation. - -USAGE: - python sample_agents_vector_store_file_search_async.py - - Before running the sample: - - pip install azure-ai-agents azure-identity aiohttp - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. 
-""" -import asyncio -import os - -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import FileSearchTool, FilePurpose, MessageTextContent, ListSortOrder -from azure.identity.aio import DefaultAzureCredential - -asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) - - -async def main(): - async with DefaultAzureCredential() as credential: - async with AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=credential, - ) as agents_client: - # Upload a file and wait for it to be processed - file = await agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) - print(f"Uploaded file, file ID: {file.id}") - - # Create a vector store with no file and wait for it to be processed - vector_store = await agents_client.vector_stores.create_and_poll( - file_ids=[file.id], name="sample_vector_store" - ) - print(f"Created vector store, vector store ID: {vector_store.id}") - - # Create a file search tool - file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) - - # Notices that FileSearchTool as tool and tool_resources must be added or the agent unable to search the file - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=file_search_tool.definitions, - tool_resources=file_search_tool.resources, - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = await agents_client.messages.create( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
- ) - print(f"Created message, message ID: {message.id}") - - run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Created run, run ID: {run.id}") - - await agents_client.vector_stores.delete(vector_store.id) - print("Deleted vector store") - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_with_file_search_attachment_async.py deleted file mode 100644 index ad3dcb664504..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_with_file_search_attachment_async.py +++ /dev/null @@ -1,88 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations to create messages with file search attachments from - the Azure Agents service using a asynchronous client. - -USAGE: - python sample_agents_with_file_search_attachment_async.py - - Before running the sample: - - pip install azure-ai-agents azure-identity aiohttp - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model. 
-""" -import asyncio - -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import FilePurpose -from azure.ai.agents.models import FileSearchTool, MessageAttachment, ListSortOrder, MessageTextContent -from azure.identity.aio import DefaultAzureCredential - -import os - -asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) - - -async def main() -> None: - async with DefaultAzureCredential() as creds: - async with AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=creds, - ) as agents_client: - # Upload a file and wait for it to be processed - file = await agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) - - # Create agent - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - # Create a message with the file search attachment - # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days. 
- attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions) - message = await agents_client.messages.create( - thread_id=thread.id, - role="user", - content="What feature does Smart Eyewear offer?", - attachments=[attachment], - ) - print(f"Created message, message ID: {message.id}") - - run = await agents_client.runs.create_and_process( - thread_id=thread.id, agent_id=agent.id, polling_interval=4 - ) - print(f"Created run, run ID: {run.id}") - - print(f"Run completed with status: {run.status}") - - await agents_client.files.delete(file.id) - print("Deleted file") - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/utils/__init__.py b/sdk/ai/azure-ai-agents/samples/agents_async/utils/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/sdk/ai/azure-ai-agents/samples/agents_async/utils/user_async_functions.py b/sdk/ai/azure-ai-agents/samples/agents_async/utils/user_async_functions.py deleted file mode 100644 index 202efe8b5df9..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_async/utils/user_async_functions.py +++ /dev/null @@ -1,67 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------ - -import asyncio -import os -import sys -import json -import datetime -from typing import Any, Callable, Set, Optional -from azure.ai.agents.telemetry import trace_function - - -# Add parent directory to sys.path to import user_functions -current_dir = os.path.dirname(os.path.abspath(__file__)) -parent_dir = os.path.abspath(os.path.join(current_dir, "..", "..", "..")) -if parent_dir not in sys.path: - sys.path.insert(0, parent_dir) -from samples.utils.user_functions import fetch_current_datetime, fetch_weather, send_email - - -async def send_email_async(recipient: str, subject: str, body: str) -> str: - """ - Sends an email with the specified subject and body to the recipient. - - :param recipient (str): Email address of the recipient. - :param subject (str): Subject of the email. - :param body (str): Body content of the email. - :return: Confirmation message. - :rtype: str - """ - await asyncio.sleep(1) - return send_email(recipient, subject, body) - - -# The trace_func decorator will trace the function call and enable adding additional attributes -# to the span in the function implementation. Note that this will trace the function parameters and their values. -@trace_function() -async def fetch_current_datetime_async(format: Optional[str] = None) -> str: - """ - Get the current time as a JSON string, optionally formatted. - - :param format (Optional[str]): The format in which to return the current time. Defaults to None, which uses a standard format. - :return: The current time in JSON format. 
- :rtype: str - """ - await asyncio.sleep(1) - current_time = datetime.datetime.now() - - # Use the provided format if available, else use a default format - if format: - time_format = format - else: - time_format = "%Y-%m-%d %H:%M:%S" - - time_json = json.dumps({"current_time": current_time.strftime(time_format)}) - return time_json - - -# Statically defined user functions for fast reference with send_email as async but the rest as sync -user_async_functions: Set[Callable[..., Any]] = { - fetch_current_datetime_async, - fetch_weather, - send_email_async, -} diff --git a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_image_input_base64.py b/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_image_input_base64.py deleted file mode 100644 index a3d573f7579f..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_image_input_base64.py +++ /dev/null @@ -1,112 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use basic agent operations using image file input for the - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_image_input_base64.py - - Before running the sample: - - pip install azure-ai-projects azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. 
-""" - -import os, time, base64 -from typing import List -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import ( - MessageTextContent, - MessageInputContentBlock, - MessageImageUrlParam, - MessageInputTextBlock, - MessageInputImageUrlBlock, -) - -asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/image_file.png")) - - -def image_to_base64(image_path: str) -> str: - """ - Convert an image file to a Base64-encoded string. - - :param image_path: The path to the image file (e.g. 'image_file.png') - :return: A Base64-encoded string representing the image. - :raises FileNotFoundError: If the provided file path does not exist. - :raises OSError: If there's an error reading the file. - """ - if not os.path.isfile(image_path): - raise FileNotFoundError(f"File not found at: {image_path}") - - try: - with open(image_path, "rb") as image_file: - file_data = image_file.read() - return base64.b64encode(file_data).decode("utf-8") - except Exception as exc: - raise OSError(f"Error reading file '{image_path}'") from exc - - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - input_message = "Hello, what is in the image ?" 
- image_base64 = image_to_base64(asset_file_path) - img_url = f"data:image/png;base64,{image_base64}" - url_param = MessageImageUrlParam(url=img_url, detail="high") - content_blocks: List[MessageInputContentBlock] = [ - MessageInputTextBlock(text=input_message), - MessageInputImageUrlBlock(image_url=url_param), - ] - message = agents_client.messages.create(thread_id=thread.id, role="user", content=content_blocks) - print(f"Created message, message ID: {message.id}") - - run = agents_client.runs.create(thread_id=thread.id, agent_id=agent.id) - - # Poll the run as long as run status is queued or in progress - while run.status in ["queued", "in_progress", "requires_action"]: - # Wait for a second - time.sleep(1) - run = agents_client.runs.get(thread_id=thread.id, run_id=run.id) - print(f"Run status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id) - - # The messages are following in the reverse order, - # we will iterate them and output only text contents. - for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_image_input_file.py b/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_image_input_file.py deleted file mode 100644 index f3d4f9e9a13d..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_image_input_file.py +++ /dev/null @@ -1,92 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use basic agent operations using image file input for the - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_image_input_file.py - - Before running the sample: - - pip install azure-ai-projects azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" - -import os, time -from typing import List -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import ( - ListSortOrder, - MessageTextContent, - MessageInputContentBlock, - MessageImageFileParam, - MessageInputTextBlock, - MessageInputImageFileBlock, - FilePurpose, -) - -asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/image_file.png")) - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - image_file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) - print(f"Uploaded file, file ID: {image_file.id}") - - input_message = "Hello, what is in the image ?" 
- file_param = MessageImageFileParam(file_id=image_file.id, detail="high") - content_blocks: List[MessageInputContentBlock] = [ - MessageInputTextBlock(text=input_message), - MessageInputImageFileBlock(image_file=file_param), - ] - message = agents_client.messages.create(thread_id=thread.id, role="user", content=content_blocks) - print(f"Created message, message ID: {message.id}") - - run = agents_client.runs.create(thread_id=thread.id, agent_id=agent.id) - - # Poll the run as long as run status is queued or in progress - while run.status in ["queued", "in_progress", "requires_action"]: - # Wait for a second - time.sleep(1) - run = agents_client.runs.get(thread_id=thread.id, run_id=run.id) - print(f"Run status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - - # The messages are following in the reverse order, - # we will iterate them and output only text contents. - for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_image_input_url.py b/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_image_input_url.py deleted file mode 100644 index 7eb85b05af64..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_image_input_url.py +++ /dev/null @@ -1,90 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use basic agent operations using image url input for the - the Azure Agents service using a synchronous client. 
- -USAGE: - python sample_agents_image_input_url.py - - Before running the sample: - - pip install azure-ai-projects azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" - -import os, time -from typing import List -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import ( - MessageTextContent, - MessageInputContentBlock, - MessageImageUrlParam, - MessageInputTextBlock, - MessageInputImageUrlBlock, -) - - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" - input_message = "Hello, what is in the image ?" 
- url_param = MessageImageUrlParam(url=image_url, detail="high") - content_blocks: List[MessageInputContentBlock] = [ - MessageInputTextBlock(text=input_message), - MessageInputImageUrlBlock(image_url=url_param), - ] - message = agents_client.messages.create(thread_id=thread.id, role="user", content=content_blocks) - print(f"Created message, message ID: {message.id}") - - run = agents_client.runs.create(thread_id=thread.id, agent_id=agent.id) - - # Poll the run as long as run status is queued or in progress - while run.status in ["queued", "in_progress", "requires_action"]: - # Wait for a second - time.sleep(1) - run = agents_client.runs.get(thread_id=thread.id, run_id=run.id) - print(f"Run status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id) - - # The messages are following in the reverse order, - # we will iterate them and output only text contents. - for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_json_schema.py b/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_json_schema.py deleted file mode 100644 index ed239002b2c4..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_json_schema.py +++ /dev/null @@ -1,101 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agents with JSON schema output format. 
- -USAGE: - python sample_agents_json_schema.py - - Before running the sample: - - pip install azure-ai-agents azure-identity pydantic - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" - -import os - -from enum import Enum -from pydantic import BaseModel, TypeAdapter -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import ( - MessageTextContent, - MessageRole, - ResponseFormatJsonSchema, - ResponseFormatJsonSchemaType, - RunStatus, -) - -# [START create_agents_client] -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) -# [END create_agents_client] - - -# Create the pydantic model to represent the planet names and there masses. 
-class Planets(str, Enum): - Earth = "Earth" - Mars = "Mars" - Jupyter = "Jupyter" - - -class Planet(BaseModel): - planet: Planets - mass: float - - -with agents_client: - - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="Extract the information about planets.", - response_format=ResponseFormatJsonSchemaType( - json_schema=ResponseFormatJsonSchema( - name="planet_mass", - description="Extract planet mass.", - schema=Planet.model_json_schema(), - ) - ), - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content=("The mass of the Mars is 6.4171E23 kg; the mass of the Earth is 5.972168E24 kg;"), - ) - print(f"Created message, message ID: {message.id}") - - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - - if run.status != RunStatus.COMPLETED: - print(f"The run did not succeed: {run.status=}.") - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id) - - # The messages are following in the reverse order, - # we will iterate them and output only text contents. 
- for msg in messages: - if msg.role == MessageRole.AGENT: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - planet = TypeAdapter(Planet).validate_json(last_part.text.value) - print(f"The mass of {planet.planet} is {planet.mass} kg.") diff --git a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_vector_store_batch_enterprise_file_search.py b/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_vector_store_batch_enterprise_file_search.py deleted file mode 100644 index ad11c7553570..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_vector_store_batch_enterprise_file_search.py +++ /dev/null @@ -1,105 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -""" -DESCRIPTION: - This sample demonstrates how to create the vector store with the list of files. - -USAGE: - python sample_agents_vector_store_batch_enterprise_file_search.py - - Before running the sample: - - pip install azure-ai-agents azure-identity azure-ai-ml - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" - -import os -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType, ListSortOrder -from azure.identity import DefaultAzureCredential - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - - # We will upload the local file to Azure and will use it for vector store creation. 
- asset_uri = os.environ["AZURE_BLOB_URI"] - - # [START attach_files_to_store] - # Create a vector store with no file and wait for it to be processed - vector_store = agents_client.vector_stores.create_and_poll(data_sources=[], name="sample_vector_store") - print(f"Created vector store, vector store ID: {vector_store.id}") - - ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) - # Add the file to the vector store or you can supply data sources in the vector store creation - vector_store_file_batch = agents_client.vector_store_file_batches.create_and_poll( - vector_store_id=vector_store.id, data_sources=[ds] - ) - print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") - - # Create a file search tool - file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) - # [END attach_files_to_store] - - # Notices that FileSearchTool as tool and tool_resources must be added or the agent unable to search the file - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=file_search_tool.definitions, - tool_resources=file_search_tool.resources, - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = agents_client.messages.create( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
- ) - print(f"Created message, message ID: {message.id}") - - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Created run, run ID: {run.id}") - - file_search_tool.remove_vector_store(vector_store.id) - print(f"Removed vector store from file search, vector store ID: {vector_store.id}") - - agents_client.update_agent( - agent_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources - ) - print(f"Updated agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = agents_client.messages.create( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" - ) - print(f"Created message, message ID: {message.id}") - - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Created run, run ID: {run.id}") - - agents_client.vector_stores.delete(vector_store.id) - print("Deleted vector store") - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_vector_store_batch_file_search.py deleted file mode 100644 index 26b4ae31e71b..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_vector_store_batch_file_search.py +++ /dev/null @@ -1,109 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------ -""" -DESCRIPTION: - This sample demonstrates how to use agent operations to add files to an existing vector store and perform search from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_vector_store_batch_file_search.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" - -import os -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import FileSearchTool, FilePurpose, ListSortOrder -from azure.identity import DefaultAzureCredential - -asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - - # Upload a file and wait for it to be processed - file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) - print(f"Uploaded file, file ID: {file.id}") - - # Create a vector store with no file and wait for it to be processed - vector_store = agents_client.vector_stores.create_and_poll(data_sources=[], name="sample_vector_store") - print(f"Created vector store, vector store ID: {vector_store.id}") - - # Add the file to the vector store or you can supply file ids in the vector store creation - vector_store_file_batch = agents_client.vector_store_file_batches.create_and_poll( - vector_store_id=vector_store.id, file_ids=[file.id] - ) - print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") - - # Create a file search tool - # [START 
create_agent_with_tools_and_tool_resources] - file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) - - # Notices that FileSearchTool as tool and tool_resources must be added or the agent unable to search the file - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=file_search_tool.definitions, - tool_resources=file_search_tool.resources, - ) - # [END create_agent_with_tools_and_tool_resources] - print(f"Created agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = agents_client.messages.create( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" - ) - print(f"Created message, message ID: {message.id}") - - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Created run, run ID: {run.id}") - - file_search_tool.remove_vector_store(vector_store.id) - print(f"Removed vector store from file search, vector store ID: {vector_store.id}") - - agents_client.update_agent( - agent_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources - ) - print(f"Updated agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = agents_client.messages.create( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
- ) - print(f"Created message, message ID: {message.id}") - - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Created run, run ID: {run.id}") - - agents_client.vector_stores.delete(vector_store.id) - print("Deleted vector store") - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_vector_store_file_search.py b/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_vector_store_file_search.py deleted file mode 100644 index 04c821b1a828..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_vector_store_file_search.py +++ /dev/null @@ -1,80 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -""" -DESCRIPTION: - This sample demonstrates how to add files to agent during the vector store creation. - -USAGE: - python sample_agents_vector_store_file_search.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. 
-""" - -import os -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import FileSearchTool, FilePurpose, ListSortOrder -from azure.identity import DefaultAzureCredential - -asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - - # Upload a file and wait for it to be processed - file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) - print(f"Uploaded file, file ID: {file.id}") - - # Create a vector store with no file and wait for it to be processed - vector_store = agents_client.vector_stores.create_and_poll(file_ids=[file.id], name="sample_vector_store") - print(f"Created vector store, vector store ID: {vector_store.id}") - - # Create a file search tool - file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) - - # Notices that FileSearchTool as tool and tool_resources must be added or the agent unable to search the file - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=file_search_tool.definitions, - tool_resources=file_search_tool.resources, - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = agents_client.messages.create( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
- ) - print(f"Created message, message ID: {message.id}") - - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Created run, run ID: {run.id}") - - agents_client.vector_stores.delete(vector_store.id) - print("Deleted vector store") - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_with_code_interpreter_file_attachment.py b/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_with_code_interpreter_file_attachment.py deleted file mode 100644 index 08087f1918ef..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_with_code_interpreter_file_attachment.py +++ /dev/null @@ -1,111 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with code interpreter through file attachment from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_with_code_interpreter_file_attachment.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. 
-""" - -import os -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import CodeInterpreterTool, MessageAttachment -from azure.ai.agents.models import FilePurpose, MessageRole -from azure.identity import DefaultAzureCredential -from pathlib import Path - -asset_file_path = os.path.abspath( - os.path.join(os.path.dirname(__file__), "../assets/synthetic_500_quarterly_results.csv") -) - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - - # Upload a file and wait for it to be processed - file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) - print(f"Uploaded file, file ID: {file.id}") - - # [START create_agent_and_message_with_code_interpreter_file_attachment] - # Notice that CodeInterpreter must be enabled in the agent creation, - # otherwise the agent will not be able to see the file attachment for code interpretation - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=CodeInterpreterTool().definitions, - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - # Create an attachment - attachment = MessageAttachment(file_id=file.id, tools=CodeInterpreterTool().definitions) - - # Create a message - message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", - attachments=[attachment], - ) - # [END create_agent_and_message_with_code_interpreter_file_attachment] - print(f"Created message, message ID: {message.id}") - - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run finished with status: {run.status}") - - if 
run.status == "failed": - # Check if you got "Rate limit is exceeded.", then you want to get more quota - print(f"Run failed: {run.last_error}") - - agents_client.files.delete(file.id) - print("Deleted file") - - messages = agents_client.messages.list(thread_id=thread.id) - print(f"Messages: {messages}") - - last_msg = agents_client.messages.get_last_message_text_by_role(thread_id=thread.id, role=MessageRole.AGENT) - if last_msg: - print(f"Last Message: {last_msg.text.value}") - - for msg in messages: - # Save every image file in the message - for img in msg.image_contents: - file_id = img.image_file.file_id - file_name = f"{file_id}_image_file.png" - agents_client.files.save(file_id=file_id, file_name=file_name) - print(f"Saved image file to: {Path.cwd() / file_name}") - - # Print details of every file-path annotation - for ann in msg.file_path_annotations: - print("File Paths:") - print(f" Type: {ann.type}") - print(f" Text: {ann.text}") - print(f" File ID: {ann.file_path.file_id}") - print(f" Start Index: {ann.start_index}") - print(f" End Index: {ann.end_index}") - - agents_client.delete_agent(agent.id) - print("Deleted agent") diff --git a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_with_file_search_attachment.py deleted file mode 100644 index febfb33da3a8..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_with_file_search_attachment.py +++ /dev/null @@ -1,77 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations to create messages with file search attachments from - the Azure Agents service using a synchronous client. 
- -USAGE: - python sample_agents_with_file_search_attachment.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" -import os -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import FilePurpose, FileSearchTool, MessageAttachment, ListSortOrder -from azure.identity import DefaultAzureCredential - -asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - - # Upload a file and wait for it to be processed - file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) - print(f"Uploaded file, file ID: {file.id}") - - # Create agent - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - # Create a message with the file search attachment - # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days. 
- # [START create_message_with_attachment] - attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions) - message = agents_client.messages.create( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment] - ) - # [END create_message_with_attachment] - print(f"Created message, message ID: {message.id}") - - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Created run, run ID: {run.id}") - - agents_client.files.delete(file.id) - print("Deleted file") - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_with_resources_in_thread.py b/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_with_resources_in_thread.py deleted file mode 100644 index b010ad13540e..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_files_images_inputs/sample_agents_with_resources_in_thread.py +++ /dev/null @@ -1,97 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with file searching from - the Azure Agents service using a synchronous client. The file is attached to thread. - -USAGE: - python sample_agents_with_resources_in_thread.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. 
- 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" - -import os -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import FileSearchTool, FilePurpose, ListSortOrder -from azure.identity import DefaultAzureCredential - -asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - - # Upload file and create vector store - # [START create_agent_and_thread_for_file_search] - file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) - print(f"Uploaded file, file ID: {file.id}") - - vector_store = agents_client.vector_stores.create_and_poll(file_ids=[file.id], name="my_vectorstore") - print(f"Created vector store, vector store ID: {vector_store.id}") - - # Create file search tool with resources followed by creating agent - file_search = FileSearchTool(vector_store_ids=[vector_store.id]) - - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="Hello, you are helpful agent and can search information from uploaded files", - tools=file_search.definitions, - ) - - print(f"Created agent, ID: {agent.id}") - - # Create thread with file resources. - # If the agent has multiple threads, only this thread can search this file. - thread = agents_client.threads.create(tool_resources=file_search.resources) - # [END create_agent_and_thread_for_file_search] - print(f"Created thread, ID: {thread.id}") - - # Create message to thread - message = agents_client.messages.create( - thread_id=thread.id, role="user", content="Hello, what Contoso products do you know?" 
- ) - print(f"Created message, ID: {message.id}") - - # Create and process agent run in thread with tools - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - # Check if you got "Rate limit is exceeded.", then you want to get more quota - print(f"Run failed: {run.last_error}") - - # [START teardown] - # Delete the file when done - agents_client.vector_stores.delete(vector_store.id) - print("Deleted vector store") - - agents_client.files.delete(file_id=file.id) - print("Deleted file") - - # Delete the agent when done - agents_client.delete_agent(agent.id) - print("Deleted agent") - # [END teardown] - - # Fetch and log all messages - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_multiagent/sample_agents_agent_team.py b/sdk/ai/azure-ai-agents/samples/agents_multiagent/sample_agents_agent_team.py deleted file mode 100644 index 8289b8a90426..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_multiagent/sample_agents_agent_team.py +++ /dev/null @@ -1,78 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use multiple agents using AgentTeam with traces. - - The team consists of - • one leader agent - automatically created by AgentTeam from the - configuration in `utils/agent_team_config.yaml` - • two worker agents - `Coder` and `Reviewer`, defined in the code below - - IMPORTANT - leader-agent model configuration - `utils/agent_team_config.yaml` contains the key TEAM_LEADER_MODEL. - Its value must be the name of a **deployed** model in your Azure AI - project (e.g. 
"gpt-4o-mini"). - If this model deployment is not available, AgentTeam cannot instantiate - the leader agent and the sample will fail. - -USAGE: - python sample_agents_agent_team.py - - Before running the sample: - - 1. pip install azure-ai-agents azure-identity - 2. Ensure `utils/agent_team_config.yaml` is present and TEAM_LEADER_MODEL points - to a valid model deployment. - 3. Set these environment variables with your own values: - PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - MODEL_DEPLOYMENT_NAME - The model deployment name used for the worker agents. -""" - -import os -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from utils.agent_team import AgentTeam, _create_task -from utils.agent_trace_configurator import AgentTraceConfigurator - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -agents_client.enable_auto_function_calls({_create_task}) - -model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME") - -if model_deployment_name is not None: - AgentTraceConfigurator(agents_client=agents_client).setup_tracing() - with agents_client: - agent_team = AgentTeam("test_team", agents_client=agents_client) - agent_team.add_agent( - model=model_deployment_name, - name="Coder", - instructions="You are software engineer who writes great code. Your name is Coder.", - ) - agent_team.add_agent( - model=model_deployment_name, - name="Reviewer", - instructions="You are software engineer who reviews code. 
Your name is Reviewer.", - ) - agent_team.assemble_team() - - print("A team of agents specialized in software engineering is available for requests.") - while True: - user_input = input("Input (type 'quit' or 'exit' to exit): ") - if user_input.lower() == "quit": - break - elif user_input.lower() == "exit": - break - agent_team.process_request(request=user_input) - - agent_team.dismantle_team() -else: - print("Error: Please define the environment variable MODEL_DEPLOYMENT_NAME.") diff --git a/sdk/ai/azure-ai-agents/samples/agents_multiagent/sample_agents_agent_team_custom_team_leader.py b/sdk/ai/azure-ai-agents/samples/agents_multiagent/sample_agents_agent_team_custom_team_leader.py deleted file mode 100644 index 1a8c9f33a6bd..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_multiagent/sample_agents_agent_team_custom_team_leader.py +++ /dev/null @@ -1,117 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to multiple agents using AgentTeam with traces. - -USAGE: - python sample_agents_agent_team_custom_team_leader.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - MODEL_DEPLOYMENT_NAME - the name of the model deployment to use. 
-""" - -import os -from typing import Optional, Set -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from utils.agent_team import AgentTeam, AgentTask -from utils.agent_trace_configurator import AgentTraceConfigurator -from azure.ai.agents.models import FunctionTool, ToolSet - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME") - - -def create_task(team_name: str, recipient: str, request: str, requestor: str) -> str: - """ - Requests another agent in the team to complete a task. - - :param team_name (str): The name of the team. - :param recipient (str): The name of the agent that is being requested to complete the task. - :param request (str): A description of the to complete. This can also be a question. - :param requestor (str): The name of the agent who is requesting the task. - :return: True if the task was successfully received, False otherwise. - :rtype: str - """ - task = AgentTask(recipient=recipient, task_description=request, requestor=requestor) - team: Optional[AgentTeam] = None - try: - team = AgentTeam.get_team(team_name) - except: - pass - if team is not None: - team.add_task(task) - return "True" - return "False" - - -# Any additional functions that might be used by the agents: -agent_team_default_functions: Set = { - create_task, -} - -default_function_tool = FunctionTool(functions=agent_team_default_functions) - -agents_client.enable_auto_function_calls({create_task}) - -if model_deployment_name is not None: - AgentTraceConfigurator(agents_client=agents_client).setup_tracing() - with agents_client: - agent_team = AgentTeam("test_team", agents_client=agents_client) - toolset = ToolSet() - toolset.add(default_function_tool) - agent_team.set_team_leader( - model=model_deployment_name, - name="TeamLeader", - instructions="You are an agent named 'TeamLeader'. 
You are a leader of a team of agents. The name of your team is 'test_team'." - "You are an agent that is responsible for receiving requests from user and utilizing a team of agents to complete the task. " - "When you are passed a request, the only thing you will do is evaluate which team member should do which task next to complete the request. " - "You will use the provided create_task function to create a task for the agent that is best suited for handling the task next. " - "You will respond with the description of who you assigned the task and why. When you think that the original user request is " - "processed completely utilizing all the talent available in the team, you do not have to create anymore tasks. " - "Using the skills of all the team members when applicable is highly valued. " - "Do not create parallel tasks. " - "Here are the other agents in your team: " - "- Coder: You are software engineer who writes great code. Your name is Coder. " - "- Reviewer: You are software engineer who reviews code. Your name is Reviewer.", - toolset=toolset, - ) - agent_team.add_agent( - model=model_deployment_name, - name="Coder", - instructions="You are software engineer who writes great code. Your name is Coder.", - ) - agent_team.add_agent( - model=model_deployment_name, - name="Reviewer", - instructions="You are software engineer who reviews code. 
Your name is Reviewer.", - ) - agent_team.assemble_team() - - print("A team of agents specialized in software engineering is available for requests.") - while True: - user_input = input("Input (type 'quit' or 'exit' to exit): ") - if user_input.lower() == "quit": - break - elif user_input.lower() == "exit": - break - agent_team.process_request(request=user_input) - - agent_team.dismantle_team() -else: - print("Error: Please define the environment variable MODEL_DEPLOYMENT_NAME.") diff --git a/sdk/ai/azure-ai-agents/samples/agents_multiagent/sample_agents_multi_agent_team.py b/sdk/ai/azure-ai-agents/samples/agents_multiagent/sample_agents_multi_agent_team.py deleted file mode 100644 index ba3fff30140f..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_multiagent/sample_agents_multi_agent_team.py +++ /dev/null @@ -1,133 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use an AgentTeam to execute a multi-step - user request with automatic function calling and trace collection. - - The team consists of - • one leader agent - created automatically from the configuration in - `utils/agent_team_config.yaml` - • three worker agents - `TimeWeatherAgent`, `SendEmailAgent`, and - `TemperatureAgent`, each defined in the code below with its own tools - - IMPORTANT - leader-agent model configuration - `utils/agent_team_config.yaml` contains the key TEAM_LEADER_MODEL. - Its value must be the name of a **deployed** model in your Azure AI - project (e.g. "gpt-4o-mini"). - If this deployment does not exist, AgentTeam cannot instantiate the - leader agent and the sample will fail. - -USAGE: - python sample_agents_multi_agent_team.py - - Before running the sample: - - 1. pip install azure-ai-agents azure-identity - 2. 
Ensure `utils/agent_team_config.yaml` is present and TEAM_LEADER_MODEL points - to a valid model deployment. - 3. Set these environment variables with your own values: - PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - MODEL_DEPLOYMENT_NAME - The model deployment name used for the worker agents. -""" - -import os -from typing import Set - -from utils.user_functions_with_traces import ( - fetch_current_datetime, - fetch_weather, - send_email_using_recipient_name, - convert_temperature, -) - -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import ToolSet, FunctionTool -from azure.identity import DefaultAzureCredential -from utils.agent_team import AgentTeam, _create_task -from utils.agent_trace_configurator import AgentTraceConfigurator - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -user_function_set_1: Set = {fetch_current_datetime, fetch_weather} - -user_function_set_2: Set = {send_email_using_recipient_name} - -user_function_set_3: Set = {convert_temperature} - -agents_client.enable_auto_function_calls( - { - _create_task, - fetch_current_datetime, - fetch_weather, - send_email_using_recipient_name, - convert_temperature, - } -) - -model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME") - -if model_deployment_name is not None: - AgentTraceConfigurator(agents_client=agents_client).setup_tracing() - with agents_client: - - functions = FunctionTool(functions=user_function_set_1) - toolset1 = ToolSet() - toolset1.add(functions) - - agent_team = AgentTeam("test_team", agents_client=agents_client) - - agent_team.add_agent( - model=model_deployment_name, - name="TimeWeatherAgent", - instructions="You are a specialized agent for time and weather queries.", - toolset=toolset1, - can_delegate=True, - ) - - functions = FunctionTool(functions=user_function_set_2) - toolset2 = ToolSet() - toolset2.add(functions) 
- - agent_team.add_agent( - model=model_deployment_name, - name="SendEmailAgent", - instructions="You are a specialized agent for sending emails.", - toolset=toolset2, - can_delegate=False, - ) - - functions = FunctionTool(functions=user_function_set_3) - toolset3 = ToolSet() - toolset3.add(functions) - - agent_team.add_agent( - model=model_deployment_name, - name="TemperatureAgent", - instructions="You are a specialized agent for temperature conversion.", - toolset=toolset3, - can_delegate=False, - ) - - agent_team.assemble_team() - - user_request = ( - "Hello, Please provide me current time in '%Y-%m-%d %H:%M:%S' format, and the weather in New York. " - "Finally, convert the Celsius to Fahrenheit and send an email to Example Recipient with summary of results." - ) - - # Once process_request is called, the TeamLeader will coordinate. - # The loop in process_request will pick up tasks from the queue, assign them, and so on. - agent_team.process_request(request=user_request) - - agent_team.dismantle_team() -else: - print("Error: Please define the environment variable MODEL_DEPLOYMENT_NAME.") diff --git a/sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/agent_team.py b/sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/agent_team.py deleted file mode 100644 index ce304992536d..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/agent_team.py +++ /dev/null @@ -1,436 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------ -import os -import yaml # type: ignore - -from opentelemetry import trace -from opentelemetry.trace import Span # noqa: F401 # pylint: disable=unused-import -from typing import Any, Dict, Optional, Set, List -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import FunctionTool, ToolSet, MessageRole, Agent, AgentThread - -tracer = trace.get_tracer(__name__) - - -class _AgentTeamMember: - """ - Represents an individual agent on a team. - - :param model: The model (e.g. GPT-4) used by this agent. - :param name: The agent's name. - :param instructions: The agent's initial instructions or "personality". - :param toolset: An optional ToolSet with specialized tools for this agent. - :param can_delegate: Whether this agent has delegation capability (e.g., 'create_task'). - Defaults to True. - """ - - def __init__( - self, model: str, name: str, instructions: str, toolset: Optional[ToolSet] = None, can_delegate: bool = True - ) -> None: - self.model = model - self.name = name - self.instructions = instructions - self.agent_instance: Optional[Agent] = None - self.toolset: Optional[ToolSet] = toolset - self.can_delegate = can_delegate - - -class AgentTask: - """ - Encapsulates a task for an agent to perform. - - :param recipient: The name of the agent who should receive the task. - :param task_description: The description of the work to be done or question to be answered. - :param requestor: The name of the agent or user requesting the task. - """ - - def __init__(self, recipient: str, task_description: str, requestor: str) -> None: - self.recipient = recipient - self.task_description = task_description - self.requestor = requestor - - -class AgentTeam: - """ - A class that represents a team of agents. 
- - """ - - # Static container to store all instances of AgentTeam - _teams: Dict[str, "AgentTeam"] = {} - - _agents_client: AgentsClient - _agent_thread: Optional[AgentThread] = None - _team_leader: Optional[_AgentTeamMember] = None - _members: List[_AgentTeamMember] = [] - _tasks: List[AgentTask] = [] - _team_name: str = "" - _current_request_span: Optional[Span] = None - _current_task_span: Optional[Span] = None - - def __init__(self, team_name: str, agents_client: AgentsClient): - """ - Initialize a new AgentTeam and set it as the singleton instance. - """ - # Validate that the team_name is a non-empty string - if not isinstance(team_name, str) or not team_name: - raise ValueError("Team name must be a non-empty string.") - # Check for existing team with the same name - if team_name in AgentTeam._teams: - raise ValueError(f"A team with the name '{team_name}' already exists.") - self.team_name = team_name - if agents_client is None: - raise ValueError("No AgentsClient provided.") - self._agents_client = agents_client - # Store the instance in the static container - AgentTeam._teams[team_name] = self - - # Get the directory of the current file - current_dir = os.path.dirname(os.path.abspath(__file__)) - # Construct the full path to the config file - file_path = os.path.join(current_dir, "agent_team_config.yaml") - with open(file_path, "r") as config_file: - config = yaml.safe_load(config_file) - self.TEAM_LEADER_INSTRUCTIONS = config["TEAM_LEADER_INSTRUCTIONS"] - self.TEAM_LEADER_INITIAL_REQUEST = config["TEAM_LEADER_INITIAL_REQUEST"] - self.TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS = config[ - "TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS" - ] - self.TEAM_MEMBER_CAN_DELEGATE_INSTRUCTIONS = config["TEAM_MEMBER_CAN_DELEGATE_INSTRUCTIONS"] - self.TEAM_MEMBER_NO_DELEGATE_INSTRUCTIONS = config["TEAM_MEMBER_NO_DELEGATE_INSTRUCTIONS"] - self.TEAM_LEADER_MODEL = config["TEAM_LEADER_MODEL"].strip() - - @staticmethod - def get_team(team_name: str) -> 
"AgentTeam": - """Static method to fetch the AgentTeam instance by name.""" - team = AgentTeam._teams.get(team_name) - if team is None: - raise ValueError(f"No team found with the name '{team_name}'.") - return team - - @staticmethod - def _remove_team(team_name: str) -> None: - """Static method to remove an AgentTeam instance by name.""" - if team_name not in AgentTeam._teams: - raise ValueError(f"No team found with the name '{team_name}'.") - del AgentTeam._teams[team_name] - - def add_agent( - self, model: str, name: str, instructions: str, toolset: Optional[ToolSet] = None, can_delegate: bool = True - ) -> None: - """ - Add a new agent (team member) to this AgentTeam. - - :param model: The model name (e.g. GPT-4) for the agent. - :param name: The name of the agent being added. - :param instructions: The initial instructions/personality for the agent. - :param toolset: An optional ToolSet to configure specific tools (functions, etc.) - for this agent. If None, we'll create a default set. - :param can_delegate: If True, the agent can delegate tasks (via create_task). - If False, the agent does not get 'create_task' in its ToolSet - and won't mention delegation in instructions. - """ - if toolset is None: - toolset = ToolSet() - - if can_delegate: - # If agent can delegate, ensure it has 'create_task' - try: - function_tool = toolset.get_tool(FunctionTool) - function_tool.add_functions(agent_team_default_functions) - except ValueError: - default_function_tool = FunctionTool(agent_team_default_functions) - toolset.add(default_function_tool) - - member = _AgentTeamMember( - model=model, - name=name, - instructions=instructions, - toolset=toolset, - can_delegate=can_delegate, - ) - self._members.append(member) - - def set_team_leader(self, model: str, name: str, instructions: str, toolset: Optional[ToolSet] = None) -> None: - """ - Set the team leader for this AgentTeam. 
- - If team leader has not been set prior to the call to assemble_team, - then a default team leader will be set. - - :param model: The model name (e.g. GPT-4) for the agent. - :param name: The name of the team leader. - :param instructions: The instructions for the team leader. These instructions - are not modified by the implementation, so all required - information about other team members and how to pass tasks - to them should be included. - :param toolset: An optional ToolSet to configure specific tools (functions, etc.) - for the team leader. - """ - member = _AgentTeamMember(model=model, name=name, instructions=instructions, toolset=toolset) - self._team_leader = member - - def add_task(self, task: AgentTask) -> None: - """ - Add a new task to the team's task list. - - :param task: The task to be added. - """ - self._tasks.append(task) - - def _create_team_leader(self) -> None: - """ - Create the team leader agent. - """ - assert self._agents_client is not None, "agents_client must not be None" - assert self._team_leader is not None, "team leader has not been added" - - self._team_leader.agent_instance = self._agents_client.create_agent( - model=self._team_leader.model, - name=self._team_leader.name, - instructions=self._team_leader.instructions, - toolset=self._team_leader.toolset, - ) - - def _set_default_team_leader(self): - """ - Set the default 'TeamLeader' agent with awareness of all other agents. 
- """ - toolset = ToolSet() - toolset.add(default_function_tool) - instructions = self.TEAM_LEADER_INSTRUCTIONS.format(agent_name="TeamLeader", team_name=self.team_name) + "\n" - # List all agents (will be empty at this moment if you haven't added any, or you can append after they're added) - for member in self._members: - instructions += f"- {member.name}: {member.instructions}\n" - - self._team_leader = _AgentTeamMember( - model=self.TEAM_LEADER_MODEL, - name="TeamLeader", - instructions=instructions, - toolset=toolset, - can_delegate=True, - ) - - def assemble_team(self): - """ - Create the team leader agent and initialize all member agents with - their configured or default toolsets. - """ - assert self._agents_client is not None, "agents_client must not be None" - - if self._team_leader is None: - self._set_default_team_leader() - - self._create_team_leader() - - for member in self._members: - if member is self._team_leader: - continue - - team_description = "" - for other_member in self._members: - if other_member != member: - team_description += f"- {other_member.name}: {other_member.instructions}\n" - - if member.can_delegate: - extended_instructions = self.TEAM_MEMBER_CAN_DELEGATE_INSTRUCTIONS.format( - name=member.name, - team_name=self._team_name, - original_instructions=member.instructions, - team_description=team_description, - ) - else: - extended_instructions = self.TEAM_MEMBER_NO_DELEGATE_INSTRUCTIONS.format( - name=member.name, - team_name=self._team_name, - original_instructions=member.instructions, - team_description=team_description, - ) - member.agent_instance = self._agents_client.create_agent( - model=member.model, name=member.name, instructions=extended_instructions, toolset=member.toolset - ) - - def dismantle_team(self) -> None: - """ - Delete all agents (including the team leader) from the project client. 
- """ - assert self._agents_client is not None, "agents_client must not be None" - - if self._team_leader and self._team_leader.agent_instance: - print(f"Deleting team leader agent '{self._team_leader.name}'") - self._agents_client.delete_agent(self._team_leader.agent_instance.id) - for member in self._members: - if member is not self._team_leader and member.agent_instance: - print(f"Deleting agent '{member.name}'") - self._agents_client.delete_agent(member.agent_instance.id) - AgentTeam._remove_team(self.team_name) - - def _add_task_completion_event( - self, - span: Span, - result: str, - ) -> None: - - attributes: Dict[str, Any] = {} - attributes["agent_team.task.result"] = result - span.add_event(name=f"agent_team.task_completed", attributes=attributes) - - def process_request(self, request: str) -> None: - """ - Handle a user's request by creating a team and delegating tasks to - the team leader. The team leader may generate additional tasks. - - :param request: The user's request or question. 
- """ - assert self._agents_client is not None, "project client must not be None" - assert self._team_leader is not None, "team leader must not be None" - - if self._agent_thread is None: - self._agent_thread = self._agents_client.threads.create() - print(f"Created thread with ID: {self._agent_thread.id}") - - with tracer.start_as_current_span("agent_team_request") as current_request_span: - self._current_request_span = current_request_span - if self._current_request_span is not None: - self._current_request_span.set_attribute("agent_team.name", self.team_name) - team_leader_request = self.TEAM_LEADER_INITIAL_REQUEST.format(original_request=request) - _create_task( - team_name=self.team_name, - recipient=self._team_leader.name, - request=team_leader_request, - requestor="user", - ) - while self._tasks: - task = self._tasks.pop(0) - with tracer.start_as_current_span("agent_team_task") as current_task_span: - self._current_task_span = current_task_span - if self._current_task_span is not None: - self._current_task_span.set_attribute("agent_team.name", self.team_name) - self._current_task_span.set_attribute("agent_team.task.recipient", task.recipient) - self._current_task_span.set_attribute("agent_team.task.requestor", task.requestor) - self._current_task_span.set_attribute("agent_team.task.description", task.task_description) - print( - f"Starting task for agent '{task.recipient}'. " - f"Requestor: '{task.requestor}'. " - f"Task description: '{task.task_description}'." 
- ) - message = self._agents_client.messages.create( - thread_id=self._agent_thread.id, - role="user", - content=task.task_description, - ) - print(f"Created message with ID: {message.id} for task in thread {self._agent_thread.id}") - agent = self._get_member_by_name(task.recipient) - if agent and agent.agent_instance: - run = self._agents_client.runs.create_and_process( - thread_id=self._agent_thread.id, agent_id=agent.agent_instance.id - ) - print(f"Created and processed run for agent '{agent.name}', run ID: {run.id}") - text_message = self._agents_client.messages.get_last_message_text_by_role( - thread_id=self._agent_thread.id, role=MessageRole.AGENT - ) - if text_message and text_message.text: - print(f"Agent '{agent.name}' completed task. " f"Outcome: {text_message.text.value}") - if self._current_task_span is not None: - self._add_task_completion_event(self._current_task_span, result=text_message.text.value) - - # If no tasks remain AND the recipient is not the TeamLeader, - # let the TeamLeader see if more delegation is needed. - if not self._tasks and not task.recipient == "TeamLeader": - team_leader_request = self.TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS - _create_task( - team_name=self.team_name, - recipient=self._team_leader.name, - request=team_leader_request, - requestor="user", - ) - # self._current_task_span.end() - self._current_task_span = None - # self._current_request_span.end() - self._current_request_span = None - - def _get_member_by_name(self, name) -> Optional[_AgentTeamMember]: - """ - Retrieve a team member (agent) by name. - If no member with the specified name is found, returns None. - - :param name: The agent's name within this team. - """ - if name == "TeamLeader": - return self._team_leader - for member in self._members: - if member.name == name: - return member - return None - - """ - Requests another agent in the team to complete a task. 
- - :param span (Span): The event will be added to this span - :param team_name (str): The name of the team. - :param recipient (str): The name of the agent that is being requested to complete the task. - :param request (str): A description of the to complete. This can also be a question. - :param requestor (str): The name of the agent who is requesting the task. - :return: True if the task was successfully received, False otherwise. - :rtype: str - """ - - -def _add_create_task_event( - span: Span, - team_name: str, - requestor: str, - recipient: str, - request: str, -) -> None: - - attributes: Dict[str, Any] = {} - attributes["agent_team.task.team_name"] = team_name - attributes["agent_team.task.requestor"] = requestor - attributes["agent_team.task.recipient"] = recipient - attributes["agent_team.task.description"] = request - span.add_event(name=f"agent_team.create_task", attributes=attributes) - - -def _create_task(team_name: str, recipient: str, request: str, requestor: str) -> str: - """ - Requests another agent in the team to complete a task. - - :param team_name (str): The name of the team. - :param recipient (str): The name of the agent that is being requested to complete the task. - :param request (str): A description of the to complete. This can also be a question. - :param requestor (str): The name of the agent who is requesting the task. - :return: True if the task was successfully received, False otherwise. 
- :rtype: str - """ - task = AgentTask(recipient=recipient, task_description=request, requestor=requestor) - team: Optional[AgentTeam] = None - try: - team = AgentTeam.get_team(team_name) - span: Optional[Span] = None - if team._current_task_span is not None: - span = team._current_task_span - elif team._current_request_span is not None: - span = team._current_request_span - - if span is not None: - _add_create_task_event( - span=span, team_name=team_name, requestor=requestor, recipient=recipient, request=request - ) - except: - pass - if team is not None: - team.add_task(task) - return "True" - return "False" - - -# Any additional functions that might be used by the agents: -agent_team_default_functions: Set = { - _create_task, -} - -default_function_tool = FunctionTool(functions=agent_team_default_functions) diff --git a/sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/agent_team_config.yaml b/sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/agent_team_config.yaml deleted file mode 100644 index ad711427b5fb..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/agent_team_config.yaml +++ /dev/null @@ -1,43 +0,0 @@ -TEAM_LEADER_MODEL: | - gpt-4o-mini - -TEAM_LEADER_INSTRUCTIONS: | - You are an agent named '{agent_name}'. You are a leader of a team of agents. The name of your team is '{team_name}'. - You are an agent that is responsible for receiving requests from user and utilizing a team of agents to complete the task. - When you are passed a request, the only thing you will do is evaluate which team member should do which task next to complete the request. - You will use the provided _create_task function to create a task for the agent that is best suited for handling the task next. - You will respond with the description of who you assigned the task and why. When you think that the original user request is - processed completely utilizing all the talent available in the team, you do not have to create anymore tasks. 
- Using the skills of all the team members when applicable is highly valued. - Do not create parallel tasks. - Here are the other agents in your team: - -TEAM_LEADER_INITIAL_REQUEST: | - Please create a task for agent in the team that is best suited to next process the following request. - Use the _create_task function available for you to create the task. The request is: - {original_request} - -TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS: | - Check the discussion so far and especially the most recent agent response in the thread and if you see a potential task - that could improve the final outcome, then use the _create_task function to create the task. - Do not ever ask user confirmation for creating a task. - If the request is completely processed, you do not have to create a task. - -TEAM_MEMBER_CAN_DELEGATE_INSTRUCTIONS: | - You are an agent named '{name}'. You are a member in a team of agents. The name of your team is '{team_name}'. - {original_instructions} - - - You can delegate tasks when appropriate. To delegate, call the _create_task function, using your own name as the 'requestor'. - - Provide a brief account of any tasks you assign and the outcome. - - Ask for help from other team members if you see they have the relevant expertise. - - Once you believe your assignment is complete, respond with your final answer or actions taken. - - Below are the other agents in your team: {team_description} - -TEAM_MEMBER_NO_DELEGATE_INSTRUCTIONS: | - You are an agent named '{name}'. You are a member in a team of agents. The name of your team is '{team_name}'. - {original_instructions} - - - You do not delegate tasks. Instead, focus solely on fulfilling the tasks assigned to you. - - If you have suggestions for tasks better suited to another agent, simply mention it in your response, but do not call _create_task yourself. - - Once you believe your assignment is complete, respond with your final answer or actions taken. 
- - Below are the other agents in your team: {team_description} diff --git a/sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/agent_trace_configurator.py b/sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/agent_trace_configurator.py deleted file mode 100644 index 7c03cb453d58..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/agent_trace_configurator.py +++ /dev/null @@ -1,73 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -import os -import sys -from typing import cast -from opentelemetry import trace -from opentelemetry.sdk.trace import TracerProvider -from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter -from azure.ai.agents import AgentsClient -from azure.monitor.opentelemetry import configure_azure_monitor - - -class AgentTraceConfigurator: - def __init__(self, agents_client: AgentsClient): - self.agents_client = agents_client - - def enable_azure_monitor_tracing(self): - application_insights_connection_string = os.environ.get("APPLICATIONINSIGHTS_CONNECTION_STRING") - if not application_insights_connection_string: - print("APPLICATIONINSIGHTS_CONNECTION_STRING environment variable was not set.") - print("Please create APPLICATIONINSIGHTS_CONNECTION_STRING with the Application Insights,") - print("connection string. 
It should be enabled for this project.") - print("Enable it via the 'Tracing' tab in your AI Foundry project page.") - exit() - configure_azure_monitor(connection_string=application_insights_connection_string) - - def enable_console_tracing_without_genai(self): - exporter = ConsoleSpanExporter() - trace.set_tracer_provider(TracerProvider()) - tracer = trace.get_tracer(__name__) - provider = cast(TracerProvider, trace.get_tracer_provider()) - provider.add_span_processor(SimpleSpanProcessor(exporter)) - print("Console tracing enabled without agent traces.") - - def enable_console_tracing_with_agent(self): - span_exporter = ConsoleSpanExporter() - tracer_provider = TracerProvider() - tracer_provider.add_span_processor(SimpleSpanProcessor(span_exporter)) - trace.set_tracer_provider(tracer_provider) - tracer = trace.get_tracer(__name__) - try: - from azure.ai.agents.telemetry import AIAgentsInstrumentor - - agents_instrumentor = AIAgentsInstrumentor() - if not agents_instrumentor.is_instrumented(): - agents_instrumentor.instrument() - except Exception as exc: # pylint: disable=broad-exception-caught - print(f"Could not call `AIAgentsInstrumentor().instrument()`. Exception: {exc}") - print("Console tracing enabled with agent traces.") - - def display_menu(self): - print("Select a tracing option:") - print("1. Enable Azure Monitor tracing") - print("2. Enable console tracing without enabling gen_ai agent traces") - print("3. Enable console tracing with gen_ai agent traces") - print("4. Do not enable traces") - - def setup_tracing(self): - self.display_menu() - choice = input("Enter your choice (1-4): ") - - if choice == "1": - self.enable_azure_monitor_tracing() - elif choice == "2": - self.enable_console_tracing_without_genai() - elif choice == "3": - self.enable_console_tracing_with_agent() - elif choice == "4": - print("No tracing enabled.") - else: - print("Invalid choice. 
Please select a valid option.") diff --git a/sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/user_functions_with_traces.py b/sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/user_functions_with_traces.py deleted file mode 100644 index 2c4f2377ddaf..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_multiagent/utils/user_functions_with_traces.py +++ /dev/null @@ -1,111 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -import json -import datetime -from typing import Any, Callable, Set, Optional -from opentelemetry import trace - - -tracer = trace.get_tracer(__name__) - - -# These are the user-defined functions that can be called by the agent. -@tracer.start_as_current_span("fetch_current_datetime") # type: ignore -def fetch_current_datetime(format: Optional[str] = None) -> str: - """ - Get the current time as a JSON string, optionally formatted. - - :param format (Optional[str]): The format in which to return the current time. Defaults to None, which uses a standard format. - :return: The current time in JSON format. - :rtype: str - """ - current_time = datetime.datetime.now() - - # Use the provided format if available, else use a default format - if format: - time_format = format - else: - time_format = "%Y-%m-%d %H:%M:%S" - - time_json = json.dumps({"current_time": current_time.strftime(time_format)}) - return time_json - - -@tracer.start_as_current_span("fetch_weather") # type: ignore -def fetch_weather(location: str) -> str: - """ - Fetches the weather information for the specified location. - - :param location (str): The location to fetch weather for. - :return: Weather information as a JSON string. - :rtype: str - """ - # In a real-world scenario, you'd integrate with a weather API. - # Here, we'll mock the response. 
- mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} - weather = mock_weather_data.get(location, "Weather data not available for this location.") - weather_json = json.dumps({"weather": weather}) - return weather_json - - -@tracer.start_as_current_span("send_email_using_recipient_name") # type: ignore -def send_email_using_recipient_name(recipient: str, subject: str, body: str) -> str: - """ - Sends an email with the specified subject and body to the recipient. - - :param recipient (str): Name of the recipient. - :param subject (str): Subject of the email. - :param body (str): Body content of the email. - :return: Confirmation message. - :rtype: str - """ - # In a real-world scenario, you'd use an SMTP server or an email service API. - # Here, we'll mock the email sending. - print(f"Sending email to {recipient}...") - print(f"Subject: {subject}") - print(f"Body:\n{body}") - - message_json = json.dumps({"message": f"Email successfully sent to {recipient}."}) - return message_json - - -@tracer.start_as_current_span("convert_temperature") # type: ignore -def convert_temperature(celsius: float) -> str: - """Converts temperature from Celsius to Fahrenheit. - - :param celsius (float): Temperature in Celsius. - :rtype: float - - :return: Temperature in Fahrenheit. - :rtype: str - """ - fahrenheit = (celsius * 9 / 5) + 32 - return json.dumps({"fahrenheit": fahrenheit}) - - -# Example User Input for Each Function -# 1. Fetch Current DateTime -# User Input: "What is the current date and time?" -# User Input: "What is the current date and time in '%Y-%m-%d %H:%M:%S' format?" - -# 2. Fetch Weather -# User Input: "Can you provide the weather information for New York?" - -# 3. Send Email Using Recipient Name -# User Input: "Send an email to John Doe with the subject 'Meeting Reminder' and body 'Don't forget our meeting at 3 PM.'" - -# 4. Convert Temperature -# User Input: "Convert 25 degrees Celsius to Fahrenheit." 
- - -# Statically defined user functions for fast reference -user_functions: Set[Callable[..., Any]] = { - fetch_current_datetime, - fetch_weather, - send_email_using_recipient_name, - convert_temperature, -} diff --git a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_eventhandler_with_bing_grounding.py b/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_eventhandler_with_bing_grounding.py deleted file mode 100644 index f63e475d631c..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_eventhandler_with_bing_grounding.py +++ /dev/null @@ -1,124 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use Agent operations with an event handler and - the Bing grounding tool. It uses a synchronous client. - -USAGE: - python sample_agents_stream_eventhandler_with_bing_grounding.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. - 3) AZURE_BING_CONNECTION_ID - The connection id of the Bing connection, as found in the "Connected resources" tab - in your Azure AI Foundry project. 
-""" - -import os -from typing import Any -from azure.identity import DefaultAzureCredential -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import ( - MessageDeltaChunk, - RunStep, - ThreadMessage, - ThreadRun, - AgentEventHandler, - BingGroundingTool, - MessageRole, - MessageDeltaTextUrlCitationAnnotation, - MessageDeltaTextContent, -) - - -# When using FunctionTool with ToolSet in agent creation, the tool call events are handled inside the stream -# method and functions gets automatically called by default. -class MyEventHandler(AgentEventHandler): - - def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - print(f"Text delta received: {delta.text}") - if delta.delta.content and isinstance(delta.delta.content[0], MessageDeltaTextContent): - delta_text_content = delta.delta.content[0] - if delta_text_content.text and delta_text_content.text.annotations: - for delta_annotation in delta_text_content.text.annotations: - if isinstance(delta_annotation, MessageDeltaTextUrlCitationAnnotation): - print( - f"URL citation delta received: [{delta_annotation.url_citation.title}]({delta_annotation.url_citation.url})" - ) - - def on_thread_message(self, message: "ThreadMessage") -> None: - print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") - - def on_thread_run(self, run: "ThreadRun") -> None: - print(f"ThreadRun status: {run.status}") - - if run.status == "failed": - print(f"Run failed. Error: {run.last_error}") - - def on_run_step(self, step: "RunStep") -> None: - print(f"RunStep type: {step.type}, Status: {step.status}") - - def on_error(self, data: str) -> None: - print(f"An error occurred. 
Data: {data}") - - def on_done(self) -> None: - print("Stream completed.") - - def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - - bing_connection_id = os.environ["AZURE_BING_CONNECTION_ID"] - print(f"Bing Connection ID: {bing_connection_id}") - - # Initialize agent bing tool and add the connection id - bing = BingGroundingTool(connection_id=bing_connection_id) - - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - tools=bing.definitions, - ) - print(f"Created agent, ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID {thread.id}") - - message = agents_client.messages.create( - thread_id=thread.id, - role=MessageRole.USER, - content="How does wikipedia explain Euler's Identity?", - ) - print(f"Created message, message ID {message.id}") - - with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler()) as stream: - stream.until_done() - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - response_message = agents_client.messages.get_last_message_by_role(thread_id=thread.id, role=MessageRole.AGENT) - if response_message: - for text_message in response_message.text_messages: - print(f"Agent response: {text_message.text.value}") - for annotation in response_message.url_citation_annotations: - print(f"URL Citation: [{annotation.url_citation.title}]({annotation.url_citation.url})") diff --git a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_eventhandler_with_functions.py deleted file mode 100644 index 83d56502a119..000000000000 
--- a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_eventhandler_with_functions.py +++ /dev/null @@ -1,147 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with an event handler and toolset from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_stream_eventhandler_with_functions.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. 
-""" -from typing import Any - -import os, sys -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import ( - AgentEventHandler, - FunctionTool, - ListSortOrder, - MessageDeltaChunk, - RequiredFunctionToolCall, - RunStep, - SubmitToolOutputsAction, - ThreadMessage, - ThreadRun, - ToolOutput, -) -from azure.identity import DefaultAzureCredential - -current_path = os.path.dirname(__file__) -root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir)) -if root_path not in sys.path: - sys.path.insert(0, root_path) -from samples.utils.user_functions import user_functions - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - - -class MyEventHandler(AgentEventHandler): - - def __init__(self, functions: FunctionTool) -> None: - super().__init__() - self.functions = functions - - def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - print(f"Text delta received: {delta.text}") - - def on_thread_message(self, message: "ThreadMessage") -> None: - print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") - - def on_thread_run(self, run: "ThreadRun") -> None: - print(f"ThreadRun status: {run.status}") - - if run.status == "failed": - print(f"Run failed. Error: {run.last_error}") - - if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): - tool_calls = run.required_action.submit_tool_outputs.tool_calls - - tool_outputs = [] - for tool_call in tool_calls: - if isinstance(tool_call, RequiredFunctionToolCall): - try: - output = functions.execute(tool_call) - tool_outputs.append( - ToolOutput( - tool_call_id=tool_call.id, - output=output, - ) - ) - except Exception as e: - print(f"Error executing tool_call {tool_call.id}: {e}") - - print(f"Tool outputs: {tool_outputs}") - if tool_outputs: - # Once we receive 'requires_action' status, the next event will be DONE. 
- # Here we associate our existing event handler to the next stream. - agents_client.runs.submit_tool_outputs_stream( - thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=self - ) - - def on_run_step(self, step: "RunStep") -> None: - print(f"RunStep type: {step.type}, Status: {step.status}") - - def on_error(self, data: str) -> None: - print(f"An error occurred. Data: {data}") - - def on_done(self) -> None: - print("Stream completed.") - - def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - -with agents_client: - - # [START create_agent_with_function_tool] - functions = FunctionTool(user_functions) - - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - tools=functions.definitions, - ) - # [END create_agent_with_function_tool] - print(f"Created agent, ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID {thread.id}") - - message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content="Hello, send an email with the datetime and weather information in New York? 
Also let me know the details.", - ) - print(f"Created message, message ID {message.id}") - - with agents_client.runs.stream( - thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler(functions) - ) as stream: - stream.until_done() - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_eventhandler_with_toolset.py deleted file mode 100644 index c1004de68469..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_eventhandler_with_toolset.py +++ /dev/null @@ -1,118 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with an event handler and toolset from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_stream_eventhandler_with_toolset.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. 
-""" - -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import ( - MessageDeltaChunk, - ListSortOrder, - RunStep, - ThreadMessage, - ThreadRun, -) -from azure.ai.agents.models import AgentEventHandler -from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import FunctionTool, ToolSet - -import os, sys -from typing import Any - -current_path = os.path.dirname(__file__) -root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir)) -if root_path not in sys.path: - sys.path.insert(0, root_path) -from samples.utils.user_functions import user_functions - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - - -# When using FunctionTool with ToolSet in agent creation, the tool call events are handled inside the create_stream -# method and functions gets automatically called by default. -class MyEventHandler(AgentEventHandler): - - def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - print(f"Text delta received: {delta.text}") - - def on_thread_message(self, message: "ThreadMessage") -> None: - print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") - - def on_thread_run(self, run: "ThreadRun") -> None: - print(f"ThreadRun status: {run.status}") - - if run.status == "failed": - print(f"Run failed. Error: {run.last_error}") - - def on_run_step(self, step: "RunStep") -> None: - print(f"RunStep type: {step.type}, Status: {step.status}") - - def on_error(self, data: str) -> None: - print(f"An error occurred. 
Data: {data}") - - def on_done(self) -> None: - print("Stream completed.") - - def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - -with agents_client: - # [START create_agent_with_function_tool] - functions = FunctionTool(user_functions) - toolset = ToolSet() - toolset.add(functions) - agents_client.enable_auto_function_calls(toolset) - - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - toolset=toolset, - ) - # [END create_agent_with_function_tool] - print(f"Created agent, ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID {thread.id}") - - message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content="Hello, send an email with the datetime and weather information in New York? Also let me know the details", - ) - print(f"Created message, message ID {message.id}") - - with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler()) as stream: - stream.until_done() - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_bing_grounding.py b/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_bing_grounding.py deleted file mode 100644 index cbb500367ad2..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_bing_grounding.py +++ /dev/null @@ -1,116 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) 
Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use Agent operations with the Bing grounding - tool, and iteration in streaming. It uses a synchronous client. - -USAGE: - python sample_agents_stream_iteration_with_bing_grounding.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. - 3) AZURE_BING_CONNECTION_ID - The ID of the Bing connection, as found in the "Connected resources" tab - in your Azure AI Foundry project. -""" - -import os -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import AgentStreamEvent, RunStepDeltaChunk -from azure.ai.agents.models import ( - MessageDeltaChunk, - RunStep, - ThreadMessage, - ThreadRun, - BingGroundingTool, - MessageRole, - MessageDeltaTextContent, - MessageDeltaTextUrlCitationAnnotation, -) -from azure.identity import DefaultAzureCredential - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - bing_connection_id = os.environ["AZURE_BING_CONNECTION_ID"] - bing = BingGroundingTool(connection_id=bing_connection_id) - print(f"Bing Connection ID: {bing_connection_id}") - - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - tools=bing.definitions, - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID {thread.id}") - - message = agents_client.messages.create( - thread_id=thread.id, 
role=MessageRole.USER, content="How does wikipedia explain Euler's Identity?" - ) - print(f"Created message, message ID {message.id}") - - with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id) as stream: - - for event_type, event_data, _ in stream: - - if isinstance(event_data, MessageDeltaChunk): - print(f"Text delta received: {event_data.text}") - if event_data.delta.content and isinstance(event_data.delta.content[0], MessageDeltaTextContent): - delta_text_content = event_data.delta.content[0] - if delta_text_content.text and delta_text_content.text.annotations: - for delta_annotation in delta_text_content.text.annotations: - if isinstance(delta_annotation, MessageDeltaTextUrlCitationAnnotation): - print( - f"URL citation delta received: [{delta_annotation.url_citation.title}]({delta_annotation.url_citation.url})" - ) - - elif isinstance(event_data, RunStepDeltaChunk): - print(f"RunStepDeltaChunk received. ID: {event_data.id}.") - - elif isinstance(event_data, ThreadMessage): - print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") - - elif isinstance(event_data, ThreadRun): - print(f"ThreadRun status: {event_data.status}") - - if event_data.status == "failed": - print(f"Run failed. Error: {event_data.last_error}") - - elif isinstance(event_data, RunStep): - print(f"RunStep type: {event_data.type}, Status: {event_data.status}") - - elif event_type == AgentStreamEvent.ERROR: - print(f"An error occurred. 
Data: {event_data}") - - elif event_type == AgentStreamEvent.DONE: - print("Stream completed.") - - else: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - response_message = agents_client.messages.get_last_message_by_role(thread_id=thread.id, role=MessageRole.AGENT) - if response_message: - for text_message in response_message.text_messages: - print(f"Agent response: {text_message.text.value}") - for annotation in response_message.url_citation_annotations: - print(f"URL Citation: [{annotation.url_citation.title}]({annotation.url_citation.url})") diff --git a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_file_search.py b/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_file_search.py deleted file mode 100644 index dd767c3285e3..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_file_search.py +++ /dev/null @@ -1,110 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with file search tools and iteration in streaming from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_stream_iteration_with_file_search.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. 
-""" - -import os -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import AgentStreamEvent, FileSearchTool, RunStepDeltaChunk -from azure.ai.agents.models import MessageDeltaChunk, RunStep, ThreadMessage, ThreadRun, FilePurpose, ListSortOrder -from azure.identity import DefaultAzureCredential - -asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - - # Upload file and create vector store - file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) - print(f"Uploaded file, file ID: {file.id}") - - vector_store = agents_client.vector_stores.create_and_poll(file_ids=[file.id], name="my_vectorstore") - print(f"Created vector store, vector store ID: {vector_store.id}") - - # Create file search tool with resources followed by creating agent - file_search = FileSearchTool(vector_store_ids=[vector_store.id]) - - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="Hello, you are helpful agent and can search information from uploaded files", - tools=file_search.definitions, - tool_resources=file_search.resources, - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID {thread.id}") - - message = agents_client.messages.create( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" - ) - print(f"Created message, message ID {message.id}") - - with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id) as stream: - - for event_type, event_data, _ in stream: - - if isinstance(event_data, MessageDeltaChunk): - print(f"Text delta received: {event_data.text}") - - elif isinstance(event_data, RunStepDeltaChunk): - print(f"RunStepDeltaChunk received. 
ID: {event_data.id}.") - - elif isinstance(event_data, ThreadMessage): - print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") - for annotation in event_data.file_citation_annotations: - print( - f"Citation {annotation.text} from file ID: {annotation.file_citation.file_id}, start index: {annotation.start_index}, end index: {annotation.end_index}" - ) - - elif isinstance(event_data, ThreadRun): - print(f"ThreadRun status: {event_data.status}") - - if event_data.status == "failed": - print(f"Run failed. Error: {event_data.last_error}") - - elif isinstance(event_data, RunStep): - print(f"RunStep type: {event_data.type}, Status: {event_data.status}") - - elif event_type == AgentStreamEvent.ERROR: - print(f"An error occurred. Data: {event_data}") - - elif event_type == AgentStreamEvent.DONE: - print("Stream completed.") - - else: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_toolset.py deleted file mode 100644 index c2f58bb1f005..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_toolset.py +++ /dev/null @@ -1,107 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with toolset and iteration in streaming from - the Azure Agents service using a synchronous client. 
- -USAGE: - python sample_agents_stream_iteration_with_toolset.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" - -import os, sys -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import AgentStreamEvent, RunStepDeltaChunk -from azure.ai.agents.models import ( - MessageDeltaChunk, - ListSortOrder, - RunStep, - ThreadMessage, - ThreadRun, -) -from azure.ai.agents.models import FunctionTool, ToolSet -from azure.identity import DefaultAzureCredential - -current_path = os.path.dirname(__file__) -root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir)) -if root_path not in sys.path: - sys.path.insert(0, root_path) -from samples.utils.user_functions import user_functions - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -functions = FunctionTool(user_functions) -toolset = ToolSet() -toolset.add(functions) - -with agents_client: - agents_client.enable_auto_function_calls(toolset) - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - toolset=toolset, - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID {thread.id}") - - message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, what's the time?") - print(f"Created message, message ID {message.id}") - - with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id) as stream: - - for event_type, event_data, _ in stream: - - if 
isinstance(event_data, MessageDeltaChunk): - print(f"Text delta received: {event_data.text}") - - elif isinstance(event_data, RunStepDeltaChunk): - print(f"RunStepDeltaChunk received. ID: {event_data.id}.") - - elif isinstance(event_data, ThreadMessage): - print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") - - elif isinstance(event_data, ThreadRun): - print(f"ThreadRun status: {event_data.status}") - - if event_data.status == "failed": - print(f"Run failed. Error: {event_data.last_error}") - - elif isinstance(event_data, RunStep): - print(f"RunStep type: {event_data.type}, Status: {event_data.status}") - - elif event_type == AgentStreamEvent.ERROR: - print(f"An error occurred. Data: {event_data}") - - elif event_type == AgentStreamEvent.DONE: - print("Stream completed.") - - else: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_with_base_override_eventhandler.py b/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_with_base_override_eventhandler.py deleted file mode 100644 index bbd0d688b3b9..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_with_base_override_eventhandler.py +++ /dev/null @@ -1,105 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to override the base event handler and parse the events and iterate through them - In your use case, you might not want to write the iteration code similar to sample_agents_stream_iteration_async.py. - If you have multiple places to call stream, you might find the iteration code cumbersome. - This example shows how to override the base event handler, parse the events, and iterate through them, which can be reused in multiple stream calls to help keep the code clean. - -USAGE: - python sample_agents_stream_with_base_override_eventhandler.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" -import json -from typing import Generator, Optional - -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import ( - MessageDeltaChunk, - MessageDeltaTextContent, - ListSortOrder, -) -from azure.ai.agents.models import AgentStreamEvent, BaseAgentEventHandler -from azure.identity import DefaultAzureCredential - -import os - - -# Our goal is to parse the event data in a string and return the chunk in text for each iteration. -# Because we want the iteration to be a string, we define str as the generic type for BaseAsyncAgentEventHandler -# and override the _process_event method to return a string. -# The get_stream_chunks method is defined to return the chunks as strings because the iteration is a string. 
-class MyEventHandler(BaseAgentEventHandler[Optional[str]]): - - def _process_event(self, event_data_str: str) -> Optional[str]: # type: ignore[return] - event_lines = event_data_str.strip().split("\n") - event_type: Optional[str] = None - event_data = "" - for line in event_lines: - if line.startswith("event:"): - event_type = line.split(":", 1)[1].strip() - elif line.startswith("data:"): - event_data = line.split(":", 1)[1].strip() - - if not event_type: - raise ValueError("Event type not specified in the event data.") - - if event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA.value: - - event_obj: MessageDeltaChunk = MessageDeltaChunk(**json.loads(event_data)) - - for content_part in event_obj.delta.content: - if isinstance(content_part, MessageDeltaTextContent): - if content_part.text is not None: - return content_part.text.value - return None - - def get_stream_chunks(self) -> Generator[str, None, None]: - for chunk in self: - if chunk: - yield chunk - - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are helpful agent" - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID {thread.id}") - - message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") - print(f"Created message, message ID {message.id}") - - with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler()) as stream: - for chunk in stream.get_stream_chunks(): - print(chunk) - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - 
print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_async_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_async_with_azure_monitor_tracing.py deleted file mode 100644 index 2f656421be26..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_async_with_azure_monitor_tracing.py +++ /dev/null @@ -1,87 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use basic agent operations from - the Azure Agents service using a asynchronous client with Azure Monitor tracing. - View the results in the "Tracing" tab in your Azure AI Foundry project page. - -USAGE: - python sample_agents_basics_async_with_azure_monitor_tracing.py - - Before running the sample: - - pip install azure-ai-agents azure-identity opentelemetry-sdk azure-monitor-opentelemetry aiohttp - - Set these environment variables with your own values: - * PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat - messages, which may contain personal data. False by default. - * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat - messages, which may contain personal data. False by default. - * APPLICATIONINSIGHTS_CONNECTION_STRING - Set to the connection string of your Application Insights resource. - This is used to send telemetry data to Azure Monitor. You can also get the connection string programmatically - from AIProjectClient using the `telemetry.get_connection_string` method. 
A code sample showing how to do this - can be found in the `sample_telemetry_async.py` file in the azure-ai-projects telemetry samples. -""" -import asyncio -import time -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import ListSortOrder, MessageTextContent -from azure.identity.aio import DefaultAzureCredential -from opentelemetry import trace -import os -from azure.monitor.opentelemetry import configure_azure_monitor - -scenario = os.path.basename(__file__) -tracer = trace.get_tracer(__name__) - - -async def main() -> None: - - async with DefaultAzureCredential() as creds: - agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=creds, - ) - - # Enable Azure Monitor tracing - application_insights_connection_string = os.environ["APPLICATIONINSIGHTS_CONNECTION_STRING"] - configure_azure_monitor(connection_string=application_insights_connection_string) - - with tracer.start_as_current_span(scenario): - async with agents_client: - agent = await agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = await agents_client.messages.create( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) - print(f"Created message, message ID: {message.id}") - - run = await agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run completed with status: {run.status}") - - await agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) 
diff --git a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_async_with_console_tracing.py b/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_async_with_console_tracing.py deleted file mode 100644 index 37c2cc42b83a..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_async_with_console_tracing.py +++ /dev/null @@ -1,93 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use basic agent operations from - the Azure Agents service using a asynchronous client with tracing to console. - -USAGE: - python sample_agents_basics_async_with_console_tracing.py - - Before running the sample: - - pip install azure-ai-agents azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry aiohttp - - If you want to export telemetry to OTLP endpoint (such as Aspire dashboard - https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) - install: - - pip install opentelemetry-exporter-otlp-proto-grpc - - Set these environment variables with your own values: - * PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat - messages, which may contain personal data. False by default. 
-""" -import asyncio -import time -import sys -from azure.core.settings import settings - -settings.tracing_implementation = "opentelemetry" -from opentelemetry import trace -from opentelemetry.sdk.trace import TracerProvider -from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter -from azure.ai.agents.aio import AgentsClient -from azure.ai.agents.models import ListSortOrder, MessageTextContent -from azure.identity.aio import DefaultAzureCredential -from opentelemetry import trace -import os -from azure.ai.agents.telemetry import AIAgentsInstrumentor - -# Setup tracing to console -# Requires opentelemetry-sdk -span_exporter = ConsoleSpanExporter() -tracer_provider = TracerProvider() -tracer_provider.add_span_processor(SimpleSpanProcessor(span_exporter)) -trace.set_tracer_provider(tracer_provider) -tracer = trace.get_tracer(__name__) - -AIAgentsInstrumentor().instrument() - -scenario = os.path.basename(__file__) -tracer = trace.get_tracer(__name__) - - -@tracer.start_as_current_span(__file__) -async def main() -> None: - - async with DefaultAzureCredential() as creds: - async with AgentsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=creds) as agent_client: - - agent = await agent_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are helpful agent" - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await agent_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = await agent_client.messages.create( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) - print(f"Created message, message ID: {message.id}") - - run = await agent_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run completed with status: {run.status}") - - await agent_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agent_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - 
async for msg in messages: - last_part = msg.content[-1] - if isinstance(last_part, MessageTextContent): - print(f"{msg.role}: {last_part.text.value}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_with_azure_monitor_tracing.py deleted file mode 100644 index 0633bdfd9893..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_with_azure_monitor_tracing.py +++ /dev/null @@ -1,79 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use basic agent operations from - the Azure Agents service using a synchronous client with Azure Monitor tracing. - View the results in the "Tracing" tab in your Azure AI Foundry project page. - -USAGE: - python sample_agents_basics_with_azure_monitor_tracing.py - - Before running the sample: - - pip install azure-ai-agents azure-identity azure-monitor-opentelemetry - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. - 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat - messages, which may contain personal data. False by default. - 4) APPLICATIONINSIGHTS_CONNECTION_STRING - Set to the connection string of your Application Insights resource. - This is used to send telemetry data to Azure Monitor. 
You can also get the connection string programmatically - from AIProjectClient using the `telemetry.get_connection_string` method. A code sample showing how to do this - can be found in the `sample_telemetry.py` file in the azure-ai-projects telemetry samples. -""" - -import os -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import ListSortOrder -from azure.identity import DefaultAzureCredential - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -# [START enable_tracing] -from opentelemetry import trace -from azure.monitor.opentelemetry import configure_azure_monitor - -# Enable Azure Monitor tracing -application_insights_connection_string = os.environ["APPLICATIONINSIGHTS_CONNECTION_STRING"] -configure_azure_monitor(connection_string=application_insights_connection_string) - -scenario = os.path.basename(__file__) -tracer = trace.get_tracer(__name__) - -with tracer.start_as_current_span(scenario): - with agents_client: - # [END enable_tracing] - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are helpful agent" - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = agents_client.messages.create( - thread_id=thread.id, role="user", content="Hello, tell me a hilarious joke" - ) - print(f"Created message, message ID: {message.id}") - - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run completed with status: {run.status}") - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git 
a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_with_console_tracing.py b/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_with_console_tracing.py deleted file mode 100644 index 294e4201a14c..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_with_console_tracing.py +++ /dev/null @@ -1,85 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use basic agent operations from - the Azure Agents service using a synchronous client with tracing to console. - -USAGE: - python sample_agents_basics_with_console_tracing.py - - Before running the sample: - - pip install azure-ai-agents azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry - - If you want to export telemetry to OTLP endpoint (such as Aspire dashboard - https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) - install: - - pip install opentelemetry-exporter-otlp-proto-grpc - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. - 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat - messages, which may contain personal data. False by default. -""" - -import os, time -from azure.core.settings import settings - -settings.tracing_implementation = "opentelemetry" -# Install opentelemetry with command "pip install opentelemetry-sdk". 
-from opentelemetry import trace -from opentelemetry.sdk.trace import TracerProvider -from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import ListSortOrder -from azure.identity import DefaultAzureCredential -from azure.ai.agents.telemetry import AIAgentsInstrumentor - -# Setup tracing to console -# Requires opentelemetry-sdk -span_exporter = ConsoleSpanExporter() -tracer_provider = TracerProvider() -tracer_provider.add_span_processor(SimpleSpanProcessor(span_exporter)) -trace.set_tracer_provider(tracer_provider) -tracer = trace.get_tracer(__name__) - -AIAgentsInstrumentor().instrument() - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -scenario = os.path.basename(__file__) -with tracer.start_as_current_span(scenario): - with agents_client: - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are helpful agent" - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") - print(f"Created message, message ID: {message.id}") - - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run completed with status: {run.status}") - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_with_console_tracing_custom_attributes.py 
b/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_with_console_tracing_custom_attributes.py deleted file mode 100644 index 2484a390677c..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_basics_with_console_tracing_custom_attributes.py +++ /dev/null @@ -1,113 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use basic agent operations from - the Azure Agents service using a synchronous client with tracing to console and adding - custom attributes to the span. - -USAGE: - python sample_agents_basics_with_console_tracing_custom_attributes.py - - Before running the sample: - - pip install azure-ai-agents azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry - - If you want to export telemetry to OTLP endpoint (such as Aspire dashboard - https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) - install: - - pip install opentelemetry-exporter-otlp-proto-grpc - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. - 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat - messages, which may contain personal data. False by default. -""" - -import os, sys, time -from typing import cast -from azure.core.settings import settings - -settings.tracing_implementation = "opentelemetry" -# Install opentelemetry with command "pip install opentelemetry-sdk". 
-from opentelemetry import trace -from opentelemetry.sdk.trace import TracerProvider, SpanProcessor, ReadableSpan, Span -from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import ListSortOrder -from azure.identity import DefaultAzureCredential -from azure.ai.agents.telemetry import AIAgentsInstrumentor - - -# Define the custom span processor that is used for adding the custom -# attributes to spans when they are started. -class CustomAttributeSpanProcessor(SpanProcessor): - def __init__(self): - pass - - def on_start(self, span: Span, parent_context=None): - # Add this attribute to all spans - span.set_attribute("trace_sample.sessionid", "123") - - # Add another attribute only to create_message spans - if span.name == "create_message": - span.set_attribute("trace_sample.message.context", "abc") - - def on_end(self, span: ReadableSpan): - # Clean-up logic can be added here if necessary - pass - - -# Setup tracing to console -# Requires opentelemetry-sdk -span_exporter = ConsoleSpanExporter() -tracer_provider = TracerProvider() -tracer_provider.add_span_processor(SimpleSpanProcessor(span_exporter)) -trace.set_tracer_provider(tracer_provider) -tracer = trace.get_tracer(__name__) - -AIAgentsInstrumentor().instrument() - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -# Add the custom span processor to the global tracer provider -provider = cast(TracerProvider, trace.get_tracer_provider()) -provider.add_span_processor(CustomAttributeSpanProcessor()) - -scenario = os.path.basename(__file__) -tracer = trace.get_tracer(__name__) - -with tracer.start_as_current_span(scenario): - with agents_client: - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are helpful agent" - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = 
agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") - print(f"Created message, message ID: {message.id}") - - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run completed with status: {run.status}") - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py deleted file mode 100644 index d1549dc23851..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py +++ /dev/null @@ -1,115 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with an event handler in streaming from - the Azure Agents service using a synchronous client with Azure Monitor tracing. - View the results in the "Tracing" tab in your Azure AI Foundry project page. - -USAGE: - python sample_agents_stream_eventhandler_with_azure_monitor_tracing.py - - Before running the sample: - - pip install azure-ai-agents azure-identity opentelemetry-sdk azure-monitor-opentelemetry - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. 
- 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. - 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat - messages, which may contain personal data. False by default. - 4) APPLICATIONINSIGHTS_CONNECTION_STRING - Set to the connection string of your Application Insights resource. - This is used to send telemetry data to Azure Monitor. You can also get the connection string programmatically - from AIProjectClient using the `telemetry.get_connection_string` method. A code sample showing how to do this - can be found in the `sample_telemetry.py` file in the azure-ai-projects telemetry samples. -""" - -import os -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import ( - AgentEventHandler, - MessageDeltaChunk, - ListSortOrder, - ThreadMessage, - ThreadRun, - RunStep, -) -from typing import Any -from opentelemetry import trace -from azure.monitor.opentelemetry import configure_azure_monitor - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - - -class MyEventHandler(AgentEventHandler): - def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - print(f"Text delta received: {delta.text}") - - def on_thread_message(self, message: "ThreadMessage") -> None: - if len(message.content): - print( - f"ThreadMessage created. ID: {message.id}, " - f"Status: {message.status}, Content: {message.content[0].as_dict()}" - ) - else: - print(f"ThreadMessage created. 
ID: {message.id}, " f"Status: {message.status}") - - def on_thread_run(self, run: "ThreadRun") -> None: - print(f"ThreadRun status: {run.status}") - - def on_run_step(self, step: "RunStep") -> None: - print(f"RunStep type: {step.type}, Status: {step.status}") - - def on_error(self, data: str) -> None: - print(f"An error occurred. Data: {data}") - - def on_done(self) -> None: - print("Stream completed.") - - def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - -# Enable Azure Monitor tracing -application_insights_connection_string = os.environ["APPLICATIONINSIGHTS_CONNECTION_STRING"] -configure_azure_monitor(connection_string=application_insights_connection_string) - -scenario = os.path.basename(__file__) -tracer = trace.get_tracer(__name__) - -with tracer.start_as_current_span(scenario): - with agents_client: - # Create an agent and run stream with event handler - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are a helpful agent" - ) - print(f"Created agent, agent ID {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID {thread.id}") - - message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") - print(f"Created message, message ID {message.id}") - - with agents_client.runs.stream( - thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler() - ) as stream: - stream.until_done() - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_stream_eventhandler_with_console_tracing.py 
b/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_stream_eventhandler_with_console_tracing.py deleted file mode 100644 index 9dcc0b42456b..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_stream_eventhandler_with_console_tracing.py +++ /dev/null @@ -1,130 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with an event handler in streaming from - the Azure Agents service using a synchronous client with tracing to console. - -USAGE: - python sample_agents_stream_eventhandler_with_console_tracing.py - - Before running the sample: - - pip install azure-ai-agents azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry - - If you want to export telemetry to OTLP endpoint (such as Aspire dashboard - https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) - install: - - pip install opentelemetry-exporter-otlp-proto-grpc - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. - 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat - messages, which may contain personal data. False by default. 
-""" - -import os, sys -from azure.core.settings import settings - -settings.tracing_implementation = "opentelemetry" -from opentelemetry import trace -from opentelemetry.sdk.trace import TracerProvider -from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import ( - AgentEventHandler, - MessageDeltaChunk, - ListSortOrder, - ThreadMessage, - ThreadRun, - RunStep, -) -from typing import Any -from azure.ai.agents.telemetry import AIAgentsInstrumentor - -# Setup tracing to console -# Requires opentelemetry-sdk -span_exporter = ConsoleSpanExporter() -tracer_provider = TracerProvider() -tracer_provider.add_span_processor(SimpleSpanProcessor(span_exporter)) -trace.set_tracer_provider(tracer_provider) -tracer = trace.get_tracer(__name__) - -AIAgentsInstrumentor().instrument() - -scenario = os.path.basename(__file__) -tracer = trace.get_tracer(__name__) - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - - -class MyEventHandler(AgentEventHandler): - def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - print(f"Text delta received: {delta.text}") - - def on_thread_message(self, message: "ThreadMessage") -> None: - if len(message.content): - print( - f"ThreadMessage created. ID: {message.id}, " - f"Status: {message.status}, Content: {message.content[0].as_dict()}" - ) - else: - print(f"ThreadMessage created. ID: {message.id}, " f"Status: {message.status}") - - def on_thread_run(self, run: "ThreadRun") -> None: - print(f"ThreadRun status: {run.status}") - - def on_run_step(self, step: "RunStep") -> None: - print(f"RunStep type: {step.type}, Status: {step.status}") - - def on_error(self, data: str) -> None: - print(f"An error occurred. 
Data: {data}") - - def on_done(self) -> None: - print("Stream completed.") - - def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - -scenario = os.path.basename(__file__) -tracer = trace.get_tracer(__name__) - -with tracer.start_as_current_span(scenario): - with agents_client: - # Create an agent and run stream with event handler - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are a helpful agent" - ) - print(f"Created agent, agent ID {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID {thread.id}") - - message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") - print(f"Created message, message ID {message.id}") - - with agents_client.runs.stream( - thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler() - ) as stream: - stream.until_done() - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_toolset_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_toolset_with_azure_monitor_tracing.py deleted file mode 100644 index ccc8168c622a..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_toolset_with_azure_monitor_tracing.py +++ /dev/null @@ -1,128 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with toolset from - the Azure Agents service using a synchronous client with Azure Monitor tracing. - View the results in the "Tracing" tab in your Azure AI Foundry project page. - -USAGE: - python sample_agents_toolset_with_azure_monitor_tracing.py - - Before running the sample: - - pip install azure-ai-agents azure-identity opentelemetry-sdk azure-monitor-opentelemetry - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. - 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat - messages, which may contain personal data. False by default. - 4) APPLICATIONINSIGHTS_CONNECTION_STRING - Set to the connection string of your Application Insights resource. - This is used to send telemetry data to Azure Monitor. You can also get the connection string programmatically - from AIProjectClient using the `telemetry.get_connection_string` method. A code sample showing how to do this - can be found in the `sample_telemetry.py` file in the azure-ai-projects telemetry samples. 
-""" -from typing import Any, Callable, Set - -import os, time, json -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import ( - FunctionTool, - ToolSet, - ListSortOrder, -) -from opentelemetry import trace -from azure.monitor.opentelemetry import configure_azure_monitor -from azure.ai.agents.telemetry import trace_function - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -# Enable Azure Monitor tracing -application_insights_connection_string = os.environ["APPLICATIONINSIGHTS_CONNECTION_STRING"] -configure_azure_monitor(connection_string=application_insights_connection_string) - -scenario = os.path.basename(__file__) -tracer = trace.get_tracer(__name__) - - -# The trace_func decorator will trace the function call and enable adding additional attributes -# to the span in the function implementation. Note that this will trace the function parameters and their values. -@trace_function() -def fetch_weather(location: str) -> str: - """ - Fetches the weather information for the specified location. - - :param location (str): The location to fetch weather for. - :return: Weather information as a JSON string. - :rtype: str - """ - # In a real-world scenario, you'd integrate with a weather API. - # Here, we'll mock the response. 
- mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} - - # Adding attributes to the current span - span = trace.get_current_span() - span.set_attribute("requested_location", location) - - weather = mock_weather_data.get(location, "Weather data not available for this location.") - weather_json = json.dumps({"weather": weather}) - return weather_json - - -# Statically defined user functions for fast reference -user_functions: Set[Callable[..., Any]] = { - fetch_weather, -} - -# Initialize function tool with user function -functions = FunctionTool(functions=user_functions) -toolset = ToolSet() -toolset.add(functions) - -# To enable tool calls executed automatically -agents_client.enable_auto_function_calls(toolset) - -with tracer.start_as_current_span(scenario): - with agents_client: - # Create an agent and run user's request with function calls - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - toolset=toolset, - ) - print(f"Created agent, ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, ID: {thread.id}") - - message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content="Hello, what is the weather in New York?", - ) - print(f"Created message, ID: {message.id}") - - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id, toolset=toolset) - print(f"Run completed with status: {run.status}") - - # Delete the agent when done - agents_client.delete_agent(agent.id) - print("Deleted agent") - - # Fetch and log all messages - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git 
a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_toolset_with_console_tracing.py b/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_toolset_with_console_tracing.py deleted file mode 100644 index fd7b65e3d990..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_telemetry/sample_agents_toolset_with_console_tracing.py +++ /dev/null @@ -1,141 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with toolset from - the Azure Agents service using a synchronous client with tracing to console. - -USAGE: - python sample_agents_toolset_with_console_tracing.py - - Before running the sample: - - pip install azure-ai-agents azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry - - If you want to export telemetry to OTLP endpoint (such as Aspire dashboard - https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) - install: - - pip install opentelemetry-exporter-otlp-proto-grpc - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. - 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat - messages, which may contain personal data. False by default. -""" -from typing import Any, Callable, Set - -import os, sys, time, json -from azure.core.settings import settings - -settings.tracing_implementation = "opentelemetry" -# Install opentelemetry with command "pip install opentelemetry-sdk". 
-from opentelemetry import trace -from opentelemetry.sdk.trace import TracerProvider -from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import ( - FunctionTool, - ToolSet, - ListSortOrder, -) -from azure.ai.agents.telemetry import trace_function -from azure.ai.agents.telemetry import AIAgentsInstrumentor - -# Setup tracing to console -# Requires opentelemetry-sdk -span_exporter = ConsoleSpanExporter() -tracer_provider = TracerProvider() -tracer_provider.add_span_processor(SimpleSpanProcessor(span_exporter)) -trace.set_tracer_provider(tracer_provider) -tracer = trace.get_tracer(__name__) - -AIAgentsInstrumentor().instrument() - -scenario = os.path.basename(__file__) -tracer = trace.get_tracer(__name__) - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - - -# The trace_func decorator will trace the function call and enable adding additional attributes -# to the span in the function implementation. Note that this will trace the function parameters and their values. -@trace_function() -def fetch_weather(location: str) -> str: - """ - Fetches the weather information for the specified location. - - :param location (str): The location to fetch weather for. - :return: Weather information as a JSON string. - :rtype: str - """ - # In a real-world scenario, you'd integrate with a weather API. - # Here, we'll mock the response. 
- mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} - - # Adding attributes to the current span - span = trace.get_current_span() - span.set_attribute("requested_location", location) - - weather = mock_weather_data.get(location, "Weather data not available for this location.") - weather_json = json.dumps({"weather": weather}) - return weather_json - - -# Statically defined user functions for fast reference -user_functions: Set[Callable[..., Any]] = { - fetch_weather, -} - -# Initialize function tool with user function -functions = FunctionTool(functions=user_functions) -toolset = ToolSet() -toolset.add(functions) - -# To enable tool calls executed automatically -agents_client.enable_auto_function_calls(toolset) - -with tracer.start_as_current_span(scenario): - with agents_client: - # Create an agent and run user's request with function calls - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - toolset=toolset, - ) - print(f"Created agent, ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, ID: {thread.id}") - - message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content="Hello, what is the weather in New York?", - ) - print(f"Created message, ID: {message.id}") - - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id, toolset=toolset) - print(f"Run completed with status: {run.status}") - - # Delete the agent when done - agents_client.delete_agent(agent.id) - print("Deleted agent") - - # Fetch and log all messages - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/__init__.py 
b/sdk/ai/azure-ai-agents/samples/agents_tools/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_azure_ai_search.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_azure_ai_search.py deleted file mode 100644 index ee1a88e8435f..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_azure_ai_search.py +++ /dev/null @@ -1,128 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with the - Azure AI Search tool from the Azure agents service using a synchronous client. - -PREREQUISITES: - You will need an Azure AI Search Resource. - If you already have one, you must create an agent that can use an existing Azure AI Search index: - https://learn.microsoft.com/azure/ai-services/agents/how-to/tools/azure-ai-search?tabs=azurecli%2Cpython&pivots=overview-azure-ai-search - - If you do not already have an agent Setup with an Azure AI Search resource, follow the guide for a Standard agent setup: - https://learn.microsoft.com/azure/ai-services/agents/quickstart?pivots=programming-language-python-azure - -USAGE: - python sample_agents_azure_ai_search.py - - Before running the sample: - - pip install azure-ai-projects azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. 
- 3) AI_SEARCH_CONNECTION_NAME - The connection name of the AI Search connection to your Foundry project, - as found under the "Name" column in the "Connected Resources" tab in your Azure AI Foundry project. -""" - -import os -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import AzureAISearchQueryType, AzureAISearchTool, ListSortOrder, MessageRole - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -# [START create_agent_with_azure_ai_search_tool] -conn_id = os.environ["AI_AZURE_AI_CONNECTION_ID"] - -print(conn_id) - -# Initialize agent AI search tool and add the search index connection id -ai_search = AzureAISearchTool( - index_connection_id=conn_id, index_name="sample_index", query_type=AzureAISearchQueryType.SIMPLE, top_k=3, filter="" -) - -# Create agent with AI search tool and process agent run -with agents_client: - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - tools=ai_search.definitions, - tool_resources=ai_search.resources, - ) - # [END create_agent_with_azure_ai_search_tool] - print(f"Created agent, ID: {agent.id}") - - # Create thread for communication - thread = agents_client.threads.create() - print(f"Created thread, ID: {thread.id}") - - # Create message to thread - message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content="What is the temperature rating of the cozynights sleeping bag?", - ) - print(f"Created message, ID: {message.id}") - - # Create and process agent run in thread with tools - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - # Fetch run steps to get the details of the agent run - run_steps = 
agents_client.run_steps.list(thread_id=thread.id, run_id=run.id) - for step in run_steps: - print(f"Step {step['id']} status: {step['status']}") - step_details = step.get("step_details", {}) - tool_calls = step_details.get("tool_calls", []) - - if tool_calls: - print(" Tool calls:") - for call in tool_calls: - print(f" Tool Call ID: {call.get('id')}") - print(f" Type: {call.get('type')}") - - azure_ai_search_details = call.get("azure_ai_search", {}) - if azure_ai_search_details: - print(f" azure_ai_search input: {azure_ai_search_details.get('input')}") - print(f" azure_ai_search output: {azure_ai_search_details.get('output')}") - print() # add an extra newline between steps - - # Delete the agent when done - agents_client.delete_agent(agent.id) - print("Deleted agent") - - # [START populate_references_agent_with_azure_ai_search_tool] - # Fetch and log all messages - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - for message in messages: - if message.role == MessageRole.AGENT and message.url_citation_annotations: - placeholder_annotations = { - annotation.text: f" [see {annotation.url_citation.title}] ({annotation.url_citation.url})" - for annotation in message.url_citation_annotations - } - for message_text in message.text_messages: - message_str = message_text.text.value - for k, v in placeholder_annotations.items(): - message_str = message_str.replace(k, v) - print(f"{message.role}: {message_str}") - else: - for message_text in message.text_messages: - print(f"{message.role}: {message_text.text.value}") - # [END populate_references_agent_with_azure_ai_search_tool] diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_azure_functions.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_azure_functions.py deleted file mode 100644 index 0b30ba59ac26..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_azure_functions.py +++ /dev/null @@ -1,98 +0,0 @@ -# pylint: 
disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use azure function agent operations from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_azure_functions.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. - 3) STORAGE_SERVICE_ENDPONT - the storage service queue endpoint, triggering Azure function. - Please see Getting Started with Azure Functions page for more information on Azure Functions: - https://learn.microsoft.com/azure/azure-functions/functions-get-started -""" - -import os -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import AzureFunctionStorageQueue, AzureFunctionTool, MessageRole -from azure.identity import DefaultAzureCredential - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - - # [START create_agent_with_azure_function_tool] - storage_service_endpoint = os.environ["STORAGE_SERVICE_ENDPONT"] - - azure_function_tool = AzureFunctionTool( - name="foo", - description="Get answers from the foo bot.", - parameters={ - "type": "object", - "properties": { - "query": {"type": "string", "description": "The question to ask."}, - "outputqueueuri": {"type": "string", "description": "The full output queue uri."}, - }, - }, - input_queue=AzureFunctionStorageQueue( - queue_name="azure-function-foo-input", - 
storage_service_endpoint=storage_service_endpoint, - ), - output_queue=AzureFunctionStorageQueue( - queue_name="azure-function-tool-output", - storage_service_endpoint=storage_service_endpoint, - ), - ) - - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="azure-function-agent-foo", - instructions=f"You are a helpful support agent. Use the provided function any time the prompt contains the string 'What would foo say?'. When you invoke the function, ALWAYS specify the output queue uri parameter as '{storage_service_endpoint}/azure-function-tool-output'. Always responds with \"Foo says\" and then the response from the tool.", - tools=azure_function_tool.definitions, - ) - print(f"Created agent, agent ID: {agent.id}") - # [END create_agent_with_azure_function_tool] - - # Create a thread - thread = agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - # Create a message - message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content="What is the most prevalent element in the universe? 
What would foo say?", - ) - print(f"Created message, message ID: {message.id}") - - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - # Get messages from the thread - messages = agents_client.messages.list(thread_id=thread.id) - - # Get the last message from agent - last_msg = agents_client.messages.get_last_message_text_by_role(thread_id=thread.id, role=MessageRole.AGENT) - if last_msg: - print(f"Last Message: {last_msg.text.value}") - - # Delete the agent once done - agents_client.delete_agent(agent.id) diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_bing_custom_search.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_bing_custom_search.py deleted file mode 100644 index 7bda57a11237..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_bing_custom_search.py +++ /dev/null @@ -1,88 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use Agent operations with the Bing Custom Search tool from - the Azure Agents service using a synchronous client. - For more information on the Bing Custom Search tool, see: https://aka.ms/AgentCustomSearchDoc - -USAGE: - python sample_agents_bing_custom_search.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set this environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. 
- 3) BING_CUSTOM_CONNECTION_ID - The ID of the Bing Custom Search connection, in the format of: - /subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace-name}/connections/{connection-name} -""" - -import os -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import BingCustomSearchTool - - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. -# At the moment, it should be in the format ";;;" -# Customer needs to login to Azure subscription via Azure CLI and set the environment variables - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -conn_id = os.environ["BING_CUSTOM_CONNECTION_ID"] - -# Initialize Bing Custom Search tool with connection id and instance name -bing_custom_tool = BingCustomSearchTool(connection_id=conn_id, instance_name="") - -# Create Agent with the Bing Custom Search tool and process Agent run -with agents_client: - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - tools=bing_custom_tool.definitions, - ) - print(f"Created agent, ID: {agent.id}") - - # Create thread for communication - thread = agents_client.threads.create() - print(f"Created thread, ID: {thread.id}") - - # Create message to thread - message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content="How many medals did the USA win in the 2024 summer olympics?", - ) - print(f"Created message, ID: {message.id}") - - # Create and process Agent run in thread with tools - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - # Delete the Agent when done - 
agents_client.delete_agent(agent.id) - print("Deleted agent") - - # Fetch and log all messages - messages = agents_client.messages.list(thread_id=thread.id) - for msg in messages: - if msg.text_messages: - for text_message in msg.text_messages: - print(f"Agent response: {text_message.text.value}") - for annotation in msg.url_citation_annotations: - print(f"URL Citation: [{annotation.url_citation.title}]({annotation.url_citation.url})") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_bing_grounding.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_bing_grounding.py deleted file mode 100644 index c03d484def7e..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_bing_grounding.py +++ /dev/null @@ -1,105 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with the Bing grounding tool from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_bing_grounding.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. 
- 3) AZURE_BING_CONNECTION_ID - The ID of the Bing connection, in the format of: - /subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace-name}/connections/{connection-name} -""" - -import os -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import MessageRole, BingGroundingTool -from azure.identity import DefaultAzureCredential - - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -# [START create_agent_with_bing_grounding_tool] -conn_id = os.environ["AZURE_BING_CONNECTION_ID"] - -# Initialize agent bing tool and add the connection id -bing = BingGroundingTool(connection_id=conn_id) - -# Create agent with the bing tool and process agent run -with agents_client: - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - tools=bing.definitions, - ) - # [END create_agent_with_bing_grounding_tool] - - print(f"Created agent, ID: {agent.id}") - - # Create thread for communication - thread = agents_client.threads.create() - print(f"Created thread, ID: {thread.id}") - - # Create message to thread - message = agents_client.messages.create( - thread_id=thread.id, - role=MessageRole.USER, - content="How does wikipedia explain Euler's Identity?", - ) - print(f"Created message, ID: {message.id}") - - # Create and process agent run in thread with tools - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - # Fetch run steps to get the details of the agent run - run_steps = agents_client.run_steps.list(thread_id=thread.id, run_id=run.id) - for step in run_steps: - print(f"Step {step['id']} status: {step['status']}") - step_details = step.get("step_details", {}) - 
tool_calls = step_details.get("tool_calls", []) - - if tool_calls: - print(" Tool calls:") - for call in tool_calls: - print(f" Tool Call ID: {call.get('id')}") - print(f" Type: {call.get('type')}") - - bing_grounding_details = call.get("bing_grounding", {}) - if bing_grounding_details: - print(f" Bing Grounding ID: {bing_grounding_details.get('requesturl')}") - - print() # add an extra newline between steps - - # Delete the agent when done - agents_client.delete_agent(agent.id) - print("Deleted agent") - - # Print the Agent's response message with optional citation - response_message = agents_client.messages.get_last_message_by_role(thread_id=thread.id, role=MessageRole.AGENT) - if response_message: - for text_message in response_message.text_messages: - print(f"Agent response: {text_message.text.value}") - for annotation in response_message.url_citation_annotations: - print(f"URL Citation: [{annotation.url_citation.title}]({annotation.url_citation.url})") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_code_interpreter.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_code_interpreter.py deleted file mode 100644 index 16efeae47cc8..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_code_interpreter.py +++ /dev/null @@ -1,110 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with code interpreter from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_code_interpreter.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. 
- 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" - -import os -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import CodeInterpreterTool -from azure.ai.agents.models import FilePurpose, MessageRole -from azure.identity import DefaultAzureCredential -from pathlib import Path - -asset_file_path = os.path.abspath( - os.path.join(os.path.dirname(__file__), "../assets/synthetic_500_quarterly_results.csv") -) - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - - # Upload a file and wait for it to be processed - # [START upload_file_and_create_agent_with_code_interpreter] - file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) - print(f"Uploaded file, file ID: {file.id}") - - code_interpreter = CodeInterpreterTool(file_ids=[file.id]) - - # Create agent with code interpreter tool and tools_resources - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=code_interpreter.definitions, - tool_resources=code_interpreter.resources, - ) - # [END upload_file_and_create_agent_with_code_interpreter] - print(f"Created agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - # Create a message - message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", - ) - print(f"Created message, message ID: {message.id}") - - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - 
# Check if you got "Rate limit is exceeded.", then you want to get more quota - print(f"Run failed: {run.last_error}") - - agents_client.files.delete(file.id) - print("Deleted file") - - # [START get_messages_and_save_files] - messages = agents_client.messages.list(thread_id=thread.id) - print(f"Messages: {messages}") - - for msg in messages: - # Save every image file in the message - for img in msg.image_contents: - file_id = img.image_file.file_id - file_name = f"{file_id}_image_file.png" - agents_client.files.save(file_id=file_id, file_name=file_name) - print(f"Saved image file to: {Path.cwd() / file_name}") - - # Print details of every file-path annotation - for ann in msg.file_path_annotations: - print("File Paths:") - print(f" Type: {ann.type}") - print(f" Text: {ann.text}") - print(f" File ID: {ann.file_path.file_id}") - print(f" Start Index: {ann.start_index}") - print(f" End Index: {ann.end_index}") - # [END get_messages_and_save_files] - - last_msg = agents_client.messages.get_last_message_text_by_role(thread_id=thread.id, role=MessageRole.AGENT) - if last_msg: - print(f"Last Message: {last_msg.text.value}") - - agents_client.delete_agent(agent.id) - print("Deleted agent") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_code_interpreter_attachment_enterprise_search.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_code_interpreter_attachment_enterprise_search.py deleted file mode 100644 index db05d687eb9d..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_code_interpreter_attachment_enterprise_search.py +++ /dev/null @@ -1,85 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with code interpreter from - the Azure Agents service using a synchronous client. 
- -USAGE: - python sample_agents_code_interpreter_attachment_enterprise_search.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. - 3) AZURE_BLOB_URI - The URI of the blob storage where the file is uploaded. In the format: - azureml://subscriptions/{subscription-id}/resourcegroups/{resource-group-name}/workspaces/{workspace-name}/datastores/{datastore-name}/paths/{path-to-file} -""" - -import os -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import ( - CodeInterpreterTool, - MessageAttachment, - MessageRole, - VectorStoreDataSource, - VectorStoreDataSourceAssetType, -) -from azure.identity import DefaultAzureCredential - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - - code_interpreter = CodeInterpreterTool() - - # notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=code_interpreter.definitions, - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - # [START upload_file_and_create_message_with_code_interpreter] - # We will upload the local file to Azure and will use it for vector store creation. 
- asset_uri = os.environ["AZURE_BLOB_URI"] - ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) - - # Create a message with the attachment - attachment = MessageAttachment(data_source=ds, tools=code_interpreter.definitions) - message = agents_client.messages.create( - thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] - ) - # [END upload_file_and_create_message_with_code_interpreter] - - print(f"Created message, message ID: {message.id}") - - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - # Check if you got "Rate limit is exceeded.", then you want to get more quota - print(f"Run failed: {run.last_error}") - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - last_msg = agents_client.messages.get_last_message_text_by_role(thread_id=thread.id, role=MessageRole.AGENT) - if last_msg: - print(f"Last Message: {last_msg.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_connected_agent.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_connected_agent.py deleted file mode 100644 index 1ad5f890f32b..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_connected_agent.py +++ /dev/null @@ -1,96 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use Agent operations with the Connected Agent tool from - the Azure Agents service using a synchronous client. 
- -USAGE: - python sample_agents_connected_agent.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" - -import os -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import ConnectedAgentTool, MessageRole -from azure.identity import DefaultAzureCredential - - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -connected_agent_name = "stock_price_bot" - -stock_price_agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name=connected_agent_name, - instructions=( - "Your job is to get the stock price of a company. If asked for the Microsoft stock price, always return $350." 
- ), -) - -# [START create_agent_with_connected_agent_tool] -# Initialize Connected Agent tool with the agent id, name, and description -connected_agent = ConnectedAgentTool( - id=stock_price_agent.id, name=connected_agent_name, description="Gets the stock price of a company" -) - -# Create agent with the Connected Agent tool and process assistant run -with agents_client: - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-assistant", - instructions="You are a helpful assistant, and use the connected agents to get stock prices.", - tools=connected_agent.definitions, - ) - # [END create_agent_with_connected_agent_tool] - - print(f"Created agent, ID: {agent.id}") - - # Create thread for communication - thread = agents_client.threads.create() - print(f"Created thread, ID: {thread.id}") - - # Create message to thread - message = agents_client.messages.create( - thread_id=thread.id, - role=MessageRole.USER, - content="What is the stock price of Microsoft?", - ) - print(f"Created message, ID: {message.id}") - - # Create and process Agent run in thread with tools - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - # Delete the Agent when done - agents_client.delete_agent(agent.id) - print("Deleted agent") - - # Delete the connected Agent when done - agents_client.delete_agent(stock_price_agent.id) - print("Deleted stock price agent") - - # Fetch and log all messages - messages = agents_client.messages.list(thread_id=thread.id) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_enterprise_file_search.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_enterprise_file_search.py deleted file mode 100644 index 
37ed15355117..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_enterprise_file_search.py +++ /dev/null @@ -1,82 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -""" -DESCRIPTION: - This sample demonstrates how to add files to agent during the vector store creation. - -USAGE: - python sample_agents_enterprise_file_search.py - - Before running the sample: - - pip install azure-ai-agents azure-identity azure-ai-ml - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. - 3) AZURE_BLOB_URI - The URI of the blob storage where the file is uploaded. In the format: - azureml://subscriptions/{subscription-id}/resourcegroups/{resource-group-name}/workspaces/{workspace-name}/datastores/{datastore-name}/paths/{path-to-file} -""" - -import os -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import FileSearchTool, ListSortOrder, VectorStoreDataSource, VectorStoreDataSourceAssetType -from azure.identity import DefaultAzureCredential - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - - # [START upload_file_and_create_agent_with_file_search] - # We will upload the local file to Azure and will use it for vector store creation. 
- asset_uri = os.environ["AZURE_BLOB_URI"] - - # Create a vector store with no file and wait for it to be processed - ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) - vector_store = agents_client.vector_stores.create_and_poll(data_sources=[ds], name="sample_vector_store") - print(f"Created vector store, vector store ID: {vector_store.id}") - - # Create a file search tool - file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) - - # Notices that FileSearchTool as tool and tool_resources must be added or the agent unable to search the file - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are helpful agent", - tools=file_search_tool.definitions, - tool_resources=file_search_tool.resources, - ) - # [END upload_file_and_create_agent_with_file_search] - print(f"Created agent, agent ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID: {thread.id}") - - message = agents_client.messages.create( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
- ) - print(f"Created message, message ID: {message.id}") - - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Created run, run ID: {run.id}") - - agents_client.vector_stores.delete(vector_store.id) - print("Deleted vector store") - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_fabric.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_fabric.py deleted file mode 100644 index e60e64ffe8a3..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_fabric.py +++ /dev/null @@ -1,86 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_fabric.py - -DESCRIPTION: - This sample demonstrates how to use Agent operations with the Microsoft Fabric grounding tool from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_fabric.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set this environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. 
- 3) FABRIC_CONNECTION_ID - The ID of the Fabric connection, in the format of: - /subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace-name}/connections/{connection-name} -""" - -import os -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import FabricTool, ListSortOrder - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -# [START create_agent_with_fabric_tool] -conn_id = os.environ["FABRIC_CONNECTION_ID"] - -print(conn_id) - -# Initialize an Agent Fabric tool and add the connection id -fabric = FabricTool(connection_id=conn_id) - -# Create an Agent with the Fabric tool and process an Agent run -with agents_client: - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - tools=fabric.definitions, - ) - # [END create_agent_with_fabric_tool] - print(f"Created Agent, ID: {agent.id}") - - # Create thread for communication - thread = agents_client.threads.create() - print(f"Created thread, ID: {thread.id}") - - # Create message to thread - message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content="", - ) - print(f"Created message, ID: {message.id}") - - # Create and process an Agent run in thread with tools - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - # Delete the Agent when done - agents_client.delete_agent(agent.id) - print("Deleted agent") - - # Fetch and log all messages - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: 
{last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_file_search.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_file_search.py deleted file mode 100644 index 47f6f937e54c..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_file_search.py +++ /dev/null @@ -1,103 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with file searching from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_file_search.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. 
-""" - -import os -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import ( - FileSearchTool, - FilePurpose, - ListSortOrder, -) -from azure.identity import DefaultAzureCredential - -asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/product_info_1.md")) - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - - # Upload file and create vector store - # [START upload_file_create_vector_store_and_agent_with_file_search_tool] - file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) - print(f"Uploaded file, file ID: {file.id}") - - vector_store = agents_client.vector_stores.create_and_poll(file_ids=[file.id], name="my_vectorstore") - print(f"Created vector store, vector store ID: {vector_store.id}") - - # Create file search tool with resources followed by creating agent - file_search = FileSearchTool(vector_store_ids=[vector_store.id]) - - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="Hello, you are helpful agent and can search information from uploaded files", - tools=file_search.definitions, - tool_resources=file_search.resources, - ) - # [END upload_file_create_vector_store_and_agent_with_file_search_tool] - - print(f"Created agent, ID: {agent.id}") - - # Create thread for communication - thread = agents_client.threads.create() - print(f"Created thread, ID: {thread.id}") - - # Create message to thread - message = agents_client.messages.create( - thread_id=thread.id, role="user", content="Hello, what Contoso products do you know?" 
- ) - print(f"Created message, ID: {message.id}") - - # Create and process agent run in thread with tools - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - # Check if you got "Rate limit is exceeded.", then you want to get more quota - print(f"Run failed: {run.last_error}") - - # [START teardown] - # Delete the file when done - agents_client.vector_stores.delete(vector_store.id) - print("Deleted vector store") - - agents_client.files.delete(file_id=file.id) - print("Deleted file") - - # Delete the agent when done - agents_client.delete_agent(agent.id) - print("Deleted agent") - # [END teardown] - - # Fetch and log all messages - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - - # Print last messages from the thread - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_functions.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_functions.py deleted file mode 100644 index 2dee8cd21bba..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_functions.py +++ /dev/null @@ -1,115 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with custom functions from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_functions.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. 
- 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" -import os, time, sys -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import ( - FunctionTool, - ListSortOrder, - RequiredFunctionToolCall, - SubmitToolOutputsAction, - ToolOutput, -) - -current_path = os.path.dirname(__file__) -root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir)) -if root_path not in sys.path: - sys.path.insert(0, root_path) -from samples.utils.user_functions import user_functions - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -# Initialize function tool with user functions -functions = FunctionTool(functions=user_functions) - -with agents_client: - # Create an agent and run user's request with function calls - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - tools=functions.definitions, - ) - print(f"Created agent, ID: {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, ID: {thread.id}") - - message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content="Hello, send an email with the datetime and weather information in New York?", - ) - print(f"Created message, ID: {message.id}") - - run = agents_client.runs.create(thread_id=thread.id, agent_id=agent.id) - print(f"Created run, ID: {run.id}") - - while run.status in ["queued", "in_progress", "requires_action"]: - time.sleep(1) - run = agents_client.runs.get(thread_id=thread.id, run_id=run.id) - - if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): - tool_calls = run.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - print("No tool calls provided - 
cancelling run") - agents_client.runs.cancel(thread_id=thread.id, run_id=run.id) - break - - tool_outputs = [] - for tool_call in tool_calls: - if isinstance(tool_call, RequiredFunctionToolCall): - try: - print(f"Executing tool call: {tool_call}") - output = functions.execute(tool_call) - tool_outputs.append( - ToolOutput( - tool_call_id=tool_call.id, - output=output, - ) - ) - except Exception as e: - print(f"Error executing tool_call {tool_call.id}: {e}") - - print(f"Tool outputs: {tool_outputs}") - if tool_outputs: - agents_client.runs.submit_tool_outputs(thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs) - - print(f"Current run status: {run.status}") - - print(f"Run completed with status: {run.status}") - - # Delete the agent when done - agents_client.delete_agent(agent.id) - print("Deleted agent") - - # Fetch and log all messages - messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_logic_apps.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_logic_apps.py deleted file mode 100644 index 2712ce427b7a..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_logic_apps.py +++ /dev/null @@ -1,132 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agents with Logic Apps to execute the task of sending an email. 
- -PREREQUISITES: - 1) Create a Logic App within the same resource group as your Azure AI Project in Azure Portal - 2) To configure your Logic App to send emails, you must include an HTTP request trigger that is - configured to accept JSON with 'to', 'subject', and 'body'. The guide to creating a Logic App Workflow - can be found here: - https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/agents-logic-apps#create-logic-apps-workflows-for-function-calling - -USAGE: - python sample_agents_logic_apps.py - - Before running the sample: - - pip install azure-ai-agents azure-identity azure-mgmt-logic - - Set this environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. - - Replace the following values in the sample with your own values: - 1) - The name of the Logic App you created. - 2) - The name of the trigger in the Logic App you created (the default name for HTTP - triggers in the Azure Portal is "When_a_HTTP_request_is_received"). - 3) - The email address of the recipient. 
-""" - - -import os -import sys -from typing import Set - -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import ToolSet, FunctionTool -from azure.identity import DefaultAzureCredential - -# Example user function -current_path = os.path.dirname(__file__) -root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir)) -if root_path not in sys.path: - sys.path.insert(0, root_path) -from samples.utils.user_functions import fetch_current_datetime - -# Import AzureLogicAppTool and the function factory from user_logic_apps -from utils.user_logic_apps import AzureLogicAppTool, create_send_email_function - -# [START register_logic_app] - -# Create the agents client -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -# Extract subscription and resource group from the project scope -subscription_id = os.environ["SUBSCRIPTION_ID"] -resource_group = os.environ["resource_group_name"] - -# Logic App details -logic_app_name = "" -trigger_name = "" - -# Create and initialize AzureLogicAppTool utility -logic_app_tool = AzureLogicAppTool(subscription_id, resource_group) -logic_app_tool.register_logic_app(logic_app_name, trigger_name) -print(f"Registered logic app '{logic_app_name}' with trigger '{trigger_name}'.") - -# Create the specialized "send_email_via_logic_app" function for your agent tools -send_email_func = create_send_email_function(logic_app_tool, logic_app_name) - -# Prepare the function tools for the agent -functions_to_use: Set = { - fetch_current_datetime, - send_email_func, # This references the AzureLogicAppTool instance via closure -} -# [END register_logic_app] - -with agents_client: - # Create an agent - functions = FunctionTool(functions=functions_to_use) - toolset = ToolSet() - toolset.add(functions) - - agents_client.enable_auto_function_calls(toolset) - - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - 
name="SendEmailAgent", - instructions="You are a specialized agent for sending emails.", - toolset=toolset, - ) - print(f"Created agent, ID: {agent.id}") - - # Create a thread for communication - thread = agents_client.threads.create() - print(f"Created thread, ID: {thread.id}") - - # Create a message in the thread - message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content="Hello, please send an email to with the date and time in '%Y-%m-%d %H:%M:%S' format.", - ) - print(f"Created message, ID: {message.id}") - - # Create and process an agent run in the thread - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - # Delete the agent when done - agents_client.delete_agent(agent.id) - print("Deleted agent") - - # Fetch and log all messages - messages = agents_client.messages.list(thread_id=thread.id) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_multiple_connected_agents.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_multiple_connected_agents.py deleted file mode 100644 index ae189ad435fc..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_multiple_connected_agents.py +++ /dev/null @@ -1,115 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use Agent operations with the Connected Agent tool from - the Azure Agents service using a synchronous client. 
- -USAGE: - python sample_agents_multiple_connected_agents.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. -""" - -import os -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import ConnectedAgentTool, MessageRole -from azure.identity import DefaultAzureCredential - - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -connected_agent_name = "stock_price_bot" -weather_agent_name = "weather_bot" - -stock_price_agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name=connected_agent_name, - instructions=( - "Your job is to get the stock price of a company. If asked for the Microsoft stock price, always return $350." - ), -) - -weather_agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name=weather_agent_name, - instructions=( - "Your job is to get the weather for a given location. If asked for the weather in Seattle, always return 60 degrees and cloudy." 
- ), -) - -# Initialize Connected Agent tools with the agent id, name, and description -connected_agent = ConnectedAgentTool( - id=stock_price_agent.id, name=connected_agent_name, description="Gets the stock price of a company" -) -connected_weather_agent = ConnectedAgentTool( - id=weather_agent.id, name=weather_agent_name, description="Gets the weather for a given location" -) - -# Create agent with the Connected Agent tool and process assistant run -with agents_client: - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-assistant", - instructions="You are a helpful assistant, and use the connected agents to get stock prices and weather.", - tools=[ - connected_agent.definitions[0], - connected_weather_agent.definitions[0], - ], - ) - # [END create_agent_with_connected_agent_tool] - - print(f"Created agent, ID: {agent.id}") - - # Create thread for communication - thread = agents_client.threads.create() - print(f"Created thread, ID: {thread.id}") - - # Create message to thread - message = agents_client.messages.create( - thread_id=thread.id, - role=MessageRole.USER, - content="What is the stock price of Microsoft and the weather in Seattle?", - ) - print(f"Created message, ID: {message.id}") - - # Create and process Agent run in thread with tools - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - # Delete the Agent when done - agents_client.delete_agent(agent.id) - print("Deleted agent") - - # Delete the connected Agent when done - agents_client.delete_agent(stock_price_agent.id) - print("Deleted stock price agent") - - # Delete the connected Agent when done - agents_client.delete_agent(weather_agent.id) - print("Deleted weather agent") - - # Fetch and log all messages - messages = agents_client.messages.list(thread_id=thread.id) - for msg in messages: - if 
msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_openapi.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_openapi.py deleted file mode 100644 index fa29a238cc4f..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_openapi.py +++ /dev/null @@ -1,121 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with the - OpenAPI tool from the Azure Agents service using a synchronous client. - To learn more about OpenAPI specs, visit https://learn.microsoft.com/openapi - -USAGE: - python sample_agents_openapi.py - - Before running the sample: - - pip install azure-ai-agents azure-identity jsonref - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. 
-""" - -import os -import jsonref -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import OpenApiTool, OpenApiAnonymousAuthDetails - -weather_asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/weather_openapi.json")) - -countries_asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/countries.json")) - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) -# [START create_agent_with_openapi] - -with open(weather_asset_file_path, "r") as f: - openapi_weather = jsonref.loads(f.read()) - -with open(countries_asset_file_path, "r") as f: - openapi_countries = jsonref.loads(f.read()) - -# Create Auth object for the OpenApiTool (note that connection or managed identity auth setup requires additional setup in Azure) -auth = OpenApiAnonymousAuthDetails() - -# Initialize agent OpenApi tool using the read in OpenAPI spec -openapi_tool = OpenApiTool( - name="get_weather", spec=openapi_weather, description="Retrieve weather information for a location", auth=auth -) -openapi_tool.add_definition( - name="get_countries", spec=openapi_countries, description="Retrieve a list of countries", auth=auth -) - -# Create agent with OpenApi tool and process agent run -with agents_client: - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - tools=openapi_tool.definitions, - ) - # [END create_agent_with_openapi] - - print(f"Created agent, ID: {agent.id}") - - # Create thread for communication - thread = agents_client.threads.create() - print(f"Created thread, ID: {thread.id}") - - # Create message to thread - message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content="What's the weather in Seattle and What is the name and population of the country that uses currency with 
abbreviation THB?", - ) - print(f"Created message, ID: {message.id}") - - # Create and process agent run in thread with tools - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - run_steps = agents_client.run_steps.list(thread_id=thread.id, run_id=run.id) - - # Loop through each step - for step in run_steps: - print(f"Step {step['id']} status: {step['status']}") - - # Check if there are tool calls in the step details - step_details = step.get("step_details", {}) - tool_calls = step_details.get("tool_calls", []) - - if tool_calls: - print(" Tool calls:") - for call in tool_calls: - print(f" Tool Call ID: {call.get('id')}") - print(f" Type: {call.get('type')}") - - function_details = call.get("function", {}) - if function_details: - print(f" Function name: {function_details.get('name')}") - print() # add an extra newline between steps - - # Delete the agent when done - agents_client.delete_agent(agent.id) - print("Deleted agent") - - # Fetch and log all messages - messages = agents_client.messages.list(thread_id=thread.id) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_openapi_connection_auth.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_openapi_connection_auth.py deleted file mode 100644 index 13e3076f5f17..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_openapi_connection_auth.py +++ /dev/null @@ -1,101 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------ - -""" -FILE: sample_agents_openapi_connection_auth.py - -DESCRIPTION: - This sample demonstrates how to use agent operations with the - OpenAPI tool from the Azure Agents service using a synchronous client, using - custom key authentication against the TripAdvisor API. - To learn more about OpenAPI specs, visit https://learn.microsoft.com/openapi - -USAGE: - python sample_agents_openapi_connection_auth.py - - Before running the sample: - - Set up an account at https://www.tripadvisor.com/developers and get an API key. - - Set up a custom key connection and save the connection name following the steps at - https://aka.ms/azsdk/azure-ai-agents/custom-key-setup - - Save that connection name as the PROJECT_OPENAPI_CONNECTION_NAME environment variable - - pip install azure-ai-agents azure-identity jsonref - - Set this environment variables with your own values: - PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - OPENAPI_CONNECTION_ID - the connection ID for the OpenAPI connection, taken from Azure AI Foundry. 
- MODEL_DEPLOYMENT_NAME - name of the model deployment in the project to use Agents against -""" - -import os -import jsonref -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import OpenApiTool, OpenApiConnectionAuthDetails, OpenApiConnectionSecurityScheme - -asset_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../assets/tripadvisor_openapi.json")) - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -model_name = os.environ["MODEL_DEPLOYMENT_NAME"] -connection_id = os.environ["OPENAPI_CONNECTION_ID"] - -print(connection_id) - -with open(asset_file_path, "r") as f: - openapi_spec = jsonref.loads(f.read()) - -# Create Auth object for the OpenApiTool (note that connection or managed identity auth setup requires additional setup in Azure) -auth = OpenApiConnectionAuthDetails(security_scheme=OpenApiConnectionSecurityScheme(connection_id=connection_id)) - -# Initialize an Agent OpenApi tool using the read in OpenAPI spec -openapi = OpenApiTool( - name="get_weather", spec=openapi_spec, description="Retrieve weather information for a location", auth=auth -) - -# Create an Agent with OpenApi tool and process Agent run -with agents_client: - agent = agents_client.create_agent( - model=model_name, name="my-agent", instructions="You are a helpful agent", tools=openapi.definitions - ) - print(f"Created agent, ID: {agent.id}") - - # Create thread for communication - thread = agents_client.threads.create() - print(f"Created thread, ID: {thread.id}") - - # Create message to thread - message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content="Summarize the reviews for the top rated hotel in Paris", - ) - print(f"Created message: {message['id']}") - - # Create and process an Agent run in thread with tools - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - 
print(f"Run finished with status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - # Delete the Agent when done - agents_client.delete_agent(agent.id) - print("Deleted agent") - - # Fetch and log all messages - messages = agents_client.messages.list(thread_id=thread.id) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_run_with_toolset.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_run_with_toolset.py deleted file mode 100644 index 1665feb9a9f8..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_run_with_toolset.py +++ /dev/null @@ -1,94 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations with toolset from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_run_with_toolset.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. 
-""" - -import os, sys -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import FunctionTool, ToolSet, CodeInterpreterTool - -current_path = os.path.dirname(__file__) -root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir)) -if root_path not in sys.path: - sys.path.insert(0, root_path) -from samples.utils.user_functions import user_functions - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -# Create agent with toolset and process agent run -with agents_client: - # Initialize agent toolset with user functions and code interpreter - # [START create_agent_toolset] - functions = FunctionTool(user_functions) - code_interpreter = CodeInterpreterTool() - - toolset = ToolSet() - toolset.add(functions) - toolset.add(code_interpreter) - - # To enable tool calls executed automatically - agents_client.enable_auto_function_calls(toolset) - - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - toolset=toolset, - ) - # [END create_agent_toolset] - print(f"Created agent, ID: {agent.id}") - - # Create thread for communication - thread = agents_client.threads.create() - print(f"Created thread, ID: {thread.id}") - - # Create message to thread - message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content="Hello, send an email with the datetime and weather information in New York?", - ) - print(f"Created message, ID: {message.id}") - - # Create and process agent run in thread with tools - # [START create_and_process_run] - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - # [END create_and_process_run] - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - # Delete the agent when done - 
agents_client.delete_agent(agent.id) - print("Deleted agent") - - # Fetch and log all messages - messages = agents_client.messages.list(thread_id=thread.id) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_sharepoint.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_sharepoint.py deleted file mode 100644 index f8448ea1b18f..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_sharepoint.py +++ /dev/null @@ -1,89 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_sharepoint.py - -DESCRIPTION: - This sample demonstrates how to use agent operations with the - Sharepoint tool from the Azure Agents service using a synchronous client. - The sharepoint tool is currently available only to whitelisted customers. - For access and onboarding instructions, please contact azureagents-preview@microsoft.com. - -USAGE: - python sample_agents_sharepoint.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set this environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. 
- 3) SHAREPOINT_CONNECTION_ID - The ID of the Sharepoint connection, in the format of: - /subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace-name}/connections/{connection-name} -""" - -import os -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import SharepointTool - - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. -# At the moment, it should be in the format ";;;" -# Customer needs to login to Azure subscription via Azure CLI and set the environment variables - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -conn_id = os.environ["SHAREPOINT_CONNECTION_ID"] - -# Initialize Sharepoint tool with connection id -sharepoint = SharepointTool(connection_id=conn_id) - -# Create agent with Sharepoint tool and process agent run -with agents_client: - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="my-agent", - instructions="You are a helpful agent", - tools=sharepoint.definitions, - ) - print(f"Created agent, ID: {agent.id}") - - # Create thread for communication - thread = agents_client.threads.create() - print(f"Created thread, ID: {thread.id}") - - # Create message to thread - message = agents_client.messages.create( - thread_id=thread.id, - role="user", - content="Hello, summarize the key points of the ", - ) - print(f"Created message, ID: {message.id}") - - # Create and process agent run in thread with tools - run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - # Delete the agent when done - agents_client.delete_agent(agent.id) - print("Deleted agent") - - # Fetch and log all messages - messages = 
agents_client.messages.list(thread_id=thread.id) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/utils/user_logic_apps.py b/sdk/ai/azure-ai-agents/samples/agents_tools/utils/user_logic_apps.py deleted file mode 100644 index 979fd5eca143..000000000000 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/utils/user_logic_apps.py +++ /dev/null @@ -1,80 +0,0 @@ -import json -import requests -from typing import Dict, Any, Callable - -from azure.identity import DefaultAzureCredential -from azure.mgmt.logic import LogicManagementClient - - -class AzureLogicAppTool: - """ - A service that manages multiple Logic Apps by retrieving and storing their callback URLs, - and then invoking them with an appropriate payload. - """ - - def __init__(self, subscription_id: str, resource_group: str, credential=None): - if credential is None: - credential = DefaultAzureCredential() - self.subscription_id = subscription_id - self.resource_group = resource_group - self.logic_client = LogicManagementClient(credential, subscription_id) - - self.callback_urls: Dict[str, str] = {} - - def register_logic_app(self, logic_app_name: str, trigger_name: str) -> None: - """ - Retrieves and stores a callback URL for a specific Logic App + trigger. - Raises a ValueError if the callback URL is missing. - """ - callback = self.logic_client.workflow_triggers.list_callback_url( - resource_group_name=self.resource_group, - workflow_name=logic_app_name, - trigger_name=trigger_name, - ) - - if callback.value is None: - raise ValueError(f"No callback URL returned for Logic App '{logic_app_name}'.") - - self.callback_urls[logic_app_name] = callback.value - - def invoke_logic_app(self, logic_app_name: str, payload: Dict[str, Any]) -> Dict[str, Any]: - """ - Invokes the registered Logic App (by name) with the given JSON payload. - Returns a dictionary summarizing success/failure. 
- """ - if logic_app_name not in self.callback_urls: - raise ValueError(f"Logic App '{logic_app_name}' has not been registered.") - - url = self.callback_urls[logic_app_name] - response = requests.post(url=url, json=payload) - - if response.ok: - return {"result": f"Successfully invoked {logic_app_name}."} - else: - return {"error": (f"Error invoking {logic_app_name} " f"({response.status_code}): {response.text}")} - - -def create_send_email_function(service: AzureLogicAppTool, logic_app_name: str) -> Callable[[str, str, str], str]: - """ - Returns a function that sends an email by invoking the specified Logic App in LogicAppService. - This keeps the LogicAppService instance out of global scope by capturing it in a closure. - """ - - def send_email_via_logic_app(recipient: str, subject: str, body: str) -> str: - """ - Sends an email by invoking the specified Logic App with the given recipient, subject, and body. - - :param recipient: The email address of the recipient. - :param subject: The subject of the email. - :param body: The body of the email. - :return: A JSON string summarizing the result of the operation. 
- """ - payload = { - "to": recipient, - "subject": subject, - "body": body, - } - result = service.invoke_logic_app(logic_app_name, payload) - return json.dumps(result) - - return send_email_via_logic_app diff --git a/sdk/ai/azure-ai-agents/samples/assets/countries.json b/sdk/ai/azure-ai-agents/samples/assets/countries.json deleted file mode 100644 index 58d3df70d28d..000000000000 --- a/sdk/ai/azure-ai-agents/samples/assets/countries.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "openapi": "3.1.0", - "info": { - "title": "RestCountries.NET API", - "description": "Web API version 3.1 for managing country items, based on previous implementations from restcountries.eu and restcountries.com.", - "version": "v3.1" - }, - "servers": [ - { "url": "https://restcountries.net" } - ], - "auth": [], - "paths": { - "/v3.1/currency": { - "get": { - "description": "Search by currency.", - "operationId": "LookupCountryByCurrency", - "parameters": [ - { - "name": "currency", - "in": "query", - "description": "The currency to search for.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Success", - "content": { - "text/plain": { - "schema": { - "type": "string" - } - } - } - } - } - } - } - }, - "components": { - "schemes": {} - } - } \ No newline at end of file diff --git a/sdk/ai/azure-ai-agents/samples/assets/image_file.png b/sdk/ai/azure-ai-agents/samples/assets/image_file.png deleted file mode 100644 index 50ae6c65367af30a10642fc910cef97bfe765796..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 183951 zcmdS>hdbB*`#z3qX-cJ1Dnv=jDmzq0JJ}>7D?(&%Dv=o#6`^c06WL@$QTCoi_TF3H z^Y;9_kKb{;{)C_7c)ngoJjVTgzg@TMI?wYuubYRo)Mct|blWH>D5$Ppk&vaJ*qlQ_ zL3w%$C4OUlV2~C6Cukw5Y$0cI&%#>MOqb%Oro}@e6APpJT8vh@X6E-zjL&lLaGX8O zsBdBM&|HX<^TGe`1so=3dYnH)C93ctTOVGzZB9W!Uqt>}XIyP5O0kZD;;O_&d7I#o z4r}?n{mXn4hU*5mux*>;@Z7YI_eowU9dF|S-e!BPt2M{E4$x?OUGn+#ZI8Y8@i{vC 
z@kh#EpFdj9^hEY`_BccCPv4eOj*-;U=8lz|#!jj|hKcDn{gsbJ| z|9;ZFXFrVp=X03OoV#H~^FJT-qDAxnPcJFqhT1wNCN_Z|KYnc4y7i*6@`3K|Zoii= z*B?4`NZ{PL2(53Pv1&PdBO@a>)NZns1o7NwQpvZSj`lDRlbsyvI5*N&z`$?Z{5(6pT$YG5vw*7s0_hT*cPe+uLX6$a!)Eu`uAX6jLq7LV0c6XdD-^u+nd|k zB2=@?Vr3#I+dDdLs{Il>#l*D!=+UD#%X93V1sGv$%rfhh{$h0d zyUrOor2zLUW+eRBx@&7|AI=QO92PX&xVE^5x6^tTO9y{p&`t`@F6O+?b=b5>=Yw8tF znr*AlTb}qMepuLMSAS(lam5=>^NxHm*KPVW;hVCpMm_CjhxT2d@^Jqyzb*AskeSPq zcl;(=?U|-Rc}<<2^gDO%WL5aM*P!9!an<;P%!@*IzfuT0F3G1Em$>hK%cUunU#?($ z_eJ8J4C95tnD;XroUE){il0%_Cf3EgFuHe-rSkoGU*~_Tv{?HKDk>^T<8jhq>xD%` zY7%arU07NQzY%yQD?f%y)%U#P(%ye-%bZdnLat588ipN>4GsH7+cL_^7@WQ7cn$vE zcjnZ{FS*R%7+LoPKMx;1yx_6txb)|HH|rDc&>lHw^)x;{zI1+Oq=hC~J+EA@W2PbE zGWE(rkFQEbORDbn*X(y#RQkupo^RN^^J|gV^rM(12Pro$!^YEii=48k%>?7Us!;Y; zmh)7NYRds7Tt8TA3HR$c z-u*(8q!9V1Ap)lBD_*mU%gg&qwdv{T{GJrPE%Y#3E{%97_y-1RkmBk)>!f(>~d$OlYz+zyR)AHzl;qu;jO)xk(_=cKfM5f&= z+r7%*t*&C4^{Y>xKK(t?^0_GW*|W{9Umh6NI|iv-bo|mP?7VikV0G>ghem!v({fR9 z@nqHcf4}i57qKU~O}i!1j5}Jdq#t9d?ZWku9jR$M@w-rRPW9&PmoH!X`TIY`sxb=) z9N4_`@YirjuN_W_R^y!_0TTQaB&vH#ea>Dx(~JE%F+DAwZqTqp{R>M~s7Udx^Q%uk zd|=G$q@|~SJ~8ny7>|QuV!YrVUBZbp?xeD=Tw9mp*I)l$nCubKc=fHMw=ZR5@s$FAvm2D5vUd#f^F4Ps8;ezSh_K$nHIO(Blxl@oyJuQLc+V z+Z1hV1om^N#y0U9gXYh`fWF@nGW*@Ge8+*NU|mY`4x&gckXB_26~TRVX%gN zDXK}Tf-E_|l&@XeHaFJcg*~f1+5HXq^-Em5j`TLy_1pKK-X*edNJ zOCi`0-<$H0?O2C?)FT%%g^X}~uTp#!a?V=o$IIg?_V<72oDmQdJj%?x;p)|^1`O-Z z+fHq)i@qryBNf7B(kUEKk9?$o&6*Lbq>ZIne5CXwHMxO9J=YyE-=9TmEG3xSYkuZl zH^l`I{LnPNKM)Poo; z@jiHV7cFg!=^@-O5@7m!SFy0K_`+x8L3ZZa>LcdWcOJcU=FFLgL-i+B3!IK@r{$2q zHEuWRTB@zTu<`qnJ=@TaF`5%nzhmAz?YSuz`LnQa-QeKhm8(}}Y}s*ZIaZ@Z-@Ir) z*-UEHe^jVooNZ0f++a61vK58O#b0j+3fNno`+L@J*uZP7jYNt|ImW`mBqE~LV^zAw z&gN~yE+G;96%asXH}ji{$;JNnUVRC_8q@B}S&X$G6crVHXldyh5U{Vdu5LFCjSDWl z2eFxHH5xP^azZNjD}FJ#JYHZfYQn@k)R}i~s3|!)Rh{DvGaK8s4C9U>WJk$lNl8h~ za{rUfnWow=g6@nqCf?byeLKzT>j7-82*z-W!CICCg_^tNRf=KKcd_=^H_yrqu@5Ac z=SKew4{yJ9>lPCJmf}Ovk&%o%JbO_!1RR%mjJxvL?J|$h(HYES)9jWhe8;WrR@tVW zYqOh{Q+@C2kdP2=rxlx!uJOUaz+pZV<7b~ge=c7tDUp!4bg8JTi@}%S{QkVTrCCFV 
zwZ;Bs+;o0rdR$zbn6Iv-cVOl@^S&MQ!Zwc(bqAtWjF5&Fy1h8%U*?cHgsl5}bkqmY zSX^7X=Yk^B7)p4i^*H@ldzQ;*y($8ZG%Q)&hbQq5ty1rUY(GBjIezWK^*0;?)nOYx zDkXCqbKA3L&%{W}*Aa9TXxn5@BR@RM7!Xk$pskFDFb75yoGCOJH_n3F6U z<=NQ8%e!k`YPZnQqZhS(b@lZ0@`lV7CNyjGHwLgN6akP}Iu&GRR``;Q^QXv_ZMdVO zqbA)@$zq^-9bWMyFHhuAg0kJrz;3BvfyW3%MtqV@RlHI%mq9)2X^s5D>gwu?Np~zP zc&tWSU5|>p{mnV89VloXeJ#B0$B(Uf_VbtC@jZ}^xI`s0a_rbK#$(4e&X08zBWic= z+Qs*)LhOq z>)qB@8FI?Nb#Q##7q7aD?H+7+rF-h^tpoR)BCpca2tOEV&$XrbeDBwun>TkMYBz1) zPhM4;r1coPcNaUp|4#qr66dwWnB4~Q7JvIc|4PPp#U0CjU}Q9b%3f4eb<3tX`e$h= zC6c#TqDnf~!`~MG1@_`SC!|7!Z|`bgDQ`fWN}8|Ajx7Yf;FvksH}~ruf4ELYUS3|| zvmc|a>8B7JF_!O0GA7%%$Umy_TDDE z;giFnS%iPc18jrvV{xvoo1$;Nb9=mDvlz>(I3tt$_qSSGTN9ur{%q%4AxpNgj+_%e z6W`I6=F?N}*irH`@T|g~4iWpglKMEghRW%&fdTiW+2IJM)ujU>c051@ZjQ@yTdDpb z=}e4fbZMd9={Ki@J-^J>m~A<%+Aj(WREo?zIZ(r_sivjHw4Yt&X+yly_F=%MkJCCGC{7 zv=T%bx7CPpdc>R{0QA*YEc&&PTd^(3;{3*C7Y!p#ll_%)zm7#7C85g_xQuyeW>7or zMdVZu?k9YDD+M6M@Ba7BZeCtqt@8pxLPc$DhdMhu5AYcsKuUE%CD1~}+$60HR7!F0 zw7a{f-_3V?2<+|QmL%ZuH0xQi8-R0U>^GPX)*d%%P5W`z@;K8f@BP}1hxm*xAW$TS zIaM~NN}u1doA$@5x%Mpc<`hjA{g8+VX2k^M$6IMw^lwGv1RmnX0e!f)b*$0va5js(XAzYg4DXHip`Dzlcc4S*l7E_=9J@Kdbp}F~43*L2q zSM64?`xpA&naD?*2A!Mb%&rKIi|f z$VF{lKzu6LInq*=Q%QKzaz0hj}lRa=oXA8j7v$fUbLz!+85|RJ0QAr6Kn@s)A+l+?F%>k#BiW(Xel2cQ=VpjaL zv17vjn0Ung&dSRA15B@U`~BzMyez&br9(|FH>W;E zq2HM+_~k*H;@`f66HH9gePd%g&!0bElkM%jBVaJxVjmI!vXy6}BeKUpZIqXO$Wa<= zvaNwbC5N$!nkiEOA?Kad(^PY9CLO3uN4mSU>Q81)nTf2Sv@x@=TuQ5~s^Yv=w)o@3 z`@4U?zX-Q9>C7zzrb)8Cz4Xw`tSIv8EBQu^e213U^N;! 
z#gBv)e`ep;CJkpFApSu$)Ym#fb!xIt+zS00NAol)Dyn?E8->xQ^#U`embSJ6or(8h z!9CwR_f^Q*?%%&((jc3gDZI!VtUH`Z3or~2V0Yp>Axnc$&V-Kkb}q{y`I_wddikBZ zZ^&;!MV0Jbr!GUMg;MPGdUmizyfv`^x9=#dLNJ}kH#ZH7F)u?!fZL@XxZ<4dFtci zqpzkp+>)5|_zH5dOuB)Bjh&rmBW+Y?2XJ*6Dyif!!pSY)ak($+XLA11HQl`~c;|0~ zzTKOK0%zx%nXBNpuEsN8^{PVSex{F(jy?!)-L?CL=(hO)=H0so0dp?tihgvXIyA(q z@mdJHqOjoK+Nq!oE5N-Zyl2MPL*;-upFDX&YA|8Ug#UqWgNIHjBng3lNnE+I1>p zfq@$Q*#GqHJtS_BM|NZ<8;y4rl%ZwF^+A zNEC?41vxn!^p#S;^NF6Wt}Bp;`x+lJGxM~(qM6wla_QaQwv$Uoh=97vCr$5Lf5E{) z7=Iy!FEdj>`OcjPe0;3(C#IqLxDD5@R|q$p>`$?mRgf2LL%alJ4Y6URHH+O28ZqqO zzm(B+^V+o{a$iS|_+*%LZ7UEVL>{zzy4{|cpRiIa!tQzd!eh!2DkY zE2GkqlHVY|(yV(~Wx_Y22tGpsJaCM!7{m;9h@c)UHvXLQ`Lm0c*Y@()>|P%g;yvBy zcxlHw^TaP)peQOTVti8}lWSw@e^QRp=MX=~OXjB~_}t2nb2f`p$7SW@{`=!8rJX)> zyl3=%8=tuU2>;&EaRiGVE*J}NJT2cCCjLyKqUGw?P(yqKHmc3U@5d9Zg)W-`uAT~y zUJe9}Bt#6%{g!fuK?55&J>816Y?e&0;L5KdbCG=QvYj$6n<(CjI@;y$SyEI~To{T^ z<^l-SRJ(ME(rTpnG14+iRc~J@64<^|iX{N0u9hZU`Mj-bQ(|fxc5`xaQrvbvKR@0@ z`{0~$d#143t^IuE#U^)@mH(dG501+PcDk}MT~(9~7)x{}0D*@1(*X7)UrhH`(Ke2@ zXVJK9rsi8P|Bx#?f9^qfSHy^6BOoz($X9OMkN~#$Gc@FhhaAfz@jV&QyK&Q|Kd27w zUS8q`25g59AN~#L08fF|v;PG62En!J`3@e<&B~EcQH9^WZAwZ?nwXk8(>fpUzMn-x zp6fqhXZ>?}k&kB6?a3PFUHm&TOf+h!9B|v@x3i_usQKZ&Bm2itdzEvn*moWlcoZ3V zEMWe&vhoL+h)d8{o;*AoL<=I-obPxJDbx*@tlysGusAhP7eh^IVqLq!g~kn$*jn2w zMC@k1LZR3~k?H(Tv@uDI5XX221_Z0ZyAda7X-MAZRdcMUsOg2JZ8fBjN{cr$I6Ok6 zhne=2W;M=2Ymv7Vc>Q2*q$RvZ`t@|CS?x@RLG5R~Te)_#*OWyc{-z+Y2t^CEar2!o zMuqr_hWreuID`ZPn!JkHd=ZhUoMC)qC%v#cwp)4|GMU!QKuuBboMB_a?wgyr>}K@6 zkDOB+cck6ed!MrYNDm&gq{t;Ye&f^Lm1O^}fOQc%gBVrlj31321-&GXV5~DQzIICF z-RI9|RWgiyY9cNZ=}pB~@=n6~>gM6&{gXO}&ffDL;5YnFNsM2y4z5E4r!__ywWMAe zT_^kYd|Phj!#_{TC!RVk&HOM=6)hk{9=ikNN2ABVWo~Y6Vsf$&LIM_mDx{)uZrOCS zm6y=}ix)2%blnLP+jJv{=jwx77hJ_2uP)DP1Jcmhk2EA}Fc9i7)R-7Arpnt(n9gyUh2!h8B}*$qxAXJ zlhRUFCypH(P%9;LDlb&`OknI~Yt$pjAI9P*#!8W*2r8BQPXlHm$mGvaG(8%Tc(*b*OkB*L?*fMec)eZ!ZW-iYQ7W>(w6O4ETJW1@ zK9*j?u3&Lf=Y>Ddh-w2uy-tT$I@7er?P5N)oZWPv&oz@dfVnWU-tt9`9PmWtY>PO3 
zp-61yp^y>EN1b{0+JG!Y>FxmCsLX&0;tWG{Qxcai_uEzJ>Z53YVKYO6O#T$+e}hFF zDjb(lbBe<%OH53R_P@1N0Yvl@7ngMP_c>Wvg-uPgd3Lj6_)jAF;Ihg9){g8;4%N&L zll)~MwN!LGU;U1XS4ZDGl-S#=lR!3l%Flupy4}0~1Jt^ZWz!6sDj9V%0KcMi|Kby7 zIr@<=4{+V3K(Y(ttD&L@3=GV)9NvG)ht4(X`kTJE&E4RqHStQEBkI%0y zYydIYCvvRZ%W}A}u&RohmP57L&fKqVs<(n$znU>YqktaN;@7O>{rmSbZ6=urcL3m) z(QURf-%U&V40~F$@X02+S5y}oZ}vXfw4KXw$s!=^jk!4jVaEY33T)t-w*!=^bgVZ* zg!WDM{i<8EKXuywpXkh>=4a=BPCMv$wt$7p*%wD&m6zY^M$PaX^pX@1x>uhrDE7R@ zLWCn#W4DJ?F8h59Uut1{|Nb;PJ3`cVeez{7vGvhXA)cTFzp*-WH2fD7WhX#e4hY+D z?LV!|D7@p>cI+Y~D9!E?4?3C-q_RXpV4>Ht&4vn?GWGoojIh*ig+SdY-GGnEP?QVb&Jl38H&Vmx87jvIq z2+T0&e{0GGPI;BfB!AYc&Ar3oBLb;ZN1MXZ3j)0gVUzx=Jwx| zYlyl|D&JO6L@`%Q;hLcVfV;Lf|aw3jLZS#_VN|Q>?LraPj<5g zGG^CUUWM6AcB|NPvYns#{sSG0%F0T*JAWAfuaTJRf_9zq-a|t}q#&89JB;x$AIzv9 z8v(8taTctEPm7|Ucp+Odv9Vq2+o1?Iu8##z52;PKF?K!14JU-5;lBAN(b`4#c z50>s89))NS$?q9&85tdI2tA9RPlioN4#!0xe_EqBNv2goT0wiuAfpS-3P|TvjHlv+ z0c`@zU%9xrv_31x(_R?M8J__46>gYCMI=`w>5({M9+*x41VtBll!2(tA?5tN|F;Pa z9M}Tpm*~7aH1y`hi^q_3Gyz7l#(L1BAn>9z@`a1b231wnyAK{DYV>Y@4D1xg^E4$T zBlFRy~`N48$uY&BKY-^0VFRP+W)I?(8oXgZ3=Tep~@p;Ruiv97juVs*aD zP{evKWwxsCsrUAe66iDB!jn$-4s(h_L6^DzNU1tcJzeJcKjh1}`df?R1JFiXldQAz z>%RpawU6h2&?+C}I24W+RQO^aK5*O3p6u3i!+2Hk1ht&~>(;Hi)MX9XhNwm$OtFrg z_i(S1!Q&9ikRak6DZ_t&W^(J6@AawU)Nf;u=Bm?8%Uj*7O&BR^v^&+aM1%u^K{qEU5shc;y0eV`}cg{Xz zaQd2KJx-z1A&i|=SXOp9la&hc0y;LIpzPF$he4hCpqQvK(3WA6wA_s4=QC<%R8fkP zJx;PTve#wo2iMngsAmH9bEm%;wJ-@&+>nwg#gmu>UmfQC+k>6^@bCBC6t`(x!Ay$r zrS5nLM2SV*KwOqPD0?~vg)~gkYrlSL+DU`@*vBX&UdF7rAJgf+n}Lj!3JVB1ETjw+>JnxOcI#t#rjhGC$_33HZd80CYltrV-tece8n^kd z@8`h;mGo`g+}uUWz`35GBKCbtcUg}fUDxF>?NeUtiN~n@_0c;0_Dt@C#KbUZ)?IXT zo1fmqPO#=+~rFV8zgyMo+rV14m=*7o_qSPd`3{>3Tl8xHJ@uC8GZkLC7?5i z2>POq+2y~?QMb7)2Bb-k4T%745&)+KXc8&N*frH5XiQyj-L{WQt7J=nU9YzS1rTM9 zzK$EJbNmtdbSMN_^q+;mw9C+^pwQv%6eM@e+q?O$;Qj2t%J}fPmcAJ@z5>)E-9*p# zvY^>;_TJB3zfNt{SuLev5Iqc0R1CXj9YwN6K`HuApEQdpWz2xFt}Ex-aOW?Noy(hZ zT%6Kv&#|Vy&{*B<dyafMzIZ{`v$9 zOLwWyA@ZaK3a9YRq~Dq-*K}GX?MfQ8p`qc-Xd5k16%m9b>N1a=IMIWmNs23nTDE`x 
zrAM3okjA8G_r|^3HW3hvD|0}eidBf;fBN+44_N%ehrJ)PeerOkmb!ku7`11IC|l{% ztu#-oY#Tl*guA^#WHhxh-fWvy{(>ai4SAR1QTI2mp1ND-CD9C#iMqBAy(Asn7iu9X z?gK$*MMdd>7We&1|9JJ{w@CTF!Tmz-^6PlO;iWR4txwJO-FZYP)~G4Z-Z~g6mvW_4 z`+WoDsAS>@xb~AVe zr~Y1@!3{BfyuY`+D+`)FGVu<@{A|O8XVeS^c_y_b=%WF*?4we1!sd3EYtg%8zcVa; zWYF7;?u&x3KCF?#ot4h3T>$y%n%KuR|D392qwaX_)Izzl!`0=U#28pobNy545O40 zK`vwBYPQH`LS2!1=Yy9~QYL4cG~$K#5_iC*=Z8=JTl%?`L4}%`)%*m^aY$7lfE1b&oje zk<#J~AW{-0>HH4>;4dI(YI1GO6PIK0Wo+`Xk9m6O!wbi!PLLj&B># zInhk&Mo#m5*>N+7hx2>HJUT#WKBcP^c87QUB1YFUt62`niZgG)&+)I+Vm>E6PCrt4=t_uzayn^ri2hc zr-eV812)rxij;nRyn%GOK<6bF^432>O$i5N(C^4juUxK_lIKT5z-vFd>|%%&EDgYj zH7T0nBq)oD!-x(YDsso>JO24zvo`W7n|MTaPL4K!eXL?4-P(M?zpcaL=mh%t?Xt14 z8BlE+$r5EheR=|Fe(Vlj^l?0hv}fFr4Z%<(e`V_WpF$W$=-|olLBHa@WKr%(Wv%C~f)6O2qE3F&F;B`901Q3K zr@as)HA#o3`orT^yOx%gF!emUSfylo%AJSZ-@bk8aBEuYbPLr{HpK*Y(2_Ww)*qo| zW`O_T-r=!j%Fxw_p9r3yg9)P8Ci~V3ND1H2nWUD}m(!#@#~U0Ia{}KRe&$vR*f@+U zPtiQ@LCP&copEhKr_AGiUG)7U3K1W)WZBRXCAqvt2vTT3*eml_)Ak!vU+#hm!Pb@z z$Hjn04z_DI)}`pm2~y5b6S(2Qaq-v4wzxkt+(Zrhn4!Ol^q->jZ5s;6dT@-3KmanR z8xUT?!JiGrXSaDKHT9Iw595#A1Nw9@6UCwx8msx9Lr(GUH-xzSSWaR>g1lQM-#$w! 
ziTYoHk?-dtnLlq_=Zae`X_K|D!kQ!j71 zopSeL{&0{6*q`FT{h}z4m!BUd`R4b?h&R|)1RnOp%*@l67`^;M`w8l;FDwDHsR+xy=Xc2 z^H$nH5+g0EIJuai-2Ve~-CTVV68AXts3VQs=$RBdT$hmI>-^LMopd@8VS9 zcUYKEy-Z02ASdvJSE8ru%5YtzGa4|9LRFXLEok015d7Qw`E$iZNPMH7unvhCYE?ka zg$lKI_G#t!?_UX;H5c*94qt2ecbd1p?6#`*?jwR=*vxl4UUeE zlW3xPHj3CxT$p(zbwuv{w72Eh4=>~_FT1-6$X>V{`FHa2d=3i_cN`XwR+fTI256_= z_|Tp`OmteQ*i}#hhBC9WN$wFc(Wu74rQlA}Bjo<_*v;q%JX>1~>FV6u-ri1r9Ux&X zbtZ5J?k!#aU0NCsOj}ZhEt~eel6|JU6rU$+GQV)QKrclC#;Vh58I}5d<JhA1oxy>B!g z&W+s8i(c7~O80`bOK)YWg42MeqntD|0qH&L%RUT3OojZ2ywY#_Bc5#Fh^v(Wkw6nGG zq{1!HC~H_;rKpDPoo83w+mxbNEKva?(CHM%{x{HYMgOgshkSX^loT>R$%bs|rb*p%d_1y&e z4^Fp7aKge=yQSxyNq$ZI5Aj=9SL*hE1tvAoVvTr@98ef6oA*;WuoBrqn<~ z{2TQ8Y~Cg%11cLHSpC#Yz# z!Co*N##vJjknJQ|d`7oi|A?+wR@h7KJ=UOXG>%oET?>dCfXMW8UL?1$=>a+Tcm6XJ zq9XugqltEXrp4gP!l4L{d=!NyakqRWC8cQ5`5};!ze8H~!;OhVj}=`WIlJTFnT@wX zg?HyA+fMgEveC*PHrWC>O5gA+;EzZ^P1BB!E0G^qPh_v_DG4+yUf27AYNofR#|314 zYXzVvcKc7IK({k6i~M}Yf2+TW8-g|B;u}#FP&B(?Gpx_zu15SxI)4U0zl8E_nPf;b z*!lG!Qv0cjL+b#4cLuR6pT`oN=m*88@e4 zIwP`ETS`KwuRgsw4PiDQS|1RT7!{k^_v_to{n=x8AdeA*ur*Pjr>6&=n@7~53m0_n z^a`4xgTXv>Fv$y0UmI1wXhT%6IrR|fTsImnuY6QV|ERx(+x(#2AqwoRXmf{q)bHL+ z-x~qQr|WM}%s&XjSC0Cp>X=&$(DD`fN2x~fod$<2;ddrIJNUn3mmlWC3$U;>oNU<% z)b2R*@y(QYF$l%LEJZFJs&bZD)CAWtYzO>kH067zrMS*zA=n%Bv!F1jTy~5D>|#?&I=#e!epGC&`I+;?f&rQJE&y6lHYjZb zQX1v;1d_i1b0Lp#!r7I4#y^D*-M}(h4Tr}_%7o6=Hb*EO(uRJdL49yZcx$Y>#-Mrf zPMz6}4bQL*L|13zPzM_CPJiitW47T12zkE{)m75hC{AVgKXdAW!_U&_q!XXGld=LT zS_hr^j`4NN_+KyhNoW8+idPn= z&GW3KouZ?JJ>58jtjA6ZnY8CS9zvf{sbvg$O}LCR+SuAJ2S?3_*+xbLo;=H;!O%qL z4+G7gA_mT8Sq$!h)ShI1aOppd>C|DRV-gbndP8pNl_uNosFm00eecJO52P0$0T4U! 
z%!PBTtXCeS-Vx*;9UhKv%72uwSq44%Juf$4^InvdeJPbU85|NaOP{a$4ozxe_Jy2E z8lPrnsjZuZ1sT{#c!D6_GnGpku2ydGw>LUDFIy2U%_>jdJ$z_8oGF(82BvuDm7NZY zIw{CENMK10Sq>7Oz(p85JxOT8CmPXKq53cLC}$aAk~ z+Y^EF_aoZ3-|4DS0yRQ^Bm5HA(#*)*)YKlTlWz|J%E_dj)4N5h*7@#Lbl0^9Ga^dZ zGj!PUV#R4QGZq^+Zyug~nEIj8wp3{T?>BfQB`R#QjMt^QCH|B;z5aI7)pCz-*qR_- zTdB7vtL3;6$+JFI=H6M$XDkr=o8nFV%#y9donkhj7k@qZ=}XbFJAT|Jk0ad5;ANWT zaV$ggOM#C4K3$P;4Csi*o~hC+?Ns!_2mY;0`})mz`1+Q?Tw@l(BBp^rEq;0Y`b6Ka zLvZ?1qO?6NC@8pQVqgg^;u(Oi7WSasi9bH3BI>}?GP2cv;sttg`T1x1OdRG?ev~)S zM@P3`hyqSCt6sfeqx! zl#zjf*e3t8E&f#*L1VZ5s}_O0*?6cF&193D!SQCEXFwxyccLYBPoSz0Cx%%^V=)O= z@juJF%uGyqv4)@C`T5y5<2FS)`4A7M~tZAmoVQbPhEtR8I$A7bsEe>1&K z0Xw^Q`$Q@N z{n!U=_iblB>F{O9Nu0{jsrE1SARBdwh=|B6$K}OyHF^Suj?Ux$RsBoC5{gdA`NeNq z+uHht#y@2{0z%#P_p^m7H0g)2T=0Joc6)E=AgZe^-Fo#!6E#~YWky4b$FO>MI58tD z`4X;d-d{<^D&Urx$&NmCdRv{f$}XrPF->#jA}Z-eOiOOQqK}hf8NO(VQ~-^aW0#X)ImEDV?DhS*-bp9>3(K`r7LRGurtDJ zm7??GrPx`ejBHA#RyVFHc*LHw+zcXpx}t0}0Rq)hb3PyyRvmX65)lPdMNmmDjAmmn zAxS;Y6IJa{q=Pu7X^3a6CRO)3#qFwaLXF`OB>9fa`Vd;#`6ejHlX%^sZ7bP7aGoDi zlcBzM`Er?Xh)aO;W?tSMvh;yWoF21JJ~-UebYISd*B+j;_1LOavw0)|h8I01gH>l^ zj4Oj+CvX{K*1PrD2x>*+;_8y9pZq+%rKP)%&%TZW6kj3B%yzyJf}cX@*M_)?c8^x# z2Z75er?scz>q%`xqC8gjA{i>0-louaq+yP~cI)GmvZ-ur)ZHK1=Fh=oU^ zKor)Nw=tVNmrCQO`c-gQ!W}aw&qr37{RXrop%V0ql1aXC4Y>ESzl+tWxd|%_Y~({tw$A zyfei%YS3hOIJuw{?Zk+0G~>W>NZ-6k zN&VEJ;@0OGAGB+Oj|e=x(5uSyrh@p=vGE8v*0p;ET)Uo>Vq1d{+@zsLlgWPo|`d2B2z#qF&ra71XKPEmIF3h9>nUxcl)I@{9F`w;)mnwlCi zezCT;Y!X0UhxVeFwDc~R+~1F{_VwGf!@g+Id3GjG=!4x_L%;eF3dLk~A4HaWBOHCi zQA5N;;&cHA-`c zIFr%7tPP+UxKtX*0zpUdesBhWkXXQo;yoD3;1wbuyp%Sd&9VT{Fy?mLqob>5TT2cnxq4ht z_`@*n@9v%^Yi+uOVXFV+%kr%h52ns&1Wo;l%J!1g7L};8w8>gY3xZ&O#A$6&&*fOm z&)%>H>Mu`bOB-i}zIj_wwY)N*@>YHq7lFrIC$;SK+PW@;+Mf+yjJ_1yCNoEE2|%6U_ccAsv;!;_q} z{I}*PGnJD9iRS>o7U?8}0bQ~sGXSIbTl~70V9#&af7i0#-tkb~zP-Kng5n+V19rc( zy#>cTzArR?CZaST0u@>%S8m;+Z9GYR(_f>!7lPsr{F-+_bgIaYdaM7*`2+lENMYKj z_~Dk2&-B~Auw%&a2{dCetw9hoJe=ji*U&rJJ6rowsCV(*6x|`gf6v`nozXLvhJQQP zVNq6j{&(R9nnw2*_$ zA>AM0D_Vqn?K;&Kqb4a$_D( 
z*ly+u&t7XdQ876ZVMfo@cb1)rDUOWJe*Z~U28%#L6-W!>hH4S=tfG$P_;E>jK8pHe z4UOK5X?*B~v;Fwq&N_cVnqM_8+Y<5hG%+zzeff+l7S-dmVZ%pPSP+JV|K{XRf7)+Z z&i-<`bvpG!9}ORExwE$0lIkzb?~dB9%JteXvnP|f417W9I1%kJWg-R~18dgI*uD=d zi_oWoEH-Y@=C)dy44!xPec^fX;#7_;eG8?cc#VE9Joo;Y;cHX#gBO{!`)C{8X zS0Mu1?)miTlXAL2KsncMd5gDaSiC--l#Biiv;gPvuEtf+!v$Ea)7RMZ0L+05&)PU5 zOMgyH{jcRy@NX4|Aka@O3ADd{UHp^Qw;Qr27>8A^v>;RN zcs-&>dv<8<{uWgCZ;(fJ@7;SW;m#+mP2gmAQ{7~cdUqU!(-w2EMC>lC;Cqdf`O#rN zzMQm^_5G&Icg}_;0!mVzJl6*C(sZPmJ8az&lowOD4)W}VXeqIc4hTGCbXx4CtyxM* z;R?{nm`XPX2qvBGi2Cat)$Qd}lH0b!XHk>e+0$P{S{(Twas*x+_y8r%_Klbd_b5GB5InEnul?}rE9w*D9x%=wtUE||V5%LG;^)_(a zVhD;8(BwWkZ~|#725t}Vd|kBX72Ft!?BI>8DsMv4YWm>RK>wA?#xa$~+HDSPI?p~y zsav%?VIb5K3p+|&UB2q{LR%Y(a zA#IE<$@j109KoZCtKHBlZTkA<%kVSu4%krBJie)X*p`6)l1u>0VCdyK2^`>z}z4kgW% z@z~lJ6Kw+e1wW^yNq~sWuwlYedDP`ud*>@Tw2&3Pm|9u6y?b{kJUslVB|4r4{f4I? zpV~4lC6T!|C2DuHdm@CloYjNT2V>NG#Kgs2E#aiJ`^AivyEm~o_Dc=du6h6d$&z<4 zfTLH9v>L7?Nc_I+O|de?cP)98WEYKK@4(2nUX9B;lJwp?OnJ!IduC^5e#Jlr@fu`9 zofuwSWTkG29aELxOB#~c;<2h(sg>hbrKA)OZoAX*@HH0H*2adU)gm)^&9@p(JEK46 z3Rx@+ISkU_VF*zta;*^0pAS_ASSTrL*S4d-u@!)JuiBlYI~hCSo39ZNdu^r#x?@Ee zfKzGzuVo(wpHt8AgjrA-z5}aSz&Kmdp-;cJ;|9fnjnnQq4Km z$thz{jxwEAg-}LbC#PbDBCb|B14Z}J=nr-1YZ$tvgA7gE_@FEQ2=4jnh=%H?iDxM{{nT#5s_!vJTMMsC$smX?wj^m+&)sM&#yPyDpU3yl`&j*(7L^_PFAX|ft9e5D=6d~_!H`tYBenLY| z_rP@+#U=Vva&NdstvV%BgA-H%ze6a9yEv)$36xcZx<>d`Od)Q>P;8rzhA<0v1WVp@WUnP~rpt7<1t1bE7J zGv0Bbq>`vW6>1t@#Z#!7o=}zyfLwCTY#c(0T>u77PumTwk9AUt*7;aK1*UhQG1qbf z<&Is6_LJS4(0(>bar}EDvM}Dj5-p?(NuMCNy%eI8Sg!~ky13zX)6r-;^>Z<| zL>DEF-_HhLbeL7XiKKvGS~e9uP0hy$#@?(OYCgot7wqTv2=t|GU|E6a1PB}3wReF$ z_g@aopF49VRr=>*k}qP;>oqZy-7S3L5U>b`3fh2TX_bf#eV^Z|YHA8&M_?%WP%EC1 z;)REQTL~#Lz;Yj({4U;V;KZ-sQr?Q3yK zB{2;D`@5L^OXXTxTErNWo_x|dp9pyvwyMt?^eY;-ueI)9YWD+#i7E=C`UkTyRPu^7%iw28h?(ysnuf7FsvKJg$q-kux3Sr4BT^5P6;wW;5B2vO z;Lo&bADmi~e}B-_WFz}TAR%EaNC{a-Ca~2A{@YDU_A!_Xa=P@z9DSR?`x?9C#t*zq zPENjdW*YBYT)O!Wof%2<6B~#**;~l6R`0zdC3II!K_MYJGy~7Hu6bWn)4qAL;4%sU 
zq(lFhL05|!G(&6rZ-CETI+%LCph*l?i(L)y&Zs9>?m$Wvu$@AT%jEkqho2?~ne5w* z?s1{}Zf1#LC^`SR54e)D$tf2sH{O0QlRK+b;x0_}H~>CF^k;`G3hCkNfID{hPhzm6`?$Sj8>D;fGib!@wt)pl4%LbhOsW!sM1MTfUoTA=`h0aQt!7WFFjTuZVjEHUiv77DKoF zu;)-Zyc%uo>8}*2stn36_Egp?n-IE&2Q2lx6_>5Rp z^b$(cpecipv#VslHkMZocm5_K0Mysl3pZ(_q0biKkp^|Js`p?k0#n}33bQaWy`4(5 z7$5iw*+z1hR5Fl#3|ZGdGaDa+=&wnl=ZkaiMzzJMUKtE%I|Q_3nd{X|;el*(bUCg| z>_4D&62s&`gsDRjUgW3+GVNxGqI`BqeiH_i7G~;hNe=TFw<|L;Kisr&<6TULuu-Yc zPJ-aA0HgjlMq1mvOBZGwj3d#!2KRD@eVw^#xhWdg1gey%-vPEqwC~C zD>O9e7@ApFShUtw7R8c{dpB?JpG}zox!RL>4y^#l9Er1&f8-~W3Y{c~+2nCHn8y$d-qi+O;Px)P63^SHV!|^Suua4rQ3n5-h4hstl^US^G z`X-p1GF%tq8=DaW1`RjeO#Ye;4tgQxfuBLV*GGpzn7?8dK7&R#qyMX0l$=$Hy;q_M{Q`(Ad#bLZoy zacBT+fm<+FQ%H;kXAWoaA?uMzAmrjxD&qqK%CRpGA)wx#vra0X$Op{%2FrA$+=h<- z8)A*R7z)n}Mnkv5>n^T%J05ozcf(}@0vYZ1H0e@(#`QY9htt1&+5fuNEbH2Mbx{)H zYXM_TlEY+t8@>f?1eFlgW_mEx|AXo@iI~Fv3dH9lbi&~azEHkipO=Fp>B7qnT%G_- zZ%cEdUhrJ*M(SydU)KaM$7S3u5Kv`{fTrt`Bh_*_5^fS=owmDq3iR&@kj@{Jnuegg z^k_E^WL<{A_>wPuVy&M7O_nM^fmJLINBNIkO=2MHyL_al&>^@HEFfbj-i#sK)9Seg zAe`+EFaN>#!ss&e?43HkK%?&DC zeEVTk`-AA0t5lf4iA&}*U?bQ{&|3|1oW*GCJ{U{HNgtN~I${~VkWKU=c29xB|3Eq` z5)VYJ>K_!a1gUU^uSXNzucB;n?g*L}#G!_nM%U00)Dg5O_W+A|;}9l_+lfzTXlQ2b z-0R^S!(k*_scXA?dbrUYPJ7U{pNvNwy!R1ndT;Sz8>VqcEa3)REip)03ikk93>$b1 z>bF3!6~hw`6?ODP6m6wmz4nH~hx9A#j{Q9>biJJ14TMd?0E(b&HU5FY<$#a{YY@K) zM~M-h0-1y}BKh<25Tx_^%0a_xI8zAcj|8$7iePXUnU(-UH`M%rj4Bw*fc1Hs*Sx0m z;*E-)ZoLBMB|3i03D;4~kX1fmN`f2@k)NNB7Q{}N{*MapfCXGqU!N5Z)O2a)etWj% z_5Np4gnZ+wBt^cX>FEy6QVb6z@r1s5Me``3s||SJE=l&x;h3t3k)GXoHVs^i49H!% za)nIyxkDe)BKGO(59<;V68r)KJs`P{bmY(y_mkrq7&uN-cwJt+9Gpx$gFHZ_F>)#p zF%d%;r%1Icahz>XBJaoca>bL0z&ve{?UgWT5>a3WMUZq%aTbHH%>>0ZS21mI7j)hZ zpLaS8D1I6Ht0v#k9x`iTaq$MYG>S~_z=w=!(}$DYl16P`Zq^^Dgkp`);Ga4nn+6J_ z!oyn*!H;-<(~Mg7z*o2Lxa^~Z(v6$AM^gBRQ%47|cQ^ATA_DGe6hU_q2dV6e{w-sy zLpmcaB@(D(lhDSuZ4DnO3E|%JmH0RzIa(UuO(c^^#Ki?=rw~UUz!dNpN;$E&WBIoi z+<_8+hSy0l=0_aR{ocVWw?Y%Fo~z0k;JgK7TNkt^)crF_5{Jq45wZ3HXT!7@#MNX& zqD;iz4Xw0pzk+&=mPu$$YAv}eg3H`UWGHDK%>7}$a)I&mQ> 
zTVpCntU+IlmME-^^+>bda7MY{54+QchgTrAGhttl(JhioVKj){>MXGVRL zlB<%3dyX`#kn;bV$^Aws4(1q%Jr5m%OK`ipHevXB5~8MR=EHTUU&KsES_MAKTqWs# zSP4?SWdxxO5brb@A;ax$tJ?PJ<;#b?<@<2h#8q87@JfT&8yOcRBx%S2eUjcdV}-0K zWUBZyv3yJfaw)yT$sdQ09C_jA_aWVyxZH_s?o;eO+)d1tBwtyhzULo;Do@Jjk>c z@Zo>C@Xz=-HOM?t72X_QUx%f@SO*z-nr(d#v4?mIYI4cER58djTAmor{CvY|!&i77 za46O>I;Jx?ED1v>bkA=C$ry)g&uVb=;diwXMMg)!$U z2FID*cy6E9$0g^JM~3>pwcrRTSLS`*t@IDDg?RN-bKnaR#Gy_O*v`ZnNQ{U<#!P&C z`-y3@ujkbd9UN{afXav~iJTRVdLRmq{dICI2dUb>3>U#UR>qzGZSb4T2?`BOn#{$d zPW-${Qr1ay88H~@3!I3<7Jma;k-&tn8B(_2+qbb__yu3g9#N*;HK(`zo1jfx&t`fEnU_vT)pogUE2nTjZ$zS{>Oz zf#iwFg5BVY+B0_j^+=CKUHKh}uJ2(a%VM;Z4BcDzf`0{Te3GrlT*n5D%67MtGz(fq2R3e$ucU zr?=_cyVng8PDir^6YmR?Weor64#%?}C;Wf3eFt35Yxw@lF++rmsAvx&w2M%PjI^|* zQyHbCsX?KXNGS~|QluU2DYH^(&`@a@?M1Z5`@f#oq0DoBzt8Xge$MAu_4a+g-{-lX z`@XL0zV7{MaC>pi%a9^2XI>%@WdgFAPV0NIDIXjd(80T7nPgN|C@LzVLG@10mKYc@ z@O`?#L_7@8d*F&s8Ij?GGYvu2+a?Wk7Mj$p?_XvW{M(Vk<`A4AXuF+O07XTZ*w)_Q3gmr~#Q7v7mD4__V2YtJOZ3X%CJ4GT&mu(=L?D`M_v;q5L!_J7G#P>5b5|WJeDbb6 z`#WTrMf)p;4Z|PKhTU<6j~U`#>H7M(`1r?&mZSkzGhg_rSkcnb7_B8pnv}0f2x{~u zs&Cqarn^hvLJ#*J(c-1a9+six?s&WxfD>ZZTdFY0 zvXCGIdzzykQqOg@U0mGUhVtfUDIkf9gjMG{wP1;B-G`XbIE>B9WMmIufDfF2prdsX zP_anOKHj3)#sWkINmw?N=1j>Y=FBtoMl5?;UQR_7%JV@+#G~whJ^WxMr|lLI5kZpB zU~|kH@!})X;@rXZbM(xvXdY=EmVbyYBN9i!Ny;ix8m=Jveym!l0nL*6_D*_;4w2=Q z_ZO_@3{K&vYubKy z={jL)UU`z`!LzC!Ff?g9C#O7g6uAxE6R)zy@YpA{LXJ2YAfv>lO`A+U)Nh0(fl8^@ z9xv4C!8mAe^;Fz=<5PhXwmPxU0xM zZxF7?eT7eXdxs@x5;66J!>6$p-Xc+gJF8qw)*&KCK(oz91SA8yez{bBwKxu-!8&AX z8$)Kbh@bab^Z);q-4kI*L&(00-R=LpgPqLD;ZcmNC^ zvDL_)l#Bu%%#S-(@!_K>TV-T@|CPT z+9jo979u%pQK<9WA!4NwgIV#JWPK?dN)MTwO5MvjVP$jMMM#7AFl@ z0}+2hNcWZr2+-p;ZYHj>IK9z4v&Q(-M%NogC@_es3hkyDFc?G@2CGd6)N7>Z1VOSW zYQlD8&k*=Cd@%@mgz|@T`xR+4+?SwWRp@FAZpIQL!t}dHbaY9q2{yt_n6qe+3l3t4 zgK-}oLl%J(s<1C{Vv)5!P6py20^!6~fHUOdAafHH6(w{R>F^=Zu5|S(Uf$_A?&mSt zb8>UfcziV-O%vFM!D3g>Q{H8BTo%TuhA>WQca-Q(fLS;=IUmAkhxj@nHH=V+b|r$c ztNjFL18;W*@*hxV6LXmaDxW{!DXJ1YoD29B>zSZ^@YP_4X%WNllhbk>W`22((SQpklSegXDe%$Bu*EprC4qDWtlrk6M^}&qaQ!+x9_)Ms 
zD=br*gH%$TpWhq)&mQHXMhr_v$-AM)!8(ve<+N#<|4HG_e1XBx*h5ATB018!NHVDhRr2~lI= zO7yiko*y&s4&HQ;D9+gU_QIC=^Qj_4X+m|Eq#^h-MQq3S{qxeBL^JqSxNHg~FX@95 zHGMw~w<5uK304AH5eZfp+(Uo_;4gxaa$mnuMrJ@yzm<+SyDYAPa*#B^1tDEQbCeAB z*q%`N#UMo3ar%M#O|k70AZQNmK#8ppAVJU>`}W=4xeVX^<)xsL4O)hQfl&g!{*nvMO-6HN?uuHJX%)zmQEkaZxK%k9o`G0+5xD+qH zcZl}ZyhL7roccJ7y8DNQ-XSiWfQuC96_0>`i%79iA`u-bBAG0>Nn()#ZPCM0WjN4% zeM#lcr*nE#>b>miUwrxX#Z+g(abHFL^8c#``@J<5lehU3^J|#>5_2pVhzLPMIR2Ge z0C#3CvZ1l<6;;o2kkX7&-_W4{)_>08+3x~wIUL`$_kk!k?|}d>rPbwfbc>oeCSNr3 zoR}?kbuCGqzkB`49DZqbfQzdnG+2tNr*oUfzFlqt-u=wIZ|^>Tb>XvsXaC%}mXE!? zTlDleU^RZct$d}Ts`L1(b}5+o>s!fajW%AFix<~f2)SHz9lLV8w;bQ1wy#%CessZN zl;BG5;ZTcS)x6TOWo!7d#c0>`&nFH)5amU_@1IY+=FtC{%e^jbPb|I$E6eamX+85V zxAWW2ez=czxBvOsdWCubr*vAkdwf1&Ys(JQ5nv_~9G~#W&EMbqbC3OgWy9~90YuXA z+&TX=CYdcyzdXsWpNsLL1$mNRe{>@W2z=cnWG`HJsMTQ3V#a@20sm(M1IS`^ak=;$ zuTte@YsAMcimbk$p2ZsUBdxaJ+-8I3ZHC2*i8m^c$NbW=<=^eOwU2cQel^iRc0WXe z$OGlIX0e6*9Weemd(vf#f4v{%N1J7frJoPW^~!GujhT44MDW{!pxwC1x`E?&h?s8C zSbU{j$^Uvd!k0+|x{*+J{Fg<7V>&`cvMMc`Q3{a*7>@RD$YA_&Hn)H!y#Kk znD}x3{P^Jt@W6PEf<{M?KMThxWL1k`2kPSLN_Y+6`t@($PMke^HcIv&xW)5mBt?~X z16C@AaY1J59+(+urkTq%b+CewL6ut%@{jO)5O~6-kp9@QW27}4nZyf2s&skNU3B0O z6MkT3F7EEmkg|a%EQaCO=hm_rNO@nTYqBYt$hl0U$oQDAJHFH9A~(A9D7c2dt6b+S zX86ORkhkO~mb*Y;Zucf4LxGJrA%IYzk;a(SM_zEiOknbY1)(h*Sw}0^pq*h7a+Kg6 zWUY^31Af=G&lnCEOF()#goGHMCy3%~WScnzEpJ1g39&-MpLJ1S5Jnu#mhoNP?_=2u zMz0!mlFI;EkO9Q>roP?*C?9EZ0`3Y`?^Y;D3CxOczd|EvMMXDCSt8WYOopTDm0dkU zD539}x9rtH$~z0HwzD9jiCyU2xf3Y_A}0M^AYdNnZ68uS1=lf*qo6|-K%pSRB7_EE zqd@eaF9HYBX_Sbl?dG7Ef=PG!t{%?uE01l>e6DnFGWY_>u1Uul z6r>Hy1cis&lM}Y2-L~jlGWV(=@~u}@RX?8jKSf7p!6N4XGeh{_EO>RwomqlwI14wD zpqJT3`fC;s_I0^K5e{BxR0nO?d(izY9XEI zr(!u?hVV1)Ic}pP6?N3(?CdRuR|&jeb1J{V^mT^y+6o&|UyQvL)7e@!DIj;mQ(|9% zTPs!3cMCpOU~@^$fJFl($R4oWfbDLg(=3(CFJ9`@@VmgePe5bt)mUWD6bI~s^M;%BB**1*KjycB zmC?@Fv?w6oU`0b1&4|i^Lcx}P0y5n6q&=>C{lNQqiLt3EDF>iLAVVBr46x1zVs!SI zBJ|DWA+HMy>9H3dA$CyoI77X?YCv%_jK&)6`y7zxyK2>T%Xy3{m!2J)fYe@A164g` 
zzsKH^>_487>ICls4PBD96T1!Yfj!1}2xWRdj#{xp$Y=v8C=9_|(>9D}h# zQ;pkzZD7aT1bjw3N`U^%0QFBid;y0MoDqB_Wm%+i53xf;Lmh$5Zbt#DE{TKOeOJ%( z)Iih4d2KV5Lj^Fes02=&(bWVz)(-?P=McqVd-!lzRT`27N^YydjT$~)Zcr64>s(BM zC0B&7PFG_iSArhh^y#NH1yn0TK~$O?KR$s1EHy%E3Qi|%q)ixX!NDQZA#5$h)%S`T z>Gis5!tqOLJr;5?b4cM$(F;^h7AhV%#^AVOCTazJ5Y}KqB<0Y|B4iNfm@K^>Q&WHGTU=J9*iUwr>R+y8PlKDtK70iLrkD z1}=-^#+^Az^Ru{F6h|+~X$4#x5)6N=%cq&6$0KiP_@=RO3R?9j4x4UD1Ii@;D-Hql zJHR7(`d44Wd)$^dD9M$cV;U@WqQYMGa_~SPT}_ zNmF1e)COd#6wv@CDReFu@Xfc_ZrRjs9UbGF3a3Pm_VPquOJn_3q)TU>9bF@yYEk>v zD>q<~P4mLwM=pN|x?Jg>i24flU)*GF7f=Bbi<%MZuIHO!+!SC9LRnKDfZS0^;#CNC zJS+xJ^*gLLfg`-3wkEBDSNGq4$JtEFIyItxXRuL)OF`$d)COS%1s}7~9l^ol3h$pH z{tQLc2LT}4Zh{Pc)Q$N5Nh|c)wQC=!Xms$d!0-q#2`g4OJ3BY2heFeZ80XkPSo5P^ zW`!SJT&4xK`kP5u=wr&lxYyztUNr+()_xpDys-221s^?JhvTS2SZp3STc0NuV4=RD zVrdYR?S8&Ik|h(9yH$Pj7=;C0unbA@x307$J>uvM)wa=R9kFL3I(Sn@e?W$hQcJZ*Q2l(8ZWd~Dej9=i{~QU50=&7B7<>~QXC-q zA{4N0D)NJ;$Y&=|fR55Ee>y<35DUT?`9uX-Y^I@x76^zFIadq~3>S3yCamJSCqa%2T6gP3>%)+1F6oztF8Umb6zF1D zkmqj(&ASJk5u{SU;skO+@esPxk7$|bGY+andaO57-m(c6sVj#!NJ%k6SRd?oZw)|g zT>C6Ug)_Hr>&G-W1N>gGLjGMFa40gX2>pmriYK!MTlvAv6QMF`%0e20=GUM&Eqvg+ zXujL0)-Y-Tv*-l~Zch7YDAby^V~BNKY%fa)U}iUDf%N6hfY z?cl{5m))HsWQ7hhhJbI^cJ%9Ou`}P+B?Z_5f z+!6ygW84lx_1(a}Sz%%|^ z?06SI5KlD%YXA%3CYT2Dbloxd>&No_qSQAo;mZZ&{%_}j{sZr=EG(hHduh;K(Eyo$ zMQ)x≫gTEde-k|6EP{8jj}ceyw}sfmHAL@W_pNN~cesMiuZQKK#=$^?!zI{yYfv z5~2oqe*cp?36~R=yCXS4nB0Ow=vaP4`F_kYL@$alFj^zyyqmX8h=AeS1$;LP_+I+~ z1OBNVq+#Nz+0GAz))x>=JOaxKvlRXQ!}onxUjN5=vyo?BtKqD{wq-z#&)yL2 zM$mJA9I1>Bv3r9D?JV>eo<)uTyWkhEl)ZQ^lvn77s z#IG4A+SCvBa>jrFklql$06K;#g!l%h`T!Lk5@lHZ;S?f(7vW`bciw@RdNdRg9DLX0 z&88^L)Vc6Ae+a#U4v?_s&EL~a!J7($o^?W&6b6Qh4@kfW+X;bI1O)6495|^ z^j=6N3H`&0CcbZq^6P+IWLvVcX7~NF}66nQ~p~0@M+tTA!?}#m2X3 zsh92>zomvWpct6|W)1%vdLH$_vj*ntGQBP>m|Lrq{v1N2kAN8xW5M{7sfb7_)~Yj~ z4?Aose`-dKUQgCaNF$K?VZHnt2u?w zkh{&O#RDmZ0fo#PftmY!2-_W}&St5HpQkJW3Su`= zv;x!#&k%t}2l^2l@c98`&4|d8QIbjFka@(p`~~f>19E;-XOV)!IF|Is6O!x-0tX{# zV;DR-z_6fhLJJg;NFXfy5;qvZ86nj&+@hTJv4VVqn}f*}r`Xg_zo5 
zxDu)wh$Bw_QWRQ|96TCtecp7V*8*o z=v8qUdobIIhcS*cdJ}p2kNoP7b+^iaXPuC2!t)H3$@1OLIv+fczB!Ye6s*%NIlp9y zR#qba^DHsvsAmn1TMD+7ye4cFMQEw@?6o9s;zOkXQUAYT0xAa{`!%ToTc$j~#XS*P z&Ss|TeVOC{ivu+U{`H#Tn=40f?2>{Fa$=Ypi2R__aLIjg@kP8XoU)m;KlSxh4BG4h zzl37(2~boFY@3BV0^L(g5&q-i4SuT^deO5Tb8+ugr6w}cO(x>4MT`9Im;!No)70c! zRQ3I%yqvnWYIP&%ozJ^p+GtU+Z|Tu+dKz%dy@((9lY@_Vq|)!e%QdbOHlt?|N&&W# zxwVFi@exp-l8J7Fe}lD1V&>EHE~fLEnh%m2q6J9s-zpFJ(A>(7Di^qkQZTLrs3Vzt zHu>HTz9&Bt{(^>|Ms(%1CiDIMg{dB&8k*`~xn9${?pW;6rlOQdOR2_g>@&j0j z5^R-BF^1e7vQ?tX{ebC|K|nKYDh7bkD zDgOC5#KK=>Y%FA=+lLR*sMHBpN-KIzzmF<2A0m}Rd&x#FuL&feVnN;PO&s1MP*F_U z_JCf7pIiyV;EVW%EJ@g|&BYY3=`goG?c~(EtD68%XmNML>F!bvTNHMv5D|(=N3S5p zH4!i3ra=LXq?x4I9)2^2M{uN|2Xif+&k|J7*)WnrunJSWN*(EAg%SQGNEuzqk(?pz zop@*Wjcl|B5N~j$*g`Rn?=C;Ae(}1>S!nTM+a1feg=C%pMHKGaSpy9qM zE^tj}(6z>n!viOf;PH~&R}zLTU`;5F!gKaxTA7$X5{ zmdqYriV;6}hN&5yN^9ZV8eG5=%x5I*n$9)Y-|C?I7nl728O{f&L3P2GGv#ye%>*Mt!yua8ywM>c z;ZJ&uHBPA($pRd>Bd`bN;kc)fXy8N|PmeBy;}BZVz&%c&pr_d#8Ggh{Gqk=xTHRDz zTg$ikdQ&O(HXMm46e12sG>fP)Mwg49m>Cy^jHxGlCTFaf|Jp;BJWODaO$YdWI@oUS z4wxmoBO95%`3^*y%#fLv>{GS)F7)h(?elzPM_mMj%GGbNnJZoO;$^ zilXY@&>&qP$;cf!hc^fcDs%`#^*?WP0~lQ#&u$Igm{&@C417?e`5Rs(XE-DWLO#4+ z&t%TG1av)b0fGaO#Z-^7zcK*!GUZQ-yTn-yjLgY#y@Jkr5MdSlt7(@2nHBJ;}l=W3Y@)RMoIO8yhUpT@27|Mt)UeePJW!Dh+S2burfm(p_kF6+2&M`*znn zVX?l3j*eMyXo=Q`^T(d%0Wxd^4_LEp<9Ej(p|8H9gthdqq1sr6mNjXW*=<-H4eG{V zVw|V|`?hrmLw%rFh%Jrj1;ot;aJ50*?(g6Es7mN-YiL<8U5FIU*v@Uk z_pyPN+*9#K=|b=f5}dFph(Q)54465Zt8T@Opw-fm3L*2O@^aydlkb;$N+Ke{8HZt( zFS@!3)f?8O5L56y*V|Sa9&#_H>YS`!4jl3W9tU%3?z6^lnWJV40 zX`4)KpwWnHTOtBqp^vBpw$0JiWR?J#S`Cqi@_yT)-slhZ=mQGuRYUia_Zm~KbTgmu z-Kpq8VDPMgnW}nQgyvKu{ez0Cp#o_2BS>g;HChV4u;rk9 z-eta!>q|Gh0B$sDP|W^(LrweKs*_Cr-t#cj=oHCHMYN62eId>w+x5$efe zrte+y#8DSf)Z}`RnI~F6Q2^xny`7TMl{{g*C0vizP?oJ*r)2b~7LY8WB*g)R9M-6uU&n2)LL*MCLK^9D zKoxNEf8}vo)hu@Jbl7pG)yl*8h#O>S6iUwfeWF}h%9Yu3a`d>}w;BlA>ZI~4op&Ik zi+Arf=VHOQ7)i$TyZa{DPU;1}=*e@=k4jGjF3BM*ff4gm=TH-t+cm z#GSfGFsHn{TkH{$4gZk5YUc+aqnE_sdvqwkzTz?56=)nTx+>9XplWSjxT>V8O8w5s 
zgBuYP&vqT-;(n)7rQ^fV4V5lf?0I}M$k0-7Fk7?qKUt?7Td zT0zI__Xnn6l{=q3yLn@KZ4gifV)NII8Mx9ahUd_W{1C=t2{qVy#2JS|<29R$Z))c6 z+O-SWl8|Eq2u=bBJTk@cpJvr1uwsLCx6GaEuP|YDPGiL=Xd^OQ^8Ef-Ky#j6$bAs$ zWI+y?uKWbC(nEy62^7+Nh0NRZiB zk?(G2DPqGE522=ByhAajm!7Fge2Do-y2$fkwfG3hk<Ok7h!}c ze@}9hVQS^4TYt6MN16IWYC325$SLhZK^-2beJzY%#gw(n4bebUD z@w&V*55W$7?F>E$gS<9UP4_S!y17(WoQ-W$e-^~jd6LtStD0C^F7UyCXtR}y0=~pC z;E`#e&Wo}#p+n5((UECuzRUewE|T?LxmmeQTqY?s_v(pLh;+?~$iyE%Vryep0onjh zgP4ZNlP6E7#;V1KRvDRc`2akQRK5BOy7;?ryI2Dkj}%#uK1odwQOmT_x@N#7GlI7=&gkZ-s^$tX#WnnY7=# z>hf}x(#L_{Cws35@`Qmy50Ugk?DG)k>W74OF@JVM-A+E@BPV*X?A?%Qh!{}%N1zdL!I{o;nd%Ehhw@t!6nuH6h z*w+ZfCaQu76jFoWge^11bd1epXdA@?XDg;H99dxo{*qW1&{8sNXw`Hrcsj1e1T38a zWYml{#{4tTnugNHQ6H6{+YmS3OsHlq8y_(uNrg9VH5_FA3&KdjRNU1h?>zx~8RAQT z?oQ&Ffj^$ysIoZ(oM8waC>hOHE?$#mPeyD%MVjSg0g2saY)a)M3k!><81LxVKpp9_ zhc~_hZ5T^IpajjAqWn3CaYL;`1%Atisc5-lPq+jczz5JXT0(NH5kP>Gl(^=~xHRe6jVr2Bg- zBfsmsop@qvuF7|reyP`V!VJz6-hmhvQ4s6tV6`lbNK6E{zz*OE^A;cU#OQEp^GS$M zA0uiWfQ@3(YnYtbfBIEG;` zvKX>~IrHZ`*4H0+lZbqTOg&_3^XmYv+ut5vMB1VVp7{eeEo+$zxa%E&E|9@Ri7|Qq zq}+YfoZ3KVRfXCSR;qk1-Gogp9Mcx*ACWG(n{qcz>2_z(8M-sU<&24Zhi{gZ-|&^>v6o6*Q`a$f&cW15no;oQ_AWFW(MUoS~gIl ztD5QeW@T=G?qO_X^(Rfe$`xwT|kQorZs-8?+5kwed>NQZqoA?f+V zda;gMz~f2pmvSv@T0L3Kd3vsxymXmU^#WYj)J6tG>9|2ychDT~a+tvtZMXBQO;`mE~cn8VQGQk|8fx&&1*J)L;vD zLqh{EJ}~$76TyWNU6Pm^sR>8PBj%Yt`YorYvJ}HqG=_{Lfg-vXyn(WcN+|RIKxKEp z!TM!t({)=J``Kt*e}N(?7M;c0wCz4RB^lR501LoJmIM05gcJt81HoZoVeI}3O?KEl zW06~z&QQyDW6q<=ypI-(ef@}ZarE;%%={c^pZ?~G!_Pah#U>l7<`APnVq*tOqmJBR z{LD@UksC`;!TTC?HPSm`*|y>DN~HQ;&nvc`@b&Rimd#;%E%5D66p918Z2Xe3KhH5e zZ28yNA2;nPt_A#H{83iKEPc|0<52V(`%*+N{pMM0iC-f9>QnZe6s?K%DQNvp-a%Qq zl{_ZeXE+n}^$AW8+sZRTJw6>FYQoi=xD`eis=mx@>>??f6pQ_9l8o}>R#YZ2M`=xT z;NZQP%YI%WgP|7P`uzzcRU_klbM*g8-xID%gLL zX(oU>TCujT=&RWvng~J0;~e>ILs2M>XM$=gYZm$Fha-lFHwBojLzINOhAN<%muT7z zLrIXT#WHqR>qSjo3#HE{vn|P9Niu4KXte^dyhdR9xUY(PcfWF_T-In$Y#BO)y?uOI zGy7`I(wB~Xi9`OG-oj2@i|oaR4kOC@Z?L~#eVZ~(Q1*EAYk>(Snpp7FoRmxKzxk|A 
z^6_Z+RPjvE&7t?@pD@S3_3hjm@_i^!|M-1RbPG5K>dKfI7}ju(TP;iH&JBErdvFA8 zr*SKGFpHa;;A)q1g|FT!=OkwAJ3*9h?OH`p9ptyJzga_;nj5k+ZrzfB+t6K70?t;*=BTW!6S}=a$SB2(jMr3@yTP3) z@94Sn#$C6gN03lDn6nl6ahDD+sZJWn#{@8cxX__bbRv|t-(Jf~C-n$6DqpK`(^2yw z+942-&E*=$l#$#sN6h^8b(A^V&y=@c$C(+Dvt`Sc2&4)Ib@wx2qs1IWH}0!k@l~D3 zHSStYrj(s{=S92u(Axd!=1;~IQ0k{>uiNb+-M?eoHWRWhf43K=CC?c9ag_RPCGAt@ z^zNk-=DmHui|m5D+}u4|ww(LZhQPOsy@lga|C+HsIe2XTQ|zFBZant2(z)l&(i`jo0QrJmxtRhnOtl-{zeiNxh7-`O))FdsdX;fEP_%Y(W@RS41<1xH^Oi6>(EdrahR=6Vc;>>= zgMDNG8s=MTK>ZwNB;l7n?p+)i^6B?hh>3`(HU(N;w_AUM8c&7|l`8HA4jMgwab^Gg zA?%oDne)?vK@sG?`^W?Lb-0X~qz8EPmgdS_?L2v29M*G>ShCycA3C(-?V0&+ zetYUOL@`6hD(4Aj^f@>n#xC%Y5{?d_m0lc-jEs{FeaHUAVKrmI!M>*pwO3RpB?1#W zFz(Y+=7eG|vS6Mz{O4kjGd#b#mV00N)<5NA|PpM42#%$#y0hk!w4L|oh&E%OrDw70JO zrI7C>qL0T%U&tUPug{*{yr;89gT;L-FE6iT)##$%C3dn?47cL+*_C(@-j({IK8y6U={S8ojjH2TkjkOxevtf!#uBe zdcHN~bTIpxYSAltSp1WRU?)w}$2v%cBC^DtcdCQ5>#9oj^9My9Y3$#BokL(7L+u~F zYZZEx_N9+|WR9u7h*M)%`(x0VmtbXCaR_ic9}mwS1%=B9tHi$sJ0L}Cfy3%I7mOP% zr#rmE^83>fQIL4Bk670B)!ObRPBy4M4ejdC5)XJ_Hw2?EP_lMxa-uwP5?TM0L@IdG zrXjBp{Y^+wE525Ploo|^0PZ`tT%1vQIdW=i0?&*T5FFBvGRQC5-o$bRjlhxsJxgvir zwnFWxAtPWiJAL=tgu|6H!VS4zqCo2VxjO^JmC-2{gT10(odmJ-x0gdDIXt0XvLC<= zE93Old=b_GOQ2gwatpIYW9>)y>l4Q(i#xF6Q5%ME@CpejgPsrzHiC#E3L`$%l8lC$ z#(k+DKN4v0@OcP2tz~UtNwKeu(S~t)#kgZ~&Kb6|i6PjJa(m6qKPwn%r)o7w?t;-+ z323|oaD&)Qy9hvlnv;>0H5QuztTrJ@-z)*Dh613Qp?Juh~w5_(l&8CN-i2;aKfe$Wfe zRBcPwg44^yR8s>ioRCIMjM(kO31wplIaL{!r%kCz zuL{GBMQyJ_@^T2DVgO%m^91P^6&aJZo#cyq6&05xx&!eO5e-F;hMT`(_Ql0yN7`p1 zEp4sK$|BJn*bg|N?Vbq`BDyUS+(aF$$YRP^Vv8FBYe7ex$gAXBU0wG=B3%aQJbeAN z$V6?_$JlE^e?OUqvtcW)(nu-u+nS}!;Xyo{&9rVC8Purdx| zVO)Rw?GuMmG<2M1=M`Noes$dp3-Iba!H;nD};Gow9k3!RVU@;=O@Ct{3LkVK_FS5Ypm*cQIY}hh%?e4Cw?%#SlLj3b#|fT zCwZzV*oNX>(tF(oQK2V&rs8l^d|`yuutkMdllCaa8yhlvd3)bRPitw~&dp|+y|D}Z zQM1Ztjctt6b4w2etB}0q?hd0pWme{@ zN=lQAvWRJ)eJTj-S(mynxXhj;yKr+C%7%Y+Zq{qec9~Llrxx2Y&@_)X(Rf^jy34Hd zYJCqHw{N4Oz5@=V1z=AJu$=@nvUi~W29ZV)ux%O2Z;O@}Pu;qijDYTrHq6^Cc~7C5 
zO!LFZ`vS416pc`vTwJZltIMt)x~}#vu-YPgu$Bxk$b9$_*_ikn$9b4lEqQ)N-URuvT#MD<>n;*C>dgzCPGojs&n7gAkC zWGyJE5cdq3%@T61U>bnGUCHzJZX1-2ZMwYm-fbcKasVt!^L=^bw(}Jz=*b>&6T}m- zKP!4WsL_kU!JDN&)m!Ae3WAF#otXjiJ2}ZKZ(hA%nMA88nu9NpB!?J~)r?T_$MJQOHQ>sGDnFI3) zgUavqgjdza8ttk-!O+C7bAceuSG?z@V;^-3@4GX??lmqIO9EK3K`02X$nIgimwjooxN`_tWwdi z|L|~X+i>STr9crsf{y|!Hbh?Uc?S)>WCJy12f7 z?|^&ux(N)0ze~C%WX2CcE##}OHrf-hv*mG?!~)}W>1fPaXV-6HA0mMa6}^SC_l0P9 z5{pcv#d4{s&MeD`$8Y+U_;keIvhpRiTP@0tBThwrJXuzB>0sgGHiIZMkos4C3oMjN z`(%Q+6O1%0YF6wYsJSaJL8(d-AN!L42!*mC-&M9% z?L}~760#~go+vegcqWWC3=e15@V+h%7<>QvD-N}lBgLC%GnZNA#G@CZC40%S&sn8X zP~6}MEtcaBe4jgt6X~6{01r=?g*>R<1dMW&dx30*N$6NZ&0Ftk)))|LgE;1_&{no($rX`zmGQ90zG*(pYQT~DO;NZ!4ZK3A^v`?lGvJAv3a{Jaxb z{W{S*Iq$+P7h2+P3NZ}v1vcbdHYG_cO*^I_Zx`4=~HoU>5l z3Dw?YKCHatxrG#0Cfr&`RmO1yZKbmcQ<2hBO-X#rrTKhh^y`F?yNay}kSgxq=YNww{GB4CqsN>n>_9DF1p6t^KM4-CVX@v)& zj?~#c6Q^6^r6mZ3Y;5!+&7=xLTV<&PnVd9|pEdCOm;v)k9CQX&QE*p|tQCtqRflFw z)S=}PEGCN9UWXB7EYoK`P(WyOb#t>Izx@poxW>Q6IXo<^7-{KnS1e|0mclRE0=yAR znH~COj+Le$<&^sH;ID|VFc0(QntE)WjnTTa5MtAEd$b1wB=eFw)N|0*t&M(lUk;%o zFJw?AN5r4IcI;(#jQi1|~Mx-LEP zu8{`TOBq@~xoCMH>_VbYH#7GHAV0W75Vtmcb3S=n^KSk$AH8 z`^#JBGct0@si&ETpXy7m3x^c7$`91_5w!1*_CRD;K6L-%l4BVP+&ApvIF7*Kh6z)m zuwYe;3h;l-E#N_7C+FNb4Fgh61e#6EIeY8TQsl!a_zYnv9P|$!qX=j`~;$6+FyFIS34hM4}Z=w>bV$?z1C@ z$D-z!kVvl*hQ_0Or$~)b7m0FFxaPfBJ6;?k?^vnD1RjK}H5}M-6=>l)ieXZVA(@^HKI;-aK?sh!nVt4gzoWH*DhAic^)Ma3sBE{-Ylq|x)v2uT)qNtWG# zb?BUEcDEl6*>Z7FN!SRX%s^`t*^kDfaBS$Y=|N1aSR?|89;qK)-QACw6u#Ugv6I#A z!)erz>wyX=W6KQU_XZbtpq%87%?L)65{^(AyQ*oo9O^tZD$+xCCZco$&$$VZW-;=Z zD3ITVc$igL_E|{uLJ9eWW1<+uwGgOy^n@^%48z{H*t#k3&g>1tdyr2l8XL#p+RA|V z1cLM^tCj~?H(njY{euj4#Yr$eRTa7@3M)~(!+=@=tl*613}x7`?85W=wBd#19CDSYQw={imrS@e zZ|oyZB=HNC`czZ#vA?7k=vPWwEfr|4Nl(W827{ddv2b$!^dq~Y3#`&7IbQuJ<%N*+ z^*e$#G8ixX`Um{_;Bs)~tYz&0XnB(oa{nWc(=3rbq5i~`Jy*Epx;e7bHK*4Hw6`CX z+Y58-bSDb^@A$LhheXDH^0N1nJ{NYPp>2|BnmSk4Z`f5H$ ztLfh-jQtBmY2Jq%zn9J9KyT5NK~|Q>Fw@oJw-h(V;)8t%l$Tm1C?=k((ykIt zz1*gOyO}gY#=EF1bKwm`aONg+z5fegDg8_ 
zsBM6A6d~LN4fEiD?D6w7_KcI0hDrbK9s)q!pKKhsA|Fso;H?cKExR*p$I(|NrDrS+ zd(hIvA&0Y8*v*)-(Obzc1V%hRj-{e7jp=@5XECbcH{aR zMWinL{bgArxZMFf1|NyJKhrkJuT3v-@UpXShUM?82`0{!cXmZ&DU& zsB_45%jTqO${k)rW716KqlD$)MV(nDPT~IDEnqvFiUGlEq#6zh4Yk8R z9+>8_v(?o}HDTvODN3?lOStMc%^zbde^e<%S86FJ+$0ztlvK7#W0TFfBB%8NbK!_QPh9*@J_&os_awDb1-ehxY94_0M7 zV`%WwWVSPzkC6{eu^o%V(0O4oj>2y4E2LcL+b;6Q0y4XrGdHl3>4{ETTd(S{u1hN5fuQH zK3dMSSLag>5LLr(6Xlp99lFAn@!a^C-bJ72nIkrYB4;I}jSZrN`(c*+IKnqb`%GC@ zKtpgP&j)J8OfuPZ@L(thuas0x!*)v3KT;yi8;|D)y!=l05?u+|%ZmK&SXHegBZ+DN9#mMpV=Nu+wy)&=qo$=bF zceu=NFX`NyKV8SSf21sTKKNb2Ba?;$P&7J2E`fZ+3#2Oh(?_IzrePmuQc!B5b!?o> z;TM+$<;S;|gU92*n#A1yN=va{w?%gq2{{QTKK|YD>fUBg&<+})-U%F?iT^IXi*4(V^Y?>Rw1pM)!8Y z0~nM?RWel2r7XXHOWG4aSu=?)NS!!9hdlk`GAwD2`5U+g(}q(S;DFTH`u4}F;y=K( zF98jJU(+VoKib)}&YJl`5p|yXdNC*{{_3R2lYJ-`n+ev7)?M>zqvRIwFF?(eLJUS> z-rjR(!-yOzfwJ{Qg@r1XmPzZW!)8PZh@&VDFviO;F=&lrKzr_nj5fMAHRs*soOdOt z7J?uTYy-B2&g4ucA@2IneoraS{+xA=$p9+lG(Ffd{t5O!@2XTpLkMyaSmDkf47I~< zK<#d|y$`3dn%~0VDO`5g9Fq3czz#!8M zfcRr_*X^Z*gv#o8M#27E2@G6Wozw$Oa3m5O)IoMc zF=h2@O-T+McAd&_jS+H8*U)J&YFpc-;e@~-i89;fV^cAba`$w>nD z#?O!dHiHI+DB| z)te+inkJ?vbOCFr!H~bvEmjQyDBjaM?++PM4}lg8*uG;&TT~Ab2dJrCgO+@m0@Tk-A}#-I=f;RH^d^(lzV{;MhMO-;!Wc| z2KS^6%h$;0cKWT}2vLkD2lPD{B?gZ&!o0ej+M4KvX*ZE@>k~F;zwNs}0*CP+tLV4Z9{H;003a#LvV(}JeBe?SVolBQ9A=xwtA{w` z(0p5hA-8Rk0^cGYWm*Cb{yNBqv8!|AR-mt>C9z_^qPn{JUCS<)wMCLI>)H#SJqx!e zP}M%!pnV_tH=OBIo9xg7Mw0O!Y`blq_u)yVeys)$y@~WJk~}TN5>LS?M%lDwbf&ct zPFU^*DFY1SCHVPw!2BgF3Cxy?aC$|w#vL@fPN~zNPkFjfi<;hg+GNkblUd6Ge!KlA zM^}zu)ok;G$N`HqL7kO>#USE-NP73Bp2x~8pv92rJ`@}3X=^%Wxk^1D?4j5aGj5x* z5d>mB{Pa5krrQ$*mHUVa0Ct(B5{Gc7EdW#U@bl+qNj*1S8k_O4r)lxDsR3ud9p-Ij zerL?SQ^W>k#!IYPwHpFed+Ml8Nwd!_{M;em-I5=lFx7QI91WSuC^SatBBD)Lww>C& zCnQN~;wGqEm#$@Drv=2p{Lyso(zKD1LhJsN!P?2=fREFJ8^t5#41XwbZnG@R5E9dG zPNiSEPqzROE+U$UgdSSYW;N?y>TF+*MK+~L(yq3u5&pNI1fQpZpxa{fSIY96xgm~h zUn=G=@p)Xu{PR&bqe{16w|EN&&^pX7hf>SD^n0SKVv%CcKKSzN?;1+uN7(1Z%o7O4 zJUL=3G&+U=b`m1Prc5HKP0`}@8#9*uhWL=ujYlt%a09tS&!zB=80-3NPkcIQNs4!q 
z4vUzh$(Ap=7n76piYB|hYPA26q4(+gYK1~aGlJ%2afkug*v?ogT0bCd6=*+6wLb}~ zQP=b5!v&40qtwya{pdT{QpCDsN!u|?)xgc30HX{q8np)`mRYPwRAII{q2-nc(;osF z<6;J_w2;GTy|_8g#3;*XKFpiZ+#u)7GF7|hc(*sSPQ%=tSe@!idLb?uy~BVJa4JKx zXSc)nkj8vtsAZ|mOz`*sioJ`P6b~Le7_ED$*KaS~sD>A?08Aa8E_+>_$`H;I;|(1! zryL}^u2DSEg4s)Mtk5lArdy(-#zy0|qj@h-LBwe;mZ=9j2QhbuJSn99HDH+$=Msl$)Y>7}64 zyQ1*>y4!g_F!xp1I?w18I(pkgoQvCjmv==I*J$7T)*K}yDnL(}hGsPA(&gNkmt%eu z*nMB8y8Fs2?qADp2k7yS6+xgWTB)e!S@p~O31GN!+{R*WxwOR_P6!#UOym1NUd9&-G}!l z8YqYKdLd*F58KJT0Nx&s)8I9=23wYBzxykiif+V$2*v)@vZ}dFzSq+{e`Rtr(xb56 zk;w}+gFw-5^So}?Ek~HuS%qTYRY%^;w)fz$=%`W#RAO|tTB-F5zKCQfq-gz6RP8;< z?2J=1h|%%g{)QSxZ>#M=r{38LcPdX2y{|NSP`lw+%Y-*BL=+Dl8HQvrga3NA#|)(lcsfCe{xV%%^(_TneWK5*mxXr zND4o#Vtp1^q@1II&EhX?q~7!@B=FOR-_m2KS#Z=xm!nA1K$xvSL*iJGua#^Mt#%ZVotIg@s?X&Os|s^@3gu@u3@Bc)_p1DW6@TZ@`^B_yX{82U__xtbpVu4 zr3BdsxgIa*zIW>Dy~wc%u7nsij+**aVe4OGENL^m59gv`X_&%b7IkQY++N6=*1^bP zq*XP`ptKh}9PUIM9s`FQVz<#<-V7MOQBAjR-yZEQ*9Du_tjbJ`u$t2L;e=#a=c7)j z$fC_u3(QiO)t@~mk2LVL@m})qz)~B=tYCA@m%g8ce~+v zi}h6hMuguOt6^lnBl*Z)V^O)AHI^>Y_RWJE{oY@ZbSfaFQH2QY<2RAq##eBegB1Cs z-r&0-?djvwI8MU)bA5<azvtl!d>v>c#5`7B!XpXhXO60n2j`wWPMy^k1dP7 zop7_Ga&qU%G+F8(M2UObQov2|vb`;KH^%V?xtnaJ5X@!c%WWc14Cy9HlplKTuiO=& zoYe(-2P%U$=GiH{f0C*HbaelTwnKx_UMvw#IiN>2uvTbWDgi=?alwM^o;bW?A;JnP zKh7Osaz-HY^Iq!Ulk@Z1?5G29k{5#ShumJs6G3-{iTBxmdM+~L3+1r>=4&^a$u;bQr1vS7lOB@Q2|C^LIAdiItYux zwv%=JZX&f|M8D~o0MA&rrDEihMhYLq|6yKr1wh-a%~p=HS=VP2ijAR!52kOlJ!Hk> zCvIVom}7P)G?XxIr4aeP3hThM>V0iZBCk!(5@?Ucpy4uUMIlk4LMO#32dd{r0_HP$Az>UyG8w(bvuh1wS|?CvLg+v?8B`JF`6xNpz#{ff+pHiPerT6 zZ$SoM>@eXkqOHQNJxuB+ zkf@M2rqz~SHaVg4BqXKCzvY~nl1ZuDldgNbKjS8wxVSn}O2?SYUiIgwLLsX5K@H7; z#5D{|H`98%6oN9ye7c0%kn}B(F~Mko-kqA7$|oS80y7|UJb^ZU!xG+>LG`Jz_9D(C80{t%`HDq}- z;t4=M)V<+fKE|}2Xwt)VD*$@LtmHSAV#_wU*!dp^lu9TzqvI3`*xZ?f>L0JjNNnQ<+dYMLV9FzVlGBx9Hd^#u>qH!cb2XdyRwS-!2N##2nVd}YVy%Y+<}ZY7QL&@N!7+P>KQ20wxT%*`7BK&vHr$x z!c>frP(LF#lR;c&4L7VJieQ*)hmoNsNk)dqW7OmH&X0a@-Vg@-LL7P_3?mk?=zOgP zG!*MHdj7(N+%3g{Zy?pLx~J<*R2|6v)re#P5c0?nG!CBT4<|4Df2@58Sj}ns|B0~^ 
zgT_{3oGhV4(W;CgN0zjRv}m$bN@>%ceXTfJNGnN;%9heU2GbX#MotpuQ)oL#pFJO*i2SLOJJD$(- zANCs0u?0}wMvXMaF*no3i@=ANIdtF6 z_-veUmqS&rP^U?X9v>-l_X`*7)Xa&fMh6`oncQPb@=Bh_@n4n?V~iock(GvYxw|SA z#{?-5I7iZWq?mK0@c;u*$cMSO!NY(c@LXJ<7R)aI1|A7O{=LQd{x@?T^A;Z@405l$ zU0T$MVtKF$7z=|y;odwh&SuPk1Fvf%*W!%cW!kTDWw;)z!DC_S{a+#F|Ct#7w$n?| zPX`Wt=%Or(Jxi>Z(9CTxE0No&aQ-i*WBNQl-!9}y*8ecper1mP`O}6h7^YF|EDj>(z~ zjwv#!qk2X$Mljv!=;>_0{_5w{)2`q(WZEgH9nUcN9tp+K4oJwU3bxpv?toA*+W|Jj zu|_+5*bZx06`gpw9f(^OPu1swB8Ey$V#%m%H7ckcgF_}ju~vKs1Sl(1#K$>C;Jn4ipvfzu%ji)si>6SbNd{O(SU721tf(nf-q}}-ZGw_hP9f$QU>gP zZ_h0M;>rHb8i61|1qvObZZ03RbIRVJSK1fQ_N`*ClPY)rn#qF;NY2iR%&}ko3^o7y z2_6nAno;U`F?J*8;z~)q`su|*`I3lHTZT0r4ONkK@%kGqbr`!=ZP2>NVWe0rv!Bv6 zIMGK7NUITD42BajBO_=p_?P}ud*O8Ij&XrvivUAX2o6)Wd&!N1w9KERr@J$OSyBbS z|5-owO~HS|d2xJ=U4o?B)FGvw-}O3wyR&RoxKM<^!KGpUtUkWEP?f$#sA7loc9k2Rpwzvt%jf zT7PAX`|fwTbm| zB$F-A^(k*+z0GG)U8LjKzTFplp&|Tlzt0$5Bx|yHV}Lc+(q5p3Qb(tX%3q0mE!8@5 z=0M`(GF48~2~v6Amt5ce?iikAZa*^Piqzhn*2b@+jo{tQrTh~-1WZ=@d>Kv@1E50k z3w(}Z`1VKxg4>2B{$TW6Mo92~r7mSt_!VA4`0+y%&9@uOyj^f^Zl`7$ja87GZ$vD9xgvoM-4jAdhV3`rj+YABqBN@$Z!~EEdqxE=>eu1YF453dLAOEZ79( zWUZ!#h*abczmohci6^#COh1LZaqwyuUx(+F$QbHM65+N37-p?(cr_$M8T>g`QI``Q z_bVJ*AP6Hk^=el>^MH>EyBB{}4t-&*rN0<4|PCLP)$ZI zB3O`pe|CBW;Vk+a7Y690wrNucqS3H970R)2KaC4vg+S!sV6ihV#R2GBT8UbyWTQEHD;QV)r|Z1|45x&gke9pOuG)g(}bEeq@5(c`0FRhe(<9Mde{Q*2su`J1xA>^ zPtU^f&LQ*v;UQv${)r;9O)FuPP(st@@>1fVb@4;qg zhuHQ*2h)@xmw}(l&BMb(=(9x)Mm!ZPjv9V{xQpZKc$H6;vIOo z1Uin?8aWteh*3k3{A_^qB*`fT6RGdLX!`VJ(EE;D<$$?HS6@^n0rqTpJi~1^(Bs}$ zD7eL~YBzz2cm`E_5+%724} z)DU@>6uBdTYv9b!r5+XE2+)&=Ehnh4pR_EF3#Zr#LSVF!OaK6ON8ytlZ3G9yEx7@$ zeRyj-7+isZimZLrp;x-nfWE!J*Vfce2j)#|G}ZUnfyr&aobSAlsS>J!7feG>FED6% z!zllS+fQKSg}F#UDkF}aQrqg)c0v#c%mKy(D9C2NwD`1A+2gV%f^S42LHX&?yy6!l zMTD)Q9fw!7;@4#BP5^|;C>r1o#yrG#%7>4l#{<*ax$mZ7o-pnD*@4!69G20%79%J~ zz`u$ObV(dfr)DZ{Wfi#F4^eJYmPD?+6rUY>#ZR+t0xw$8y@wS07cL`pCd6eNXdRlR z@Jp|g_QpT8SWDWm{WXaq4?zK) zi%Xb*>GItMXZ}(;$*G<5YdrF6xK;Vzsf+AR*&q@U46tSC-TnC0w{GRiyHycJ)w>t_ 
z%oP5TC|&v9MhrZ{3wVG|s!h2GPeJcqJVZwJPO-G);4Z{N4QmhC3NnBVrA!?#Ms3&% z8|G7(bZkLH`ttPX&qiunpS-RAJOSGS6KL#P16oYk?PL9KLsBx#2Vf5UU*S9 zSCq&YarkBZ9a>+J*O}B95R;J68R7`o>rg9rqQC+rUbodF1Lho{NBbg5@C_=1FPLN_ z2PYf=be@zV=$c-$ckEoE72Br3Oh+{tIgWTGf=BPM+VlF>ljOpe)}BN{YI2{L2ZVOv z?5++A?@9wUNuXDLNZoJqHU$9Wd?CGF@}q4cfvT49Xl^u;6*R#z%zJSbB$I>_fT}RC zk8OvRcwkq z*RfQyaCh-6uW`UH1Z3C8(@F!xIH11w?!(m7nj2{%IS|lC5o3>y{luwLYWU(5RYlHL z%~(@BkJg*q0Fx3*hOJXuKa#W`Tije+jMYY|rwS}hed9mKFe3#6My_NK+ns8U7DHmG z()#sY79me`+C`2(Fj7)dI%`qs+L4fuFj*h{th=|jaMkn9*@i|hk$OEB5h85&^XKRI zXKLuL0Y2C(_ZtS+L{D9gmH;qLc~d+7c5}_SOLD>P2YO$Woiv%brz_BQ+zHte^CQ~L z=bSLQd@*);Zz9BLvu|9yl`?UU{SD-W^g8vtc546q8`E4Luh?tX9tNx^0!S2-^-*XL zf+ke;*GzMrlc5ok9TMErVk$ZF$sK>yl`Bo68`gF-%yD;v%C4hroJi_;SNU14RWDtu zh_%a4I+R9K;&FZH+xlVS*U{fz>2RHM`L1=LZi9fx%lvB|_p8qNbV;e}_+R)`q8cKo zYG!sFoQGkpC>M)ZewH~}{m&x{1PdLDk+ zq-VHqw%Wc#YE@%XQwu=i#T{br`=Be>@OC%Pi1+;fbw~|Vwx^k-BSR0u`)R44ZS|Z) z)WmFF-%S8eH@rLZmh80HOzU+^Fy8c1&$8MRb0a?_>vob)9ekyFT_?E9aNvjRW?%+2 zNT}FlngJbeH4YmuCZSjeV>0|6K?$SZUzaK*vN)n#9=#p*>)SG-@fZfW)|me?IlqFQ zNaYyBbIlj9O>M^Mf%(Y4uZ2sT?8S{uMm_opDk^MZpuo^?Xn%qkaW0eT z8ap4QFNC{Db@7_{XXc0Ou+Ec-GKx=2KmudW4r~2}Y?ECXDG0p`INz^BFFl5!A8is` zkg6)EH>vF+f*){e)p7H>GcA^|Xo|_ro$Kf8By3_gle%YO4hna}n)dc~7u(z2xFp?a z(Jp)P!aRDtcsZ1#?G$g&j1RrWuVVLV+bKw5Uu179>+L-x_1TU1Ex|=1;I5CULIQDT ztsy_iD<0v?X5V*G=+y(b%{*LN__-MO}Z1cGmzW29im%*oMZp-X@1F_O9s|5KTrjl%{}KBHX70K>O{ghW_p0GDkq`6vL<1h(Cq_qePzoVpM6oeL9o zsCk>>M@oGT_3N&|mL6eQ2qxmKu1>6}UbL~{BJ9zDqK{y%*>ma7sjcl5}@-j*&nC z=1CDw%2tE2oBU}RHYg+=sG?_&N6rZH%<`luh!JJ|hbVcxZI4XM=om~{0XmkJycFb$ zbld3uGiV*^@lKQ{XDvcD#O;1{V{6?DbqIYh_0pdzLYyNJFT@~jMroHt$`$gB5~`u^ zCKr6{Se%nAV)5HV5wo4zL1p#3NQ(WhCQFx7cVv9M3hDq~bJt0WR^kWUndl#?C8Tg}eWFfXonN2dCl(STo zb0r-2!&^-M2-JmTTkATGVZ!_Bscu(Z?IU=ZXM`ce)K| zkK(NA!Z2}p1(MjHRmG6WtJkAar`oN7zCz=mB`q2tB3@lJuha)Yc#GZ~C3QGaM zfkfYeQdwkV9u;&ny^-r|fZijiyLAg0^a~`3RL*b!21_Qz*7_oxBwt$Th1(wgnm0UlPGIHpoN);syY>#g zPnuRr3d;Mb5E;@v-?L|Dq}}MM@xVfa+DfgxeUyClv)grs=91enDwLC4Nh}N~s66o1 
z6HiMS<@_LibIIQ_GH98{P6+xLQ0j2X#vy9R=(phy`nNvDbP;qYnEvsLkw*Um?z~_E zZU{P5!fn3%Q8@QCaj2$TUt1h&MoCdP&w`*3tqD52P6gZi!Rtm0{uh)jJNx{#speT&j}fpd&-PR2eFfp3OvoZbpgc&*7Q zkNivrs1m%X!F>15cQzMFHs%Pw&CWK#o)1Il;N(Pv_EtnoB*7CNCg1gD^g4GQ20Q4d zrrqxVlqIixp^}cb7yJ=WGk_gDPY^YD8O;CM6i?M-x%S?h{NGVP4{N>NI zk%PL#-ox&Cnh6ynb}OB*tjfq-)KjE~?-Nw$Cdod({N*PYjQeVLFBFly2@N%7r2rh% zXm|-wF&tPF+6=G(`$f2~Z18~M+G0d2F{|8F*BRt~dTQoTz_kXSpLR~Z8IR`xT)^VR zw>P}QG`Ow;9%npOPyQ3HF(?5(iqt8(76}}z{h!g5-?qj)PN$@}zRfxgtpyrgW%a$8 z9GlviGl3w|xom3R0-Nt&>Ze116oI(?;CqNQ$xkjsMAzWdEuNPlHhQyZb^l?_I3$_s zU4)A(EG%r8yD9AklZv5mYvjQ8T<7{6<{*5)%N#f0x-_TD6mE8+4j$U<&h<)ISa-Cc z*PBo|hwHT=Dc8XS-u*}o4=2P08YP0le^|o(Bnk6d?vL~An`$C@0S3m7L_OP>RVzeSKw!Ij+qi^+WB zd<|B-AlQOvA;4*l|8Zq6eoHA3;W?&t>~ybIjW1CMF2ggqX45ufLLuFdY1KHI#A~%tWS@&7OP#+yHdo@7T{_;T@77hI;%W zfr~IcY*FC0`SO>bdh}#c9Ib~-LzurnWS|&A(4Ir*9EtV35)M;!NU9j&gT{zFy+XwnKAs^XOrk`)TESNhh#qqlfu{;uUi9; zPF0}_lCW0)rHQYt0iPol#fND?B}VuJg-^#sUtNOZh(qE+#g7vHrz9>#gyUi&{F*Yi z&`}``amI`pc^I67XeB};UR$H~q7z|v!A7X{rzvrnoRLQPqBe5(pKuQ~QaWPf?ITvG zFoan6Kbfao`7Mk*k4!Lo@f- za}u+WT=8?iA6i)C6>T0jbIo%yfkH(W0TUKxM`@rRCJKSj9+dW!j1Ttc7I1D-WKwn; zvkEyni3kGGGA>3x)aeC1U)NRvt|H;?3cS5iHY&78gaxhjk<99y*aW2op6{rXmrn=B zFjpul8UY5NlFjH;4VHk;zBhwd*`PdYl*<`^Y(~D-SnJc%5;*a@F$=!LxPDs=LXG1Y zRA3UnmXv(Zc-RoKMcwv`myyCy0#fe+QAm(3M8(mrcM$sFi{R3U$0&#$ryW_Pxng3! 
z^Zv!Pp1g9|T_~)T!8cIcBV4&0LFo+QUD;jVb6~dDF^bJCG3&1XDxFBFQ87VYOHB0P2I{V!hevt12srChZXcOE~IQD4_O;_EacK5I35cfryy zsd;I`x3L%_Pm_x5>7G~jID(`dJ65w1Q$+S(-X95)HiIi7RT`2ar={J%0t0s%MzN6~ zJ~8JFG15(!NtKVM9|ayT z{Dp{{CQ<^B;pPGIzxSp|ku=yK7MpBtNLLZeD+C{xi}D)}u>2FmV?*46y(o_m`!^4c zhND$hl&yd>^deHUcJ*|%la!B_UorsqkHVN?EXV`w>b5WAzY)1VitPK&xl52_5Uuu$MKQpTsxyjHvoND2+(8GP%FofCB)^9C+!A_T z3EN^y7ZXkTbTRo?-8p^wVRDdC3;<<&;}mf5HZM-FnJi;tTi@!-fK=Nz)0s5Ad zItpK7KxJj+Ru9uZ*MSDReC0|w25qlizdoPlc!12T2zl=NWCZ(~i)h7Odo4Pu5Jb2w;;F*A9Wsy}3%~^?tQ?}O zL5Nqvmw45jmQoR3))V{*dhupxanmD#ATmuqo%vLUz$=c&=DNl+G08HpIkRqUDT{jL(Ftl1P7Mgw4AVPFT5-XBdi zsvoUru`9k--lQ2$6Nz7gA`{^pXGl^9iAU7_WUjk8)Ca63Ql#z~K7*C?(SAW7mrkGl5556I(fR4(4_d z98lw9j=r3}GWL(lNTPU#uvzOuGes9Od@7R8ch!JD3`a9=0S4U=dH+2h@g=yMoM|DL z(qU4id@#Ooy|KSU>-yR4NKWle%h$HnPPC2!HMtIZW?PgKRzI9srlof$TL)@bei9v` zR&+ZIT|#F=wX2(3K0sRJq-F=|hmz^oX*pMr(t+tv*2vf_Y5j;D1Kg2gV^n?{Z~baO zm^t?k1J{RcHXBY)LwX0UOtfp1 zQeO8)@F*~%Bj&6??AatfzRLh?J4&`EAPg3a5$YF;?tT>h z?@hMME|m2>{Z_1q=HoAK2?iVOFkH0ye10^{hOh@<(+B|aX87jBBdI~;U4eSyNS4-*Yl*T(RWrk{;#6VJoIS@_f;C2QD@h$RU|?pk0JyULpmq zit>}Ly}eC)!agQiRGOSZ`U!;e>MDBcBJ`0l!ked~8Fc8PxBNl;MNhmmrzyH|B?aEk zv)eUx?a3K;OFmNlCI8v7!c1iQ#Jp~s+1)n&Y3le%lS1MDrt@w07YnE=FqgcGD@Gt> z`N*?=zV<4dqJ97Vl@)aR&G<;$* z=)RBilq3a7L=P2gkRLXBD4$N07yg>#9tYKm|GeLOHw*@OFFKp6R4c4gFIFeqYy6sG zH8H(F#|$}bn4!S7NX;~*5Y^V7IB{D!n~3zgQ;WK4{>i%5KTJYwT~|ZCW9{`+3v4~= zok^&2&}W=sl87L#^uUP8KIU&99=Gjo$roZx(6%^(;b^Z*7khlpK%%T}I+?BKxrs%U ze2ziVu(4iI?mIT5?J=qS1_Xa`d%e1#oz4{h%q!T1I`fUDl!U6z>c$Q)VYq8|#HhUfI+d}1{PP{w z4h`2$(q$qatVLkzc%VhlTs?(`6>T8xz}6SgY<7CZBetmK95`SjP)F<_aHN%32z#1+ zK2xMdzBjXxAz1CZo=utF2!hm%;nMM6f$Y?!8lnumnJ5xPwAe^}c{}(*5hj#>6vgnw ze=bw&XhG66Mj%~UF-L7-Kfz6{?G!S^V1BQ5uaq@HxzMLRX>E=CF(;~*h0B! 
zFN|xv%`vy_HDt$$O&g0Ee#Wx~pjXjs*w=q)hX#?=^qKc%aPkKDbc)g4#3-8|VuLnq zpK=ddc=ixMd+6oK_Jlk*NET4XiJa*E8VcG%X|=_84V(ou5z7EYa8KsUHRs@S`UgiFOuB7- zyf$3Ew>`6`gB5SdEN6QQAefMVZIJEqhLJ-G zJ(b`AE zz_O-x3y3Ug$cBi9o`h}W2DP#2?H24Hrzl9qng}xp;yQ{N9IsFp!)}J$v`Vn|)6tBj zox34}+lBcZn#qodPF(;m!Xc}SDwY~IZd^-8P1N5EEvdOkGx|@G_F?ZLO#RG4zNktA zw^#YlX(Ov4D14_NspqzIvZ#p23ji!y@0uPoT#ZDk>}3R;Y;luqJk}R8P0?boElqwm z)wXgv!G$hiO_-Ij(Zb>u&Yd9Wgl%N~CR@8GWg{}C1$*?|g*V;^w*4F98oju!zvaDa z06U9_uypg%C{$!#bETw7Qj6eT&JYR|^77ryXCZp0Pe-^BK{1p7ErJ0%w(HVvG~XRK zx)K{vK30AJMir6N8Tw3BNIz&U0G^C0D2U`PY7W70m4@VA9YKfYGl)kSL20ZIWFxDG zPy}&AsE(Sn@J}QJDRyvm>kOV>;i^GoG29#Imr~oJv1z5gUy6FsEw;VzG(Lu!-4g3* zxK5h%-;g%!=bXVcX=Q)us+)E=@GKoGKJKgTP6~UfXztYdYA4-S2oZ2#PYqefGx6eo zRit}j;7PzN%Bi217S_7C+=RC`;ri|Z2uf`M@{;L}-`vGi1Io#A6i^s%lx!rZSn+dC zo*x2DZMo)4brIlX5bg&c@`PlcIA@|Lua}cT1hYLa3(MPn$MZaq`e>zau|@5?ds`c40R>IhKFLFqFgp zdlJ=(U(9#%CQRjmX17jDEq>(?i`9;ClKnd=SmoS*kSs%g;V#jgPmjBmdMHFg?ze~8tVB9M7F_dN$ zoe4muL01RF7gXN5d2tRT1i_#{DGjFcHA<-?VEW$^G^XkoUsjp`E(N+Lwax_WlRf1W zQV~~&Z+gqVeeY(^M|-^bzZT;_e(lnlt2db}n0tv->TSZoK|#{E1|(l0CkBKWpOP~X z=10#n1vHO7fLgxi%eiqF7E#pmA9-x@BSisBjz_^4vSYz+&@bb5-m-01BnMCFi znx#cka-kbz2Z6JpSh9PFbC+V z@~cR3hQV^iTY%|M&xPGzn)14C8&hfo<-g!a!8hx_K&6s=lhSET?6h=j1GR@K%{m<= zz1_M%STOyQ#qQHDOm+OE2T#YeO_#YWSg!gATnm5IRsh9gN=c<(D%QCJslLO4DxuJ0fkB zM8HsrZl^5OFMp<`b7am~&Ux4b!hmHMRXz`#ylBTMIDzNV@GNk3LwC&i@&gR#F+{w< zS{V2`4uc9YMe z#r&x{#aZU*g+Dnr;^oUtKfe)J1E^*XkPmJqf?CG*h8+3uzNwvQrg!4QkE6fdSn)7Q zZ~gxiP|#T4IFxk;C>y}GqhC?>U+PfEJs#N9hd3_ixp0nhQX*kfL#^|UF^p&{hdS0r^Ha2L)2>6j@PsX*i8(9Rl?cpBqp4s4QuR~a$Gw8cUb z!4-b}twLinqOSjL5kRtFD?wSF1CInHG*)FR^$Oax2~1T#-{aI}{>Q2%!vdM78Nyq~ zjQAsH?j{D`hIzjXUD2&+^{c7I7(PC`<>Q~PIAh7T>GHt?gML+34q6^^V%plFhnNaO zgYug?XC$8&V(xc8zh-7dcbrI+(Nmj@!krcGi=W)dZ1Ry2k-UwqsPV(>kx)fCMVv70 zFU=Wwn>-}qA)yP0uyoRqK;*3sIs$XXZrhy zYaHc~REO&whm4lM;8L|RsjJSiNBKT<@^4(W_9)-!ifaQJ`8O)~J@8e3wXjd^HRzXL zD$z8Ewtbo#8T$twrRVT>m6twdeC%o4XXU}+pxt~)HWlg0LIOjbXC%0M+UZ~Es|vQB;b zqId@f2O$+-zFX#-nr8<-(-aC-qA*b 
z>p?pRt2X?N$t&OrJJA_X!sHiO19p_IB*!2kN7myHazUVCeGDixHC%GnD#A7 zzy#uop2Lybf+M!-$P|<3AK#_c6(oQ~mo|HQ=r&sUvKG{8y9#5?)F+8JPyT({%@6mz z#Y@IaP#hq+9;h&L&b)c)kIpbMmdhRGQ^s=0HQzKyLV(uR=g(}t@$$3!l`fMFKmghT z6TIcr>j>K42q^~a;a;=7XkU31vt+HR%A3YvXmT`^m6g?xj0iX~^s(&x<7W**UV(-c z`gm}>#FQ4S*}h-HmuvC)de8za-0U%5-vk~12x{+%4+17LD@Rl^lP7!0PE0-?=w%(ZB-Fg8C_GO$ycuZ$AnSW9uEehE2W5$#| z9mA8$n8TwN$;)FJhsy9@R*zP8?b5wv$Nr9*!Et=qq@u1a0u-tds++FmTwmg)y3I{H zx@{ngknKAsM_e@47>>Cl3_&CrdQ@VoH+q_|*{f42${9CVJrc%QG3tV!-l~HJD2&hm zG$#}d850wOFr5(8q!jT0&`Z=h9Y8@2sAUSBtwhxBQzUY3+H?-zEEi8KrTdf@&ngwZ zXE2O?ZrK6vx({z2uw#dh`{_zp%z@|7L60}sizrnMvgYpF7u(g<1%G}ZT;P;RjTAC> z18>UiCps0D%u!QiV}+MTz5vV70;sVPq~V-KTO4)RLYtB9MTm3`W?NpSkzeVZ-KbD# ztPV)*-)y>(C5CaArz!Oq&$A*7b-Zcm59WU=3*y{ryan^$)~`f-Z%2(N&8%r%X$=Oe z1@$RSeD^qdRPlP8r|n#pX2QPfn0XmNK$V}LNMZ|DXxoUe8zSWxI}6B9=)nS1mP2R^AK0_G zmUg2@kG2HguN`NRMLu6lgrp}Qer(%v4~;PT*l@%YLm9QYp#r5}%#t__+u!7ieGwr(tXeue3J#I zPSx2azp)kM>`Os%%N)^jk2A)D5ob~Lms65ORrbmHCTIa}^)_gaeXwA>fPnVfO##xY zn<6fWmq^n|7lws#_{fns>5+Fse{woqeN_~)VFoK3>H%T#;>8)a1;c?NP=-lerl`ee zlMAQ<3j_=5n;eqBv#Rg&hjM&_l~wDCR7NEG{gjJ1TsY+mT_cTkJcXlvxa*(asqUZ* zT(LqbvLKvsV6QngSKIri#GqTQdyt{9!#nwB*)&h`PaMUfcc0dl3CjG zNgr~Q8UU!O3JMCL`Y55HAC)90Bc3r^jQJ4VCu(4AUHN=E<%N*Dk$whh4Zec+$PDLv z__;Zbxca)g1uGWEXBO?WX)Op?Hqh>-3`P{M(a*s79`NbYrx7OlEthLH*!ZP6iTY_Pbcke35!AV8Q{gkXvazGl6OQ+$~sUo_%x}ZTR7L!am@P!8E z=BnA)+0_6cpc{aP&h%bp75<)2U_<+%qesJ0qQxOiJ@>|TCA!GPmD#-mEn2v+)-7#eczjS@M$u8VeH-o_zAc*Om|3E@8P0gZ`z92=2d5+2 zs)571iE1ylt>R!LX_H#JEYW-DJ zT|;L&uAd-1bBkP5y~JC4_04UDLZZNopkV%5ma5< z#wE{k4c(inrs5ahDVF!J;kbTSp_qqzMZM!1p*QEk^2MZ9V>#f_+_KHUBuCuC+na*i z3}Ap4aur*v%63O!$`ZuvT=dtQc*K=o>g_pafl4$<8W6HyPl;xN=cF`8ng>hiAkZkqEGSR1us>L5bt4`- z48Og#0`|;2z^O(~NEoo9@dB+CXu@U#_@~MY&;&aOQkV^BrV=YBUO@gQx`+_u!XifL z$)K3+lbS|MiKz<;UmnuB`{vx;i-b(|^k2KzZ5$k;vsKx}N ztX=uzHe>rU6Zf^)|M|v_{VTcM4oC2rwhI!SB@6eU+MAa%uNKhJE5RW{hJ>R8T~QpR z5#@);P8RReC$X{-cy@B0oemqj5RT-hJzakl42*Oy=PRuq!?^P9;*7s*bZT+F>OIaM z4Iijr26(JBwL(;?`0D0GHv7gq8lzHxnlHJs2MUqxPK972$Hqv-Z0<$?u!oU9vY*MR 
z?wBRk-y)P^Qfc^kNVMm|GuR_5eyg(41Wl$Pr0d=7u#StyWg>+1rJywg`wxvtpZ&%G z&d8R=*4CuTiFeiSvq#UK*X~UHDFwHXl9c}WL|=0{9EZSHJv#cAdX$1BF(>&Q)lY=C z;5@!AT4cN(s*PxQ-jO47)it!$cVEBVEFP25 z*FkgQdm8{P4r&@&L_VDn10^jTwlpIA@ z{xb&Nt-VY>8^^N8zoK1P51ff;R~S)6cOI=&!{_tGV5)WV5y}GMY`z%{Bqnru@`A0- zV91#)c$ASMtaiL0KU`xyr%BHw2RB&`SKNN`>{%Z98=<=^&_&x&Au_QBQ&Vf*k^yH@ zrH^uE{*7nY#V%o?yy~$BjyZhTusfxW{YA&!EFWo3n3^qqElD#Gk27)Qb>rQm8rV9} zH7A97cA$!q~3VBfID?ASig4)OUY>Ru6lpxd%TrU?W)if5( z0BXh}4}*pnYZS5K@f5P$*@{y#k7(qq-p&21Kj6L(RNL5lTQJ>oIX2MNnKAzU{)hk! z1yYJw2^CyT;7A%+D7cxI2=v1dOY%Y#DcdI=!T)o9l=c`5(DC6j@pz2x1{>E2@l%=k zpiD`L3Sv99G>OXp_43_nnX*l%2pV8zW8nwVL-5iHgWs~{dp^1?eDn!ydRkhV5)ok- zQv{Y(dH0K0&WL%mcSzfUFZ1WBtFkk?U<1%<2unY}R zeB~$j2q}&Wwv20;!*R5{0QN!{jij{J6*RvzWE}41aNx%Rz}YZiLE53?)|#AL=vXe& zLjw}E8kGX!S@oHlz%Y^a03EI{h9%P%$A`~F3@UUBTDQz}*tqiK@+1!y^d*$4Ma>vD<+&7CW#>&-vqNRZ4oc9}5pjJe*N3|W4A>oW z@59HBW@gNg+12fjHe{w9wXOHViQ;DKXqMa580Z%-`cE7%ea^Fbh+k{q&G$q(j&4-~ z8`t5ZN4@OiA$nHF0Zh*m@G*4UoAGoUzVzVmi08Fd{Of}6Wl5eN96Pv$-*u(j5Uqo@ zyo#6n=!z>;$?_*YKp*vTN)cKJOiXf64YOt^iu_zh<0c}-}O?fssQwh{M zRGD04ucGe$V)$dwZqd;t@fue*={a?Q&X&yEGErp$6xx3Zn=F`rE}m`xt5HF zp~dGVsZYtn@l2*F`e(i$7O&?YTqrVnarF#Sd+!q=Y9$43WD-=cfB$|qhX~!~-~T8u z+R8udum-p`@_{lko3L2xPCMZ7*#eMh;ab&HH$m&iOZztgMt$0wXZ0YTSF!Kkzt106 zf_u!$20hCkr`!Lg&YfoaZWdrQQz=vTy!(Aw*(&yf9Vh#jFr&~5pOTsnCa!IIU}tn) z6i!OsBJR}5?tRj>MNT6hTSxp(iLM{xzLLRh+UA1ttVwcSg7=_-aNdrWpZ zz}j{Bah66uukFu2*lC4o?HSfvf-JS)yss+Aoq~U3Wn(aL*=YwZE{^j#X?~Ne+=yAH z)+4|Gs;jWv9+X=l_}Z)4vT|~kNRHExUdedz_WOwVk6}07Jyu7L9yP)vi)#%^mxxT@ zCmRDDz{9Rteq`hZR4}+RSh@EfJP_a)bM_XIoWJ3o!EsKLjPtt&EyK^DVOnKERfq{w z0pozZ3$cIABo(Ki8k~P=;*O9elAOH-|5X_58(?&(Xh=l0=sp*$5Ibr8&RTKcfp9Q# z!m1FB15CIZW5_xn5`v<9MzlfBN@Pk5=jHXpc1kIg@PQ&4VHZkDX~(Wu<`eO?EERM{ zJ2HGiPoEyM^AMMC#&pp-1AxVR8d@~SxeA_|d@OggHBoK?GQq&ofC2_RG`l@GMsH$k zH`TW^Yr}_6xPRT82D^K!4~Y*w`e>VrTCH50qvPZ?MpGZpSnfItY44qBpioIb>W?x7_B4%Ku20e zmhszZjkGjce--cEy`wtr5-{n{t?&VF!qWeSTV>2``*2`vizi%NZzp?Vn(Ak;b=tIm zus?XVip@GWnFHo+8Z_u{5c+#q_x5M??B``%gs<8m)dtwn^Oh;>xG`YiS#6?YaM+R! 
z*m9z?>V`hZoFL$@m>En2R(^-}Y@@<^0}dea*IyTbgmd2D$Ez0>>;4O9@{gH;ck7;W z<)SfS1-RuljF@*-a9!Sgbt9pRgG=L~roLSIhxEoG{J3F)yYm8iw6$?d{sqpS#~q+- zDuWN2yAQzb@O4U@3qj}I(b0ld|KXyvmj$KGnpKpi$5;m$`b8*ee!lrm;tsTTF5tFI zFgPFN$5(7$95Q@NZ*veZ!0Xsqf4-5?uaeO5-g7K`Im1T;)CjFV#UJ)8DGT5C|87Ri zgLL|7E*wYLu<-WTNZ!ELGcwb5uDM`~JHkiI3I~)$I-3PCa^JQjoKNA0Lyta%D~C?F zc7O|D`D&$I=gvR)msPTyJKL_*N>*C{Qz*BrRtVw#RsAeP{T|L2xCbo!7Y>D&zruw> z;RPJ{R5#%BBUspX)jTzvx!(jZ$=}y{S>%J^+%Hc%$9-z?f>+7ir}qRyhKEgmrbX!2 zXTB4#Ev8T6^dYutG(BRG@k=~wM5i<+)NYC_ariieLsa>S{qsw4J0Tf`{|HcbE_ma| z2_y*nr7bBT7VriuTcsyPcwD^$N8arm{ndM&do?N`MP^d&OU^?0LW7DI64U(Q%%@VrZewyc+Btq*gshB zi{Q<_xy>$P%gf4$Hp(IjA6Kp#`dLH|tDs3DaXcpBt;Yb2NZ$Sf&Ur;v?Mg8W7|y?@ z_n;Z5;RG8w$jI=AOn<;0uTnN0NFd7%?)?|_>Z$GN8~(JnPv~fC*t5rC%8@{Tzy^1n zc?bo?uoWf?a7XS8H`$8-xndV`N%DSslx{L!?5~GI0UfPkBg#PB__a70(OcWjo;}-w z?#>997~0OflHML`6nE>;azzfG6dUsM6}gyaWXO{cFqY!+dTk(*4e*uOOw0qJcw*q> z+s_#P@y8!D_=(6MZ1Ni+O(4%5CN*U6O&_>@@4}GlT#x=|ZG!M)OAg0-4Er20;~T(z zZ%W~}!U&0L@~*C~A}=a$e{5=^X-{>TJ5jx$THJY*tF5;ZkLP$5^U>+lWn%cD^}RYf z%-?Zxa^lPc02W-D`^ov~(8p^w;&kSKC4CRUSD#?js#VuYI>v>7_Mk60okfF2ka8r% zmrIQ2DHyHvxaS<#li(a? 
z@v$oY+M%2)y)m*YfViZI2LmQ-O-1M!Uq>OlpNL%6HN9Ydfh%NTT2^gJua}>niBs*n zcdEf+6(-j8pBh)KUQLs*L}H%-$%I*tIJJjR=!P%I=LDH(oa;!#B2bRyKg3)v`yR1Z@BG38N*I@Cv{tC#I@y zYV7^%X=9_tY6ZOn6a|e>=JX^6wZGValaxuFmd*Q$@dY!5rm1b#;puhi9daU3l!c0H zF7z0(P3CG1YqX9RFb|2(Jta45?LC7>$p^7wr6=V=sdDx1zNoM%+`J%EZS62(<+%bIGX3-<|DmO@f@B20j6F-fknj(DfrAl%RyFCe> zDR6dC@~ka>8eW<@`+PRruiq{?Gc#dcUPhWot4+QAnv4#=-8MSfhBj}^a^KnsKM-r* z;XCK!O91XZEpl})&8}+Z?F}8?D|e?|44`D#%bjr?EIjm343|d#Gg= zb)Q@P_L%e0==lBor0LTn#zcm%0fJn|n(WqYEQ*-C&ZIF1K+gY+=$h~`B zAqF|N?4Sonb5KI*?(Tg4T;cFxK9if)4Rf|{=S#k@EGPJ0#rWs?pb?}f-&^tE!S64k2IvU0xJ^78T$#+p$n{AWE3?=&aN zR@e!5pR>0g-n$O~Nu*fnw##3xX;u29cfF}+RrFn-td7Vuh8N6F6ao@vIR6;^SCCj$H zK2?qFy(w+_q+kQ@NZtXFOOWJG(j7KUAf)t$2?964@RR+4UDnVLMes$s{{--p5OqrI zdaeLN)t%DY{YA8^M1Aj)RXmEFHvmd#nRdb`1lLGjPXY*&buL^nlo=~< z4VX?&y}ysDX>Y2~HSI*M0mR8`Ro-;R!O~ut`>FqOlxopg1rf(_hRG0YKv- zTPq+m_q=lDO3pj3GINTw`?Z?l#CQlj0uV7Rj4>xPnHZ&RF!DHF#jubdKvW@4f0nRM zo*eSpC#)Fhz80I|kB_##7PRoZo_BCn^)zJ%WH1vSiKD@t2{sAaC3!mX0BSTNVL*7% zB2{)-RaGdc^=eS{^V%x@T(M#fqTluO^iVQbR&uckA?>@YE}bKlEUe-k5LYM_1q03T zmNdH<2e>LQGk~j73s{JQ^^rI-PYKjcg1Eu~SDpfd*7?bXf^j9~ZlZ0A3V0jPD{rfn z_b7?8)rl9uD<&Z}Zjf{eQ<2zUmT3=7_J=7{XIyfYd9<=*r%_34}EuR+>L| zD}a7dD>7+|n>tnN-9hi8-z~xVo)oRi#CXP87+h|{uMo+KHPh!CKR#A^-8v+lQW$WBVJ$R0s5ABJgO8Jp3JiD>0 zFky({aKdkJMfnlE^0?{42h#~FAlyWO&Urn4UW;B&cv8idQjyO$iV->H*I%bUSWuh! 
z?(JL73vSCopm{{^bVbOSzr`eYQ=iWG!!xISLDNg^M-&c1Z+=ZSvYp4-d%rCrqY4ij zcz0PXb6M#Kc`Qodz5ICs^b+wNx5*OPpecbGDGP;EgjCVrqJO>D@LR40XpFsPNNJCM_SI&YCdoXD99$eBb=V6s-}eK61tu&-#1AI1n+NI#zub65MT+fdK7&HWqN z^VN@X8qx_jx86G--0p+G7U6dO049C3j-3;a1R`HVvfJD_8>ZdB)l08{Tvl(CQegFc z4Tj+nbY)WAi$*?p4hN@Cj}&9;cAq`_^FE*d3=t-RKaS{0w$LkZ8+Upgbu z2?4`~BtI8@u8pnA@He(#BN8eX>WB)OQUU?U@ANAKWH!p#HisOz z#1L_{b@g_oONOOY+6whSpyE`UKaqr`jsu&bE!9?Ps8!silXAYduTM`q<^fK0>^zHAYI=ik?#&-tcqC zkxLx54@dM|P}#XNOA);SoPQ?ioRCgPX794<5BpD&<%O~rQA$AIK!v~rp9QDk*C0hD z9Nk_9-XxP_pEJw=3Go7_bFVpuZr_UTcVGc#Oxpbo%t^ldeh-vVrsJf1u7x=N53 z`yr;P_@rw)?zx0v?rh*MyTEwBpS6we&*5t6&*R#pV$xwWeSYejpaJ52A&%6cKGVAcH)TfO{ zF>C<3zYgT=Y3~+Pu>2DMF+Jy01$#9K$~)ShpFD$|3w0^gqoAQ^4N#SlzF`0Ab`X=G z@FH-6x`$(6In7Dpn>;xTiExxnz3#-A5ZTZB!2p~=g|9ieJa~3ZEbz#og| zh#Od4(%gb6Y#_6cY78y8a^!U$9v<@8fO1C9-w-r(|5JQhKPI{DRKdKYN!$kG8VAgS z$wDfYwZ$&y2c-H(Eme9_G>OKxP_vKwl&q!S5Z|zr^0vYEJVykY&-m%eHZ*rfZ{qf( z(Y1GWzr$~VM!fn^B_ow~ddZRjb5WJvji6*Z$W!C9jP<7=Mt7;=kwKEV*d&>u_uD*4 zw?moe8G2$ouD9*lOt88iKYl#i#qHE7Xu4k~+Ql54F_8Z#WG_@SOj*49wBAI%q!@b; zUJyD^Kc4!0VOu{ z&vn<%F%)l>8$b_tIK>~8^kYZo>c^f}P+#y(-ZRA-+To**evzC^G%&so?mv?S*(Cj) z*ST|*`01Te@VH-2IUfvOnsF3BV*i&$m9H3VV2HI--ZPN$5iyBm)&MPiAWc!Im!B*c2^w8;&L07dpAXdGs8vjjZg-wy z3JDoJb)Qo{S;n$j4WrbaE;zNfB| z8Mojgfx9E7JaKd`2Dn1_sD}>@;cfP$H$1${KCgZy(L^;|0Fo}5)85@IrSUnvhg6WnB6(&NHuY%vWTeUYj1?>$3fcKJ=+2ldOguP>RxA?1d4Gh4h6jI>838Jr!BxtnI+=p=Ce*?qSXlJ6PJ~GKH!Tc>(9FJG4sMGMm8aQY-^H=F`o| zotJPfUwk}TEg%lEO89QQpsW&MLB>uhD@v`UL1&K|WpR>IelJEB8b(zBI;M-arL1I_ za#7#*U>FA-E`0#1_6@unKYq+2h#DJ(E@sZGxL(&*S8(O;y=tN%@PL$DSxoFS#R!A*xU`Yo#=e@*9J}zLEet_bKZk} z$AM;tp=3Kf9u8H5@P(Fht_4x0Lt#UJlI;e;=IQC_Cruh^YQk~nDJmum--Ykbp^_;G z6={aZ6=hZH#R8HBk}QnS1L0#MO;2ItsD@T14^2Xvy?;Iq_bRBfZj0U=8at5FB`B_q zF|SxhBkO11R_nbp>r7^^Tn;2+7z)`-*}>t=m3Mz78weda-ttaKY&Ls!Q6yS28a;z) z@M4D#9a7Ufq)<-x+-J|SwP<=3T-cw*xsc^_z&zm&CqHI-Ve6d>;R20+hLn-wfV|2N zm%gBB+Q2MHJDHCUUTdQReIRiZbShD7VD*Ci!)5k;2%z*C97QzW1VbsVCszPs01y*O 
zvO?(ZM3TGqPFOiyjhC|*Uz(W)Rs&yLVrK?F<=mp}d~}-xu9PQ@LLm%;Bmp*#sa8V3gZgW~z_q#`&<2_w4BP;{=$ zLh-wLa8n7f;t=*g!O@;}T_gmg0pCaTb|G2Um3Or1F~~tVnjZ_)nRo?GNN99LSv0bK z#`1yuM>t0yPr!~!47?1wZ!UEEwa!$OWiN1gpkT-LP*M4T!Px!~d**(qSQ*YwC&E&T zUwj#fq&NO}M>$P+Q{)Z$?KR!z{4O23NPO()oI}PDTMR7%q$P3=)G7#`QN)r~UVT>= zmWVFCNjP?4Ue}SPoB|6SBRGo?Ob#bQ*mRyI{$91Y)7?}Wf z*`u0-5ff9+E9khU3Xtl9t*A{`y>;bfl=g&_L$9FyA$}0UaB$!HwM?aE-cYh&w9118 zAc8-4g^m1Z$Djsfv9HWBk63df8An+d-F$I6JP8s?Ldk`jWm&cf=(f@0C? zGrLX)=rmYL*lO`s#`Nk;)Thi1nHz3?-ezQ|YRN_rix3MmpEZ8WJSaHW5K<~^lxS)q zd54&K;~`~H6-fj&r{P8+{|TrblLZl2i6ZL4IuBdK6IPIz$Fai58W9}r^G=JdFTCP#lyqYo(8ebu6EdiYG;CduhwnZUOePT z9bv+o1*nK#al+VBk%e#skxxB$cH8vO*@su-qiSeddR)5yi|}F2!r9;WBr`J~$5jAH ze4#+XlMu-}41*yQ6}>u&p+E;{nC zk!NMn2AYmTPxh$3NaK}V$E%)C9ms^Tq?Bvvl>s+=JSyNe>wx$Z{pIGWiTj`K-67Wt z5fj41{a2?BRr~F8MFp9uT5WV}tR%_@XJ-bABQFDaWTTL~20d!Gbs9yb!h|j@`By*} zzjmgv{_^F^DS30pz>ELK+L=K0+;?mLFY{cHOc_$7$QT(*LK!P%D)U?#6(yAWE)t?h zWC}$C;YI@@63LLEgi4W=$SgzT{p{T5ocBHNTJKtCt@mE*tmi!UL;e51-}k%swXc2c zYtQ%>W5&&s`o`+XmqBVNPS<-kY1(o4-q8bN)y9;1QQUu~-|Bj$bx^3++OgF7J?St7z^vbic zK9LP$u)m!c*jmS^=7wyrF^p|k2K^>(r|`M=X*|Dp{rYvld*89PC+Ca^E+xBhWjPDv z@ADm4U9stHasH~O+)yu1PeVPsh%0}pM5VZSu%S-36Z+pAiM1F7azV~B#l#U@)Bogf zEg!Te!(}Xg(4QRCjc>tdgT#_^Wy6Zl&aIqU+Wk(>-}_>i)Ge*I7?4ceP}{KX zzlRm$vKvR7MX$|v%F1BgADwye=KJGf1_;wCj{mGc!<8t=gd@1 zeC*6MoLt%1`YJeo?>~iTvyQ$|SNwUBL+ks7&!s}2`1r)>JH>qLy;;2{JaG3=R=D&b zIP`6j*1c+g(2Oh^z0N@-Ja&-_a+d2}>SOU>ki#{-KTsA_X+!!bY9;CcQ5EK~@kly6 zccy>!tQ*_n&plH!d_C)$u}5D2hoerb)~i;p<%GFe8;T#5jvi{D?mZJmw8ye0uMO7Fg8*`IImHsjy-TeO5^ zD{50J=N%ABG`7wS)`}SW1}KK#<6U$v)+c5SlN|~0gl-{0*JJP|0!=KYnKkD^`x<%iQw&kk5q6d$3DA$naA))VI6K zbfUPGcSn4hb5M~p8nBVFe;`f}+{}e00#t*kzoZ!ZcM*x6tyjM$Bbfu~{!axnNe1K| zb6G#eo|Qt~t?D{)m|2uTx9!lP`WQ=**VFa9SO@}NdR6{lc;zTxD;7*TaZ8KnCblhz zwd)0%qT4w^p*D3xUBi~Qou*mBnGisZlGR8l-3Ja#yfvRCoBLX~Q52bgfZX}e<{?rj zN=SY*fN4Nkt;bNrls731;fS|cES$)L`E_-5q0-2tGJpW7mz}(P@zf1<4EHV#mkAE7 zSC9gh;P`R*PJ9h<&Ozkm;DdkIqE!`VbF;zSx*OG^b)ZBox*VH*X%-6rEf~YndNnWi 
z1KjQ;6zun)qT*=?ih($QIX2K@otrH)7~;W0gm34RbnvR9kcDKX&~HzH_fe^-sf`S# z_{OgI`OQ^EFoeu|pMPfc z-%0ntX)Lf3+6_=cS)-b`n-z!Do@BqkeH$LL?Mv(l!7DbSl8pU@7-S8I46zG2#eNw< zrj#x5&6>IdfY=;RPm%JV67)_zPGLqxR(eC~j4J<*rO_90H^k_2E99nS=-k>RSEi3t zI5|U%iZuMspa&EhE@PrXfEd|1SeD<(^S!fwSRCT7I8nm1Hn3PBL&;jgILH#7Q(qbD zf79&-e|N!xNO8~kBd9ioGG)j=7uF#fG5FM_Z;YqQ(2kUxT%4{kCu0)XFn)Yi$_xZ$ zGH1xU&X_yx33a3PM(q)n6|8xjvrn<+s~5EI2%W@Hzm}X`kM8I=6|Ej z0Zu7jrZ8$EJ_RN7CMTVxJb7H5bS7uQY}^TC)?)cEq(O|vQK4DRnzdi-^Te$VUfo8e zuZ2?$&4w;@7<>-XFFUQ1vY#;m)e<%ngl*4cW>ioGXIa*M0J^yGp^FB_9%oJmStm|l zs_(j-U39xGh&ZEn6Y(ryPx+}6*1b`h$;Nr%9Z5r<@t(AihbR-vVg@3H{TQh&!E>As z7OcfufY*4!-;;?0reWko|#ld?ncfgbU7Ohr!s+(eaXj1{JyHx(|U^8 zmDMxnk1Pmi>HQnu&l!v?bBs_4eWv`9G33F^Uv3t~Jly;%S} z52j3Zipm%kb8$n4z@PBj6P*fV_GsVmH8W)C1@EV3+-m?G9?>&G??nz@^FuaZNZNS) zSG}>PXsuZl<_YOmXf%}r0%HnS*jKdRJU!^hphhiRlHdWJEcW+XzrI|yP8?5sT3&wA z*KLoX>+?~GgI2uRg~|W2OUAwaxUe*8-aPEjpaZqA^Y34p<^;sHedhU*+4r?IvY1Y( z5)z4QW9PSObS!VS{(kqn`b35|&&-A^?ZK>{EMoE+q14QUL6%pH!aoo%47bi&evL`Fn( zl6gFh!S36L%E@k6$XMIf+=Ci_QyCbeG79tlDKi%s%9cTp*VNvTQBmf)b|%qU3EGMJ z$uC0YO}*vq0LmK^_d5fh2 z#LFp!e*>2yV=BKj+VN`@Nml^8@6+`Phpp1=Y^6El%1+-_RBtbC+$H4f=zg;g6_eZ{I3l=Oe z)BQQ*v{u5Uk!e@1xaZhRRrCJMUW#m1p?DBnOY1f?+{WBU<)vD`OZ_bg@GPwzqgLR0KT9%-)K>+mjmZ-qX;XRe=HMssN@+sOhQ z=U4S{a8Olx`;u131H5XaL&@qROL>*n-<+>7c0+$i7QALdUk}6lPm=DYF7+BccFV4u zz4S!b7b9jXQ!ukU$H?1EPwQ0w$5SF-WyiGG;*>oNd1g&);q}C_)B|qvW!-iKR?lu- zmPaVIwrhSeVil)DVY|%7_Q@1|Lm|lAabl?YzAJ>AH`VG{N=mJjlL1l?L*p;~{76cg z5sB^w+Vrtc*irVwvK9<_@hDE~$H^AkUq>e_OZ@$r5%&+jm~P_G3f0DxY4^&Djs|UQ zAGD7SLeZUi{PDzQ)}Nn-CbXGalC0j&B8PJ-b3~}A=e?7~!LXB+iw0V3r!g$`qW~1y zTR;Yu4&fipSLPd~vF7<Y$#Yvnz)T^a ziF-eVZ&dbU8)FYk2pIB83=~p)v$`(wki<5KvC!Nk*W%38?;O*5Q!p6HCRpxyxGbS* z-tp~=CF=qkHfuJ8D+{0sOj-FhJu2##hP^{Plt|v7T1qyNTu$cf_C571ZYuje<)(wM z-ju2W_TstUWx||tUyd`(D7>S7P}^@E;dI3<13E4JX&#MmE=dxUelg@yld>?!bxGa+fjTf7!KWZ>uEJ3TW_|GJAbH}gC7zJ z$||kb$htF!2DvOf&$x_+i}D(7Ai9(huVu(KLP&97%l4z)jMi9s;M8UP#vpIaIT^&2 zg}F8wsutP=K8TtQRgTkq!O=zsWQz9U?`rSxg6ITN#X*@0 zrw2gA+wua$Wk}-nRSwtgI}aeKV(_JSVa 
zZ|;qjiT|*WIE+&8SBc>9Jetksw4C$rF2(`u@)n0F;#E6zhzE_sP+X@hnD~I@*gy>i zaHZ(DElo$^^iJIUhLg3q)y95{)|Ed$vd1Z;W)b!7#rrK6Ejpt0N_78Zf==L34x-h& zsXrN)r;L~Or%lT~MMF^z$UL+=y=U24_NT8^d1(kegBdTr6P`TqR*{{zD?G}4<5MbU znERhj!jpEcPYSI-j$^1+m1DqIxA#RO%>BQKP! z>6jZ<|7o;5VsII}yx52%k}4x_-bW!}4WCYbW+Hy790X`~pN%)u1b0JCZKu^nufOr3 z`nt}{F^ALJ7F=IdfuTea@*;MmiRHvAdJKtKPiOt{NjXw-GwddBjQuRKxmKu?741{8 zV}Enn=+5kz+R@!*4-}#6HSkF=xHu)%lqD4xk7*YyitZua4-05XW}ldCG-3bH=^VQp zu`UKS5l_mA&9uM?#rW9waDBswv@9Rto=s?#TF{bj#{194)LwSjp)DErqp7*$Q1s#5csaS;w>XtR5B;($-Jp? z-IrYv5PVUxzn(cV|dzV(f1xn+v$fec`=Tu5%H-P3jLLs>FAZT z?dxGA5yPu?|nI49|TJlbE9TN+VAY<_xJ<$Fby&(Jh zku~j0fiF}c=Lc)0K3k@@nGFyLSF2{v%J{!~$Yy3W#&GPjU@DXee?3NJs#gj-{3WBy z)*XhG`Go}>xf3CtwZ_oNj(EE2bx&yk%EqbBaAUf?dSz87E}@qww~tFC*{*V9UDzEl ziDrGoDz+l{Y`6Bj=mxsiAKKRVTtz~jBup+H>$C-;i}yW-Dc#KdORpVR{qctC^?1AF z`)g0UMgC)LlVO~U*xA?iymkjy0% zHx-#njqPn8@l&)tbbnrnj{y;-;sh4qQ<%rs`A|OYLH{q*^WcnUPq-qwo7AHPgUT8N zTvj%&i80M|f&u9mo^H>Dopzl=~Xbj2>>MVkXF+oo>hU&ByjmooUL8?Ugjk zIJkMU6Myw=+p=Y(v|Z&JELiZO?$;8uFap_NigPHwXFXpNvM!cq-Ctx)%zx;9JO?I| z1&(5+d({=)N21Y1KI`7LT`bzHUmEvFQFKIA?Ro@*|-zi zsD(}=5(gO&7tzYjV|FSmpwx@J&Yv?k^zimfZKzC%)0XTsro@r4?TuxAG=}~U* zA1k>dlW9J+T>9hj&T~C`5k0<+su#P5hckhNYeyN>(lW=x{qQXBinERN-+8erLfqWM zNDdzdqGA-g!^?oY@v$e@=>~7#K7aP}!Uqo~qrBD+YM@g9AQoeTHH@9J=3=|`XXks6 zIx6))x@tcs(N#plB1Q+hC@$I_Pl)wg;AAPI`1y*N0l~G1}G#O$RM!Qv?xVR z+uIC-ULhfVel&lp80LhyUv6Qa@Q*rxNPr-@(MGk4BEmi4Wk&MpBxiWCs?EmDw+0rj zDdkf?1btsKua}gMrPx{M0$Csrwp<|(Vy|shmh-Bx`UdTC3WmJ9a2R$={l+JDDNey* zfc@aXtJkc7JBax|qyRpRp@Z&Hp4+Jov1mm>QF&cl)EalGwYT)=rIFg*DfRK2$e|q7 z^VmACx>;S{*fdhRy&oua37$3K%&=5bKFeTSlv6v7;h2tVtfiMs)Sl{Zsie@q+J6E{ zF0gwjjDv)&Rm&VW;E3Nm_W$U%4n~*_UL1 zf{;1nc9uN zfl(A8AwT4lTdn+SkdMb`Etf+?#$@+!iWqz|Gt&xZ5*hRo>s2gAn0ixESl|DD_3fkg z@?GUy$uVV7Qn@r9k{Aw?YzYTpCVH;8v|0*lWe$uYb`YW-gh5D9u0!CQyIO&X>ml4Q zmypmab2)xTa%Xu@052R5nN~}op5XI)t^5?EoabI0+4GrdEE+yAzFA|1g}IC-0_DCC zE?%>pk+pweTt}JliHkos77!Ve?UrP0vk7S@&{`NIKIC=jbOd8H!rwfPSKuo#7GP?F zpc_M%ePM^MoKCTgSi5#DT0hr$=mk#71aeM3PgLBdct4bFp^x4?rA!mx_U6qS&9C8z 
zb1sQu=U1uGZ%bNTv3-{*QoR6mTIN#I$Uc!@)XwrX+_9G|7gYbbt3j zB}Jx4{`#M`JZwj*rY^DKea4Nfwf6{P7a>|e?Vff?+pZ0?kEF0(dTUo7GXA9D-xnlb z>wu6)s7W|z(G2haG6#)N3N=O6?`!{-wrzruent6B#vMnAUz_)WdKl~A+<=3Gm~4v8 zDD#*Zg@u=NP3kFj%%O*${&wNA@FQ?v&82cId7{n>bkWG$_>{Z=yBGelfqkSXK-&#; z%0>drjqF}SX4f&ye_MbVWn#WtP+-u-HBo_{f!zlumDPkK1P@t zAU7V&VDc8XRkEkaszC8UhP5jL?>;2#ye!|Q*bv7@5phY)j6KjBOsuW;w7j>b<{`WJ zT{Jb>qS{Z47fE|=qdk2`BJHs_ybLA`EIpM~j>+L&{zxMx1~rS`^*^Lwy!q53y8VJA z;WZIc&0eX)Ah)aL_iBaqLt07?$2yt2{z`aRPAMk&)^C2%z>OBU2`*M-n|FUTSocv% z4H=n1^L=h`%haVxwQCzJ%xpRAThZUL{28UR7`y89?tM4sV6A2kXy^-FMlh`@XCXO+ zH)5TeNwL_E%Fc95tK3C!Y-<9`~K%GXHp)I4H>)d`x$VRc?d}wXuV(a z9eboK2;@&Lcr8y3%cvXsgjVQhIwz0!bS`mpubrbo*TEdY!|1IYjtlFgn{uRE*RJ;e zQsm;eB;J((DVsLB7Eb6F277&s1S`$dXjIg{Mz%7H9~a(tVW5CyAufdjGRU<%%-6=S z@aXs|Tr^fJiZ=EyaJ$)4hL0-x&0ZH?22=`ZNkb15?EnM3`W+vD=0;Dp>@J-ac!_eKRX-l9;p=8YVc3)1OR6VHeo{g5~iqZyrzP5ar4p zqd5ioO%|v;I;j2h7j_%53C6w@SNKiG^jGv*KlkpN(Pt$}iA@cITG-peBu7bndiOa&ZvR@YRhi(A^-CfS?rad?x0V{l{3$|MvZR_hG{d z)@r{@ma%*KN*sf#k0qs_zJ3kUqJ3d#FfpQU^{;n6uPF<79;kFh+NGwU;g~gB;ak-j zesfK-srfnJwmcQN&ZtjLJFkQk6MhC_{2t7umbq}dvd)Ea-XT| zR?hbn*U!d}avVZSp4ZoX-b$ci+ zQeWG=GfTH?b?@EVf{JA$@=m+MEQ1%gM}xHN&X9N64Xu z_vHJ|KR>g*#dg8?kKBIAeE`7TJlhdNCphu?W9l1TgLOJ47CfkQvDN@fOxdk}my4B) z>ZX%e&#jkcux>!B=+-sLbNETr63F|;A8u#x@c&h2^Ef@k*0976PtY%9Q{J6{^V17~ zUFv7X^Ta|jJ^cF=-GK)>LZA?zPxTA8{kU$70L+r+*sWIUY){VIjCyo(0jtJJ+UEVM zo~K&6^ros->|u~}D~BL0Ev+T|p^_)fZ>uPxJ1o0B)Yfrv^4e@IVOXE8`8t#SojsRB z%$E$XoEG(Db{}L&50T@X`J=cN;0KbCj^6MZ>YjWpp}5B00ajAB)dL@Ls0m0Ek&Irb zdlmL>A|F#bXeMmE^bA|Cm(N<4=l+1fJ&E+d2FU{Fqy{@YX6qDt`|?qfA|VZz!m3FD zDuNR5z~R6{b#%L86ahiTuTg7TNcqex$ZWCf7wP*Z?>4o4s~R_N-plQSsznQ#3gJ~h z5K{+pp@5`pmI)1E0of%XX#= zy=v>sN0+RJsNCj-2NiJ&Zq()6x{bxC0iAtTr!^gDXfQgWuhhj7XxU#OUri=OmZD=$F|3)wAdR?PGerjQj*>t{OYMjj0d=NCJ!qe?`A;hoW9PPBMS85+NILzP@u=2a z!}ck;%*RaPF1!@AsB8i5Td3aDWeiC52Y^f_KtMdPmWM}Eu&<1{xxHJrZnATRzM_S( zvG1Z=yDZPvs$Ie!Kox!qv2;Qlx%vG zScc}!4r0O8!%X4ZNVIrg_`f*xV&dbEwZlve)gp_8O`rcoyD&w1Iyz;Cish})KNkyq z3VY66?nq{he)_I!*>1D`_zk 
zUKab@Ua__KQ@gIRhC4RXBE{y#(mi90QmmCtaYHV`_Gk|U6mAe4m85$7-*TpNKc_862+Z( z^63XPiN5e3MuotMwbkDj3wgV+gEt)(5w6Qyr^VTyZ62j{8(CkWzbW31Lh@qQF3%0q z3;n_RI57&urSjvBv-M^eP2Wq6H0vqsGK7fZQI6AHJNrjSctOZ#HaagMmdT6(r-T8i zGz1u^Yg)*SbEI<;5{V*A*5*nF9h8*=J?*{xYVXaYDRfX$(db9e4H>mRZ&Bb5MkOjs z_OB~{Nzj{0VFOHG^1JEe4)S?`W@5oWm&dR(!yWu1&04wiN0#bRx&xTA0cs~?;4bd~ zhf7p26juu_jafsG_)L3mN74l&MMODl%n*^H|N4$IUnI*Hm!N168`qfIZ=o5btqnwx z3`|nfCDuO;y9YxO5}>{os~}qEYkf z*gZx*YlAD8Ib_>})(v-*VEIxIV;xzCI`_>_n|}Ws6VlkmHQ^5a}JvK@vqp)@LpxV zqI$==ifMVA2#=dR6LpZd(sKNfzqVIHRzHKx7lfG@JE}6o$>i%n>|l8;nQ_s&9}3B9MBa z>Q=Xy&uvbUcBbHQ9g3(4K2FSqVlvU&oX&BwH?-HKSOpq#9a2n+9gA`PY2T(*k&7fA ze$7ExV;%Ityj(uJPxbesSl?%9=IyurMC+C0*7081`aF?j3BcIDKOW4|cMHDghqoVA z(p^Lwn5vb2fXoM`+?&}77X5zc)LGIPsVF#~3+@RzMI&z*lok(B$2Gb76BCiM#d=6q zn_^`lH4lxs6R~VFDrKZO_4ujH%EvV67*nsFl>24A(4ihHjAXV#j6dnO9f1OSMWR{m z7HOQ0cr;tQp45?p0Ew4yqDneAgdgEiAZA7+QZqOvVs~MSg%yxDFQr-eyD)0AC6I5D z#)<9L>A^~&rzssKU}O)B>+Uwfe)`EPtz%CE>dZopUQ*Y_E4_^K4B$#emc@j6=p5B{O2PC%a^DE!QHfp1A=}-fGkjG(hV4=OA zVIe}(*btdGq!kOw8MR5h*F)w3$7)b!eSO8)+%_$-0)h873%%M{p%YJ0DQiB^XGBZi z^;^dxW6q2rY05QUFJzaw1O{LJ)^Xp6$GX7-QvLTis;O83H((PT3%8_!YgB6LZYqqA_urAVQkqy&lqO^O8v z3|Y#Y)3PB{%!8Sv;)sBJ^>diPuPXl3>&TF=DVar@!>ln#s-=j2DfAmu3*nK)PC}vE zBa_ye>AqqPlyb|~t)rO-Vj?G)HcHZ65zBF%%tF8%HlmM0Rhv5My7WY(0{Qiw$n@@* za;>8vzq%6~vvp+ijBtrh4lsjd!iN%G?oUUb>MNcPD^!HlGxK@$XoZ1~Z`4lbG+i9? z*!TBuC44{=(ri;hmordZucW`!WTeDDdRHuu=+wy=t~{4|JZHPT6|FWz6}&xt z=Bsb`l^V&dZ(|vY{8j(eCH5$JA3lGKqEQ>>4K{7DUwwtnVGeV7{Q0;46khr!!>7EN zcp}?UfRJd_=^1kEh-vzAAVr7`uexK;9&t}a93$r}et1j8$ulIfS7Z$?qDcHi7AJ>k zWxnf%HG5=Q3K;PudHGR9hEW?jtTLnGl#oVWR$5(9S$F-%)UulfIMp&|3wZ3^Z`hNL z3lSp(8D+Eui$-2qjiHlg@^x8U8nUY*(Egdp?PXAIfB>IgNq^{yXF8wH(AK zt@GuBuC)}I&UkX8ynH|&*EW|#KZ`urIOx>AT}vo*0INlBlwlbAI6Ul8)gwm6r}=6H zbrYT0_$GaJfuN$EhD7Ko7!t2;n-;w-YQeL}+x#_foR=1Wi$})m;cL6CcJck2*}T+? 
zZ8rb{ldp3l%|jq#H8k2<=o4>`-}N@0a9KPQ?{haL@;ab*sF|9YbRZrf%?#$nTs)Q- zCqLe4a)BiuOiCuav7=$q1q5a7i*R4;lsk`H|0pcv{39Qp=BlD~_0PPHDa=8YORo(!_OIh4Y4#VWQT(;|T7Tw2)NLPL0Vt7oGgKwRkMK9Y_F`6(o_o%4lz zY!M4$WJ)vUxt=n8OEcXI6Y45bJ$TL$*UV^3i(W?br}T=WoFwY;NotxWxIbI@t0<|V zZtWTK{Oe%G$HS~YcptB2(~|~|h^_c?u_9QJjKRq2?wtjFoBXZ)!lFVNsKT-V&JAcS z$@X`7RoGcqO<74uWERDY(j>eaNTN>gq-!0Fj4Il_OfFI6yzEhCXne5!_38 z%snM={iv+4mGWo#4ehWp>2`kVTC&h~L{`EZBSoqKfHhi0sB0 zL5Ke+>w@XKjpYvGbU!i(i4FuAeb+NDupU*Iur7fjdjjI#18`}(&hDNqZZJ8NhKqgH zBGddgM$#wN-`phWBtzvbmzM9#+cEriN@fHr?K3Nfl>FT7TdKcq5Ag(0q`aVPA%xTM#N@ei{Y zK+}jQ@KDV5-=1Q8g}wq$_ZDC}vkt@Lc3gq3qYRxVlQd$?wv zQ9>(ic(v(=9y5(D{6Cza=$vUB0{OSr;O2^BF$9npObUr7#nE#? zqcsD3OONsw?Ktf_Lu5Ar{j=0aTg}leFT88%z>DL&HKOA|H?m`6oPKiT)shTG zHtTII8J-R6$rV+7`(cvShj7CN9+Wd}FJ4qgypCqsc$T@GM$M!Huf zzwKk>{B$Vw7|Z}kG+me%8iDm`7D(jd0<22D?!z9NO0^-=%$w(^354zm_95aW_8KS| zht*vsRHMCITRFgfQmM+`%Dlysq!{8u=uWMz!Z34z`}VcJzDz|F>VUGE$wnVG_;Au? z)4-qMrWS|m=yoH$aJpC#fUYk=gsrGVCIOW1Bd3)S5&9ESR3oflj$6jxC7Cev=_fUw*Of08-hb&K z@_Kh@`Rt03*gDc!lWvyu?D!^8g5dbmRxDak8{KTjOcZ;c5pL)h7}!WEm$o;&$ZYp~ z5YichuDm}O|F-L*A4;Ey6U4tD5KgJl(+|O_ktIgr{x6wAgd325wxUaC4FChmmy*5KKu?#1Z zuw#6FS4*E?_BuVUq}CQ86zj{J59DaHXfbkFeT6z!Q>n+%^yXK4(hX5x=6*mA!M>+aDKYBk^&+_MmOjYZH#DD0jvG##O5xt&lD#~5x(yaW7^gE0y3_qx$*u_xvME+b zN|8J?8yioB@7N`TbNOmO5aCiNxkDl;0_|jCSJc7jtR<6bOLkVo1<7zV5b8`})~?X7 zqKQ;@pJ|rY%o_t?g$5&#afK#vO+?OYr=e-lSv)*=_D8vtVg-XY%UzMZioAM&s6a@T za%-q^9`#y~Lc;!nv&wm0_lU8N?hM=S-MBGR>Ej=ER1{qkS2joys>jiAq7#y@gRBcg zStsH*c${vGN8b?zGC5AoA$-S*tnqoT1@f`d&&FVclA1_R_8GdGNGAh8SRn@I4=-;1 zE|>Fp&u-*0DGFtE7>_fT?;P9LJz^J+Po58!siD?}U1E`Th_qA4Bq>-#^aH_cZD9d9 zDitaElvp?x{h$s^JV|qv0-92$Ka(X@qq0LpK~qjN`{(z7ukYVm01Mfvw{Z5`$NeY3 z^ekmz=E93}+9`^L3R?nKP}BB_ntg1QlO9%g`qE$LSWVXMqok&$qGB22y+Zc9!5>XV zT*q+EK4lCzI>a-qrCVI1FvOJokkpRL;73nOTUcHQEvtp$i-Nm~rF-tO-;iNT0ebz< zJ!?hW)-s1t@A2{6(bC_oGzWjp{IMM6r_5ZV&?1Do&Dd-_9q^pB_KP>A(ODc%WFd;o zB1l_8bpF&@;UYDpDlpf*uw?$z3tsU}+PMUfO_vhyP#;?QU-S8NdmzcQ<)zsc$Dx7k 
zr1uS)KBpPi-CXWr#p%Qe52Og;gvbJ)DhC$jNZecMUfbBf^3vKhf;FUF%WC1+d8JeD z4685eL1p{|Qt;rtEp27#35oUr4@_i9L>(ky%;f9{TzCuf52$zX5&GloF+}C|$2%_S z65@o!PF`MW&&v=s0Q#F>Yp@pLUwf3}4v5D77)kP~|2?IJrOzxqo4!oG?PxyA%@Bk_ zHKXTxp+YMSw~>8#p{yV=oHVLN6_`!^sG4}bJT^|kn1tUjEm1jh)J}x7uC;hKW7af7 zL<$1}a@)%OoegU`Lg%}f`1#KNNgbsBB}G;ripvXXxMLi&C%@k|!CSv)V*Vi?b}h4J zd6bQRBZW?FB#LYg`BITL^^egoSbJV@$qyPBhoO6&uj-!Tf7Y@bEpHUv;Kn zZ4>|}baS&m_3aEOC230#_mH$Y+VNjCU>~fB5|X{%d^Na}k#X|(2esC%%bu6{1|p5S z$JgFuccHV|zE_Oc?6=xeAfQqUAU33zS;S^IBDAy8xEGOXj;4 zzkYHyPB>*`+T>^P;=xR)yT?*Hh4GhrESZzpS$wW_2HMU)7kOTN>dT;OLaB})KYpP} z?Y)6DlD&&-y!m%#*bs|HMI5Ip?$-QER95nDq13t06E~|y@0A9Y)w9jHB95`$m`dn^ zFp6)`)MXc)X&~t=euDnTA1p^&=yQSfiJD)po-wB%h=VT*z}vN^p6+>b_LI$TKZ&Fo z2$M(+TzoZJ@zv$6;|$;M)D>AsLPV(gz$mSE6ZB?xULB~d(6QcqBw@?H6TbW?%D!2m zrsh$U_?9s?Dk(&UuPUo-!5A~>1WMDd*I*}_W~1KL_Jmv=(vz8LMt21p!9uUo{%Sd2 zmcEE^M#Q2}T~!?|B?rnrR@70iMZPL01(=gHT%YC>q0xHu=z8kC4nE1ab&H|yg%?Bp zIDc!}&+$NA8dClBof3h_)zwu!B`G@chXpvpA6zK%8Gvs{ zyWA+pmDK}`J-%YSDW^n8N4%9-yYvkG!qJGXhigWU69+=z$r}=;2uZUzKh-GN81be6 z5GPYfaIN!xUmi$@wd0!4!Q-CS%hbiTkZZB0G0k!5*YHY3Zr zMtSA6YS)Vft=L7>N<2fp-{d5VqeU++EHiVK5ab;wuOLbD*Iv}-s58PNvO^h;p_}AD z^AEG}F>3AWS_~pT2;zVZI*j6bq73xWdL1r|`N#Kj;ydLRf-tqz_RnVPPTGqQ`ZcNC zCgd5I-joD2p5zek@>m)4j0scrA46aCc z&kS!WDG`?DAbyE$qON1(6}GV*PKUH+IBkqtPpzw>6g2ndm*qB<`|~itfykmvP5}(+ zX>=)SQd)YEx(knunrRrsl^;JZaB673o(Nw~=4Q0CEoB>o=8DTd@6hLz*h&~f?kqz* zY&y1S-N#}${|WuQ4-tRGiO1#MLes+p7x?`CC4J?YYMP3OCQ{E@ihUD|6E?~oNGcYb z`uaL2?kIe+gGkm{UeZ{zPNp-bRk}2wh6wzW#skdV>B`@H!4M-{uq;vrqY7gQ$`1Fg zb!f&ZgAsHe;$bH2+^Y!4OQ8Z`Oem(*3!eTNK36%Y6soGC^6dSZ0m$o|Q@-qEp7ibe zHMN62(xH$M4O#`eei`IB2QvdExqe zdvU_QCHmU;(x7_YaMZmiw}g*HW!Jc^hTbqVD3_|a$x>|`2Rj|3uCNE+DLO%Rx%R^$#)NB*2Ez` zc{D6;DCrUsMz%e}NkH_&a`sV2ga4cJ8rU;qezQVqrKYz{!b9DTIGk>f}J+Yof z-Dd}XzX==B(Qbci>GOMQN>xx``Kn9xE;cvo)o5N_ z+w3`)F}>57H?>aslI+&wZslGz-Q2yaX$ym{rw*RdZmVX{%GCJHyiIkj>x`&-esm0d zkS1m+?>b)WXB0djwMoCC>SHA{((=8Vc|UBsVaV4(3w!MBG2QBUC1plah?s```t{4d z;5E5v!;P6^R;(C+f~_fynf69&+xMD%(YpP1p9IFo)DTgH2bx(&GG;KJ+pla=1nv|e 
z!o}rMm+dw3>0r{pEX0pA-&wRDZWE|DU8;Y2?b@|#$I8{3Q70cwFm*o5AZmV!x4L4- zlP=rMS_IB$mZ;@(ZBUDCy5Vp>oqG3H=J%M5AMZE5>)#tUHtf-}CnjNf?iEkeKy(k? z$5WLZT8);^l@74ys#VXiaPbOi*g4|FiLErciWxXImRz|bB--WUyp`}1r%*+zCUyjg z>(9E-VDjV#L-jf++brn2y9vMVDsM|!Sy^Eo(z1oB=OzC$p_?`}rg~^xu=6=Tt}G`` z-B0=C$&;xFSntxYfXpo>t6rxb?Ch5cVG~FwbtqC(Ryz@Rq_(n3_mRU-pXF*G5U4Fe z@Q}597*q_+v0mlt&)72mK^cDYs((?F85C}L_LuwH{5I5i8pN(?&29wn&yM*E7SvH! zS5HbxLiknx{pZhqVPWe0olfSexJ8^qd5gPKb867*ehpjn=I`C9?0S6e{P}<5{P_%x zR}uvN#(KM-o?qFVqor&Ty~bPp(8mSy=KY0)RvN9n`}Da?>NC*T7(I=BRmvt2EXJza z;VX*?hwi8Qj|8nwQBGi8e0fIj06Uwj;scvLZ{X6NtI4`dlvIW96{de_Oqc=}e z+OgRxJz?C~u~+z0+xF~f+_}!1ii(?^dj|g~xa8B3NcjHC7n@}&-DZE>ZMFxJK}jK2 zYegT3GYSQqPElFeu2N|zKq%L@ySRF{t<|aAm#9Y ziKeEbw%u63I8{BWylvs(f&77}C$1&hu^H$nU7NU$vX zdwqCb7w#gEDtr!qpf`HUX=@M3ry`!1nVVmxMxE>I+~#(UfcpltO7~BO{*bo%gbcqxr-N14Z5*0Ai6g% zE6|}b8K@&UcB(bgyF^Goqw6_ zVLNLJ#DS@lh95PCum|+H>)iCWc(N%xZTfe+Kc&Zt7MA|=IA@ote>7$&=Q`-Wlb`Xt zd740EUjpfeMT(Tr1%2DlBAd%_O>0MU>w9(T)KMrlZ{B>8PZX6HefQqI%P7TNFj(f3 z=>6!dsd<~xn`UU38y4lbZ6sHXw6<=E+`NA0I!xyHQ!HJ*dbNlWI3qLjFJ+}`*RHkh zzeL%&!(@MzmM#4#-JcN|zBlSJc<|t|g7X81|9n4^Z?5CvSX@-(3;Fd7b}fK`0f!Zu z3nf0$`4JbilT4^`5a6N;1{tP;>a%NTak3kMF3lYMgtQ%;e65gBZN zL2gDWABBY%D+D`lzcGi5hD%Hu0h!|KnXh)N1%uMFigf#Nr~FT!?uU28W$(?_&!GF7 z5c+B>#_u26MlcWd`X_l5>2tFAW{1IQ-=7>gdbF4-hFpj?vt;jHGq0MXSyEOO04|%y zdkAc|7{T{tPWVZ-a;>{<_5(PQn; z4`ZJ8?s|MNpO)Ue&}7yX4%yKM%QgfA^uLY^^as!MK(&*nPIdpd8X&fsQqaZ4C6#;n z0rKxkPL3La4(pZHhpeh#9X_=jLk7D`ZMM0iQHRRPN=|=wkHUpZ$60rvpbF%~*$=K4 zoS9*CletR;YCpq4gAPiYZ{n!P!LWbEJPB7$BL`J-lvhyW&b^5fN_qPSdo+Ua=Lv8D zO1ABccK571!BlMqLz=GC%;Xqd{fYDE&Fjr9!7>JA(_RHwck9y!jn3v&IysVv%|jZ^ z96WJtkAVZ#hpc>yt7n(@Tn)F0fh&*%zA>A~;P=5->v)A8$ z{CEY0gY}Lb0RfHX&z-xDqHG>DOoIjuQepVhO$OSGB~y&%2)F#&q~tdb4tmun0;;!e zg7>dgk%Yq%wXo=`hNt%g=nn1Dr3)>tdYoD@{@NBA>d*W7NtjJq{k<=!=FaZ5)(6c) z{7AuqNCLH=$z|oy(o|}8G9h98sZ)J$BvfwKZW9Hlr?+>P9zB{UT&gM^$M5c2mx{3M zYBkeQycK0cc7V=^Y0WIBlSQDvzzc*1`|;+crs=`Xsm*adj@=urZipOOk+kFX3ys4W|Jn3!8?17y?yw*AN$Sryx+bzKz9q*;2;M9 
zoA$vtVuArdJ$^jRd2r$<&z{Y3H$7zap|WzcnOOrr<-lRDjgU&ME^X7UU0P;lBf|YP zc!nle^W54uM4e-mI1E-&QHfDBF8DkE z9J=oBziX3rcMLAPe?LU;wuS%YNy{~3t+~X1qpF#UwUePoc}wg!e^s>}?5;zfZL`Ra z3_DxikfmN7)Tfl`XHe%V6e1uz)UnlhbKB|Drp@82?=p1NM5eZF@7~R5S6U_a@N#$W zPbR5P9<-)Nf)Cd@<7r}QS|U&bJeG0i&Ye#e0K?B+Ie!NaM*bHD;=Z|j|G3aB zqz>(HdTDBstHB|GKZmvdY-$=gOYR;6i0`?7L$fX+nhd;-HUVvn$GQr-+Hnt8o0eC; zxY0tZYgbJJ1GOVZj+{Jm#*{9BfySn2W58q9>Dzwir0_E}Ex$T%po)#i$J=topn%;pM5Ea!`pI@?KZPLSaKLQSjU0eA)Tp;4`uhd@UNl`+XR;0+-G{!H>IbuVygD|!^zq$*)7VJ*k!%etk=kDlZ)PMhG%IDnm??JiImbLlDX=tqP(Z+A8 zrKP)f^{A#E-M7vgY-u^`cQYNojMF_%$6oAs@o_H|mC#<2m91E`Gz4}#5T37yB30pw z1H*i7`B=5l0cX~QhN{ucnmcEXMQr8A6J}`~dyBV<^Bt9#`-D!*v-UPk z+VrghNGO9aKGH=c#*SLxS!wA8T5rWbiu|Cco7;PE(R1o#rT0GSvdodCk3;>G1w45y(+vHFgS0C{)@0KT ze4$>hRiu-m%uv0u+!Gx!z_ISQ&8%*w!_u^mPr|lEcl!9c%^(K{ht2H=sq#di9%AZk zSBaU(Du;QUu0yeZ%vWX!(lrDwo0j!5SEcU|gbrW*+=+p~13Upu=yK0a6s;L`|4i;Bhen;1@pUaa z^^fQ2*_w`5b{Cj_oSaG(MHg-Dx*=Lzj}Z^<1q3pEu|yaY*xd%rn{U{+ zZ{K}KX1^r$jxsWR5;$tfs&*_sYybv2@VpeROL7m1c-_8z8hiKcdsbGK5o#E1aylX5 z8q)8-aMt6Fy&UwAxrZJz&$SfJfC{qlGE56SmOZ_&+u!NFJ#ltd--V5M!7Z~Gh;6_G zhPbtUy5O@tBt%(NRf&oE_JiE&DBgejFkg}qt**Nd9_W4jX=>`c{m`Pbv2NG&f{&;f zoCsY$E9Rf#Tf`#e%)zItJW#%0k&gjCG4y6@*E(I7p5|(#6&7~n_oRZNLZ2)jK6r3j z)EJlft#h26v!)lAn$GA%3Z$+1=#r1o{^4t4J!cKSFVz-Z^3-U{!@U{YYTc&IQeEBP zC&CEh{#Agfh|GO?;C{-z`}IqMLh&d4x60G+ z)JXwK898=rBR}P7NiLn~0Y+A*3egagl04(->lTfylS4Xm)7_RZ?-ZH7&i8*zs7YOS zZz@o13l6R|aNt0}u!^vDnibu)-vAEvkf+qOYgfsBybRiM3U^7!&O4&|?cth{X9*q3CqDI2%s(a9l#)VQ_0Pm&j;Iq2*GS<}WNg3(KI8%ep zCvg)Xm+KW&GF1nGsOqPT&24G`($eEGnBh;9|WF)`f%k4OGli$9egvrV-X zGP?NB+Ktw)zLZ=?##m8FBe_pNpg`Q6ax*R&B?Y8%FVZ=teuE6<4$ z!OPdL*XKrf1>G>p8)s`rsg|=OKdBGWtY>fDZ033HO!~z|ILUUm)~#Dhf{tx&78wa8 z|MGh>=*7Q--lp6tZ3-Bxv%2BUjCsfDdiKj^jJR&NbBpddNZ;p_1ca9>{FRTl+r4_9CZrD?i=nH(DMPTs5v9|cRy$C!@@@V-m$90M5^ZTct8mH7E3{u!z+y&a zPBYb}*`SkNW!YdIaYAWpX!@RCG$B-C+HT@_o6ficet)16ka?Sr{vN8Km}Xj3hclPw z4YX-RI=c!C^@nkri?q;?RvH0ZMa7pI_c?fjC#GXl6%f#XG76esty>q}KKh)PP@E3 z&^&Sq0tG{_k6Iji7rr%_Tq$EnTr424_3+h(B7%z9WlDWy+2o?KNhe)h-NIA<=tL8Q 
z5m*!oH3sDH+pwWN^UJfB?SW?*zt>O&Tr1TV1lo*d=3Gx5Boph79$mb4aXf9}U8>KeTnFrsma{730GZ1hOB|U>$cu0BZX_Z^|i)$=3 z8GjgLQ3?xgxR5$))~u1L_fg^HsGl1H zvYpw#!We67KV=tp_p9{X2u%^MrkU!>{-g9_>K0CbZu|t8e@lERX&!Xy*|RBKdPsyi z@5^VH&YsLx$MiJ8<#ha0c&!$VFlAHfgS^{pW&;*d&dRaF6qC zbv7#_BNg1and)Kr`j$(N^G3S#>(>Gsx62|bcx7m6uhOw&b7ZLBe*AdO{~B02`}f#I z4i56t$Yv}2df6p$XL)< zAUja>uid-bh=-NDcTnc{J6nQ+u7S2r;i7)Q^MjIn0PxG4Wy>~$^V<(!-I`cu)5Hy_ z^8n^#4K4io2fi*Ob}UVXZKg9Y2Pm_v-{OfIcMiD#xjg*$&*ezs#w@J*q#QGn0Is5; zal1`{b&N<$vfd0&=0(rtBD%P_{RI*^@*HVZ!@;7BcGhe_~O&_4ojnBw@f z_w~DX&Aq*=hWfuntS)3<)YDIUqKCpTL=XeXeZs*Vt+^m{ued*H_wEJzJH?&>rT1q2 z@8>se?!y|jj`U+a-!i{`GBNQw_4c;?`-7N_t%uz7GFnVJVq~6E3~C*- zO$@5)wEbZVlR5M**k&-OS;ygTKU5UVt`IFSUca>$( z^IQRN>I2oQYGRlBX8;OClwDKpBFoFlBB)lmK~pz}zq@;|Ha5RoxLOW3-1z45WpsQVonP6D8|4oJrl#FhcaW6ACg+!zHe*zR zMjb{7o5gV9RRT?*+eyKc)d(g;V>S|l@5*pmvJE#Sa?8c|np}IEj@iLpkkG*^E znUcQs2N2|3>F+>{Dk7|_>Wm&WDrRsiN^avti@MUJtc#3s?_dwztRAZ0OV1@I55l%j zN!}R~Ym+T0?b0c_`;cc?L;DQ*!&OM;{u&#fx^Z;V_iqmmXG*y_>Boc|Lw`_K`prRz z^m(OKv$vfbzn}q^Zw^~Wh!wwz`;NbU{(O(m86ZIz^9Ok3xGrA2*zTNrLs+TnG>2_8 z_L(iM;gVmzeH(<&Amlnn<~d3Rn{yRhquMQ?s(bPrhy>M*2L&E)xFrf=+?g@Vt`T-sCd>?b}=I-Me>eN?51hSn;j<@a=Y1LGISjx<*+s z|KOo|nJW@;`0(E}%ML?Gg$J2B_i5K|k80CHJGD0c9Uo)$816=F8qx>I^>58Y=&1@} z3P~27DK1PJ^qBrn9RZT<$-TzJQt6n|$eTy%C|}*ibW}}6c81P$C#pEQRAYbx8}nAu z?}x}MdwjZ^Op2`cGmXXKp$`wGe%1k_`tN<}j`$wdhrK8)!03qjFW1`ikScnfmSV&5 zP12n(2uO;8KprMwsaF@V*U451Hd_cm=0`6f-g;cd4d;s=7PYl81zb=LNEO6OjG{H# zE_20-6)r1QT;uCo93I_t!c{E@jP&5YhOZnq!?1e;c{=S3T{@8@ik?5e;<(=IFq}bl z^^k8~6Q2nE4Tr|~y5I93hxzW3v=|s<@_HuKt~?fgVS}bk{prOb)o59oxa_ei!TQeW zz!z}IV*L$^!lcOfJp(5EeIn%P3Y-2KzVuj0u1CN9ZQGU)CnXL|A)CFM&Aw1B^zo@) zEj4UvTsw~MqG=aA`w06XZ=rK%30J#@tkg%>v4w)*%KiIRX*V_oCQ^MWPB^Iw!33GM zfuy;9vP#S>8Y~QCZlOySHKmv5nlH@`1;%r?;6v+?`qrT`xJ(%gpB;dBcrV6(L3occ z_tPt|XU~BCQPEkm*6O!VJI;Uw#K(2$W&;iXr*4GnhwgPWx7?>&$Gw+X8i^eWNn1rl zbE2N)o|xSce)&$~Oi^B_{cTR1iM)TyR68B6#rj|hB01guM`w$$k5_6dU%f> zKYqsjiJpGCt598Vp%cbvS|uZK{9K+huFvaY80rlOL!ilAfD`F%3DjiLL*~mpd-rZD 
z=mnT6Wtb;p9EuAw_N|Zp2VgDg5`jvPmoC@zmXg5qUwr@mC944VTyw*i18asjxG4uY z|NP2H4_qmtSAlQR=vWtF&*r{IFcnrJFHPItU$ytK^kayM(wm_FX?3=j z5ItMECFjna!#2KGQF(dzVr!N$XydgYcy~SmInVDOwJ|yDv-YQFO)Jdj_pr}jz8HKP z8_=j*VQrEA_i&#ah01>m@T);*v;k6*GoC;5T%=*?mqXW?cM=t=LL|T+MRW7)>-gA{?tw=B0cPGX zp-q5A!c}bAq-U82y=RxQO3kFf1r%ZSW6dJlhG<`sZ%XTy_^px<=X8oh|~n zhd^aRVLSFHlZo53HddXsifYxN0~lWRP99pfg4XpnPVGTtQB(|`3^?@B4qnBBZAwNT z16=IQA>fqFYZNU3Y&wTm|bv>{z-|NJ>rC z>RgOT4LNuhfVKrci%#9T)yHe?3Ua|vr}NC3QXIhGQw7BJ(YAcND?EH2&RoPnUCSZ7 zW9r+3I2OqfK&KLM#LskL>GS7*QG|CgG;B@Wr`o^guB91@$VW*b61}WXC!EcL@uVth zRG&K0n5y6PZT=t;M}P$_9sU<%Zys0k`tJX)q9jwL%u^|}$UKuFp(0ePiG++r8KVp- zL?uI!d2FBonaWHeQO3xWp(4t#QKn4&o>%tgob!ErzrXMAxBob2@6X<9t@r!B@9Vl= z)9qyB7#1GBf)kwBM}xL))vk6~eufE~UzyI!IrRqCx?Y=WbAE$0FqjSj7!R!Yxj1>; z1$>#`C#Jk4oXse*5^Oup>J?#=NNoQ6{%Ln`bT=UZ1t&dj>*Q1q&)qs=859bxFy!#J zcaI3QxegFY?c%~@s!KjA>N`=kj$6X4Qn)B(9Ub*VxCq&nhbLy%ZtpoFY6L$>@p943 z?c25~n-N@AulK_kgawK~(6|Is!-zKo(M{mb2uj5k)W3DXJ>TcyVMB^F`ihM+b+niT zolrHb(6QNF@v5G_+34eb}&fK)Iw^^)y%0kdkNDIEiFf@x4ojTyf0;Ga&tGzS}1kUvM{;@!6C{$24gAG=b|2KFn z?4oUkuElr`POlMGs)aM_9bWCgYeT_Xn>8gov|rPaqc7&>rdU?#VKeA&)5I70&dI9N zpaGMS@JGtEC2!xxaNR{Q%ndDY9jJy{txW8jfh4F3h{@MQevnbwvku_NiyCKAUP%8u4HbKDnXsO;HH-GLC2h+#+FfbbjNa5L(9?`)*tYv zqA>#t9mwEm)7r4H40BGYf`NIt2m0mW2F29vsk@Lq7|#_L&+7(vuY zAJGS)%P6D;51Ys%l>wTeWVI}M)+XAQjl0jB>7&>~pXFR^ww763YJ?nrtzpLFIY#NxRA>d#@_QQl=b*Zv>@&B<*LHHFg`G`)`l9U$_?za zM=*jk*s02jI~DC7qXN2$X!c&*8`MUP5G&8}^>v)h;3(s-@E8qtIMX-Ef5pl~i5kem z$XOx|{Waj4At8Yr(dpd3KKs4L^y#bVqeX%q@0{H1Uv(mtylS;-mr&b~f}sBKO~2JK zF)iixm;&d4s-7~Q*^_NK7)FCpy4Us<$9Jt+Q)`8euo3mzySWtm`}p{TEwf^HJ@T80 z>f)tK%Lfn0@v7h6ZItV{v!25rzu|#G2zNvJAk$htJ{-Fyi~juT1p|*3Vob3%tXSo7 zFOg_%JKLSIMqe|I!G3w#y)Ff$0 zRIM8KK6e+j>Hht#1?xE2Q4R+HJ`X@Nme92aPFg|e5(-GLGD|*bSyTv=CfX7T0VSt) zV4nT@uQh7bB1^~BbpmM>?dIJbW8G?lX{-9PlEI1Ii!IXc-K$qC#h95lL@S6S!U2)S zQZ*ETNTQA+{H+AR_&(8dau5?*taC++$XR#s;>Dgz|MaCP+sl;2q#r}@K>62CtrFw^ zd48b|d>u8qMf>)(2mA?IOw(}Bva3;C-=%+=SZK|Adw)48R)V?^FP5=$gm0qoD5SZ+q}TjRJXYvu1VdYi>S)do7yTg=W12 
zjGwh&K_VhY-ujh4kcfa4!JVq>q7> zs6-cyt<#~mX1~2}mN9icXOLhV0#pHLb?n}K2#P>}4_z2w0FsP1O?d)W@Jhq^@4x6g zFm?wkYdkFvI#koo+lqXp!x9#d(MA+M;4e@t?bsS79;mFofA{+ZTovb40hCtBWrLRH zEF`T}$6`Qa?Be=cN+(XX8+Cz~)Qff|1G&maNC}Huy?7W>ZSW$c-?&i=7_?313d^`I zTqiHDNtBR)unf{bGFFoj`4zl)QH6DfH=*u;Kk5rWWN3b>a%g^jpSSrzzG?!(*aG0B zi?m3*8TEEtHFY;4M0Jz-l)0CWGje1dfy@{t)_>?bapFXF@Lq5a(H#_&l!UUvjKKe$ z54l~WSCxlW>#Vv_5u92+)1|{N4E}Z5XSFaeW>8#ec=wocy}|dNKcPEnFm4q7+;Myp zvMdPeaC#rviFAR^r@bdlGG?%Eu_6#{Mk65YHES!)0WtAKM~$tOcl)!)n)R>v)rF32 zC6CDTOT#~OViRyux^-Wx3}+A%NV@p`K>>$Fi^2pst#>`Ugh`Vpi#AnGXXvyLjT-et z#_E6Q7|J>ssc}RYwP~Y;!YxB;Qo&2h$uUVut3_R#nwkpnTSpQKTrvXVkAD30$*ynv z1GmSnr)cEM*>||Pk$2tZ&DY@uP*Z?;ih}ePv$miKtWxJmdHt6Bka1gaakT4ekj)`*7&Ci@S2NkHclXy3 zw*k75jLxBSWLn5K5Ee&7MG=c5|D7DU9MjtZ8d?T-=9fHiVaCPQprx&yPBj&`jX>%& zL(3_QD>vvFd=SMqjeEq79W`5g$XAUWJC^RM1~M!m;u)K}8f=KaheUq0`F~sF}NgE zQ&SV&O$J+f7JISVK4ax}ikb&}m0#)0>X@=Rahr!6fB@R7){gj7ewZncdBNrQG~H%> z{S<~(iLSRRyr5xOBucQHg@esLT`TGz8lqgq7$@u7uDBUrgOsBBXz$P5?oQ~rR7G$C zrb4F!E^0%E4kZ=uM3W75O+U~~e*E@r6`?>Lhcy+{@O(C6&xN1ch``T=daV2uF^1SX>sAsfY5+CD69_Rq(`aE znP|h<|MM%zzpnFP!-HRWo)|8GfB`=@pIm~rd=^4IOKvL^{Z@mxVG zq>Tcnpn7kgI%go)DIt$szh#S7zGJ}|nB6OMe#wBjQZ29;=Y@U16=f(ew8Tt4+8b`SjZt}Ka zHqUNmW!Xd>2~m;op+{L}6RUP#d-dv-V$bKJw=K8jTv#e%)#l%c<7cGeEpf8V=D%9} z`h0*ng4$(Za?zCiqJ~IOD{F90XX?Xp@}VL!i^6&*?*~lTAc|@ zR2=E|W-W}aXt^Sdv#S#qKge~NosOO1-)~zxMAho-u01ElGS%zUt-G8;xiRrKfFsNU z$i2XhzV2uJ)bevv{5_eS)D?-U30s4YKWEmuO&cZ*3m_c7dC1->UtU&8VHb*uX3hv3 z)f@|D$zccj4Eq}F`;;61u%j)sK;#qRmPQ}tA|^8)&>fx{4c@8C@AnVix{RSgt|UMa zpV$aEdmVryB)kmOLY4IGQ1Nq=$m!YUv^=>gOCv=Z%dj?5S&*KW{_cO=z3KKvgd>YS zJ>6*BqH^DnV6ulHMdn-^ZfTj-XYu!rLeUCWD1_6oV-BaifkHtct17;upzAd0s*Z1l zv9fOYx7+-b2`B>n*V2U05yxCz#LY$RLHt(GJYzF+X%-QxGC~hVe+MQx)WgGw_E_j$ zcBBZ_X|PlW+yf6`cX;b zp%#~vc=qh2t7|$D!!APeKzuZ!bl}|z0+Err8fqHpA&OsIRj$RY4o-pl6=4k}z{F=e z@I4eg-5L3=-C|qXHwPS}9M|Uv$epKe6g6H1OKbk6muUR{)#9ut|4}u@u&VT$JJ!cf zqcLeW{O*{fzo@`v7!jM?Kb-uEw1`ZVCqEu7ig1cNjPLkYXLDmG13}h)N~4VD;(sSN 
zNQZyS09Mr>`~eJ>!Yqn3wjMl01De3DJGSAcJF@VL*Y$1PYH$m#fLwgp+Eb^#R^NPl z4TupNg>>ma!0ws|=FqFOtGI1xavNrgZnuDw{^Yz8J;`yJmj|KK5`X6Grz2FZy?XYP z!V57>tPilQ_T|afabhe2U=1J@QU+a?hKbLuqMe~3U1wqk6uch=mxYA|zJ=&{dF)+L zJ)-~S)~Sminz>sx#{a_Fl7>Wzk81a>_0_la61C7`FenMJB~6$A+H&lrLSaCEMe67e%CdQ+$V3;t;$(PaEYaXGdw-d}%Zhzna;MUeiT* zy=-hico1iy%#YhNm*L?(36e7zvMpejkvzF@>35FzQj#sfpe(e4XdmJ`7~A!du@&xv z6Kem%V?V!4Zz46C{3VA^AsfXEBuWF$a%l!w&4QrgyKMXHR_U^9bYZfJr9y%0#N(1~ z6;n6iI%{B7Ne*U!Zizk(!&OiW&yTEkFg zrIRBs_)00{;la!_Q*9}jpT zgx_S_>gP?pmVEc;=aS_jsu@+y(Tk4HKus{Sz1KFMGqA{~Wx9-ZG`a-qr^*JlKb_&l zpoccp!yP1BGNz!H`p=l+;USC=7>3;Y)M?^N^ut#q3l_K{cE`(?lVsO}5aP)=TDbDj z+i|x@7O6we!-|Lj%GnlX6FWy_860iZkg(uKx0rs9b<>o-=W}BZ6QjHex9qADHGV~wX zbL-;+u+~NwD*p80`(&H6jMyhe#=qY`W!fFFlQIRE$)!4|CoEM%2(YLkalS4`Vz}=w zdEAu%x&ub$3I3MG$`p<1VmWyk-~9w#U}OyYGWdBCt|H} zyG&;LcUg1%A~bp@q$ey-azfBh?z_#&JXhFdz{noA=G@|3g#V`&dF+bYwR6Xg!~EPi z{!JOvcAz$*q}A%l-ltBLHto@|;{cwBFxz|ica5y9RDWFF?F$QBBz`^A_cavL@=^~@ zS+1iSVI$OE@sm?sBv6Zj|EjTh#XgXXiI`P{5AJ1RtUM=@M{q<%X}y!RYSz5X$Cmd+ z(*vx27w&crLtQn>-1Y4}>hK+x;dsWr7s_^H16yzpOW`Mq^6I`1GBaC`-1`&qJOj%g zlTbCq01ynYSarN6_!ubpf*3+DIXth?!YEp=gipr#9nQ{>IdWa|{#Td?DaAE0+Y)M` z2Nbw%EykOfm6fY2yY&f z#-j+dLdM0Vx>wdjyzQa$lgvsA_Ntq_quppBb(0s5zV^L%{`s+6Kz`FR9` zg~4bcMcC9&PR1#Tm1eV~VQN{K?j zjEK*;&F8aJr>YtnrkeTOW!Q@c?#prFSo4+Nl2<+HR}fs(uH4}K+lIiqnVbdUoRoZ2 z=5PMemgL5pwGE9D2Z);6uqqM;)k}ZcAutBDk6vl+cEffd2906XlRG?@hFO;Iq~e>ir;;1awQ*lklnt;d{K9Ru9bw0)TGxPf~WxF z2|ut+b2*@uoA@4zHuIB0QH5r_;4|z2=^ni6IlT+NBA;ns@)|2udw5;plv^s5iMQv< zM*t$Y2&*U}drX9OYY|;^=+&zsbEo*2P_l=CT@7lr;Uc^WowX?L?!bMX!$uh1XP;c@3eK9dr6qJv4rDrjMs`+(G8nZRXw)8_n=dB0AtB2leOEHIMm+n8% zc~MUN)~JVpM|#Ot0wPiVX|C(J9mrM25h#`e_8+{_*`H+y6Dkf7+8O-$s?L&|QqluO zr`d141U>m0rXtFrYJ4Y9MyB!^Pfi}Od>^b65+1H{=xt=Qa3@Y;7>25bRVjU+Bz7io zH!r3T5JIe4Y6>(?E0!-`u8cuFeP@hB4WrX*^Yx4ev2KO4p=iV2JNKLDHg4R78ET>T z-pQW>kuh8mXl%`j9Du9u4x639wmyn_t6{?i&6v@SUTPNlMoJ)PD|l`?>MAk|?tIQ} zkwp(w<CP=XBOdC)R;-<$d-c04kE>VXRlkvQOyCW(uLt<}v}gJj zISzS6roa;%9#t{@GxDLp9y6nl;flfvR&T;BF*<^wxxvMPBCgoGcf$F(5~#rcmT`;p 
zI`<>LTS;gEGkW6b!nw)`_wL*o$iOmT$`m=bL?F&-OhYh;Frvb~KrIOF`!4*f)o1>@ zHebHE8u{9NSu(rc?F6IBL-9DGR79uX%gZ!;M9OJwTos0x5nSjs-qo0Wau4}RKnQV2 z@GTj7UZ1}j`$ZJG{H9Cvv;YWkZ;A$++RBV_MRY{GQETxwuSz>*0$SLa9$YXT|o*!tG)tNt;FjVugdVrsYm8SOOONo zj6Qrrk;qaqCDlJD_$?n_*maVV(_~Rq4&iuDMTyps>nO(fU#J;vk?`VA&kv^@2B(Qj zZ5qKxJvi%GfGF2l2+J`h?LpEKKRQLpx!bL2MErFyBsB%FNd{-fxWca_Uk%0$sYuJs zb&PuyVv|LF`6pnXYs!+j{NJ!8Rmo$#LX1WVoDAi!B{8=N+ch=J_Fs+oB8EY5EM`7U z#V^QXuoq1d2MjX5y?eBr-(lKsbG2HHRQc~dJYZ6*Lu0RFvC;o_^N8MrSE(r==0CwM zZHhmLpj7;ck&(-o>XTZ6X%o4sx#;UVJGXH}Db}I?vVa2!n=y6Lq!kD|_E6tN7$cVv zBc0PG-vQ)bl=mQYPW?J8i`QZXxF^z`2{PIpm!QjL#1}NeJ@H_CBcm@a=NI>?B#i76 z!k&snnm$sa(uFshZdXIWPOAbS6?YSl)m zhagXrv(>rX$oA0BO`ERa;~65pO-piZ&3+!-zNroR^~zG#r_Uu8n|PkU9O0_8;A=q< z3Ya3p)T~7Z!(EDD@8@!Mgk@%3S8tuOkujDrM3j1IX?rZar4``<2{HRX$vQe%L`Od2 z4y3|Q1XE)2{-O5`uSyV4zW{(QgBeZj?6#0gyum7T3ARI9Ld z!*;tiG4!6|5VSMl3E@{xE1|9S-`znO`aP-+_V0GO0%-+*5Y z056^FMpMB|o7z@uVWg~qfL}4n|IUi|&Jnk|7aTniTMJHzNAc*OP3a$Y_@Nx}Ab~70 zU^c5@TIZX6Ka$BiXRPcXI5qMX@wKBriaSjOxKlCJ$EP;PKHId-q5VY<9-wxCGZjrJ zqH5O}4LHyLYVexIX%uqmXS!`E1g;H&ft!MafyBcx%R5Rp$VXj(ZKn% z0FH^xX)onwV}-l-d`JFTQt_(u%ig^!PUPR`M3{hyI@vpB$eP<22}P!gTBF^3hhPD+1?!i> zMQP;)b3g2I#@Uy>pjtB&_mz%`uqh+!&JGQOgb3!S-*kEj0G#8Nl~(6CAgdsoLc>-^ zyaRH*kIUHPHhe_rY{HKg2D!WsDg@})cXL#Iqt5c&l1E&j~}Wh=V`M>=v4 zac0_mHb2k#m$h10F29ChMppeeE#4Z*=GBclXuud2%uc z+)bn>qOYI3n;AlaPgu}TPp4?x>m{rLfRc(#)6@6X7#Ei|&H7_XyX|pe@fdB#)9<4VWiniT=+FJzcx^v`&{=xF;_}>fP^y;@i8m`v7BElNmxA-<0X>aiFCafrx^bwBh2o zLp%*Qw^l-^DhBk2H&u_lL5&b+OJRa#5H?P=haK6r zu)1Z2+!Euqy@F{e%;LmRVx5D44y2%?(1bEz`b@=tM7wJmTzNKw+x61hqBJl6`Rj}M z*ihwB1R7p?crxjdUWuRfeFw8yit&7(0ivKKL z35xE2$lgv9R~9}5{9Pxg7^H$=7?J_;E(T#h1Fll_AKXfLBrRU7{nxy zOHXJKzLpr2M4Bq??_X)4Bw#Rd8zIjTY2crk26d=-GoGF{U{2h&*wr$FH++c?^NGtO zpep;^IzrQth7O72kYh;|^!^MR92UGTL~xPr=6Dq<82ase5i$0OYHH`P&i<|Z1DL)QVvR#N}25!#^s@wX=e;xOse6wiQwP}f*{4(14S)o zIwm)DHBK#oH5B^8He9fFFVOR z6YMnv9%s(HO_oK?roHt6z#0uFsZN9Z-Rf)=f&%Od-#gPRvb5DI6~#&C*YkZpJ0X4| zghI#DZ0qoq>@jq`rO#SAx`?DSj`Y(&(YQ39K5W=9P-X`#RW=8}Tf`>t&SAK!VZ%m^ 
zgsX?P4SP+Vm%;7=@LAZ@Jm2w*9oMM5IY##4QuKrpfL%MWJ7X_shZpc?WE6^%nrhmc8$4W7i?&J z=;=VqC@W&)fFxHU$ZvsA)M%Y zz<`D%MtV4y8~O=YR7?8(@slRSmiSqkl)ih^W!{HpO$GJkaF;MP4t<5<$&)Af1qD3J z=x7A{)Xq$VPjIQ^2a97=NV!529o=)?BzhvRTJ`@KHer+#4l3Mk^l>dd;b7@@mu>#K zdFfYH_IR$j;j*^oCzmwwG_Tb1fz78gL(L*oLoxWn#g-Mz-%~d^kB`o>8vOyOo=9{5 zd3J6!(4+@-fY!>=a`HxcJ%nl=6as+^5OtL96x|4)T>^##d5%)LZCS0J(+}mIsM)xZCB5oXfX_ZuoZMx&6Bxf}gPWk1}iDJ1% z%tOY4Avn*iPKH&5um=G;nr}Or1+nNmIZt#QtC_U&*$5W@!3Av!;I9Ph5}Z_#RV(Kh zVht2u5=hv-VQ3a{!TzKY%{}iLRQhevgj;7$pDrY#Sh<_!KeLy!^Mk(q&zw0^f8l|4 z?jLB$v!2uXNeG@{Rb%UAp!@<9PzLvcRfhe1jUo!(WFvc>eyRdxDj~eI1Rgk zN(56fkOYP+Tqn2FuM;6EC;Z`~N9}iP#%D*jF7*{_d~BiS7$$BJkK1OnR&M%rWdP4Z zt{L6(`vD*KxX;r@GEyv@7pI)*qC*qKJ-g#^Lihn4?J_!naDd({=HxXoZvg(N2NpKU z1iA$65dlY6gWd6#Jnl8HXhy)C1D>gW+kV1GG$OQ9mohTq+`8yRIZ`0Sj1H8_lr7C0 z&7w_;!GCnutfQI7poRr3x6pz{PebFy_c5E3qz%9X%hb;0u!4F+3ygda{=mCjqqS14 zvL=a6j#pC+uVt-TLfF=9((~}c`FJp7h!+v9VO8Pgc(74}Jfb7pPD}1u-g&o4M$ro- zA+7l^OMZO5NQM) zN06HcK_dEhh}4W0Abb@;91iKvo;BkKt^#1t+@Kfrghl+{;RoYxmHpVkL41!S#9G|3 zg(Z-sLsZGO9B<&%nWFvVd&&o)QZqBgf3AGx-gs{1w~tT&F9E_HgM9zEH)hfSR0+VYcJzg z_UC}p(1}T|4Gpqr{k{GTJqZ1JZgJh@TVTjmPx%_F7-wTRnpm9e?Z#L~O0X?T>=?Qg z6qOsd+3Q8w8dQ${s~K4p1Ca38sERcMfxyO70sOvy+#8KltSbWwAc&EX0g9sd!cY9< zC?$7SP~VUEJnfy4a+A)+_1!-0Y(5dF?Y6Z8RdPw{sG6rW0|A$)ak^AQcyo^4 zV{Lff=1e@yLjVsh(WxS3WvS`btGAN>II(pLy0yvDwq^=;`EUHdMHVjwQ0RLF;_X-g zOGkcXsF5<}@BgzSkge72Mo;K=t0+TYmZ#$okv=6;wI; zO=BO+ZS@*8(pIpah46GO-gm-+!&}Z?@IQ9prs9Q@81B7>ql+6%%ApWh==(3wW7J7a zJ$!i71f@}Kr9HAMofRNiAx{OD@yw}A&qR~(S2>MU<2udzsr1hcvk7oGh3;N*q?|C0 zlStSau?4j-;`MLX{r`g?I7?L8a74%qC4RE$DyxH3QL$4C$qJ`%8Hit=s^|Lu|02hz zU;>QTWgscwr^D`bH!@PEeIs5A{d#5JZ28%Sj--Wkq{oo)*4L89*c^M%$j*PmwMjy& zit^$BLo|OxL|(?$VM|4MhI*J*@(!X9k-5l4bMxj!dt95^eYUT9;yc0gnRO&m63WQG z-=$%t>o^wnvrH6$5Pyr2ciD`axj!yWvfr>~Ybqo{&#a}L>+C6l_L39_|q zfjLB5_i#Q)f>^*f+jZ1Mt-9(s&-`=u#{C2OA{mB}93+NhbO<&lzLb>-Vo?n~Wib_* zC_mMFKw1d_B`9a?jEF%2Zjr{K3!ylY>HIQlSZuDiD?sutbN8GC9nhwZr*ZZFI_)4% zM8*v@`a?qe^0E+4UZg%A;qLCf=#hCi5?C5Qv?SkS(9qTui(T-hA%m7K;V&`|T$YCH 
zbx+V|%Pif$CiG1^cdns0icA$nKDZj-jO1jZ9ozKb7u-5p^E7Oc5X)5*qJ{&^EvVlu zu_Mog`c7XZ_Tmh!MUzQkOYGGY1IT2~E9BCv=#dCv52dN7m-mXXLA*{Mfs{md!y#n4 zD%LWtrL`Uho`{g8Tw45psFXPtOmB(>9&)bhZ~GAKB2+MVi9|q1@5CUIC;8@$zTlX4?AMwG?hYZr%a))s3F)EiN_=_qfbVMb z`S<57jyB?c+WK_ytUHDy7w!VHKQ@5hCn`OJITl)K3l=O0Z`3VO^o?$?T8N!yku3Ia z1)-^nokKr&g;8kl{{50oh`?GU)NFiBAv=i(7=VFcS6z#jeLj4N+)INXQPZ5)n}-dF z&Z#_ff@Ri~taFN>!7YPtnOs`)*aa2Dw{x-D1*i?gTEJ1baa)Gj?uK7p&22Y-Ln>D? zD;*ZG36P2i7$7JY1CZlJ^nys=Hou-)-r`(gn?=n&zB$&fbLXR~@(|U8zim)EYU1g6 zo+O%34J0@T4nY`ER$gK!Q@ck3P4iJQIrk-=iq2$i`I1DH>>*xLY1x*Lcs+kasIYKB z4B5op#@sGL#oQ75AnR4~ZHu<7kDn)X#HJ-}4$bOiPSn*qzHm>6t?XJ^EStB-XitPX zLrSOk_{O|16DCZZ?U9%JlakspwUDf`26|SMMp|k#p~JUztqyFW2;_-OFP*GokVSu6 z;<2kU`4Ou@Nkx%K-sYh!qJ>sRKYOjQ#H_7PzWczQheNv(?=oL>!AMxvp{piW3nc;5 z%?fbI7F?cEi$MOHY)A#6nEx-y1irOSGz8A#UMR_tq0hYpM0ZG(Cb1U6L)NZ6^5kXl)6n#2^c#0usgbq*`S|Wc&I9EDB^Gxs z0(Vb;_B!!0M9OGBam3cG)e-OcsE-8~z1JteUS;%UyX**(CYJ)b5^1 zjKC7UD*Tk(L?U0*(lR(Px&+O@8_^`zOG(K2SXSoH*B#0oi)L8gahS>giUv}2#L>)a z7iJgUG}&hAO%U?44T!{91DhpLKzPs)kO6iLqD}lakB9TQfZr+ZY4EED4>H#q4noE& zNb0`>Pg+t#7VH-TBtV@|-`JkFG(zx9C~3rFnq)%|=Oc;3t*_M8QxC0%$U=FegJ?Z&atrPc_}pEFWYNNbHH#=kQhQ{Gv{bd= z?39oh4zU(1t~wV7ENNv07!=fn!`e&c;CyO0j#d(OjyrK^iR=^_SMr7_ZbKt}4*C}% zpd15hHG$u+MDsAjz~%{LA^I28cr9@Wv$&;^#zEnFM{|6AZ0Z&+qohGvAqnzd@R@9GP%f^=(iN=Bj-#Vw%w@ABwy(W_YIhm%?Ofaz<#I>}ST(I~Y zvH>&Ei%Svw+l?Bb_ee;sj0_oi1HDTod>qf1Wp%Y|F@%`GI&ObeO*=B?S$h&LE4{jq z`U;sH;=+uQ0m_<*%u3wCPtQ2pz0(XN$VS#MIlm0JANgpQ34% zY6ux78NeLZ69^A4YQ8O`*z6wh+Hp1R14&ZSd*WOV4Zvv3ABYPDJyIU#PMo;@Z==NR zE`N?D3Jxzyj@%mZZS7qy1entsYiu3vRG1oRobb6$zlg*~4SM`sZ8!aeKVbm{ z4`4E+T-QQYu(-TvGZ#`c#5yjnHvDdFel+YlZ9Rdwc1KXb>A(@a?H&j1(@1%-$1!I}B_bKHWbkr{#5>y7 zg#0znL+6m^d2B_ThKX|>+l*$UiU>udS*7Ku|F{(&F;byq7y#0$QUXcxV}|nn(L#i< z#!S~fJvFx)>(+uqn7!{!tS$Ka{0|>?&wSM*B2_G8z&SvY!do)>n?4om&K1!Y8CGSM zuU4ndy1tpKFwXFTBq0SvMLA-`#?5GS3XkZhgKlY%#);2SITJT*4>@1S10YS#zR7MF zNOD@ZU?E%COy_e7^IqTW{L!kqq z)S42$R84NiEBp>F9Zpdc?>*%0)PDo!5^r-D{7v1{Bml-X)6jl9D;9JSF^U*H1n`26 
zXvwnXW5$zJA!!oSzd^|4n%o7G5}gX}K+y_fH(*ZaB&arRd<->}uCqOMH_1$c8*n={ zM?)Mr0M#C@uTlR?uA~1CbH9&R7&^l#(jImF{HEC3{@y!p#~V~Bqc3f}{Aaf}hZ?XX zSo`-+@M<>O1W!*w=kT!#ZXj1h_3wY7Z^l2=uZuTt))q#Dsl&19_IS%m#O6kNx;h5j z9Y<_tnnMhTkdVafd-L?8?en-Ki3505rJqE!K9o|^+*D?Uhuqe3{`Do)T!Ve^Tk!$1 z<}6syA-7ZX30qiIMl2baxEm-4R|}vW`-|P2J;cN&ST@htJ!_t2BFa1l zeVIr8D@hLk7oMQzvuDp-PR%h9RtPDfe%=)vj$H9x9S**R+r|7ZCO1R7mM}Mq!%eOs znKET1+arb@_P23RdGPEX9`~#(>yD-~)K_b%B<;jy@j^v*WcZs88h_vbCD&9;Kt}*n*K-f?D_`1gb29q?q1Y z&+ZgX@~}rimB8U3ux0*H&Me$jTV4-2tYh^27L(!1RjaCzThL86%F0{9);zG^H1rMP30caxIBJy13(Ls2q?7~EP=8n&B*`*<+_9Vizpha+&iHguT-X0 zmArW)spskZv?JUK!_4l%Aq5neGq0;r6u{6$MniF|$hO3h0w(3u($OmR2lb-U7%~mk$w72(-w3}O4<)Jy6v~!P2WbQvX0CW*PkiFq841b^dri{D_SZXriKIXJ z8)x*U`p8)0o0Snx%(7`o{nG%<*?8P+(T^>>Li$9g`%fkZdZKZ8gS*}XZ>7r-4-FAA z;soAPygj4z+iCuDLQ^E2yB_tmt6Y&o$lLDdEn_bPED;)2pk7Kh#@}#oYd=3fPBy)? zq+1_>4Px*VH5xHeCFXd!EXSat8Zd)IBVyRB{@?x+`V*X?;WXiG5BZ{MBsPuEvr6~L zSw5tQYwSLs)%H;+ZoIU%9H_Fh1B`S$yE{r$GTmCWR6gU_mshT3sKb=S_tRha&)Z|C zedo>Nu-1gu=y@#(N)RLzA(IU6TjT4{)?fT=gTOZHD&q=7eYjyQ7S z8~@!WcBp~hKmUzO;8itkrNo6Y1+jz}#d{)x-nD2|u>bF7FXdw(9(tAcb`jwQq9;^9 zLG8J^wb1g-l}f)}+jxab&g{NKwbxC^M)ydzYXkPGa@Pjq>LLL^fhiY<37AC}3$_fQ zrXkL`+qZA`UEUpPLSoCQcQHtVxA~b`PB!|hT17JZ()IL(o`l)hO;kn~!pAB6Qf^%4 zXxDBxuS)WeyUEB`6D6I@ zij0w)VZm~Gk(&MI+l2}u1AkW zvIJxIjkM%5_+y^3utfjX`w^EKA83-RKenQ0P@jLYN5^mwFs-ch@3)!^8 z?tPUUZH~_M*iiYUWutc=cj7w>k|0>^F0Q8ipTN1dYWCHsUSmd&uIImNamVHgj=4QJ zCvboexFKKxL5!2@VE$2df-I735LB~*#|qXBI80^{!G39oFo2jXX?{M@ybh8?tBBBz z=}9xuV^%&7q^^a0C*B+u1j)hdJug1zJ#_sV7L^BdCrG=zJg9&*%mEVg;5B@NYCf5V zlBq{_iee8z13I2H%57g@Q#SPt0CkY(Ft|^2SUYSSm3c1*Z#&k~7RNXkp+wL%IcYii zJjC(_FfXK4mJbKvCr=io+b$k zn`&hjya6+1gX6I^QKZRd9&!Hi!LwKGBzCu&qV}H_2j`QV2iEQ=X9A#&S5IBL5=33X zH}fM0@2cOZQKBD3V|v`r*9s@1QyLL-ppfUu!Z(|LSkG?oZ_EN&)~_*zQ+ z=7j!SaR>J=sXcPZ2X~V#qckM7F^ll}O{%Kj*LTEdp~=mVz)1NMeol~po=#FKpxMFC zu(q?9)D(avLZ8`Fe&DNEDDCDs1UtjLid2xg8N90ueFyfiF`a&n+4>=>bl?FLEQ!^T zq{q#9!A0ESQ-SES88-B;S|ytayw#@JNMbxqmWVt$aQVXEoL?e 
zoGjvR9^UE_@wjqPU|ilG7;s7>;31Q^-8if0&PTb*gpG2tekVt50`w$~Nim+bj3APM zd_w(9d>i1LZ~7iPy}IrA{jst6oV0v<8hNj5hLDUThyAr#cPm++(}XXR>J~MG?T--y z6~>sCyEcnVlMESJ3Jp$_Im4|~$B8abDE`I2qCR`#1EtCtaZ~vnF2~KfaYIA#tY(bO zWT1Bh56_!RtR>b(lE|Z6A*G~}^!59w;9(7BofH%ns$(MnTbKZj zmB#cc)IVXi<#cE897TjiCA13!CrdP=|uO(Vx(CL-u;G>Oou4(|>}u?NpFAc6?CY}MJH**N$?p@L^^P9Kl4%jQuC`%S zreukMYreFW%GH*e!cvp@=B7@25~OT#1Nf%voXb0ioX8i^v)p(k6W`DkOw zTR)|ucJ$H51gOY(km0+)PM9m479RNJ8MEU3unuPI;$jb1W-miRl1KsipCGk!LB3Km z)>c4J`q~0oWs&hq9YT*?ldDnD25LN;>VKEzA?6u%zZAmDCetx1pVh%?cB_Us&^{c%%V~S$^3XYFnryxC; zlQ;55mAVGv^|a7RF*t*xk(;SpFIXmmqjZ<7Lq@h`98|a($=L!hu!=MmixT7SzquX& zMF?U!x_!aO_EM`PfNXPd>w`uYx;qc?Jh6p7s9S>-RD<~A&(7{E@U7uE#O(?<>>aO! zqVSRls~@LLYX9V&OzO+gtjSdg2xY{3XLX50Yb^=71~wD~jfv8rODx1z*QRJ3Y-q#z z1ACY#*u*Mj#tmIvi#9$D=Ju=3dEwdCKVtg>GgZS%4c%i7UvM7KDQ@k&q?h7-FJ4z5 zdjLf0?l5yTERLqqb(bCEPB!l5!^Ytj9ndpJ5Iohz$B(xq)uJ!53(lP0Xkp{Z(c&?s zL2*{;ws{EdafvIJo96`oKjIanq-eTLUZR_@dhv=++gq+F%H+p{WZebi=-QNtG9>=G zpY5QTRRra==+d>T%nsP^6CP*hHKZ5l%$Nc)!L4$RcRPh-?K~TQqp&;Q>F-~rVkAtw zjZ-?9rC`5i6b*ajW!HOXUA?S^s^+vSy_2Wx-Fq~*^APOMy5ycc$yKPlXOACZm%)wJ zAVRdsmqgqE08EdUkBQFd^m~XL-TL*baU%xExHxlMk9Y+Cm3kW_v75Y}e;X+`0$HW} zsSP5-Uy0vWTsED=<{ZmF50?l%0mQ>40cyNIml;Qe4)lSTka{B^ErC3eJZ**}}t zZqiecpKE)B%9fsyp}{YcAew){KOiYdfP_V(-2_%cUSkG%Zh`6Ye8%~#P;^QnfFq&}Gva(ruLs9IDMP8@!ENB%^{oUn}F zQe8;YBDtpW#YF4%0!mc?6h>PH`zQn}tL%yBX&n6#CmBUA`$gL;H@ypeCtVT26A3?y zlPg;0I8_F(TN&^qD=)GW8z7%U7b1uS+&AW~Uj-?85f5Ny3pRCg|Au{I9A(c&Lq{&)!cSw+{c*;7`Y3L00Je zZJ$tU^EV_U9*vGSTM-C{1f-3vKWegM^D$&hGt5#6!IM-WMt-UcP-cf zn1DKpG;ljL1x*4DF|i2X0Y4J^lVOxXqy|zKDc&f!d?!qVfrcW z9IZFsDx5P~o_RsfnwWE+Pq zsu?mOL?h!8VGVpgLfe3f-wRHjIkS1LS9GNGv;3v3UmB5aVm?IWins?M5Xh|D$!p?s zVOD_!KL*7hr7+RZFk)d}BIlWLo!&g5VD4d&dyRncraEwanmQiHVY%+l4$%lJhYyaG zn{StM_Dig2;#XR1(Sl(SIPiEppVp}K;4?8tkr^15x28cSbs84YMj(T#UaeX+k|pfN zb_&f}!8uj=5rLQ_0k2|JQiJu~xw5kV@6UB%qtq0XVhgkrr8~cY=}lxFu=(| zv3e_RFmbbS|1AG0Y+~%ZJO(nZtZ!(cHe?OBO2q*iE3+!=86r31orZtj*!uX8W&iLOoFcE-KQB#2CE<6$Ugl 
zssr&ydo8_ryCYF-!-#(_-dj4uGLd+vE^;+FGq}x(hz?@|$oWwOf+wHmdZGwD@t7(A zrLLEq1^edS@NZXwb!6s{5lLO03smF7D^I}pau=7C)d1E%U7$mBDtR@Mjp^pLqi|RQ z&bNrv$Pe!mkzp4YTPAZ;Ol^f+X<;&Kll~%Z6tnx>zPS0v#Qbx7os?Itlu3dqP3eE? z%$XO+lx0t&GeqSC-#WYiY&zV|6X83WmgG94$L2JuW8I|NO&X@ZckD zGnTbcdJK$RU}?EcXJPBE5s^ApEmo*<<_0YNnC)g%-Sqk-z0!b#Q7Jcf9Q$^&%k&95 zdhI^;?dxnGodG!n_4$&o)o|iH^p&|t+MItQQ{M1?Br}nTHKfr!x~3)#o1Zjfntw2; zj=6F6(T&@-B~IPtKLq9S0m*vhARO)un3Mt?W&ZQsF%(Q2@$XCuzXNRN`}W|8-X%>H zy%upj0v&4y*G+L?vTq}XHaqoCqF=?1J3H-)nFQw!985~#UW;Ju$U_I#FHA`~;XuPT zAoVv7%FLR>Y2JX|Y5g7^Ydj#Lp1!{E#Bju4+ltN`J$Uiz)jmK4Wyg-A=mKinXeq8H zlWOG!{;jR8ox{;(P``d_GPKHn{7B?@pw{HSY8V!bVj9rIX6c{u{o$SbpY^CaYXvLr z;o9c@FLlp^moviVO}UB`)QPs}SoX=lmS;a*Y215Oejq5Enf2(SlR`NyjW`}14~}+( z3FhLK2Y_?l8tLH%HJLdt?%Leub9kp8XxQa2hJEs64q8b5#uvh(-R4%9n3>taxN?QV z7iVi{?WZ__M$w?{&~2)i6$KKj?Ai0-*N^j7k_7#5EpzVyP^NKrztI}srw&aHw~Moh zYSPr;YLBp|im>D1MGmO@N^|?asGfTEU4IS5tOKm_Vy;#n#Rf_B?(Xv>Ug%V0Ckr9O zbD5jehikn&fK};{`}Y1z1NK=Id_h9q1{RjF@o2kYR~q!d;=#1uQshTp2skHd0a6NNYGVb1WpoHAz@y7#|PsfU6V`FprRhMu)B;0_!hcV4wVHxqF zq$EMysl0_;v;xNwvjB^a5T*a7;D7jkBk1Sm&GAwUSP3mu%p-NC89;#*>Dl%+r@a$y zsR-jGEUIn&mYn9ajzLr0)O_K+RP$k>dM2MQ*B#ljWxw@pE1v>%p(7X5+}z!Nck&-T zY-DWb$ypD()XN{m=#tAwn1abvF8`N(qeI%oH@>XlUJ(3J-m8jc@yWkvuDFxrp)7Hf3tP!u838uR+$&7^)O})Myif@3_i1VQMTC$t4O~4 zqzALrhQ4`n>KJ3o0rbj5UzG5X*>#)wOX+kV9wS8mLk}V_mYcUXIrTo~= z?C<$obKec*iJbeKGpzU0KWF7GTl}W=U7T7ZOAZ(zB#)em1M-8abw$|3=bn<+n(sdI z+?P#6k9NpDDn1KxqCT9?9$H0XGCy^z`0LX?UY_dz>}WgxMT@2`YT8DL zuWn;xWHiF{CVA|L&JQ%w!cG2^ow3zbq|FALNamz;uhZ}Q9?ZUzT{p3Q;~TkMYGy^%#ZXp&#$j%{myy zfw0%UR_$RnS^fr%8Y#1nqBt_Q-Pzlb48QEz&+jvmKU~{m#>4#LEd$PbqyLY5`Qk;) z1NTK{BbQ{ZX*@M=t!2tLd~_K&Wy2@21G~01WQS zeFJ2eIUaH=-}V(aY+J9n``}9sv40v)oX3^EuNS%wPHV{A9EZG}H3gSDmhN9<~P{IEWKbjNGt5i^4S zdE&(*lp}*~ypK_{@%4!T3=0Xl`fd36@Bse)Yq*Bu>J{%3PsI$-wpFVkz)Sb|$N85g z{`2t2Ab^I>R~}C8yQc;IbQ|QFsYqmYdT*y2c_Y`mQ8&pJ?D^F9!B~wgI#=DVT(UX! zRQcAcVVzz@`#UHUf#yfTdj%TZ;;<6(o?tBFvE|V$T|6!coC4F)L`u~|x;5cUtFf*N zr_-vX#?1^$S59SaVvso&PcQGvwI5b4F^4Lz0k? 
z`hsAw%k3uykjvt_wvN8j%=6{@cKH=n-`Q`;E?zNAz8O<$; zNRnfye{Vsq9tD&nqJ>A)mcSah#)T|_8`S+bA89w2MOK#Ef1-6U%ks=*^>^>-iXX6W z3;>nb5s8E}$PJSjW!v>_dGlo6mF5TA;Z#9SkkgPYrqQxQ< z+>sY9T;Ku4u*s4Cxh^c;yQZcl}9tHWIW-&*wIu?kw0D<2qX}WT`1J>!RMB3QzLykoc8+)OKWF|qJq$(NTBY-$9p1aiDPmoZ zfbdOfm(5YO;U3fgt#FEBLls3};VYi=dSeFXBL8Irm?dQ4$L-tqhMl$l;Tnt8ldr2i z*+|1W51jSwk<4ay=iV~kpV@5DaQ=h-9BI}KySOozm*!S5h%pfaD)gO@j{K0jmB3rQ zA08eEw{!>4>rmcQmj0W(6UiVG`x$NDa)?E7-LsmBX6b7VjniZo5S_Zu&CGv$P@IKb z19P%`m;%-t%LiK1NJEx$IF^F@N%_M zxq+4E6=@91GY-7}y>E@DIZ?L?WnpO6Y|z459wwKQ6@EORQ0(!8-`;JUz*yYGJT8ZN4_$fWuQ*O9@^ob;wLj}74An8rt=ja29$;#?{r8qUuXYP2L{42;@ zTq9*n`%N3w+wbEZV2oIzdEoV(O3TVN&^$r!>w}-lp9Np1hlh`T!#$7~e73##&=C3} z=DGIzgBv&Mn}51oH*C$CDC4KIGn;Zr$aLuL8WRI`j<@6g=I6~mABqupS_wbAzUn* zI<n8FY1;QPBw_i+0k8nH61q(jU?RT@Vr?d9K>$Nd9w!G3n-YmiN z5yp%=H}3DFH&1VTW^{BgK0{n=(e~B)fI&?K?p;qRhrPLBu~ecB@|!8ka|# zd`Iw!;4o8Yt=s+lJvVL{$zvt_Ylp2U=8U?}8r1pErUk{f2l8zF-@Gj^N;!B}9G_O}nrHc|C+^vvy{4tPayGUs z$#Tzh?jJtSoay5}I;32dS;mzs(v;f`A0FOvqPoA+zTpjjGsa2J;&bkcQ(pN2X2~)) zK=f|@rITb)>YuioG-+{%gXp)YIWrtCKcA~V1=N~K6xFDQ&wsRLmu=^xzj!yij_X#jPIwI>U4Y-l3 zQ|4@BABQ-9vl_&ZGyG&%gxVQXyXX~?U~T^S_8_)yE;S;B%R!!UB5&QKGa3s%l06x7 zezQJUG#lsrKYYCjT#kAB{(sp=wi&Vw*`|oHRH$TW%vf$~wvv=3*;<5TlE}=Mg*3S( zJ0X!MqFallnIREHNm40_wAsrN`oGUR&-eFy|F8f5cfVfGGiKC%U-xx=KIi8=kK;Ix z)1`kgq3WLf%MG*VHJz;61jr-UWHfy+LDA96qH5yf;-rSYMGRjBwMFv&S9)K=VA*<{ zVoRv&becE617jrn4gmkk)vKwgspH8jwsZUdA4bB-9)s{o3v5wr&$IWULU6<&YR)IL z`})1pYZlgTw1kXX%7XxtZ-@Tmx&K4UZwHP=12mO8PeWj(m7t6V>NEfli*of4lwX^##z0as*hvBS!k5qM=!3< zTmHMG^Qe(e&bhRIzPVyCg#<^eVmJQw+ftH2pIPsh!z2yCbu+-k?dZmlTR0;W!ydTn zo+Oz(2Ciw}Fa2YBHlRT^P1RXgdn`)EF>yU7v)XH-iIuaxW$E?7x;fVjH_g6qyot>{ z`lj!%^WQTA(WJf#Fm+AQ7V}qkRuk4W!}`CTW!+F`s*Oz$JLCS@h;hLwxodmVUo(hq z$q<*`hK4EM6M*8ElO^W{V#(y{`;}EM5XEn;yxl^Il#MMU0Bs6lVaUywrRB8ly-U^8 z+u{g24XZ=E55HT*@Zs9`>FcGiXYEWTi(BozKuN0Iak}eEVx8H(^m~aj)MRv9-D?9} zmf@yU>`xL!abm+;OvRLAdq~KW3Jd=qhH`2lgx;Y~Yu=djp|1lfmw9>9Tu?CrC6=jq(Ldo}8bX8G`zvq*Zd%5A@F54n* 
zj_K+bMpi_IN;dR!K#O7(xwwF2x#Joq-7J%IjmFg~FdS|%Dp~HLIHF&V@k_S-K|PQ< zO>R%4wrx{(aC7?f=rIyqc)DFJ@2PrJ+eESXioWbQpoF)kO&)fHnh7M8FTb$Q(m8#L zR*r|WsUp=oj)byN$Mz{^b>D9~0z(Y$_4_hD2E9s-2LBoGxC;Z3tfB`ythA~BzRlNP zha!+L0XKCbe&xyG*IE~zFUsXH* z&cu#sTvaR<|q#AAz|l`Y9i-=tvc&o&i}6~+3X z3BxHbRNr?UH+F2w%ioschS<({W+;K<>B(~s<`<0DU4x+Hbl+Cz5Jw0cqj9YTbQJTI za#AG$Z#nkP=!t-?QE-QpMBiueV`yp0swRy;adev|A0x47 zIiabNvYD@0Mrx3W-OOcLqHy=kWjR;`xyO#dt zMl+5FOO1wQi)+XmV+YL%F`g7~A9N-mJlbS5EbuMg=NIg3mTy=qT~il7lo@T@xUm$q zPNu`M3y&+t`v1$4XG9cp3A1vhD2b({;uqgNYNpEI(7uBfKi@mo-Lg9OwDa!|*44P@ zx^~HE;Q!Cj6_2lfi-bP%w};{1pYEjikMuyV@4c%Yc^N5?Uh8_*t1%_L0O+n6DdWH1 zkaU)jCI$n>4yqKo0c=sSM@cd%K{)EOz-d^DXZ$l8#52n%)h+rxAQ#iL1i_?3L$)Ue z`Y}+Fgx6_S(K%N;83rIUed@%HY$hb0VrokJ}772j~2nhiD(ad+avhMoVeLq{^VO&%VBcH_QIj$jq^PMO7C_)xEsr%cc5l5DqJ zQs~03lII`q;NK&upkPFHs>Py@_Nn82odf7CezmWa;RGoYID%v;OV%wp9o6T7LpnOK zLch@a$g;~I``KIaxz(^YL6XuC2Sixfa_m=maRKROklm+;71qmh* zQo})1|NXG|eL)R$2QKt(V7MacZ9t$&vTbqN%`-!*?=|UY=ms1r^qNcuMPYddM(r^p z6>Jg5E zO!kMau5K83=qp8cQ)kYeP4_uWmmlbKMe^rS|6mmbgkz`{`UKc;3GYRQ1mLebi_|)s z(k~VvBM3|JlAKS}&YXRtk~`%4l<1170_@j+JMEirBZ~S|dgR#lk7t0_zaNw9K3Mp4sgcy=aDRbe~ zb))!aH6WHoPy?Wl5+w9?wvICq0EDk9_z^j@sgVx#a6o#xAg>?_6{ZV85of@n04^;C zz4h+cMp1N^(N(zGyJ1p<1SZdIsWX$GqT8J`ss4g!QuhzV+a|xcGi~w`_K=*3sD7Am zy3hRp2K5&(xB7rh4}g1LK*jRfwr${DQ;AiGBb|9ih%FI|h{iJ6zojP9`ntNY$jdP8LS8;ZQH#3DuCXa{&Kg z8SCFyb~W?G`|xs z1o1=%Bl$bg48SXaG_f50j82OdS)i9k_(Bg}?8Jl34&J5?F4CJ?KTy1=wS?YbSlGp^ zNo@*HBln6fu{-FAj&sln8$uC!7nPi&bws_;pAxqIX&Qg!uBYLRFBy>LmgAuxn?l}2 zl4>l?i8&&$c&=s2HyAiT~RA4 zqr$o7L#8w~EUQ}pOi4)2`Gz~a!i6xT4x?~Uu+qbcEu?|F8Oj7wDOzkm1cDM{*CyXi^Kply0P2MF6w8ebeCcKZ_IgiIix z^0XPrxCle9!xo3{wcxWArQ#7K2$2Es=zZDq%F4bfLh!jo#@*&nZ&neR%MgxCU$Z7* zzt5A7ei#P7UlUX$ps8TzXu~=>OOlj(X?5rTS!PF21Rw9wqaDC==a$x$Noamr8$fn=mQrS|Rm+8yN=!J%*S9SG@^=A8P94bqg> z0w=ZTY;h}RiodZoudv>5J?`b6Y}cflJ)nx<%%kL7M9#Bz3&qvO8Q@59v1_}I}!-k0>1zsRBiVUfdyq?i-NUGF7+Fq=rN^X8uOU!{Tw zQ&BZDHuL;WGG;oRWh|Oh0U%Z3?VWyln22{b&cdA!{YWa{v2zqDOetD|7I`w?w}dy70m*LqgBW#q+g4%B`e 
zGkI85Flg#QU^qm{?nJ#AP(3FdffR~aUu0-ffk~x@QsD9BV(SO6ACsxs|B$MXz2$y? z;LQUg=15Bu$Yt^K=l#EEU63tGfhi0M$yIKb`aZ>5$4rMASahkaV%=pgED8TTn>^Ib z=-z_2o?UHbcGz5MrYIKsE?rt5(x`>6X)?^i<#|O7l&JI=VRZRgINBi;bno!X2>4teXyOSPX9D{IQC zHtE@|KTrvuB*p|3(z-uRSCU%b@tg%7JBY%RAUi%I*62CXfO6Cq-lK&ed%(}SN zGWFr1TjM8B&K4@Q=KQ!_I|^${PJV@~V;I_49@K;lPZ}!4lYk!ufMJc6!(48{{=W`h zDh7~52WZ%>o0K%ps~RYVm+?F}PV4a!awZ`U!dyq}F{k8dHS~o)PhIHkMVA}C`gOj! z|H!{OMpF$h#Kb5R|2kxj6TtRQ?Msii*Vi$}{3IC-darB&F!2x5o$gln8;vfbh0+vG z6^LN=$_pHYWe+YS?qB;#1#tLpQ(@=)_c#z18BBbF3n}G_)lq#t7Y*Hsuwx~NeLXU+~fZl?{xNzn>McG>{-K02_FI~Dcs^-+?b~=#(!)|f9 znC^S~=FM5XF?7s>QrTBW11N(7hO-ZHmv^t%L>O?6rILycTtU)ASF91_UWYjZHh5^G?N8n4Z&t;Da z(6Gex^~Q+{bf)?8&;i7&?Ou-ye4&tD<&7ZQf>%@j^ixwsXAiX*@|g&)(u`eQ?&4j= zh`UVEv!QZ2s-qmk&!crOLklMBdHABD@Vd&%8iWzq)FB*LIyz5Bc-V^|&)!qZU zWfOO`(Vnwj9rbB`W8GCCMm88*qJ9f1UCSDDzlvdmEhu4B>7M=jJ#roTM!KYJ_~#Ij z$uxPfOi>sxZ{+@(^+Y3}u_3Mc*Yn>-m0BkG3it9XL%Jyd7tV6WfM+5nw+&FZj0(sn znK?c^LfxiK1M8R7q*5szwv{@Kqr^e7+Eu7oHplAuLy}HwbGNS;5J<^ov;I@n4#s+& zpj0}}B^*ak7VZrLz2@~zbFo=>4e=SAjE&AZ$*kE;4*sPZ&W~rGvS_1wy9M`|G0x?B zFk{L0Y*W{(&ZJ!vj^2(lH`VJRc?6Y_2rtETRhH9KkbTqG_r#vIoja4=L!jFPo!(IY z>c5ke!l5FWfC6A@(?7;+%gv=T{CMNlXmQTx8tsAY<$Dd~y;*GOAC z_#majD2+RSS?5;&QEnv+E-J5a3Ud zZ&E^VLe=j6r>V`(wR~~SNGB12LfX2f1a&{oz+4bb?@V$~fuqR@GSTMDI6S(AN)6ii zjPY~uo@1dw?!ZJGM&fgf14x1yr=o=STno~xOJ^RCHO2iYoWJOP5Lmkxn5!j0#S|9_e4FeJxL^^xA7>x()us~4Ifzza#?Og4hDigO;b?AU zAEL$(`gKUrNyYj!oX!Ygf^h3mM}2J3s(JJ2VkKRw{j>~pJ7?ia z2GUI?FJ2IJ@dupJ=3t?@y-q&mejX9Rs~4>m-5|;F;BAqt?m`b6Em=>`Wc{8!ml8c^ zC3r==5ie)U#osUJAL$Yo<@C9uXNV}`xpRCQ za=Z^99%P+Hxm6O5BaZIR0Gt_!7X(;mBm_H4V^sLC+rnB4hYv8Yru4{;H*k^T)g014 zl2_X%|1yrE-Frprq|>a%{ch310ny!ii{k;!QBQ=EkbTsX$G8;|0qXT^q%#S~mnBA< zkZB#1m=!&zAm_9adhZ?dQWfi$-vQ@b!sI}9o?TRpIVZLxe3gl zRuwI1zwE6Vtf(>|52DaRO-$|}L4%6u+I95pZEnfeh#9G`QloSM`(? 
zN0Bko)E*pQBFGoxRd+r={UmK)B(naL4u?UwT+tGEPtZ|P{z5OHW$4-IY)JPKv*Bkt zIS23GPo2QRilDk0V$>(7zwwP$p<^8`u8rqmQw2VYE7FQYBsrbIU31tYL0hmw*XRr3 zoRgx)6s@XkKEd-jCD6YZE*N@t3jC^ZvwZa(oOuDEld2OSxwG{$VJ_I|Y8QLC@cfwi z)hLOq0FI!OEUa{tZaYkMazk8l)%X}5YozET_tD^m0jZS&+nWi-Uu>6-b6b?``kq@0!E(WQ`zNR1d*O| zacDvA#`=x6kQ(A2e0+v_!Jbc34*i&xXoima#$eZK7V4U^h@-(fcRqc3%sLR2L0wH7 zl^ey0pmAQmzP9t#n>$wl(64l^SFedc(vKfKI)wESqVs~B{<`uA;8o^e$_-roEN{sUfM4?{kWB&cq0o-Vqhs=c=tje8 zPo9#-BSBiEYlc+nAjRhQlwyo5Dg2=A4ANi*2^mTLGrR34Q(T}H%NDvH8!5Rm2bCOy zEhp#{SCIIrqi!T$+)oyk8#rv}&=sA{4^L$DNsF)sozcoJ)E+L(r$mG?4w!2?*bTt1 z;;Dy8ar~3Lmo8mmRZ}=|niu{$UGYDUHLt(EKSjej7ePZ55pe;sGP? zrbug)5VFWImU5JA+O#Pq_D||3ZulUSy2Rr@o9wh%ECfNAUgO-MY=-zjk z062tvb0$i**e$IT!}H|2QKa*(=xA`y-ohV`g1+}QFt}2kGkoaKylcHTRA|3}OGT$g zj~{O@{g`FG?(m^QB$lP>ifSqV%gU@YZXKqslCM<(R7=MOI}_>O z)r-j;8Z~bGVDa>B1w@cTt$8bocDeR1=@ z^6`tbQ)r6VaW1}R`Z~8N3h4MqQLb`uUR!mr|KWWu_Sj~mW$l06G4eZQTGB?pDut5>)Xdw?G# zp-Ky^w?KOz`X&wwr93mJqYKMPOYdCzT-ATZF`Mqw_6YdHOasnSf@7nOo#LX9p30uf zaVnrOr1IUG%9y3CEHWv~_UYC0jAO|@E}ylI#D6J(ghX!rCFBl#vSJ;@v1{eEDhwd= z_bmXT-DC+}UWG&wGE4MJUJP!b^86uV`{S7Zd#>tU({x-0@{{K$&MC8HcqeM0f>pT- z@&Zx}A^bM5d#V4-_2hPUG#gFUm&HlNM|J0RyUA=Fi518^U>c|JaM;Bwv)&%?eb9hH zP6=uYVzmi(iG+v+*VN-ws@o*BZ*G{S--{Bijs)l=9WlD?+JzI4tx`=u9*)a!gy{>q zjWNy!jfe!kTFkqXnso6z3xQH9+92R}w~}{jwIAWxQ2P-CCs(jJS`chFY|I=Z=`!NJ z9Y#kvl7!N}dKvl;8QzP|y6um%3ln1QJ9JU}^O&!5k_ODZdJSxVxPEJ?cO0%~YGGsl z9t-H&l0BQByqVCGJCm=I?QDF}uYTM$$Y43rL{bl+NxT;c8xuKJJ^b#IJ`wi(;xAPoWjPQeyCx3UIXiSkHB_L4NL5sO39tuR0{rTYk(eP&JWNc z4#ZeddSb|grKIBQ$s!jYh9)fg#It&eXCPUdpmUUvL>Fn6+^5%+sEg9HrQQKlgNo-s z!3z~b8%Y@5=RD-jUQbVdo>0kz=&;_?!c$^SOc?)Y;SKATZ{COy%Zoz9vfseDP57)c zsTRbNCQWi(jFTR)`VIjh=dUS0DFo%2BH;f4#Ngj+16j=3#dJXmf^u%p8hYYP+s2o}NyCN--6xPB zU7Ut0S#l7wqJO!1v0h>0$477N%lf5@^*jc=8PEke`|Wcpd2gi}B*pq5?pnJ`tFlt_ zKfk`eC1puRMpZb#OR9!cUzUMSP1GrrjX`p4aMtsg7 z52O)z%`{oQ**r%ZAmX}Up`gS>*p6s9$&7Zr~4Q8h$nKwQ1#nu zg!&#tTHew54bs6fZextEN76279OB`lwi>^N!cJ&&E34K)C&_%JFry_ji!C)B(K(x1 
zE+FsnOuOdRTj$x^YEoA(oSnF}tKwh8?IK?UTAO{&QMiiVgSybj;g%~B+y@UHsH1cn zA$H|?4I~*LSEFCr%(>mhStz??UjdK`fJcq&>8&pxk~A>@*{bdup*+$aTzoJ{v3|G` zuTND*-*b5@^1YF~1$jN5*AwJV1Oz#nRcr%z`dEUwbl7o?t;+QA0>JxMQ zM^@5oGlkApIc#OR2ZMmFa!;eLkdK$sNT9T;yACr* zCua{ku{7W|$%wYtH(>5r^d{@&{WpJ3j=_t%`0-;*zF7_XAdvc2=+dBxuIcN0^_Bl;ZgAe@PI_74i4O{cZ)7!Y4L%ff*_PZ;-WOSipH-k%}Tb4qJ zf$>x&cK&NSS$NKk$_+nqL{LeoPB`)0OZ@!&2JtpyE^F79Vz@#U zScY&Y&>57d$RkQ3o=fxW(yTthl#wJ0x&F)4`ikjKK0c+f*-n`>z8N|PAe{=Oc3ZI2 zIRJJW5znSBes6AfzPfkB`iqtC036QwE=*!-N6!4*&BM<{J-8G{{riDxc!tK!-m2G^ z3)nq-ub!fK{5X0;7$6IIz3cuD59nVJOhSJNpd^hUN=h2D6GYysT?xOq*b96Qy0O3L zcqdvQXT`5L5@5n~*D%y+&kxi-Tlv8x`51?}w6;dVWS?21ea1@CjGZ|=htzwArFp2& zlpf30XNbm%IU}T_eZJ_!?S1HY*9BJxuN4adG`5-^tN-dUY#7a# z09Obb`nD^K4ppuZZU%amD=tfijOA(L@#M{>wW`a5Zssbku9x1~UQF zaaJE!2Xq;B|KTO=t+Y97B1Q@!YIP5l&;G1^-4D+}Fq?0R+>r)LildT9%)$2T%j8US z#__`E!7sz3dHU85zoyNBynHEoB2ns7P;cM8dv|1Ajy}YqJ8?cuyG*W!^o;~~*oQ&h zoKOG$`>%lteD(Aq90`@e@RF2`k2*V!ZK4z#17%3}p|m2d{dDu3@#ae@B{MG!4lpQRL0&}&zjsFQG4n`d6cJ8U$je0@sHR)A<#Uiq$v_Re} z6kAXl+*fIKwC1*H|OR#Yl8@KA> zW+eGgzHUN2#otj%6A4_x8d640AkbacoOH2||Bypt?m9a2>aT6jRmMSoBWL7D6HSvO zO3DIh;M4W#y7t8e`n$ZCJ|^iSxvDU}FhxhG9#il@phL$NX(h6h?kdfvaQPeQw%PTv z9w+5fG#h91IBu`@neynvoC^Ua@%LN3Mg%>T{o`TLm32@>3&%LM$E8o+@o@v@=s=@5 z37_bLj7Jz3A)~;dGrbSLquL#U7%*I8SMZ|;T3IrdaO@3q7d!F%RyIT9LJTCU1=6Sc z|Ml!D6x6PoY^OD)2JU%rQ|QwZ*$XJn!f2k1l6?#%BfoK!IAjSpqa>|*IkSuZo{^kl zXXz;N1M!eEI_$)bhTFq;Q)^0Fiei3{Lnb$nDFC-;zkU7oZF+SHJ(X}c zuQ_mCEA}%~xNs>s_7^+|H6uM=?P+_;8%W0lsIyHAy&=m+#rRhQrdjV#t6?2kNdUysCrd|0+c^eXY$q}^)QA%rR z9x|wtLn=wV1@=-2phe4crD?dyXyUa%(LCP@)1ItyZ($*%zXcjBQSKWHX>~CZp6 z>SKTKtT4rw>GWlDK8fYRF^|T0qdFDE%{Q&udrMh=-CW@3N34iev8?>osiOyxO zEH-Mv9B$l8Zr~!+Vu}(o-`+MV&2h5F!u161ysxAzJkM`S5rNxH{O1hmnjsAa{$3(B zDLln6+IK81dIPcjZ^rR){LIAx*IrL(lk`A3n1zz!V4;POq>AGM#M@hs?Ou3*Y`9oU z)y7Nxu=TAp7)e_ky>by`Acm08(uD#Nai%hxu3wnjiE8TMAzWSI0t(YL6{oLyF&<3i zb6`+?-FDzr8Ps*6;FC5R3dZ<{COAh)b2lAhlp6QD8?Z>Ksf+*ngEi#DbWwxhh!W${ z$5)%N`7l(W0FC!3pkXNz-4GP{9qwGS9^Beab%F22pffQTUL0Zfr`)`GQ+3V%d^7ZV 
z)UF09l#UzeP1xM2%SwWkricvh!Hb_{?XmzGXp8QEU6>5`(I8FRfd{;X%-l*+^SNC%#v&Mamhvyro+`;is2}zVr}B0F=M)vJ6QRDu8$r z52UUL9V*+m(|%^?S1Xr$!=`;h&AZKHG}|;a+q9TUVY7bk9zFhfa9dG)ULZ}$3FJ6g z3Dpod(tMLKXw9(q#oN?T6+eu?gA8hC$Q)-Oc*K#(-M&^dTQp01rxLe#*#v7(dvA_X z)l{-1n(d1gF6T7T9{A7xhu%p*H${&brp5&qHbQ{KIi4_$hz503 z@@N~3!H%fZ#5}GG6}h^&veNzB1R&M%(q;|baPOK*sWF856d{f z63IrFXpHXXyDy>~us%4#OLF$-ey-|hmSDu1r_1RWm2}!#2RVGjyF{GLNavp18_cQ( z_nbj3)~p+`SrKXYYzk0m`xO1roKIMD8JDh4-PJ(Zdl>#mnv8!sZQ(Huh;6l zhQ9n&;;87n=u_6r_qH|F$xH5_no6eq20C8aa$X)}V@H{SgwSdKJ+6A1bi?d3kVP1d z*hOEc{?;giP+DJ0WZeahlm1AS41&BHnQlj=^Pl3Mdk*2CZ68v>xOw5oCmEsi-$)0r zP>kaCB%V^R_WC&?J!_BRS-yilWqGgS#*(@Y13T$011{8UGJWlg49~f36Abx(s{|UFW29u#ktJMa ztP$oNz;#kqfduu42IV5L(D?l>{i2My2-*dBh3V- zdhkLkA$s>#rjsD-E6=9*c_p`?aj9b%*p~D*W0PfZC_3Pm8UCgG3cfPCrteA zg8*M_Us1m=!PcTt(AOx$?M8U%cIhHcGr0vTF$AE_8Dh7-V2cU;(q1zs@xh@M7_i2Q zu!pc&Mku|pvKE7znTY%|0ZR@c;J515op{m~FrYAe0Hr9Vh&F6eG+;;FO3ZY=t6dv) z6rM=1G8zB#a{%3Rqec5ZzdCFErNf60Lr`ri>JZ{`+JM!n%x%J&QHQOKjtTxBQLGZ9 zJ;D!z?tg&V)iwl@qTb?~lMRUaAD3WCmmnb^UCQ~TT->1Y~uWHwMzDW=HGKCbp zz5Dd(Q*fmWR590r|5leAT)y=+Ew;4$hW~7{79*<(01!5aiuFrn6E^FAz8fP1 z-D>_z!D*a2Rb(WzN=8yAH);!-tNVj2gR`iCLOBEa)HVNbwq^0)9#zbwD#5J@h=447 zDWGy%pp>1uzC^d(b!tj|m9(kQw<}E6dw%vyv4HV}SPBYJ&yu5us(P zWN7I*Te%>4>8?0o&>(bd4YUSl-ui1Dz9Rrgf0rl9 z@W9mNijrnRz>~O-AwmY=X#~TZR3rsyff5OLg$B*2jii{AqGR^g6oAW0 zDoql8lukMeVo*186b6Q+*Q;BDK1_#GK`9=_-AILiq#(R5T2^-E=vlKf-74B)`2s`0I66HUvRGQ>t^3baA*`sxc2r5x^v~q zDDo^@o+0v*5f~9%X-T7n^7(f;Q%-VV4nccQaff9j{X5AW5P0R$Ponskp!5)$^&k_u6DwDQ0wfj?Oc%tn5`9QlY~Kw|PJ!}cda1z?el0_3$M|(d{<_ua*@)v zi$riF+3ym@$O~Z3XYG5(vXPh04jSgmNCiY|_y#glR0)>Vc%u4u!J$-o`f&1=lg}-i zjs-JuTlpB`uqbei~gV&2Gqmbn2?K3ffF!e!RCsU5y^4xk0si&U#TkdiqZP^ghJyr_WV z=v@v{p^A$;IR;)8X?GZqJ4?VneNvZEJn0pqXLEUl;ijmcs2cLh=AqCNZ^Dl zzx*TPg}})CcGU(TIDJz-^z~b&z)KG zPNKIM{z!_#W_M*PH3|{lBhFsb1Z)|&&^ezN8W|?Sm;A)HyImzX!&U2c?CA3gS+61( zCO}9K`uMU{XzbNr3Y3;Bl!T!{2oJ zw%5?1A9t@LUaD#62{*z*f}f_rT7O`Gnwos&d+*`$D@Xia;({1 z&N;jN&83bH_+j&(wdCqv5As`enB=Tv()MXe{C=QJHnSMW2L2&$t84W3fp4__wHRYD 
zdj-g{!;KM<6VE*6!ZRXKcsYl<=$k08U%x&xXD%m%n--w)&vdy_M%EH5DSF9ggvLSo z2y1RjWlic^f(@|{-OxN7oFb4G4-Vmjk=dHXZu#@i+g!{Wnp+}qB9~l^&LO7@38YaE zCunW09cm$Nb??>itj+sZS2XsVX2o@gu{ni|(a@o@q0gk(2#7Eof&Cp#MPEAe!SnQc zr@FLU`7f@z>fpV5-KcoE>C3^jd;6h*kB_ljULZy7u4MSbgp>J{7>?G@iFkR^d1q)Uh7F+KW7Y7LyNg!XQHhEN3YAf1sx7EL z@m3y}coWE9ap0D`s8IaNS1yl_DI_smbMXU0c$J#EucECmREBp&A&5ev90vliK5~0& zt6xe+LeJ%jkWT{EN&^#Ua>A4Q903)l&P&xLD|RfSP0u5lqInE8Qkb?`sB#haiWU_W zw-)=vi^29G2F?MZs;fAYu1&mAGyGf7KMooAwQb#69sQY!G(q~@u50^GYaiuF z7OGedN)v!26aa}gSCn@npWzssBq$BjsP_g*4Pz%MP*ap+F?y{!nqD5ZcZb7rim3mP5O1Gd z#e|02z#tyJe%X$zE&E8RqNu0?ZOJb8>5)}ftAjt?8LJrnD1E9b>1^O6TAQyyS$4Xu z2919d{-cAYVR0^&Iz-kSC*+Oy)Bdcd_}oU3FNz$swvQT^>gMTm)`eJ(%Jw8cqFV(! z@%EZ7pUZ1wLphH|)-{-Bgk4*e3%a2-y8b{Zovx|ma0mbCs}cdS-_P}xS4U_E63d|J zaAkG;dpUJxmRLn#ZkT&tU@vA#97fAv$+Jn*^yL`*U1X4pu1M_QRoKwjxn=0sIO&Nj z4M7_1AtO1)Cd5oW7a1A(?ELFAx7-9=jOk&K#+C4qI!cnI6cgl<(@6znmoQ}`lw@$M z+I?eSWY_r=Cc-@*q3F~a?sWaO?xXxO1rX6kqC76abmj9kX#IMXj++uj1k%Emz5qq4 zso*4faHJm7IqZZZ6u}iW-wm&u9h{9k7cHi4e}Gd8XZodX&QdB~xehuYRv6HZW9uL_ zJ9cK^rvQ`ewNZS{Q@v#QG-OfH2?FH&B%#uDQ@(md9=2+bmij94ee-r-D|5{Si-y3L z!(3Wr!H%ZleoGL*izGrYc5o8u^Fd$vutJy+`ddJsq?Zg>Z=ThTm&(%Qvhj8YQiEQG zmSwr~6P-6mO8GarFv$?f*i4N(Zrx|s*RIQL9|5}i#KJ?{FF(GpK4}f;wD=(+FymKs z$1|N=D<}gGU+8e-bBSf?xQ8_dvgK{BT*(2vFgIPeq^sqMAJLiFYjRG_`vu<+_c_PJ zYN{v>HRe|R87F4mo}`kuV;^bAuSIDo`5k@KfKIW}EQu`T?n00S5kF}$D)y8d1}^qO zrURHBqAp(wL^MfDd09xK(AcNY`;|;7dYL`@obhwLFc@?LhHQ72bB|m>v>Mrn?w%pE}BTkMu0m z@%--5&w%qS`VCdOjS;9o`ci;()>aj8C*7ONEi+%phpP2qrg5$wV&@0K_6GjZ6)1?CMsNFKN>dk+PGLrK0E> z(Gd`Wq*eMoFhZWa`oyVI15RQTpDZn5Q!9|R^((4v(y`TWSq3SWAW~Fn=+OP8=h@+^RIIMXcNqUG?C$NE7QqJJ)UD3IOr)Woe z3zxxeaCfe!YzpUsRo7q6>m^?-zU0Z15L8ya-b9~8xzW&u>M+oOs||^hM_W9D z-6lOw#4##tz{IH8@k4Ju@uel{&3)Hsm3mxo;pFa$t_zu%NTT$m{cO@CEq3C^zRW^~ zhVR&c(zT>v?pLMfNlYDGYJn+KYBCnRkZ*|S@}<5qOFjk@@xH7ky%)1I8ZDxn$#Jq; z_wn7T=sA6%(K(7f@MP*vt2@lG@x8(WRyLg^y&`S-xO-O)bc04O8UhA{C@y>%jWOh{ z-)UVaoPU9Ooc~vyI_?fjSDd(o(uGaW(!k93 zM1 z<;Umu;vSNj3A!$#dVaw{<4S*R0xl^8*Mr7(ZtAF^mq|*kC+;5_AKC*vcRqM7SH3rm 
zea{W*I(#;)p{F-!Hap7-uP#XpNbwNvxFodG(>p0WNi5|H3s?0N&Zp|mv~Tfn1#;0b zH6;=IjRcPXT?!{^s}28J3lm@K>IhOn!{jt6h2N zS>7Lkfwo-O#Tu@txLxW_?S-)~^lO`G$ongAoW)ZhhIu}Qd;LCmYr9zOTN?nXu5)Q}ZyUVFJ zcVmR4k77%Ny2UA5g?9)r;mjAq8L$wi(0Ew`duAlx@K6b_E=CZfWmYC^zoghNPsP+@ zH$e3mWH0egJ^(wQ2*wOr(qjo++WUZXR~8Opoy#RR_t<@TTYJbr@$FGbLz37kljGdK z7Qy!r@J1OcEsUZ>+Z0QM41o#}wZO-ZC8{Yx)ZQ<1-3D>1LV#nW5@R0BTlv*1I6k?`ClK_I8yIWguLX1|96d zu3RgXYG;hF=fVa_B)~bK5mqWA`=5XDGl+&+EEA2j_99?amc0#>Za)Z0eJ$pMYn0SL zTF?q<)@Teuvd_lT5v*UD(8@xVZAw>mi?)iQ-J&uNb_+x7uTh;3F=3S(KH~w!EuqXn zdx*S_a{V})OrVrtkRH%`F^J|I-*<*Pr;lUFc!2=c-;9;94{UoinDw6e3=bu>TrV|& zxT$MS-Xhtf7VXbZ-C*`IUk~9vU+?9kE7E$!eK!M3Yw6-}tHsEg4# z2(55s7KjRMdkmU6NxS2OY;p`2D-Tvhh}JY#h59A1ncQM)x)3Fsn{jX?)Gpft*74t8 zdEAA%g3kx@cn^zhw{Rv15Jb3wyYeia5+Pf9CiP`3hqT7y-c(t9S+O}U#gZEMB7)Yz z+_VE0%SX~i7UXp#D4z=2xTY%-r(_Yf<-eS#je}7D!$NJ-0^6*ds}EQVPO zjT0&3Op&8YK}?C5n}(4Bs0n;{>Yv;r^}wHA%*}Pm6UPc|^$o>bmY;Zz@{j$_!T@jf zo>;)pBtvXL)#P=D>)#Y$O4klKl1Pu)vi($3!6+ZBDewDC4Auc%POn%EZWE94Cs^gFe`=R}fYY51B-fN|n z8?HTcRBDcQ_fGN6MU4P(RdkukCe%W!*o1eo;qbdd?=ZG-2mw{N)z_x9z> z(|%2xwQ7|OkMbNM(*L|R?_lnS&~noEg+BBMSmc6P`@TcAC2$>{ep_~oRL7Y2D+lkou@B`vQ&hDKdqIvhOZQ3~Rd08p>+V^r+SwC<& zkr3>FyPJ5_%8VJTL;v|cg1<5~vvnE~tN6D$|Kgu!yUi8_3O#_`vcMWwhm)<}roa>J z4P!G9B8lmgGk?p>^;+~0!b+l+6$HWpKbReqK7(bvJeHFddXPsyL=a&_WSz63A&k#W? 
zf~6>{1~uFmNwM8FmZNxaB>{tFyQxX1&^J(3Jf)&e{r@cuNym--f91`sU&@4+xVX64 z1>>)I)F(dp57S?Lr7P;{)!UoHX&Rd(M$ z8Yntxpp_zV-Dk1bDX>KrD@4>u!?2Rt6}9NblQVQy;ws6Y8Y!6EuJZ44URSPy*XSIgrv#n(Qe|rP zET#$fl&}b#)l1Qfs>3KGg%Fj+4mc)-g(Ocx|8WlDec+&#P6%*~ql~zDp+axE(nrXt0EQaOOkVu+_>V(5>3K$j6OdhDrNcx z_op`u7D|#sB5MJ>=ykCf_p|Vc=zzt7FJpU;x6oT&%#iz^8e6a%nUzS(=l4tpIZDkW zZAj3i+?xBgr9630U2(BAmDVW2j3D3yzwM-p1}e_UTS#52FdOQPa1}lKC5*?y+Rs@# zMd(8@w83|F24@&QwHwPHTB-FNeIEV9>1p|_vIT?BC_ToiQ8N(Up3bg%`QpVbVs12z zWuE|YC4&}>5FF^r)+yRN?g2ng7*rECg@Za0FQ*($Rduy3fT1{33h62)-fv{m3`*|o z1C{*RtJIlpXX}UlAz~zmB+{)tr5~Y*q!A@|;a8tCv&QeIW%&Z`7i$fYRD$BD}@JVSQ1 z<;Sx05(1cgwZMc#hF&y+NLtC}?UzDW2;DhP*v$~8APw5+p;~(T&_?R6aLFVRi{rW8 z(YWwaJ+<{@wwNt9Sikgoh_2nrRjXq4diCtNFejUM3xd6agNQP*bNkj@hha}zC}{MN ze*DJ`j;iU$-TnBz>^X7lU`*wi{P`@=Bqut3k|stFY2}1QF?Ja4t_;swi0!GAu32#Y z5)?Tmm!ktq8bKSX%NoC`{nGd()*T_AfQPxF2S4+vipPZHyUT0yddRV-DT1@24EwMf z$+@^1%C#`8xZ++YU;3U_`FR)PJec4791}&cG{h`W2pq;lO(;P)v)Jl0cqrB?2dq|4 z%Bc0Y&ZAUPM&J@B<3G$IYswx~lM%)kdymH&ZHGQ<@CKeQigMZX2|sDr5G_{U)R?GU zyZov?uS4~8E#`k!#ziBw+4hfYTYYJ<*_BpPQAH1 zP{5AN@)!7W*ys$DGS$;8p;D8}*6J{*ckgT77YPQM8M_2&BH#TH!bMZg3Y1YY5Trd+ zOx*k7@5U_GD}kqtQ&)J)5um#aE>zYYUqI_8Bh6cm}eF~rpy(-WyeG^L)2eV~lDoAxs9$YaiX9@yF;nUgHr1CsJWTxXlH&)WqhW*cEM<-H^8dN63A2$vj{gHaq-bC}%S9ro z5+AXZo!>$=WDs%tI|rP6`sG&(pNy{S-%P{Z^&qF*^$y!2=bzKJ4|!ZDuzu1_7hunp zwR-WNhT;tx)RoFZ`8;rV13af(tl7f$elnVeM#a|vMUXpnvEGLzroq%>3F!K5S&)eMpmk+Y|Jy~FQP+KsiUK#rGsWdHdVwO#*=`X5<#s78_Gvvl2lSzc~Ves z(k_8Z1wg?5JyUbzJ;m#w<&S<8OIgH?QiuRt?3F4?*IBy`M3Xyf*8M@*LVuE@!`Pu8 zU6gZ){y+8Q_Ga2o@Q*~r=1N_`n%E{%V?y9Hq7NnfuwjnW%B4ESj=g{c4r68GF# zH+8yZyg(tD_P!tCo}VDYFx0`7`==5Rzm#r#L{h9&=xn$-*uLHtP7As0Gyo0G01`NH zmJ_iDs(Gv)MU|$-f4uWp#l|&r$T!u1%a&IgH&Pau zl7S+l@0nW;wyG%tr6{p@kR>UMVlf(sO)v|p$MNF(B(obh{e}ph(IAk}{ z57w9MDyN&42AKDL1C&g_JtDvYjp_qjLden^a>sT+{C#r3 zBfJ6HLb4t!Joa@b=Y1uW;--)~6>`0_Ru>t1P()t#0+e`yQ#xb*U;VOir(=wB1k~S^ndV}9{^nBsb0(ujkW1@^JUAa8A4{`7 zT$tc@JE%&eawmyk$Zo2}5hh7&M^$7~!6ZudHs92RW}WSD7n0V>y@J5T6vsmkUc*7~ 
zB>#F1FvKi5v6FYr4*lXy%Liy(z*tMtHpV!C5Uyo4l#EPpldn#HT&$Tujdi{e?vQQj zFdktfOTy<_S4ItRlgFunOBbatmRm`**sb+Z(bPXjv!NL7NTDrc!?WW)^4E3ZrQ{tk ztQJ?CDt(@fnDR^7(`eE?$Iiu7ZxJSwJ=WyDeD}UaBG{^BO3nNZj zQ!9&#cA;wFWs@gCowMw8{?#M$?K=OdMdju1be#ite-&in3d)f*MOOb^l5u?%5)SOg z>E`%N_donuIY-)WAKWrP zxA#APY${6n`Sdrx9cuow|5sC6hkf-&(ygWwkBn`5MBjR(e^8f^*L`YJ_Eu^evuf`JN$hy}x1PK6N^JLLimnNKQfJ>`E}wN0^M{2_$PAv> zL8gR)DSKTVRQH|v`OpQ7b@b1_R@rf%m5)csb~~r;Uqp5Mm`lWzT`B=clN1U-~6)maw$~2`>tH_nu9?qK}|oY zdFoyUm;b)oNJ-lFXqTRHYCX88Xp9m4FN>@NXO=NNJn>6^7)b!&WiyKjno6SB0I8eo zW8Ui9Qxu1)N9(2Z<>l}D)jV@0(UwUYZnce+yghP4_I5r(_M?5hZTydi;w=U;GSbYY zXq=*Gg7C)WXkS8-ZQA??XkAUeFBfIqbY40?UP)n|En*We7$kw$008b9)KheBfnE=d zMjR^gGutiOK&TPIGK?T);KYu<9c1X8=Gb*@XyWT%?-;&hx27M`0z2>%Ia|=L%di}X z+B2(Ebd^vBr3?sgD>1EAtto&1^MX%DD}HD-EQ|n|dgaRFw9Wr{r!wu0v}Ni766@>N zu4%2dg*YeOcG%HKG4x-uJLhxps*9l`TDEMt=EdJlgpTHz`7KnSN?vil%aU;vrwRR1 zHi?|~n372UX#|j(sNA1E ztqJP>g@3hbspcG4Xz~w$or%S29 zE9vf6k}umVr4`;u4p@F?#?y=8ORf(Hn18IUflIMo@*^#cQNmUTILq77>c$@1MujQ^ z7a{l@b~I5^u0WRyPl;_u>cE@za~Czvy?a)83SORXyQ5Ca9?KL14kS^YO1m;;oaKFs zAwy^y8m%>*r?bFv$dLP?Q7o+o4XMy| zPB8KLVu!mFNfjn98+G$-5d)NG^`)YsHD6|7sIIRMne${q5^&Q~QYe{>JO0p*=$NH% zNe0UPHt!g)_XptozN!y}zqAtOg4KTN)G0SVv~%LpC#)TzJ{w7}l$JT6YwA8v(G<=t zudftNLRlodj^)oDU?a2`L}Kgm>h0Sdk&&*Nasi6jiz|QarTDAQ(P*f`5ne0GVyeID+2JyI#p;0kO*iYM1Drsv&e{U|=B28O~d{h%LbFo=m>u8=;1|h8c0$I(Cr^-Sou zIy;QFu=x1=K%@{8)hDrF0NCxBIS@i9VSeP72xwh8l3fuca~?yAq+5=R*S5-)y$XRd z&u5}VeMO_K_AwZ{=KUZ)KYztq!s*bOnhzhw2}?GodDfS^WVnTeeJ_^0?)re{h(8^u z&-+X-0D+3&6HO;hqg%l^T&JFL_r;4BLHd|=EwqwcE%hZe=BtYJRb?|&8GMD!2miBU zk9;Fx*ZcAHwyLSLc6dbA5}*5&uJ2BeA1v(AuJ8!n%dG^#lDp*t&YCDcgg6JGq{d$# z-bSbkUP<{|PIXFh{CnU;haE(z27JU0{ZD`(IcergGA_5CyPo{)18#;Z-h4uVDBbxT ze_Nj0YQ&fCb!vn>{aHxw7wirv5B7HWWL5)-5^tDVU(uaP&wg0I_Q5fY9{>`_D6k2c z;^8d)(pzAT>6Q_8RfZS(0kVcvQ-K@3y#M9B@BOyzWHZIz>NcbHa87U%5SsL?@4P<; zQ7U=iC0e&++RoWZEEl*Mo0}QP)3~cg(2LKC5e|$!?j|Asu+qHx6iX?RGWb zhwz1BA)bHUXPR2Zr^|XXa2%`@4*m#2D~!4*A(}L4B7-}n_YJr8W#t)lAEE>gkn{C9 
z4ZT{Nx@}v7%z-ZdPkCn=R%N-qeafuuRNI|XirTPHNE~tur%W?ll7$K=BF;GCgo-(! zSZQyX(-N2BkY?h*pa?RF1A7x{qEI5B;!q))GMEFP5b*zsilp-`9Oz=XIXv)oiuJeGl|F!V!6ItHUZLBQLxJ+z^RH%OM;s6Q>ke?*R$TpRCU7 zjb#ZFdvl3=6wmqf?ef%I(RA)qb)-(wnx9guCjYte79Y829ch3l)v|Hc2BSG^JIl6Z zOBC(3#Nor>Gm1{hq%Vh9pC{Y=fIdft((am>mk4+?LLR7Z^xgTas$bi}}qS$qI{9Jw&z1HG=@ zD>kDh(bzZNdWf#*5DQ|$!f}HJ4HDsF|8%j8{vN&i5C~Thur4 zM*ofE7`6~pl*~lR$%N8A*z1T z&pgGk*yH+k{>$T=QnwhK-XB#m zGlk?{lFS~uD8_r zyXrzj!uuWxlRMb#dAg4oZtO^vcO>GtWIm|$th|R@^`M%r(1YInrDyd))JjL~#y9+$ z-WlmW<^736TPJnm?e;W^M=VW)@dx(qeFCu4;Vk&yvK=6mGvFoC;=l6RE2}8}8<$-u zWHB|#dukJqJ-@tkAw-;aK`DT_WCVrId`4cRbq!r<#^>7D_!M%on|Pw{PlNp>(%P(g z)sgVN)79hp?eXC8q*OH3?5a!A@r_^cY^t9Hb{xkFL#o|kr1j4d$65kt;+@VVzcK^n z=)CHDKEJm06TfNu-NxzLbsB~{Tkpd7w$?|D6nU#b)6NkcM9k}^kKmR6Le-xhrjI63 z3`-T~tp$bGojkW%>!>|}T|POG6gz@X(yvc-9jwqapo4%}-HqCmW z=hjE4fa=abiuoxIb&NClE2)z~_DP#qi#vDis%Lw)Q+L?EnH-NF(9FZ<(h}A`m0*GtiO-7ZUgle|L-du%OB~_ z^}5oM@KN&$xG^ts;R`k{8_9R3EG=M!+}-G2n--3-2`E5|V&Av#&5#%{0kW6&ch*?{ zDzMG!>D%{*ueAyI%TlG*H<~Op+{L16yW;rMeh<~{h5c=3RPq9e^LyJ_@*EULz zpWLn{)7M$wZ%7X%IxU{{*^xBAm(BPODj>4SrH6M7LAC^t8Qwd4bXl1{^lJgUazotv zu84@d`Z)}Ox^($*adGPWV!!2Cgp_nhaE@m|7UOsBd{o)?(1d30t8Rj6wNVJstMDxX z;EQmeydar5=c-})svBYEw~96E3&blYvCIbjB$ z@ws^-9xtCLJbNv#Ukmw4dgu9MB9;tV5y>#A`ReaMdw>(jWy1!G=B<}YX+ulK@#C12 zOWcT1^ydfmoDEKFXC%fAl&gaS#^I-v(?NkTYIRdO}{p; zeEsdJRY(fz`j7*7;!%(pG7qPo$p89giG`S5O$Ug?25O(-r>;X=jC_gNx{N%dAy=>5 zW%!4k&pv+g#PQNR>xT!9wIuRnMbHE`O>&Qkii(2!?#i6|7H;^+4drwbRYHBHfjtYE zbTzrzY6LQ4_T^L`1t%3^qXEp7P_3SD4xEJ6r7QVGt~FtepN3vmxgs@*y1dkDH(PF4 zOz}sQAIg1&1TfI0Iv6cY+`rl;{~8$dWT)>4(^}nM%(k&l#>LF>EUG5&P2WDF!!&BW zCv~DhXscfK<(BIZ8L)>7i62oHWrST7`meL?(%vE4nHUtRH{;NZo(-LpnD#?EVUoM_P3MM7NGwrhbwO zr(U=5i2)N2TWhv{(f;u7v`BnhvOUGv!ASpAk! 
zB>epI#NblMW9;#QoZ?fgRK-mvKC{y!@mR`ipv^JeAH z5^sE?%v%6qxO4UYX%o<(irroPONH-Wu^ji+zfgCr<$SdMbNnL930?g|zpwxMhgF47 zFu_N_E1j%uM+Udn{fkxU0>VRMZ}Dn=Z*%7_9(W$H-eSUX-)3!$ui5l6)2AaDx<_y` z?SmleMW5qmsDv_;O6qiMYFaCK?c!x2VH5Ocl{px7Xa>tJ z;$NdXXlEqwr6Y(Bzn=q-frN7u>6<&93(a3SckbNpj@Cnxdob`@Jwr`(W!uswL(^=Y zx#NW2%%{+xG=LZYn!_*b{S@oD!U@;#9a8qJ&L#JmLsk~GYY}~6+gow--pM*eqlLb8 zJc*Q~cgqIWeBC|g*pzYY(5=$O3|s0qj*(hv%K>V38}5NYCXZ-Vy##F5D50}}6~^I! zCNYoUo*uQOqU@Pg0lmnykC9V*JHA@;`^l^I4MLFz^K{?qQic~#wC$XmC!DfQNzMkB z&G1uF(hpD9JpTflEMI_DyTQoG=^FExU}y%41?qu=&4`R01;?jvUjkClm=1kXCn&)= zrg!P6TVDecf3kEnreP_=nh2&DC)Ng1u#E;MCnt+v^ZH@VU`mt0G!SIQirRL3ykE|l2Ff*s$xLV#U)*Ep2tsT+ zWvX+?yaYS1k z<#njIuAN*O`#1Oo>_9bCxU)4TvL$A{S)DTJ&#$!1ET~=cKL0C`1hxMA4=ssq?Vz3u zXMA%ajYaBw`+1<5d$)c%uwK&_Y@PwPQz|A`^o5j@T+yoL9qgOqIXJ#)-N$2S5Il=X zD_COu>fZC8_T)+BFf0E9o1^7N6e=Q(cW5@<;&04OqzzrXvu2bi4LX=wVfg$;?W8fr~#PC0Hg zksd-43O(Cu=_e=&0lPwB|)0Hd2FC295jsjscov3>iY(nrv# z2CuaHG1WP9+)}`tNE&Mng9e>7*~?v`>NOC|Khf(Qys8B-LC|y^pH0M(MuhkGtm9g< zMgsOyG5mUX;`aSy+55=>klDGiBt}4xL;EY9J&*A-Ia5HeZV#&JSq%+xj%3qWs{Ty1 zvp$*m(%du>is3FU8^cp-e$wwPx_OCx+TkZnYeEJI%J`c!nUZI!`JM50md$B*5BoML zC|lNI^@N=BHNWW6OtE4K2&C9Yc+kj8WbpZa#6<}%Za?I2k%HwvcO*%s zz(1}?jV4f!lxJ#qKok|?ic))g=n>v0HE;a=@u$fltI z^{ILga6YFc-LH95mrL5*E$JGnB_YefrE_bp@cIvfzp=T}IH}VD4h?@AyHdZ&%@X3R zI=|Lez`rbz-3dr#zJ7jY;Oe0xzw1}0(}l1WzrNVd z!Au?^ia0ljoM&82c|wDvje$oul*j{*1%D9m(ws-npZb{tHj{SMkP{Y?qvaABwbzys zMO+IKS7o|Sy9-87`Lh5XJJ0$)B!yv!LjRT^7FIRxq;`OS0fhA_OVIo{==<0S&1lZH zeSXr!t>s?E%>;ZJAaWu3STq`2pRz?QR=<_r)r}HS_%KL%re-gr(!ClnvHH%~T<^e@ zEeeM11ShLMU#}-lOZzG>K1sb@Sa_HWD-RN=8v%FLur@V+H2d`lJpvML!gR4*8?FW% z;8zcOzx=DR9C@9!aIFcK0ofn2<-gkmq6Dbf4k;X%sDw|s+ez8$)r*?&{9v|M=qb@y zVYM!_FR3o-RC6@J59;&5OK+leAQcURf|z~$`SKgZ@~gQ-oAHZm)k%2%T15~2`M)B1 zu`iy#5(fM~`Jtt@;8W*5ShIF*Iwbe%neS|8<||1xE~_JUwrmHaiO@jA?Bipu=aL@6 z?~nDli$q>fxktt=0)y`fCM8ojHubigZvmdotfUHk$Q&cJ7G<7B(PEwCnQ?$UEJ*>0 zpdqQ#n%I8VH~ZF0Af4O~Xqa;KYJ6fv)m?FkgvF`5V?PY0ohwsm7D9IMxqI^DNzE4{ zOEuEoDiFZ@Pq2%Ker=QFHl;fEwxI{#6e_~Z@O+u6igv#G#sK-bFh 
z6z{ke8PHigqU3pgjFgX_w}u#K_A;|}7$SFQU&@(2g@VSR#CL!~?YY&Gai9B-ST}kV zHs~?urX05%KaA!4(}Xp;vv-;=aDHRL=OD(?<4g2SgzWH>Y2Ir9EUq;3Ef|qlmsZ&J zcE75nVBZi7BN%On&*DBL&LrqYq!>mv`?%#h z9AKorn+=c_$FMa``(#`OrNknJSIrd{Crn0@=rBnQ@|h&moJ?ahZnF|X174p8QsA%X z`h6+pL9S;K#`ExZ4Mr4Ku6UQj`U%EPYZ{VVjhP11kmDw(+;ADF>RKLb3tA|9T!DZn zi$Vx816Z$`)C6q`@aPB&`99`(R23sv=6IJo!t%E_sx2+%h9=O+%JR!EzodUx#O$GI zn9?s^pADugc*pdF51fG#zV@lp27hd@EQ7f3$*+g%A^iTxjZ&7=Eu#WB&-3(b*6}m1|G_1YPsnf z5&gC}jQAK7LylJ#GelZ5Ir%qFdI|uialW5lp~BUWYu#;fE6;;v*^H;g2`dyLqPk@y z+Kg&&rTQtN2Q7P6@-BzVq9!6(OzJu`J#(P`+oo_f4^tMf?T;|U+->|c|6Yi6_rn90 zZfpC6Qo?&%tHuOORPvdsv%7yz69LR&{e(a?J7V?FMzpjjq zthg?K3cO0QEi@U;&U;xKn8L%zGUgZTE{YQW99`r5WAV5OssGC)>!zbNfO*otPFXgx zOI)}7S2>Zts`L7z27fKZAV?FEI@Q-U!mLldu}40Yv8T(2>g%m68q3HzsN^xo3^&h7 zU_ag?F>~b*t!ry3`5z_GXXMrIXf{P&?v?E9gi&7<3R}>u$rFCC{&@ZrOK$4%u+^X` z9jCL}=Ckj@aJ(pgyu0$)jQk6GvH?ojL2iT!RkcLQJUWS{il&2OVGe-pU4qPTJ3l{+ z>R7^0vUOFJT8Z2DpV`p*2q;P(G}MlNma{Jo!m}rnRg`2)RtK2%(JAC~=OE5o}k1b>QeCEQF*fI+%mf zUhDgG4|$Y5gf%f>3rMJ$Pcm)J!c70X?|=boSR@ zeWjsyCT2Z40v3O8>%qU-CK-(S@;bH5WB8d`E%{PeumZ)+lg}VW6&~Ir5p00y2D3ig zDMDk*JuGJh^%bu@S2Xw0mk^TkcIy>9)~jF0cTc3cROU2CLdey&_g?H9Q^u5!OrE=@f*3DR?P?^{n51FUKmt|>a&Cln z(zzhpS!_}f+5Is)*Dd6mJc0hxByMMnf$Z~BR@Ehhj=o=|B5QD?f@L1V0r=K=Jd8|m zh^Xj!3l<3ZM>lD2gt>2AIiUpe1IM!feH*|0a=^-kO>`;p*|yA&-G@7gm=v+y%$%$* z*Kf858mXc~d}a&Yn!r4RZKz8jZXKhk^x}g8s(4Kof(>(o7TDX_k;R~n;thD8DOpkR zxHS?sc^GLHZ&$>nV$Yb1;d?cA^4+H+)@X4$BKF=HRWZgy1*cJqqrWJmaGXIhBI(`w zGVWZYpa|+LP60*jn86MtEO znBsn@*JdIcm~=JjdUNPC7Z>^!ZT_t9A$;Vs9xVaUh%bI{HR^!fWiR-3s6-wuJ3_9U z(&`o!Cg+WNmPa%8bae0DbE$`9hHFbb!`Nl9Y3LM*_PM2} z2pySh20QQkpbGr`ZvW$Wmj~?N(yiWyo@&u?__Y7?ly=^m9jnsIfQi=rTT0&E?~>z5 zVc|axIscFOfJ@u*@<%wl^Uqy82)jS};Dd>~=CEIvp1@U+0CTZuJAMJ{`|PcJxM9WZ zHzt=peLBkAp(i5f|CG1&cA$4tge89qvu=R{u6k{m5mp+Ir z>*C`j;10RB1CO4&PM0KQFmH>tWK8z0(i^Gm^VP}^3vmI% z(!Q;o+E3)Y_A3U_r3;$lsrz&8HH#Mw`VcpsT1-}^yWT36+cCvkTcg=O@bP2L`MW(w6lXT) z4Wv>Taz3sjV{vF57ca+@i@PC;KmFNv$|o!&Tn(u-5TBv3h#TQ7=hEb<2>I# 
zCapudCJGaq2cO^O@SRiAGj~>Lv~39%86nN*XgD0cKcA_AAv-Q0aFXsPy_$DH76XOy zG8LOBL-b~#je8uPTAnbeAZ-uJ=-F6h*11>U@o51c_rEj1&O@AEXj5AZNTY6#y7*=f zHo#_77;21dZu=R$40G@W?egnQC>we8J1W+6anHBj`hM`)wEBzeJf?iobRQeg98m^s zBqeqhyv|2s9`)J`$eVj*yZ5y&TUVO{A9TJEgfG(37qchFr2N%8a(M?2g42BkOR6WF z)n{kKk?^`lr!@cePBON*zRU8y{AbX2{>2_V@u6f{a&X0QZqqxBJ^+IU-E%O~ue^jo z`FOW=izYG>&iqCuWs*1-Y^K*5FWHFz=;F5b1~7#>JY#oww*rkh0W-Uu-6n5Q#`Xt( znGa^n{@@5DNV2aj3_Ho~`o!7gUK!D=N09Sk=kxQsFLckmIWp5P{>Dg%Vx{>0 za@2cv8@vBR|9hSuk3GB_j70L|J1Ym;O{+djBsVneQ?bBNdG(s3N92#b*8z_-G(DvG zgO5OSC^@~Gkcl0l@#O-hB)a#Mjy*V@PFTmiY_pXp%n{!SSXPk1&^s8MvesMvimuo@z zrmXMI$G-c=DWZA%55Y$6qam&!cbi6*ukJbr)d7edqbnwN9-1Cf(((M#2Y9x4Ke)@3 zshg2KVgujnvP3z+HFF@s*-?d%eu@^S?nz7GL+8{v&IuKYP=2Old!+lSs)$vi`&QCs z8eK|h+VuUH_NPKK!#;8O=gd}5b;$wOfgklBF8T5dkl)$)QOl3pdpGzz>g8L{e!5ne z>7D8P0}CMZTJEB8xE;z-D7T3E20naj@3WD9-51THDIn)LQMA=E1KQKfBs-&Qo$Mz2 zGr;5taeV72F9JdaIY$^MH@pack}P==yZJN6EP6@N45JD!4M+(H2S|=MdN(ftdlhbkm1`KbvNIt zj4a#nZ073q9@!*;r_arB=pb#ZTY=_Wq|D1q8)$-P?|4>~{oJrH=V{sJ#fL{aFvKK> zexCn*qL@Y}8jNJ#OP=L!bw1Hy1*9gd#vhe(#2yV9msuXp1k(}Y%Py2TUc^|`$BANe zclg*xmYibEi`Cg9Rkwti<}3;rO~S3)=Tn54>pgz+n0gj};DLm!i;KRcU<7LImQ@j) zLGITGUDcc&Ae{GzLs2JYL!pex3o0AuT)X31xJ8=bd+X!ifhp7);%@~GOUK(-s>PHd zLBNa#Y7>or%@7f*4A7~jady=%cDRuCCfO&|BpxJn*q(>rgc!UX(@!y>MU)cY;<1qA zEswe3oRdZFF&>u%>T@!M7p`Cq%SlQve76S0Jf?|xtnhT@auD$0l{u{|&&{Vl)eXlV zQWxEU*yPjw~T$| zOdIcXQpnnmNq)t9R)K`mfI1u)*?qsFcfGn!9i9+^We_ za&n0&86)2${YInpRB#9z5n;TRcftRAP0Iz#yAs(|=6b2AsirW`8GTXV)3r41BWlQL zond1?Wwt@nB3CRiA7?CQb~OwaNo|d9v}=n5Ypg;7N$dPst-*32PaL9F0DyKknkC8k zwL{KiILUgIyvp;DlPcd(w_yt_kBnBcFwv^HF&Ro~4Ya7-`+-E8l=mKYiS-VpAUsG; zFQvB0B6uaw7^d18^GsNKa*90PB%m_45r|MbT0)9kWXh0{R8|*& zsSwDoUh50>+vyCm(SN2`5*ynnp{~W~g{o=mh7B7Ik2|dq-Kv&NPpJMy;j!~wQJY+I z1G_q?U28xMX z@?5!~a*vZge`R12Y9OSLBUKaO7Q=<=J+Qjb2mx51@1}R9Js!>+crWto=$C#gjeWk^Ld`zvm+LNd5P|*Dssk|o!;M?Ch@lyMb% zIk#@PM+-lrn3m0r?D0ghkOMIu9ZWXWK;`>XWW1sIK~n4BO=Q#xM5L#zH#aRdmcRmt z#PEYPwvMuO|NX2Zf=qQ0X768>=KSjSH{9N4a*v5W{==fcCaDqD(PHG}UdRmj%0RSR 
z43_C#eTo9tg<7`tmF-oeO5W#O8c~Tfs4CJ- uT23K)^IqEYkJ>=yOYiNBp2(@Hb zflaKPkUf{>xWI7Jwnh^!5S07g)o-v$QXk9f9P{MqCcu^IY(ji@jPdZZxE5hi zKlPh#J#uV@tOKg@$1>L`wa2V$t`PE$erSO}11_zx_G$u$PzrCsJVDZZ`UesmSmmZJ zYyN&yET10m9}APH>QeN-jha*~ysG~vv5-#K^B?BF`v1ufQ!8IcxKP`FTk4rD`u4_w MpMG?>|M)Nd2Y2> Optional[str]: - return f"Text delta received: {delta.text}" - - def on_thread_message(self, message: "ThreadMessage") -> Optional[str]: - return f"ThreadMessage created. ID: {message.id}, Status: {message.status}" - - def on_thread_run(self, run: "ThreadRun") -> Optional[str]: - return f"ThreadRun status: {run.status}" - - def on_run_step(self, step: "RunStep") -> Optional[str]: - return f"RunStep type: {step.type}, Status: {step.status}" - - def on_error(self, data: str) -> Optional[str]: - return f"An error occurred. Data: {data}" - - def on_done(self) -> Optional[str]: - return "Stream completed." - - def on_unhandled_event(self, event_type: str, event_data: Any) -> Optional[str]: - return f"Unhandled Event Type: {event_type}, Data: {event_data}" - - -# [END stream_event_handler] - - -with agents_client: - # Create an agent and run stream with event handler - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are a helpful agent" - ) - print(f"Created agent, agent ID {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID {thread.id}") - - message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") - print(f"Created message, message ID {message.id}") - - # [START create_stream] - with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler()) as stream: - for event_type, event_data, func_return in stream: - print(f"Received data.") - print(f"Streaming receive Event Type: {event_type}") - print(f"Event Data: {str(event_data)[:100]}...") - print(f"Event Function return: {func_return}\n") - # [END 
create_stream] - - agents_client.delete_agent(agent.id) - print("Deleted agent") - - messages = agents_client.messages.list(thread_id=thread.id) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/sample_agents_basics_stream_iteration.py b/sdk/ai/azure-ai-agents/samples/sample_agents_basics_stream_iteration.py deleted file mode 100644 index 94437ee16d8a..000000000000 --- a/sdk/ai/azure-ai-agents/samples/sample_agents_basics_stream_iteration.py +++ /dev/null @@ -1,83 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use agent operations in streaming from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_basics_stream_iteration.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in - the "Models + endpoints" tab in your Azure AI Foundry project. 
-""" - -import os -from azure.ai.agents import AgentsClient -from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import ( - AgentStreamEvent, - MessageDeltaChunk, - ThreadMessage, - ThreadRun, - RunStep, -) - -agents_client = AgentsClient( - endpoint=os.environ["PROJECT_ENDPOINT"], - credential=DefaultAzureCredential(), -) - -with agents_client: - # Create an agent and run stream with iteration - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-agent", instructions="You are a helpful agent" - ) - print(f"Created agent, ID {agent.id}") - - thread = agents_client.threads.create() - print(f"Created thread, thread ID {thread.id}") - - message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") - print(f"Created message, message ID {message.id}") - - # [START iterate_stream] - with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id) as stream: - - for event_type, event_data, _ in stream: - - if isinstance(event_data, MessageDeltaChunk): - print(f"Text delta received: {event_data.text}") - - elif isinstance(event_data, ThreadMessage): - print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") - - elif isinstance(event_data, ThreadRun): - print(f"ThreadRun status: {event_data.status}") - - elif isinstance(event_data, RunStep): - print(f"RunStep type: {event_data.type}, Status: {event_data.status}") - - elif event_type == AgentStreamEvent.ERROR: - print(f"An error occurred. 
Data: {event_data}") - - elif event_type == AgentStreamEvent.DONE: - print("Stream completed.") - break - - else: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - # [END iterate_stream] - - agents_client.delete_agent(agent.id) - print("Deleted agent") diff --git a/sdk/ai/azure-ai-agents/samples/sample_agents_basics_thread_and_process_run.py b/sdk/ai/azure-ai-agents/samples/sample_agents_basics_thread_and_process_run.py deleted file mode 100644 index 812070e22f63..000000000000 --- a/sdk/ai/azure-ai-agents/samples/sample_agents_basics_thread_and_process_run.py +++ /dev/null @@ -1,64 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to use the new convenience method - `create_thread_and_process_run` in the Azure AI Agents service. - This single call will create a thread, start a run, poll to - completion (including any tool calls), and return the final result. - -USAGE: - python sample_agents_create_thread_and_process_run.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under - "Models + endpoints" in your Azure AI Foundry project. 
-""" - -import os -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import AgentThreadCreationOptions, ThreadMessageOptions, ListSortOrder -from azure.identity import DefaultAzureCredential - -agents_client = AgentsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential()) - -with agents_client: - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="process-run-sample-agent", - instructions="You are a friendly assistant that generates jokes.", - ) - print(f"Created agent: {agent.id}") - - # [START create_thread_and_process_run] - run = agents_client.create_thread_and_process_run( - agent_id=agent.id, - thread=AgentThreadCreationOptions( - messages=[ThreadMessageOptions(role="user", content="Hi! Tell me your favorite programming joke.")] - ), - ) - # [END create_thread_and_process_run] - print(f"Run completed with status: {run.status!r}") - - if run.status == "failed": - print("Run failed:", run.last_error) - - # List out all messages in the thread - messages = agents_client.messages.list(thread_id=run.thread_id, order=ListSortOrder.ASCENDING) - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") - - # clean up - agents_client.delete_agent(agent.id) - print(f"Deleted agent {agent.id}") diff --git a/sdk/ai/azure-ai-agents/samples/sample_agents_basics_thread_and_run.py b/sdk/ai/azure-ai-agents/samples/sample_agents_basics_thread_and_run.py deleted file mode 100644 index 53aa59c16d9e..000000000000 --- a/sdk/ai/azure-ai-agents/samples/sample_agents_basics_thread_and_run.py +++ /dev/null @@ -1,74 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -DESCRIPTION: - This sample demonstrates how to create a new thread and immediately run it - in one call using the Azure AI Agents service. 
- -USAGE: - python sample_agents_create_thread_and_run.py - - Before running the sample: - - pip install azure-ai-agents azure-identity - - Set these environment variables with your own values: - 1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the Overview - page of your Azure AI Foundry portal. - 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under - the "Name" column in the "Models + endpoints" tab in - your Azure AI Foundry project. -""" - -import os -import time - -from azure.ai.agents import AgentsClient -from azure.ai.agents.models import AgentThreadCreationOptions, ThreadMessageOptions, ListSortOrder -from azure.identity import DefaultAzureCredential - -agents_client = AgentsClient(endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential()) - -with agents_client: - agent = agents_client.create_agent( - model=os.environ["MODEL_DEPLOYMENT_NAME"], - name="sample-agent", - instructions="You are a helpful assistant that tells jokes.", - ) - print(f"Created agent, agent ID: {agent.id}") - - # [START create_thread_and_run] - # Prepare the initial user message - initial_message = ThreadMessageOptions(role="user", content="Hello! 
Can you tell me a joke?") - - # Create a new thread and immediately start a run on it - run = agents_client.create_thread_and_run( - agent_id=agent.id, - thread=AgentThreadCreationOptions(messages=[initial_message]), - ) - # [END create_thread_and_run] - - # Poll the run as long as run status is queued or in progress - while run.status in ["queued", "in_progress", "requires_action"]: - # Wait for a second - time.sleep(1) - run = agents_client.runs.get(thread_id=run.thread_id, run_id=run.id) - print(f"Run status: {run.status}") - - if run.status == "failed": - print(f"Run error: {run.last_error}") - - # List all messages in the thread, in ascending order of creation - messages = agents_client.messages.list(thread_id=run.thread_id, order=ListSortOrder.ASCENDING) - - for msg in messages: - if msg.text_messages: - last_text = msg.text_messages[-1] - print(f"{msg.role}: {last_text.text.value}") - - # clean up - agents_client.delete_agent(agent.id) - print(f"Deleted agent {agent.id!r}") diff --git a/sdk/ai/azure-ai-agents/samples/utils/__init__.py b/sdk/ai/azure-ai-agents/samples/utils/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/sdk/ai/azure-ai-agents/samples/utils/user_functions.py b/sdk/ai/azure-ai-agents/samples/utils/user_functions.py deleted file mode 100644 index cb1e3d9cf43d..000000000000 --- a/sdk/ai/azure-ai-agents/samples/utils/user_functions.py +++ /dev/null @@ -1,248 +0,0 @@ -# pylint: disable=line-too-long,useless-suppression -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -import json -import datetime -from typing import Any, Callable, Set, Dict, List, Optional - -# These are the user-defined functions that can be called by the agent. - - -def fetch_current_datetime(format: Optional[str] = None) -> str: - """ - Get the current time as a JSON string, optionally formatted. 
- - :param format (Optional[str]): The format in which to return the current time. Defaults to None, which uses a standard format. - :return: The current time in JSON format. - :rtype: str - """ - current_time = datetime.datetime.now() - - # Use the provided format if available, else use a default format - if format: - time_format = format - else: - time_format = "%Y-%m-%d %H:%M:%S" - - time_json = json.dumps({"current_time": current_time.strftime(time_format)}) - return time_json - - -def fetch_weather(location: str) -> str: - """ - Fetches the weather information for the specified location. - - :param location (str): The location to fetch weather for. - :return: Weather information as a JSON string. - :rtype: str - """ - # In a real-world scenario, you'd integrate with a weather API. - # Here, we'll mock the response. - mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} - weather = mock_weather_data.get(location, "Weather data not available for this location.") - weather_json = json.dumps({"weather": weather}) - return weather_json - - -def send_email(recipient: str, subject: str, body: str) -> str: - """ - Sends an email with the specified subject and body to the recipient. - - :param recipient (str): Email address of the recipient. - :param subject (str): Subject of the email. - :param body (str): Body content of the email. - :return: Confirmation message. - :rtype: str - """ - # In a real-world scenario, you'd use an SMTP server or an email service API. - # Here, we'll mock the email sending. - print(f"Sending email to {recipient}...") - print(f"Subject: {subject}") - print(f"Body:\n{body}") - - message_json = json.dumps({"message": f"Email successfully sent to {recipient}."}) - return message_json - - -def send_email_using_recipient_name(recipient: str, subject: str, body: str) -> str: - """ - Sends an email with the specified subject and body to the recipient. - - :param recipient (str): Name of the recipient. 
- :param subject (str): Subject of the email. - :param body (str): Body content of the email. - :return: Confirmation message. - :rtype: str - """ - # In a real-world scenario, you'd use an SMTP server or an email service API. - # Here, we'll mock the email sending. - print(f"Sending email to {recipient}...") - print(f"Subject: {subject}") - print(f"Body:\n{body}") - - message_json = json.dumps({"message": f"Email successfully sent to {recipient}."}) - return message_json - - -def calculate_sum(a: int, b: int) -> str: - """Calculates the sum of two integers. - - :param a (int): First integer. - :rtype: int - :param b (int): Second integer. - :rtype: int - - :return: The sum of the two integers. - :rtype: str - """ - result = a + b - return json.dumps({"result": result}) - - -def convert_temperature(celsius: float) -> str: - """Converts temperature from Celsius to Fahrenheit. - - :param celsius (float): Temperature in Celsius. - :rtype: float - - :return: Temperature in Fahrenheit. - :rtype: str - """ - fahrenheit = (celsius * 9 / 5) + 32 - return json.dumps({"fahrenheit": fahrenheit}) - - -def toggle_flag(flag: bool) -> str: - """Toggles a boolean flag. - - :param flag (bool): The flag to toggle. - :rtype: bool - - :return: The toggled flag. - :rtype: str - """ - toggled = not flag - return json.dumps({"toggled_flag": toggled}) - - -def merge_dicts(dict1: Dict[str, Any], dict2: Dict[str, Any]) -> str: - """Merges two dictionaries. - - :param dict1 (Dict[str, Any]): First dictionary. - :rtype: dict - :param dict2 (Dict[str, Any]): Second dictionary. - :rtype: dict - - :return: The merged dictionary. - :rtype: str - """ - merged = dict1.copy() - merged.update(dict2) - return json.dumps({"merged_dict": merged}) - - -def get_user_info(user_id: int) -> str: - """Retrieves user information based on user ID. - - :param user_id (int): ID of the user. - :rtype: int - - :return: User information as a JSON string. 
- :rtype: str - """ - mock_users = { - 1: {"name": "Alice", "email": "alice@example.com"}, - 2: {"name": "Bob", "email": "bob@example.com"}, - 3: {"name": "Charlie", "email": "charlie@example.com"}, - } - user_info = mock_users.get(user_id, {"error": "User not found."}) - return json.dumps({"user_info": user_info}) - - -def longest_word_in_sentences(sentences: List[str]) -> str: - """Finds the longest word in each sentence. - - :param sentences (List[str]): A list of sentences. - :return: A JSON string mapping each sentence to its longest word. - :rtype: str - """ - if not sentences: - return json.dumps({"error": "The list of sentences is empty"}) - - longest_words = {} - for sentence in sentences: - # Split sentence into words - words = sentence.split() - if words: - # Find the longest word - longest_word = max(words, key=len) - longest_words[sentence] = longest_word - else: - longest_words[sentence] = "" - - return json.dumps({"longest_words": longest_words}) - - -def process_records(records: List[Dict[str, int]]) -> str: - """ - Process a list of records, where each record is a dictionary with string keys and integer values. - - :param records: A list containing dictionaries that map strings to integers. - :return: A list of sums of the integer values in each record. - """ - sums = [] - for record in records: - # Sum up all the values in each dictionary and append the result to the sums list - total = sum(record.values()) - sums.append(total) - return json.dumps({"sums": sums}) - - -# Example User Input for Each Function -# 1. Fetch Current DateTime -# User Input: "What is the current date and time?" -# User Input: "What is the current date and time in '%Y-%m-%d %H:%M:%S' format?" - -# 2. Fetch Weather -# User Input: "Can you provide the weather information for New York?" - -# 3. Send Email -# User Input: "Send an email to john.doe@example.com with the subject 'Meeting Reminder' and body 'Don't forget our meeting at 3 PM.'" - -# 4. 
Calculate Sum -# User Input: "What is the sum of 45 and 55?" - -# 5. Convert Temperature -# User Input: "Convert 25 degrees Celsius to Fahrenheit." - -# 6. Toggle Flag -# User Input: "Toggle the flag True." - -# 7. Merge Dictionaries -# User Input: "Merge these two dictionaries: {'name': 'Alice'} and {'age': 30}." - -# 8. Get User Info -# User Input: "Retrieve user information for user ID 1." - -# 9. Longest Word in Sentences -# User Input: "Find the longest word in each of these sentences: ['The quick brown fox jumps over the lazy dog', 'Python is an amazing programming language', 'Azure AI capabilities are impressive']." - -# 10. Process Records -# User Input: "Process the following records: [{'a': 10, 'b': 20}, {'x': 5, 'y': 15, 'z': 25}, {'m': 30}]." - -# Statically defined user functions for fast reference -user_functions: Set[Callable[..., Any]] = { - fetch_current_datetime, - fetch_weather, - send_email, - calculate_sum, - convert_temperature, - toggle_flag, - merge_dicts, - get_user_info, - longest_word_in_sentences, - process_records, -} diff --git a/sdk/ai/azure-ai-projects/README_AGENTS.md b/sdk/ai/azure-ai-projects/README_AGENTS.md new file mode 100644 index 000000000000..a71db514276b --- /dev/null +++ b/sdk/ai/azure-ai-projects/README_AGENTS.md @@ -0,0 +1,1284 @@ + +# Azure AI Agents client library for Python + +Use the AI Agents client library to: + +* **Develop Agents using the Azure AI Agents Service**, leveraging an extensive ecosystem of models, tools, and capabilities from OpenAI, Microsoft, and other LLM providers. The Azure AI Agents Service enables the building of Agents for a wide range of generative AI use cases. +* **Note:** While this package can be used independently, we recommend using the Azure AI Projects client library (azure-ai-projects) for an enhanced experience. 
+The Projects library provides simplified access to advanced functionality, such as creating and managing agents, enumerating AI models, working with datasets and +managing search indexes, evaluating generative AI performance, and enabling OpenTelemetry tracing. + +[Product documentation](https://aka.ms/azsdk/azure-ai-agents/product-doc) +| [Samples][samples] +| [API reference documentation](https://aka.ms/azsdk/azure-ai-agents/python/reference) +| [Package (PyPI)](https://aka.ms/azsdk/azure-ai-agents/python/package) +| [SDK source code](https://aka.ms/azsdk/azure-ai-agents/python/code) +| [AI Starter Template](https://aka.ms/azsdk/azure-ai-agents/python/ai-starter-template) + +## Reporting issues + +To report an issue with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues). Mention the package name "azure-ai-agents" in the title or content. + +## Table of contents + +- [Getting started](#getting-started) + - [Prerequisite](#prerequisite) + - [Install the package](#install-the-package) +- [Key concepts](#key-concepts) + - [Create and authenticate the client](#create-and-authenticate-the-client) +- [Examples](#examples) + - [Create an Agent](#create-agent) with: + - [File Search](#create-agent-with-file-search) + - [Enterprise File Search](#create-agent-with-enterprise-file-search) + - [Code interpreter](#create-agent-with-code-interpreter) + - [Bing grounding](#create-agent-with-bing-grounding) + - [Azure AI Search](#create-agent-with-azure-ai-search) + - [Function call](#create-agent-with-function-call) + - [Azure Function Call](#create-agent-with-azure-function-call) + - [OpenAPI](#create-agent-with-openapi) + - [Fabric data](#create-an-agent-with-fabric) + - [Create thread](#create-thread) with + - [Tool resource](#create-thread-with-tool-resource) + - [Create message](#create-message) with: + - [File search attachment](#create-message-with-file-search-attachment) + - [Code 
interpreter attachment](#create-message-with-code-interpreter-attachment) + - [Create Message with Image Inputs](#create-message-with-image-inputs) + - [Execute Run, Run_and_Process, or Stream](#execute-run-run_and_process-or-stream) + - [Retrieve message](#retrieve-message) + - [Retrieve file](#retrieve-file) + - [Tear down by deleting resource](#teardown) + - [Tracing](#tracing) + - [Installation](#installation) + - [How to enable tracing](#how-to-enable-tracing) + - [How to trace your own functions](#how-to-trace-your-own-functions) +- [Troubleshooting](#troubleshooting) + - [Logging](#logging) + - [Reporting issues](#reporting-issues) +- [Next steps](#next-steps) +- [Contributing](#contributing) + +## Getting started + +### Prerequisite + +- Python 3.9 or later. +- An [Azure subscription][azure_sub]. +- A [project in Azure AI Foundry](https://learn.microsoft.com/azure/ai-studio/how-to/create-projects). +- The project endpoint string. It can be found in your Azure AI Foundry project overview page, under "Project details". Below we will assume the environment variable `PROJECT_ENDPOINT_STRING` was defined to hold this value. +- Entra ID is needed to authenticate the client. Your application needs an object that implements the [TokenCredential](https://learn.microsoft.com/python/api/azure-core/azure.core.credentials.tokencredential) interface. Code samples here use [DefaultAzureCredential](https://learn.microsoft.com/python/api/azure-identity/azure.identity.defaultazurecredential). To get that working, you will need: + * An appropriate role assignment. see [Role-based access control in Azure AI Foundry portal](https://learn.microsoft.com/azure/ai-foundry/concepts/rbac-ai-foundry). Role assigned can be done via the "Access Control (IAM)" tab of your Azure AI Project resource in the Azure portal. + * [Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli) installed. + * You are logged into your Azure account by running `az login`. 
+ * Note that if you have multiple Azure subscriptions, the subscription that contains your Azure AI Project resource must be your default subscription. Run `az account list --output table` to list all your subscriptions and see which one is the default. Run `az account set --subscription "Your Subscription ID or Name"` to change your default subscription.
+
+### Install the package
+
+```bash
+pip install azure-ai-agents
+```
+
+## Key concepts
+
+### Create and authenticate the client
+
+To construct a synchronous client:
+
+```python
+import os
+from azure.ai.agents import AgentsClient
+from azure.identity import DefaultAzureCredential
+
+agents_client = AgentsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+```
+
+To construct an asynchronous client, install the additional package [aiohttp](https://pypi.org/project/aiohttp/):
+
+```bash
+pip install aiohttp
+```
+
+and update the code above to import `asyncio`, and import `AgentsClient` from the `azure.ai.agents.aio` namespace:
+
+```python
+import os
+import asyncio
+from azure.ai.agents.aio import AgentsClient
+from azure.identity import DefaultAzureCredential
+
+agents_client = AgentsClient(
+    endpoint=os.environ["PROJECT_ENDPOINT"],
+    credential=DefaultAzureCredential(),
+)
+```
+
+## Examples
+
+### Create Agent
+
+Before creating an Agent, you need to set up Azure resources to deploy your model. [Create a New Agent Quickstart](https://learn.microsoft.com/azure/ai-services/agents/quickstart?pivots=programming-language-python-azure) details selecting and deploying your Agent Setup.
+
+Here is an example of how to create an Agent:
+
+
+```python
+
+    agent = agents_client.create_agent(
+        model=os.environ["MODEL_DEPLOYMENT_NAME"],
+        name="my-agent",
+        instructions="You are helpful agent",
+    )
+```
+
+
+
+To allow Agents to access your resources or custom functions, you need tools. 
You can pass tools to `create_agent` by either `toolset` or combination of `tools` and `tool_resources`.
+
+Here is an example of `toolset`:
+
+
+```python
+functions = FunctionTool(user_functions)
+code_interpreter = CodeInterpreterTool()
+
+toolset = ToolSet()
+toolset.add(functions)
+toolset.add(code_interpreter)
+
+# To enable tool calls executed automatically
+agents_client.enable_auto_function_calls(toolset)
+
+agent = agents_client.create_agent(
+    model=os.environ["MODEL_DEPLOYMENT_NAME"],
+    name="my-agent",
+    instructions="You are a helpful agent",
+    toolset=toolset,
+)
+```
+
+
+
+Also notice that if you use an asynchronous client, you use `AsyncToolSet` instead. Additional information related to `AsyncFunctionTool` is discussed in the later sections.
+
+Here is an example to use `tools` and `tool_resources`:
+
+
+```python
+file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
+
+# Notice that FileSearchTool as tool and tool_resources must be added or the agent will be unable to search the file
+agent = agents_client.create_agent(
+    model=os.environ["MODEL_DEPLOYMENT_NAME"],
+    name="my-agent",
+    instructions="You are helpful agent",
+    tools=file_search_tool.definitions,
+    tool_resources=file_search_tool.resources,
+)
+```
+
+
+
+In the following sections, we show you sample code in either `toolset` or combination of `tools` and `tool_resources`.
+
+### Create Agent with File Search
+
+To perform file search by an Agent, we first need to upload a file, create a vector store, and associate the file to the vector store. 
Here is an example: + + + +```python +file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) +print(f"Uploaded file, file ID: {file.id}") + +vector_store = agents_client.vector_stores.create_and_poll(file_ids=[file.id], name="my_vectorstore") +print(f"Created vector store, vector store ID: {vector_store.id}") + +# Create file search tool with resources followed by creating agent +file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + +agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="Hello, you are helpful agent and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, +) +``` + + + +### Create Agent with Enterprise File Search + +We can upload file to Azure as it is shown in the example, or use the existing Azure blob storage. In the code below we demonstrate how this can be achieved. First we upload file to azure and create `VectorStoreDataSource`, which then is used to create vector store. This vector store is then given to the `FileSearchTool` constructor. + + + +```python +# We will upload the local file to Azure and will use it for vector store creation. 
+asset_uri = os.environ["AZURE_BLOB_URI"] + +# Create a vector store with no file and wait for it to be processed +ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) +vector_store = agents_client.vector_stores.create_and_poll(data_sources=[ds], name="sample_vector_store") +print(f"Created vector store, vector store ID: {vector_store.id}") + +# Create a file search tool +file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + +# Notices that FileSearchTool as tool and tool_resources must be added or the agent unable to search the file +agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, +) +``` + + + +We also can attach files to the existing vector store. In the code snippet below, we first create an empty vector store and add file to it. + + + +```python +# Create a vector store with no file and wait for it to be processed +vector_store = agents_client.vector_stores.create_and_poll(data_sources=[], name="sample_vector_store") +print(f"Created vector store, vector store ID: {vector_store.id}") + +ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) +# Add the file to the vector store or you can supply data sources in the vector store creation +vector_store_file_batch = agents_client.vector_store_file_batches.create_and_poll( + vector_store_id=vector_store.id, data_sources=[ds] +) +print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") + +# Create a file search tool +file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) +``` + + + +### Create Agent with Code Interpreter + +Here is an example to upload a file and use it for code interpreter by an Agent: + + + +```python +file = 
agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) +print(f"Uploaded file, file ID: {file.id}") + +code_interpreter = CodeInterpreterTool(file_ids=[file.id]) + +# Create agent with code interpreter tool and tools_resources +agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + tools=code_interpreter.definitions, + tool_resources=code_interpreter.resources, +) +``` + + + +### Create Agent with Bing Grounding + +To enable your Agent to perform search through Bing search API, you use `BingGroundingTool` along with a connection. + +Here is an example: + + + +```python +conn_id = os.environ["AZURE_BING_CONNECTION_ID"] + +# Initialize agent bing tool and add the connection id +bing = BingGroundingTool(connection_id=conn_id) + +# Create agent with the bing tool and process agent run +with agents_client: + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + tools=bing.definitions, + ) +``` + + + +### Create Agent with Azure AI Search + +Azure AI Search is an enterprise search system for high-performance applications. It integrates with Azure OpenAI Service and Azure Machine Learning, offering advanced search technologies like vector search and full-text search. Ideal for knowledge base insights, information discovery, and automation. Creating an Agent with Azure AI Search requires an existing Azure AI Search Index. For more information and setup guides, see [Azure AI Search Tool Guide](https://learn.microsoft.com/azure/ai-services/agents/how-to/tools/azure-ai-search?tabs=azurecli%2Cpython&pivots=overview-azure-ai-search). 
+ +Here is an example to integrate Azure AI Search: + + + +```python +conn_id = os.environ["AI_AZURE_AI_CONNECTION_ID"] + +print(conn_id) + +# Initialize agent AI search tool and add the search index connection id +ai_search = AzureAISearchTool( + index_connection_id=conn_id, index_name="sample_index", query_type=AzureAISearchQueryType.SIMPLE, top_k=3, filter="" +) + +# Create agent with AI search tool and process agent run +with agents_client: + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + tools=ai_search.definitions, + tool_resources=ai_search.resources, + ) +``` + + + +If the agent has found the relevant information in the index, the reference +and annotation will be provided in the message response. In the example above, we replace +the reference placeholder by the actual reference and url. Please note, that to +get sensible result, the index needs to have "embedding", "token", "category" and "title" fields. + + + +```python +# Fetch and log all messages +messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) +for message in messages: + if message.role == MessageRole.AGENT and message.url_citation_annotations: + placeholder_annotations = { + annotation.text: f" [see {annotation.url_citation.title}] ({annotation.url_citation.url})" + for annotation in message.url_citation_annotations + } + for message_text in message.text_messages: + message_str = message_text.text.value + for k, v in placeholder_annotations.items(): + message_str = message_str.replace(k, v) + print(f"{message.role}: {message_str}") + else: + for message_text in message.text_messages: + print(f"{message.role}: {message_text.text.value}") +``` + + + +### Create Agent with Function Call + +You can enhance your Agents by defining callback functions as function tools. 
These can be provided to `create_agent` via either the `toolset` parameter or the combination of `tools` and `tool_resources`. Here are the distinctions: + +For more details about requirements and specification of functions, refer to [Function Tool Specifications](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-agents/FunctionTool.md) + +Here is an example to use [user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-agents/samples/utils/user_functions.py) in `toolset`: + + +```python +functions = FunctionTool(user_functions) +toolset = ToolSet() +toolset.add(functions) +agents_client.enable_auto_function_calls(toolset) + +agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + toolset=toolset, +) +``` + + + +For asynchronous functions, you must import `AgentsClient` from `azure.ai.agents.aio` and use `AsyncFunctionTool`. Here is an example using [asynchronous user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-agents/samples/agents_async/sample_agents_functions_async.py): + +```python +from azure.ai.agents.aio import AgentsClient +``` + + + +```python +functions = AsyncFunctionTool(user_async_functions) + +toolset = AsyncToolSet() +toolset.add(functions) +agents_client.enable_auto_function_calls(toolset) + +agent = await agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + toolset=toolset, +) +``` + + + +Notice that if `enable_auto_function_calls` is called, the SDK will invoke the functions automatically during `create_and_process` or streaming. 
If you prefer to execute them manually, refer to [`sample_agents_stream_eventhandler_with_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_eventhandler_with_functions.py) or +[`sample_agents_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_functions.py) + +### Create Agent With Azure Function Call + +The AI agent leverages Azure Functions triggered asynchronously via Azure Storage Queues. To enable the agent to perform Azure Function calls, you must set up the corresponding `AzureFunctionTool`, specifying input and output queues as well as parameter definitions. + +Example Python snippet illustrating how you create an agent utilizing the Azure Function Tool: + +```python +azure_function_tool = AzureFunctionTool( + name="foo", + description="Get answers from the foo bot.", + parameters={ + "type": "object", + "properties": { + "query": {"type": "string", "description": "The question to ask."}, + "outputqueueuri": {"type": "string", "description": "The full output queue uri."}, + }, + }, + input_queue=AzureFunctionStorageQueue( + queue_name="azure-function-foo-input", + storage_service_endpoint=storage_service_endpoint, + ), + output_queue=AzureFunctionStorageQueue( + queue_name="azure-function-tool-output", + storage_service_endpoint=storage_service_endpoint, + ), +) + +agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="azure-function-agent-foo", + instructions=f"You are a helpful support agent. Use the provided function any time the prompt contains the string 'What would foo say?'. When you invoke the function, ALWAYS specify the output queue uri parameter as '{storage_service_endpoint}/azure-function-tool-output'. 
Always responds with \"Foo says\" and then the response from the tool.", + tools=azure_function_tool.definitions, +) +print(f"Created agent, agent ID: {agent.id}") +``` + +--- + +**Limitations** + +Currently, the Azure Function integration for the AI Agent has the following limitations: + +- Supported trigger for Azure Function is currently limited to **Queue triggers** only. + HTTP or other trigger types and streaming responses are not supported at this time. + +--- + +**Create and Deploy Azure Function** + +Before you can use the agent with AzureFunctionTool, you need to create and deploy Azure Function. + +Below is an example Python Azure Function responding to queue-triggered messages and placing responses on the output queue: + +```python +import azure.functions as func +import logging +import json + +app = func.FunctionApp() + + +@app.function_name(name="Foo") +@app.queue_trigger( + arg_name="arguments", + queue_name="azure-function-foo-input", + connection="AzureWebJobsStorage") +@app.queue_output( + arg_name="outputQueue", + queue_name="azure-function-tool-output", + connection="AzureWebJobsStorage") +def foo(arguments: func.QueueMessage, outputQueue: func.Out[str]) -> None: + """ + The function, answering question. + + :param arguments: The arguments, containing json serialized request. + :param outputQueue: The output queue to write messages to. + """ + + parsed_args = json.loads(arguments.get_body().decode('utf-8')) + try: + response = { + "Value": "Bar", + "CorrelationId": parsed_args['CorrelationId'] + } + outputQueue.set(json.dumps(response)) + logging.info(f'The function returns the following message: {json.dumps(response)}') + except Exception as e: + logging.error(f"Error processing message: {e}") + raise +``` + +> **Important:** Both input and output payloads must contain the `CorrelationId`, which must match in request and response. 
+ +--- + +**Azure Function Project Creation and Deployment** + +To deploy your function to Azure properly, follow Microsoft's official documentation step by step: + +[Azure Functions Python Developer Guide](https://learn.microsoft.com/azure/azure-functions/create-first-function-cli-python?tabs=windows%2Cbash%2Cazure-cli%2Cbrowser) + +**Summary of required steps:** + +- Use the Azure CLI or Azure Portal to create an Azure Function App. +- Create input and output queues in Azure Storage. +- Deploy your Function code. + +--- + +**Verification and Testing Azure Function** + +To ensure that your Azure Function deployment functions correctly: + +1. Place the following style message manually into the input queue (`input`): + +{ + "CorrelationId": "42" +} + +Check the output queue (`output`) and validate the structured message response: + +{ + "Value": "Bar", + "CorrelationId": "42" +} + +--- + +**Required Role Assignments (IAM Configuration)** + +Ensure your Azure AI Project identity has the following storage account permissions: +- `Storage Account Contributor` +- `Storage Blob Data Contributor` +- `Storage File Data Privileged Contributor` +- `Storage Queue Data Contributor` +- `Storage Table Data Contributor` + +--- + +**Additional Important Configuration Notes** + +- The Azure Function configured above uses the `AzureWebJobsStorage` connection string for queue connectivity. You may alternatively use managed identity-based connections as described in the official Azure Functions Managed Identity documentation. +- Storage queues you specify (`input` & `output`) should already exist in the storage account before the Function deployment or invocation, created manually via Azure portal or CLI. +- When using Azure storage account connection strings, make sure the account has enabled storage account key access (`Storage Account > Settings > Configuration`). + +--- + +With the above steps complete, your Azure Function integration with your AI Agent is ready for use. 
+ + +### Create Agent With Logic Apps + +Logic Apps allow HTTP requests to trigger actions. For more information, refer to the guide [Logic App Workflows for Function Calling](https://learn.microsoft.com/azure/ai-services/openai/how-to/assistants-logic-apps). + +Your Logic App must be in the same resource group as your Azure AI Project, shown in the Azure Portal. Agents SDK accesses Logic Apps through Workflow URLs, which are fetched and called as requests in functions. + +Below is an example of how to create an Azure Logic App utility tool and register a function with it. + + + +```python + +# Create the agents client +agents_client = AgentsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), +) + +# Extract subscription and resource group from the project scope +subscription_id = os.environ["SUBSCRIPTION_ID"] +resource_group = os.environ["resource_group_name"] + +# Logic App details +logic_app_name = "" +trigger_name = "" + +# Create and initialize AzureLogicAppTool utility +logic_app_tool = AzureLogicAppTool(subscription_id, resource_group) +logic_app_tool.register_logic_app(logic_app_name, trigger_name) +print(f"Registered logic app '{logic_app_name}' with trigger '{trigger_name}'.") + +# Create the specialized "send_email_via_logic_app" function for your agent tools +send_email_func = create_send_email_function(logic_app_tool, logic_app_name) + +# Prepare the function tools for the agent +functions_to_use: Set = { + fetch_current_datetime, + send_email_func, # This references the AzureLogicAppTool instance via closure +} +``` + + + +After this the functions can be incorporated normally into code using `FunctionTool`. + + +### Create Agent With OpenAPI + +OpenAPI specifications describe REST operations against a specific endpoint. Agents SDK can read an OpenAPI spec, create a function from it, and call that function against the REST endpoint without additional client-side execution. 
+ +Here is an example creating an OpenAPI tool (using anonymous authentication): + + + +```python + +with open(weather_asset_file_path, "r") as f: + openapi_weather = jsonref.loads(f.read()) + +with open(countries_asset_file_path, "r") as f: + openapi_countries = jsonref.loads(f.read()) + +# Create Auth object for the OpenApiTool (note that connection or managed identity auth setup requires additional setup in Azure) +auth = OpenApiAnonymousAuthDetails() + +# Initialize agent OpenApi tool using the read in OpenAPI spec +openapi_tool = OpenApiTool( + name="get_weather", spec=openapi_weather, description="Retrieve weather information for a location", auth=auth +) +openapi_tool.add_definition( + name="get_countries", spec=openapi_countries, description="Retrieve a list of countries", auth=auth +) + +# Create agent with OpenApi tool and process agent run +with agents_client: + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + tools=openapi_tool.definitions, + ) +``` + + + + +### Create an Agent with Fabric + +To enable your Agent to answer queries using Fabric data, use `FabricTool` along with a connection to the Fabric resource. + +Here is an example: + + + +```python +conn_id = os.environ["FABRIC_CONNECTION_ID"] + +print(conn_id) + +# Initialize an Agent Fabric tool and add the connection id +fabric = FabricTool(connection_id=conn_id) + +# Create an Agent with the Fabric tool and process an Agent run +with agents_client: + agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are a helpful agent", + tools=fabric.definitions, + ) +``` + + + + +### Create Thread + +For each session or conversation, a thread is required. 
Here is an example: + + + +```python +thread = agents_client.threads.create() +``` + + + +### Create Thread with Tool Resource + +In some scenarios, you might need to assign specific resources to individual threads. To achieve this, you provide the `tool_resources` argument to `create_thread`. In the following example, you create a vector store and upload a file, enable an Agent for file search using the `tools` argument, and then associate the file with the thread using the `tool_resources` argument. + + + +```python +file = agents_client.files.upload_and_poll(file_path=asset_file_path, purpose=FilePurpose.AGENTS) +print(f"Uploaded file, file ID: {file.id}") + +vector_store = agents_client.vector_stores.create_and_poll(file_ids=[file.id], name="my_vectorstore") +print(f"Created vector store, vector store ID: {vector_store.id}") + +# Create file search tool with resources followed by creating agent +file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + +agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="Hello, you are helpful agent and can search information from uploaded files", + tools=file_search.definitions, +) + +print(f"Created agent, ID: {agent.id}") + +# Create thread with file resources. +# If the agent has multiple threads, only this thread can search this file. 
+thread = agents_client.threads.create(tool_resources=file_search.resources) +``` + + + +#### List Threads + +To list all threads attached to a given agent, use the list_threads API: + +```python +threads = agents_client.threads.list() +``` + +### Create Message + +To create a message for agent to process, you pass `user` as `role` and a question as `content`: + + + +```python +message = agents_client.messages.create(thread_id=thread.id, role="user", content="Hello, tell me a joke") +``` + + + +### Create Message with File Search Attachment + +To attach a file to a message for content searching, you use `MessageAttachment` and `FileSearchTool`: + + + +```python +attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions) +message = agents_client.messages.create( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment] +) +``` + + + +### Create Message with Code Interpreter Attachment + +To attach a file to a message for data analysis, use `MessageAttachment` and `CodeInterpreterTool` classes. You must pass `CodeInterpreterTool` as `tools` or `toolset` in `create_agent` call or the file attachment cannot be opened for code interpreter. 
+ +Here is an example to pass `CodeInterpreterTool` as tool: + + + +```python +# Notice that CodeInterpreter must be enabled in the agent creation, +# otherwise the agent will not be able to see the file attachment for code interpretation +agent = agents_client.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="my-agent", + instructions="You are helpful agent", + tools=CodeInterpreterTool().definitions, +) +print(f"Created agent, agent ID: {agent.id}") + +thread = agents_client.threads.create() +print(f"Created thread, thread ID: {thread.id}") + +# Create an attachment +attachment = MessageAttachment(file_id=file.id, tools=CodeInterpreterTool().definitions) + +# Create a message +message = agents_client.messages.create( + thread_id=thread.id, + role="user", + content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", + attachments=[attachment], +) +``` + + + +Azure blob storage can be used as a message attachment. In this case, use `VectorStoreDataSource` as a data source: + + + +```python +# We will upload the local file to Azure and will use it for vector store creation. 
+asset_uri = os.environ["AZURE_BLOB_URI"]
+ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET)
+
+# Create a message with the attachment
+attachment = MessageAttachment(data_source=ds, tools=code_interpreter.definitions)
+message = agents_client.messages.create(
+    thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment]
+)
+```
+
+
+
+### Create Message with Image Inputs
+
+You can send messages to Azure agents with image inputs in the following ways:
+
+- **Using an image stored as an uploaded file**
+- **Using a public image accessible via URL**
+- **Using a base64 encoded image string**
+
+The following examples demonstrate each method:
+
+#### Create message using uploaded image file
+
+```python
+# Upload the local image file
+image_file = agents_client.files.upload_and_poll(file_path="image_file.png", purpose="assistants")
+
+# Construct content using uploaded image
+file_param = MessageImageFileParam(file_id=image_file.id, detail="high")
+content_blocks = [
+    MessageInputTextBlock(text="Hello, what is in the image?"),
+    MessageInputImageFileBlock(image_file=file_param),
+]
+
+# Create the message
+message = agents_client.messages.create(
+    thread_id=thread.id,
+    role="user",
+    content=content_blocks
+)
+```
+
+#### Create message with an image URL input
+
+```python
+# Specify the public image URL
+image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
+
+# Create content directly referencing image URL
+url_param = MessageImageUrlParam(url=image_url, detail="high")
+content_blocks = [
+    MessageInputTextBlock(text="Hello, what is in the image?"),
+    MessageInputImageUrlBlock(image_url=url_param),
+]
+
+# Create the message
+message = agents_client.messages.create(
+    thread_id=thread.id,
+    role="user",
+    content=content_blocks
+)
+```
+
+#### Create
message with base64-encoded image input
+
+```python
+import base64
+
+def image_file_to_base64(path: str) -> str:
+    with open(path, "rb") as f:
+        return base64.b64encode(f.read()).decode("utf-8")
+
+# Convert your image file to base64 format
+image_base64 = image_file_to_base64("image_file.png")
+
+# Prepare the data URL
+img_data_url = f"data:image/png;base64,{image_base64}"
+
+# Use base64 encoded string as image URL parameter
+url_param = MessageImageUrlParam(url=img_data_url, detail="high")
+content_blocks = [
+    MessageInputTextBlock(text="Hello, what is in the image?"),
+    MessageInputImageUrlBlock(image_url=url_param),
+]
+
+# Create the message
+message = agents_client.messages.create(
+    thread_id=thread.id,
+    role="user",
+    content=content_blocks
+)
+```
+
+### Execute Run, Run_and_Process, or Stream
+
+To process your message, you can use `runs.create`, `runs.create_and_process`, or `runs.stream`.
+
+`runs.create` requests the Agent to process the message without polling for the result. If you are using `function tools`, whether provided as a `toolset` or not, your code is responsible for polling for the result and acknowledging the status of `Run`. When the status is `requires_action`, your code is responsible for calling the function tools. For a code sample, visit [`sample_agents_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_functions.py).
+
+Here is an example of `runs.create`, polling until the run is completed:
+
+
+
+```python
+run = agents_client.runs.create(thread_id=thread.id, agent_id=agent.id)
+
+# Poll the run as long as run status is queued or in progress
+while run.status in ["queued", "in_progress", "requires_action"]:
+    # Wait for a second
+    time.sleep(1)
+    run = agents_client.runs.get(thread_id=thread.id, run_id=run.id)
+```
+
+
+
+To have the SDK poll on your behalf and call `function tools`, use the `create_and_process` method.
Note that `function tools` will only be invoked if they are provided as `toolset` during the `create_agent` call. + +Here is an example: + + + +```python +run = agents_client.runs.create_and_process(thread_id=thread.id, agent_id=agent.id) +``` + + + +With streaming, polling need not be considered. If `function tools` are provided as `toolset` during the `create_agent` call, they will be invoked by the SDK. + +Here is an example of streaming: + + + +```python +with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id) as stream: + + for event_type, event_data, _ in stream: + + if isinstance(event_data, MessageDeltaChunk): + print(f"Text delta received: {event_data.text}") + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AgentStreamEvent.ERROR: + print(f"An error occurred. Data: {event_data}") + + elif event_type == AgentStreamEvent.DONE: + print("Stream completed.") + break + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") +``` + + + +In the code above, because an `event_handler` object is not passed to the `stream` function, the SDK will instantiate `AgentEventHandler` or `AsyncAgentEventHandler` as the default event handler and produce an iterable object with `event_type` and `event_data`. `AgentEventHandler` and `AsyncAgentEventHandler` are overridable. Here is an example: + + + +```python +# With AgentEventHandler[str], the return type for each event functions is optional string. 
+class MyEventHandler(AgentEventHandler[str]): + + def on_message_delta(self, delta: "MessageDeltaChunk") -> Optional[str]: + return f"Text delta received: {delta.text}" + + def on_thread_message(self, message: "ThreadMessage") -> Optional[str]: + return f"ThreadMessage created. ID: {message.id}, Status: {message.status}" + + def on_thread_run(self, run: "ThreadRun") -> Optional[str]: + return f"ThreadRun status: {run.status}" + + def on_run_step(self, step: "RunStep") -> Optional[str]: + return f"RunStep type: {step.type}, Status: {step.status}" + + def on_error(self, data: str) -> Optional[str]: + return f"An error occurred. Data: {data}" + + def on_done(self) -> Optional[str]: + return "Stream completed." + + def on_unhandled_event(self, event_type: str, event_data: Any) -> Optional[str]: + return f"Unhandled Event Type: {event_type}, Data: {event_data}" +``` + + + + + + +```python +with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id, event_handler=MyEventHandler()) as stream: + for event_type, event_data, func_return in stream: + print(f"Received data.") + print(f"Streaming receive Event Type: {event_type}") + print(f"Event Data: {str(event_data)[:100]}...") + print(f"Event Function return: {func_return}\n") +``` + + + +As you can see, this SDK parses the events and produces various event types similar to OpenAI agents. In your use case, you might not be interested in handling all these types and may decide to parse the events on your own. To achieve this, please refer to [override base event handler](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_with_base_override_eventhandler.py). + +``` +Note: Multiple streaming processes may be chained behind the scenes. + +When the SDK receives a `ThreadRun` event with the status `requires_action`, the next event will be `Done`, followed by termination. The SDK will submit the tool calls using the same event handler. 
The event handler will then chain the main stream with the tool stream.
+
+Consequently, when you iterate over the streaming using a for loop similar to the example above, the for loop will receive events from the main stream followed by events from the tool stream.
+```
+
+
+### Retrieve Message
+
+To retrieve messages from agents, use the following example:
+
+
+
+```python
+messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING)
+for msg in messages:
+    if msg.text_messages:
+        last_text = msg.text_messages[-1]
+        print(f"{msg.role}: {last_text.text.value}")
+```
+
+
+
+In addition, `messages` and `messages.data[]` offer helper properties such as `text_messages`, `image_contents`, `file_citation_annotations`, and `file_path_annotations` to quickly retrieve content from one message or all messages.
+
+### Retrieve File
+
+Files uploaded by Agents cannot be retrieved. If your use case needs to access the file content uploaded by the Agents, you are advised to keep an additional copy accessible by your application. However, files generated by Agents are retrievable with `save_file` or `get_file_content`.
+
+Here is an example of retrieving file IDs from messages and saving the files to the local drive:
+
+
+
+```python
+messages = agents_client.messages.list(thread_id=thread.id)
+print(f"Messages: {messages}")
+
+for msg in messages:
+    # Save every image file in the message
+    for img in msg.image_contents:
+        file_id = img.image_file.file_id
+        file_name = f"{file_id}_image_file.png"
+        agents_client.files.save(file_id=file_id, file_name=file_name)
+        print(f"Saved image file to: {Path.cwd() / file_name}")
+
+    # Print details of every file-path annotation
+    for ann in msg.file_path_annotations:
+        print("File Paths:")
+        print(f" Type: {ann.type}")
+        print(f" Text: {ann.text}")
+        print(f" File ID: {ann.file_path.file_id}")
+        print(f" Start Index: {ann.start_index}")
+        print(f" End Index: {ann.end_index}")
+```
+
+
+
+Here is an example of using `get_file_content`:
+
+```python
+from pathlib import Path
+
+async def save_file_content(client, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None):
+    # Determine the target directory
+    path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd()
+    path.mkdir(parents=True, exist_ok=True)
+
+    # Retrieve the file content
+    file_content_stream = await client.files.get_content(file_id)
+    if not file_content_stream:
+        raise RuntimeError(f"No content retrievable for file ID '{file_id}'.")
+
+    # Collect all chunks asynchronously
+    chunks = []
+    async for chunk in file_content_stream:
+        if isinstance(chunk, (bytes, bytearray)):
+            chunks.append(chunk)
+        else:
+            raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}")
+
+    target_file_path = path / file_name
+
+    # Write the collected content to the file synchronously
+    with open(target_file_path, "wb") as file:
+        for chunk in chunks:
+            file.write(chunk)
+```
+
+### Teardown
+
+To remove resources after completing tasks, use the following functions:
+
+
+
+```python
+# Delete the vector store when done
+agents_client.vector_stores.delete(vector_store.id)
+print("Deleted vector store") + +agents_client.files.delete(file_id=file.id) +print("Deleted file") + +# Delete the agent when done +agents_client.delete_agent(agent.id) +print("Deleted agent") +``` + + + +## Tracing + +You can add an Application Insights Azure resource to your Azure AI Foundry project. See the Tracing tab in your AI Foundry project. If one was enabled, you can get the Application Insights connection string, configure your Agents, and observe the full execution path through Azure Monitor. Typically, you might want to start tracing before you create an Agent. + +### Installation + +Make sure to install OpenTelemetry and the Azure SDK tracing plugin via + +```bash +pip install opentelemetry +pip install azure-ai-agents azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry +``` + +You will also need an exporter to send telemetry to your observability backend. You can print traces to the console or use a local viewer such as [Aspire Dashboard](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash). + +To connect to Aspire Dashboard or another OpenTelemetry compatible backend, install OTLP exporter: + +```bash +pip install opentelemetry-exporter-otlp +``` + +### How to enable tracing + +Here is a code sample that shows how to enable Azure Monitor tracing: + + + +```python +from opentelemetry import trace +from azure.monitor.opentelemetry import configure_azure_monitor + +# Enable Azure Monitor tracing +application_insights_connection_string = os.environ["APPLICATIONINSIGHTS_CONNECTION_STRING"] +configure_azure_monitor(connection_string=application_insights_connection_string) + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span(scenario): + with agents_client: +``` + + + +In addition, you might find helpful to see the tracing logs in console. 
You can achieve this with the following code:
+
+```python
+from azure.ai.agents.telemetry import enable_telemetry
+
+enable_telemetry(destination=sys.stdout)
+```
+### How to trace your own functions
+
+The decorator `trace_function` is provided for tracing your own function calls using OpenTelemetry. By default the function name is used as the name for the span. Alternatively you can provide the name for the span as a parameter to the decorator.
+
+This decorator handles various data types for function parameters and return values, and records them as attributes in the trace span. The supported data types include:
+* Basic data types: str, int, float, bool
+* Collections: list, dict, tuple, set
+    * Special handling for collections:
+      - If a collection (list, dict, tuple, set) contains nested collections, the entire collection is converted to a string before being recorded as an attribute.
+      - Sets and dictionaries are always converted to strings to ensure compatibility with span attributes.
+
+Object types are omitted, and the corresponding parameter is not traced.
+
+The parameters are recorded in attributes `code.function.parameter.<parameter_name>` and the return value is recorded in the attribute `code.function.return.value`.
+
+## Troubleshooting
+
+### Logging
+
+The client uses the standard [Python logging library](https://docs.python.org/3/library/logging.html). The SDK logs HTTP request and response details, which may be useful in troubleshooting. To log to stdout, add the following:
+
+```python
+import sys
+import logging
+
+# Acquire the logger for this client library. Use 'azure' to affect both
+# 'azure.core' and 'azure.ai.agents' libraries.
+logger = logging.getLogger("azure")
+
+# Set the desired logging level. logging.INFO or logging.DEBUG are good options.
+logger.setLevel(logging.DEBUG) + +# Direct logging output to stdout: +handler = logging.StreamHandler(stream=sys.stdout) +# Or direct logging output to a file: +# handler = logging.FileHandler(filename="sample.log") +logger.addHandler(handler) + +# Optional: change the default logging format. Here we add a timestamp. +#formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s") +#handler.setFormatter(formatter) +``` + +By default logs redact the values of URL query strings, the values of some HTTP request and response headers (including `Authorization` which holds the key or token), and the request and response payloads. To create logs without redaction, add `logging_enable = True` to the client constructor: + +```python +agents_client = AgentsClient( + endpoint=os.environ["PROJECT_ENDPOINT"], + credential=DefaultAzureCredential(), + logging_enable = True +) +``` + +Note that the log level must be set to `logging.DEBUG` (see above code). Logs will be redacted with any other log level. + +Be sure to protect non redacted logs to avoid compromising security. + +For more information, see [Configure logging in the Azure libraries for Python](https://aka.ms/azsdk/python/logging) + +### Reporting issues + +To report an issue with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues). Mention the package name "azure-ai-agents" in the title or content. + + +## Next steps + +Have a look at the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-agents/samples) folder, containing fully runnable Python code for synchronous and asynchronous clients. + +Explore the [AI Starter Template](https://aka.ms/azsdk/azure-ai-agents/python/ai-starter-template). This template creates an Azure AI Foundry hub, project and connected resources including Azure OpenAI Service, AI Search and more. 
It also deploys a simple chat application to Azure Container Apps. + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, +see the Code of Conduct FAQ or contact opencode@microsoft.com with any +additional questions or comments. + + +[samples]: https://aka.ms/azsdk/azure-ai-projects/python/samples/ +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[entra_id]: https://learn.microsoft.com/azure/ai-services/authentication?tabs=powershell#authenticate-with-microsoft-entra-id +[azure_identity_credentials]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#credentials +[azure_identity_pip]: https://pypi.org/project/azure-identity/ +[default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential +[pip]: https://pypi.org/project/pip/ +[azure_sub]: https://azure.microsoft.com/free/ +[evaluators]: https://learn.microsoft.com/azure/ai-studio/how-to/develop/evaluate-sdk +[azure_ai_evaluation]: https://learn.microsoft.com/python/api/overview/azure/ai-evaluation-readme +[evaluator_library]: https://learn.microsoft.com/azure/ai-studio/how-to/evaluate-generative-ai-app#view-and-manage-the-evaluators-in-the-evaluator-library \ No newline at end of file