From 6216d31cf00d0491fd3da73b1a0d84c909d73d0c Mon Sep 17 00:00:00 2001 From: edvan_microsoft Date: Mon, 27 May 2024 16:01:33 +0200 Subject: [PATCH 1/3] updated samples --- python/samples/learn_resources/README.md | 14 +- python/samples/learn_resources/__init__.py | 5 + python/samples/learn_resources/ai_services.py | 13 +- .../learn_resources/configuring_prompts.py | 135 +++++----- .../learn_resources/creating_functions.py | 60 ++++- .../functions_within_prompts.py | 80 +++--- python/samples/learn_resources/planner.py | 20 +- python/samples/learn_resources/plugin.py | 8 +- .../{native_function.py => Math.py} | 6 + python/samples/learn_resources/prompts.py | 149 ----------- .../learn_resources/serializing_prompts.py | 3 +- .../learn_resources/service_configurator.py | 95 ------- .../sk_service_configurator.py | 55 ++++ python/samples/learn_resources/templates.py | 194 ++++++++++---- .../learn_resources/using_the_kernel.py | 40 +-- .../learn_resources/your_first_prompt.py | 253 ++++++++++++++++++ python/semantic_kernel/kernel.py | 2 +- 17 files changed, 662 insertions(+), 470 deletions(-) create mode 100644 python/samples/learn_resources/__init__.py rename python/samples/learn_resources/plugins/MathPlugin/{native_function.py => Math.py} (94%) delete mode 100644 python/samples/learn_resources/prompts.py delete mode 100644 python/samples/learn_resources/service_configurator.py create mode 100644 python/samples/learn_resources/sk_service_configurator.py create mode 100644 python/samples/learn_resources/your_first_prompt.py diff --git a/python/samples/learn_resources/README.md b/python/samples/learn_resources/README.md index 8c5df651fc76..f36b03bca2b3 100644 --- a/python/samples/learn_resources/README.md +++ b/python/samples/learn_resources/README.md @@ -4,7 +4,11 @@ This project contains a collection of examples used in documentation on [learn.m ## Prerequisites -- [Python](https://www.python.org/downloads/) 3.8 and above +- [Python](https://www.python.org/downloads/) 3.10 and above +- Install Semantic Kernel through PyPi: + ```bash + pip install semantic-kernel + ``` ## Configuring the sample @@ -19,13 +23,13 @@ Copy the `.env.example` file to a new file named `.env`. Then, copy those keys i ``` GLOBAL_LLM_SERVICE="OpenAI" # Toggle between "OpenAI" or "AzureOpenAI" -OPEN_AI_CHAT_COMPLETION_MODEL_ID="gpt-3.5-turbo-0125" -OPEN_AI_TEXT_COMPLETION_MODEL_ID="gpt-3.5-turbo-instruct" +OPEN_AI_CHAT_MODEL_ID="gpt-3.5-turbo-0125" +OPEN_AI_TEXT_MODEL_ID="gpt-3.5-turbo-instruct" OPENAI_API_KEY="" OPENAI_ORG_ID="" -AZURE_OPEN_AI_CHAT_COMPLETION_DEPLOYMENT_NAME="gpt-35-turbo" -AZURE_OPEN_AI_TEXT_COMPLETION_DEPLOYMENT_NAME="gpt-35-turbo-instruct" +AZURE_OPEN_AI_CHAT_DEPLOYMENT_NAME="gpt-35-turbo" +AZURE_OPEN_AI_TEXT_DEPLOYMENT_NAME="gpt-35-turbo-instruct" AZURE_OPENAI_ENDPOINT="" AZURE_OPENAI_API_KEY="" AZURE_OPENAI_API_VERSION="" diff --git a/python/samples/learn_resources/__init__.py b/python/samples/learn_resources/__init__.py new file mode 100644 index 000000000000..754bc0fbdc11 --- /dev/null +++ b/python/samples/learn_resources/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft. All rights reserved. 
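+# This package init re-exports add_service so the samples can be imported and
+# tested as a package (e.g. `from samples.learn_resources import add_service`).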
+ +from .sk_service_configurator import add_service + +__all__ = ["add_service"] diff --git a/python/samples/learn_resources/ai_services.py b/python/samples/learn_resources/ai_services.py index b330a62e33e1..8640cea96e6c 100644 --- a/python/samples/learn_resources/ai_services.py +++ b/python/samples/learn_resources/ai_services.py @@ -3,24 +3,25 @@ import asyncio import os -from service_configurator import add_service +from sk_service_configurator import add_service -import semantic_kernel as sk +from semantic_kernel.kernel import Kernel async def main(): # Initialize the kernel - kernel = sk.Kernel() + kernel = Kernel() # Add the service to the kernel # use_chat: True to use chat completion, False to use text completion - kernel = add_service(kernel=kernel, use_chat=True) + # use_azure: True to use Azure OpenAI, False to use OpenAI + kernel = add_service(kernel, use_chat=True) script_directory = os.path.dirname(__file__) plugins_directory = os.path.join(script_directory, "plugins") - writer_plugin = kernel.import_plugin_from_prompt_directory( + writer_plugin = kernel.add_plugin( parent_directory=plugins_directory, - plugin_directory_name="WriterPlugin", + plugin_name="WriterPlugin", ) # Run the ShortPoem function with the Kernel Argument. diff --git a/python/samples/learn_resources/configuring_prompts.py b/python/samples/learn_resources/configuring_prompts.py index d0588be8053b..46a21e40e821 100644 --- a/python/samples/learn_resources/configuring_prompts.py +++ b/python/samples/learn_resources/configuring_prompts.py @@ -1,87 +1,86 @@ # Copyright (c) Microsoft. All rights reserved. + import asyncio -from service_configurator import add_service +from sk_service_configurator import add_service -import semantic_kernel as sk -from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings -from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.connectors.ai import PromptExecutionSettings +from semantic_kernel.contents import ChatHistory from semantic_kernel.core_plugins import ConversationSummaryPlugin -from semantic_kernel.prompt_template.input_variable import InputVariable -from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig - - -async def main(): - # Initialize the kernel - kernel = sk.Kernel() - - # Add the service to the kernel - # use_chat: True to use chat completion, False to use text completion - kernel = add_service(kernel=kernel, use_chat=True) - - service_id = "default" - - # The following execution settings are used for the ConversationSummaryPlugin - execution_settings = PromptExecutionSettings( - service_id=service_id, max_tokens=ConversationSummaryPlugin._max_tokens, temperature=0.1, top_p=0.5 - ) - prompt_template_config = PromptTemplateConfig( - template=ConversationSummaryPlugin._summarize_conversation_prompt_template, - description="Given a section of a conversation transcript, summarize the part of" " the conversation.", - execution_settings=execution_settings, - ) - - # Import the ConversationSummaryPlugin - kernel.add_plugin( - ConversationSummaryPlugin(kernel=kernel, prompt_template_config=prompt_template_config), - plugin_name="ConversationSummaryPlugin", - ) - - # Create the history - history = ChatHistory() - - # Create the prompt with the ConversationSummaryPlugin - prompt = """{{ConversationSummaryPlugin.SummarizeConversation $history}} - User: {{$request}} - Assistant: """ - - # These execution settings are tied to the chat function, created below. 
- execution_settings = kernel.get_service(service_id).instantiate_prompt_execution_settings(service_id=service_id) - chat_prompt_template_config = PromptTemplateConfig( - template=prompt, +from semantic_kernel.kernel import Kernel +from semantic_kernel.prompt_template import InputVariable, PromptTemplateConfig + +# Initialize the kernel +kernel = Kernel() + +# Add the service to the kernel +# use_chat: True to use chat completion, False to use text completion +kernel = add_service(kernel=kernel, use_chat=True) + +service_id = "default" + +# The following execution settings are used for the ConversationSummaryPlugin +execution_settings = PromptExecutionSettings( + service_id=service_id, max_tokens=ConversationSummaryPlugin._max_tokens, temperature=0.1, top_p=0.5 +) +prompt_template_config = PromptTemplateConfig( + template=ConversationSummaryPlugin._summarize_conversation_prompt_template, + description="Given a section of a conversation transcript, summarize the part of" " the conversation.", + execution_settings=execution_settings, +) + +# Import the ConversationSummaryPlugin +kernel.add_plugin( + ConversationSummaryPlugin(kernel=kernel, prompt_template_config=prompt_template_config), + plugin_name="ConversationSummaryPlugin", +) + + +# +# Create the function with the prompt +kernel.add_function( + prompt_template_config=PromptTemplateConfig( + template="""{{ConversationSummaryPlugin.SummarizeConversation $history}} +User: {{$request}} +Assistant: """, description="Chat with the assistant", - execution_settings=execution_settings, + execution_settings=[ + PromptExecutionSettings(service_id="default", temperature=0.0, max_tokens=1000), + PromptExecutionSettings(service_id="gpt-3.5-turbo", temperature=0.2, max_tokens=4000), + PromptExecutionSettings(service_id="gpt-4", temperature=0.3, max_tokens=8000), + ], input_variables=[ InputVariable(name="request", description="The user input", is_required=True), - InputVariable(name="history", description="The history of the conversation", is_required=True), + InputVariable( + name="history", + description="The history of the conversation", + is_required=True, + allow_dangerously_set_content=True, + ), ], - ) + ), + plugin_name="Summarize_Conversation", + function_name="Chat", + description="Chat with the assistant", +) +# + +# Create the history +history = ChatHistory() - # Create the function - chat_function = kernel.add_function( - prompt=prompt, - plugin_name="Summarize_Conversation", - function_name="Chat", - description="Chat with the assistant", - prompt_template_config=chat_prompt_template_config, - ) +async def main(): while True: try: request = input("User:> ") - except KeyboardInterrupt: - print("\n\nExiting chat...") - return False - except EOFError: - print("\n\nExiting chat...") - return False - + except (KeyboardInterrupt, EOFError): + break if request == "exit": - print("\n\nExiting chat...") - return False + break result = await kernel.invoke( - chat_function, + plugin_name="Summarize_Conversation", + function_name="Chat", request=request, history=history, ) @@ -92,6 +91,8 @@ async def main(): print(f"Assistant:> {result}") + print("\n\nExiting chat...") + # Run the main function if __name__ == "__main__": diff --git a/python/samples/learn_resources/creating_functions.py b/python/samples/learn_resources/creating_functions.py index 696eafbbc207..5c143b18e756 100644 --- a/python/samples/learn_resources/creating_functions.py +++ b/python/samples/learn_resources/creating_functions.py @@ -3,31 +3,65 @@ import asyncio import os -from 
service_configurator import add_service +from sk_service_configurator import add_service -import semantic_kernel as sk +from semantic_kernel import Kernel +from semantic_kernel.connectors.ai.function_call_behavior import FunctionCallBehavior +from semantic_kernel.connectors.ai.open_ai import OpenAIChatPromptExecutionSettings +from semantic_kernel.contents import ChatHistory async def main(): # Initialize the kernel - kernel = sk.Kernel() - - # Add the service to the kernel - # use_chat: True to use chat completion, False to use text completion - kernel = add_service(kernel=kernel, use_chat=True) + kernel = Kernel() # Import the MathPlugin. - script_directory = os.path.dirname(__file__) - plugins_directory = os.path.join(script_directory, "plugins") - math_plugin = kernel.import_native_plugin_from_directory(plugins_directory, "MathPlugin") + # + plugins_directory = os.path.join(os.path.dirname(__file__), "plugins") + math_plugin = kernel.add_plugin(parent_directory=plugins_directory, plugin_name="MathPlugin") result = await kernel.invoke( - math_plugin["Add"], - number1=5, - number2=5, + math_plugin["Sqrt"], + number1=12, ) print(result) + # + + # + kernel = add_service(kernel, use_chat=True) + kernel.add_function( + prompt="""{{$chat_history}}{{$input}}""", + execution_settings=OpenAIChatPromptExecutionSettings( + service_id="default", + temperature=0.0, + max_tokens=1000, + function_call_behavior=FunctionCallBehavior.AutoInvokeKernelFunctions(), + ), + plugin_name="Chat", + function_name="Chat", + description="Chat with the assistant", + ) + chat_history = ChatHistory() + while True: + try: + request = input("Your request: ") + except (KeyboardInterrupt, EOFError): + break + if request.lower() == "exit": + break + result = await kernel.invoke( + plugin_name="Chat", + function_name="Chat", + input=request, + chat_history=chat_history, + ) + print(result) + chat_history.add_user_message(request) + chat_history.add_assistant_message(str(result)) + + print("\n\nExiting...") + # # Run the main function diff --git a/python/samples/learn_resources/functions_within_prompts.py b/python/samples/learn_resources/functions_within_prompts.py index d467e89b915d..505f0bb8498a 100644 --- a/python/samples/learn_resources/functions_within_prompts.py +++ b/python/samples/learn_resources/functions_within_prompts.py @@ -2,32 +2,31 @@ import asyncio -from service_configurator import add_service +from sk_service_configurator import add_service -import semantic_kernel as sk -from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings -from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel import Kernel +from semantic_kernel.connectors.ai import PromptExecutionSettings +from semantic_kernel.contents import ChatHistory from semantic_kernel.core_plugins import ConversationSummaryPlugin -from semantic_kernel.prompt_template.input_variable import InputVariable -from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig +from semantic_kernel.prompt_template import InputVariable, PromptTemplateConfig async def main(): + # # Initialize the kernel - kernel = sk.Kernel() + kernel = Kernel() # Add the service to the kernel # use_chat: True to use chat completion, False to use text completion kernel = add_service(kernel=kernel, use_chat=True) service_id = "default" - execution_settings = PromptExecutionSettings( - service_id=service_id, max_tokens=ConversationSummaryPlugin._max_tokens, temperature=0.1, top_p=0.5 - ) 
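+    # The summarize prompt is capped at the plugin's token budget; the low
+    # temperature and top_p keep the generated summary focused.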
prompt_template_config = PromptTemplateConfig( template=ConversationSummaryPlugin._summarize_conversation_prompt_template, description="Given a section of a conversation transcript, summarize the part of" " the conversation.", - execution_settings=execution_settings, + execution_settings=PromptExecutionSettings( + service_id=service_id, max_tokens=ConversationSummaryPlugin._max_tokens, temperature=0.1, top_p=0.5 + ), ) # Import the ConversationSummaryPlugin @@ -35,48 +34,43 @@ async def main(): ConversationSummaryPlugin(kernel=kernel, prompt_template_config=prompt_template_config), plugin_name="ConversationSummaryPlugin", ) + # - # Create the history - history = ChatHistory() - - # Create the prompt with the ConversationSummaryPlugin - prompt = """{{ConversationSummaryPlugin.SummarizeConversation $history}} - User: {{$request}} - Assistant: """ - - req_settings = kernel.get_service("default").get_prompt_execution_settings_class()(service_id=service_id) - chat_prompt_template_config = PromptTemplateConfig( - template=prompt, - description="Chat with the assistant", - execution_settings={service_id: req_settings}, - input_variables=[ - InputVariable(name="request", description="The user input", is_required=True), - InputVariable(name="history", description="The history of the conversation", is_required=True), - ], - ) - - # Run the prompt + # chat_function = kernel.add_function( - prompt=prompt, plugin_name="Summarize_Conversation", function_name="Chat", description="Chat with the assistant", - prompt_template_config=chat_prompt_template_config, + prompt_template_config=PromptTemplateConfig( + template="""{{ConversationSummaryPlugin.SummarizeConversation $history}} + User: {{$request}} + Assistant: """, + execution_settings=kernel.get_prompt_execution_settings_from_service_id(service_id=service_id), + description="Chat with the assistant", + input_variables=[ + InputVariable(name="request", description="The user input", is_required=True), + InputVariable( + name="history", + description="The history of the conversation", + is_required=True, + allow_dangerously_set_content=True, + ), + ], + ), ) + # + + # + # Create the history + history = ChatHistory() while True: try: request = input("User:> ") - except KeyboardInterrupt: - print("\n\nExiting chat...") - return False - except EOFError: - print("\n\nExiting chat...") - return False - + except (KeyboardInterrupt, EOFError): + break if request == "exit": - print("\n\nExiting chat...") - return False + break result = await kernel.invoke( chat_function, @@ -89,6 +83,8 @@ async def main(): history.add_assistant_message(str(result)) print(f"Assistant:> {result}") + print("\n\nExiting chat...") + # # Run the main function diff --git a/python/samples/learn_resources/planner.py b/python/samples/learn_resources/planner.py index d1af71686395..aa7d077aee27 100644 --- a/python/samples/learn_resources/planner.py +++ b/python/samples/learn_resources/planner.py @@ -2,15 +2,16 @@ import asyncio import os -from service_configurator import add_service +from sk_service_configurator import add_service -import semantic_kernel as sk +from semantic_kernel import Kernel from semantic_kernel.planners.sequential_planner import SequentialPlanner async def main(): + # # Initialize the kernel - kernel = sk.Kernel() + kernel = Kernel() # Add the service to the kernel # use_chat: True to use chat completion, False to use text completion @@ -18,24 +19,23 @@ async def main(): script_directory = os.path.dirname(__file__) plugins_directory = os.path.join(script_directory, 
"plugins") - kernel.import_native_plugin_from_directory(plugins_directory, "MathPlugin") - - planner = SequentialPlanner( - kernel=kernel, - service_id="default", - ) + kernel.add_plugin(parent_directory=plugins_directory, plugin_name="MathPlugin") + planner = SequentialPlanner(kernel=kernel, service_id="default") + # + # goal = "Figure out how much I have if first, my investment of 2130.23 dollars increased by 23%, and then I spend $5 on a coffee" # noqa: E501 # Create a plan plan = await planner.create_plan(goal) # Execute the plan - result = await kernel.invoke(plan) + result = await plan.invoke(kernel) print(f"The goal: {goal}") print("Plan results:") print(f"I will have: ${result} left over.") + # # Run the main function diff --git a/python/samples/learn_resources/plugin.py b/python/samples/learn_resources/plugin.py index 3e4c4cc00a04..ff384c4ec73a 100644 --- a/python/samples/learn_resources/plugin.py +++ b/python/samples/learn_resources/plugin.py @@ -3,10 +3,10 @@ import asyncio from typing import Annotated -from service_configurator import add_service +from sk_service_configurator import add_service -import semantic_kernel as sk -from semantic_kernel.functions.kernel_function_decorator import kernel_function +from semantic_kernel import Kernel +from semantic_kernel.functions import kernel_function # Let's define a light plugin @@ -40,7 +40,7 @@ def change_state( async def main(): # Initialize the kernel - kernel = sk.Kernel() + kernel = Kernel() # Add the service to the kernel # use_chat: True to use chat completion, False to use text completion diff --git a/python/samples/learn_resources/plugins/MathPlugin/native_function.py b/python/samples/learn_resources/plugins/MathPlugin/Math.py similarity index 94% rename from python/samples/learn_resources/plugins/MathPlugin/native_function.py rename to python/samples/learn_resources/plugins/MathPlugin/Math.py index de9540f420df..f85fb224233a 100644 --- a/python/samples/learn_resources/plugins/MathPlugin/native_function.py +++ b/python/samples/learn_resources/plugins/MathPlugin/Math.py @@ -1,3 +1,5 @@ +# Copyright (c) Microsoft. All rights reserved. +# import math from typing import Annotated @@ -5,6 +7,7 @@ class Math: + # """ Description: MathPlugin provides a set of functions to make Math calculations. @@ -40,6 +43,7 @@ def multiply( ) -> Annotated[float, "The output is a float"]: return float(number1) * float(number2) + # @kernel_function( description="Takes the square root of a number", name="Sqrt", @@ -50,6 +54,8 @@ def square_root( ) -> Annotated[float, "The output is a float"]: return math.sqrt(float(number1)) + # + @kernel_function(name="Add") def add( self, diff --git a/python/samples/learn_resources/prompts.py b/python/samples/learn_resources/prompts.py deleted file mode 100644 index b227b4360c03..000000000000 --- a/python/samples/learn_resources/prompts.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -import asyncio - -from service_configurator import add_service - -import semantic_kernel as sk - - -async def main(): - # Initialize the kernel - kernel = sk.Kernel() - - # Add the service to the kernel - # use_chat: True to use chat completion, False to use text completion - kernel = add_service(kernel=kernel, use_chat=True) - - request = input("Your request: ") - - # 0.0 Initial prompt - prompt = f"What is the intent of this request? 
{request}" - print("0.0 Initial prompt") - print("-------------------------") - prompt_function = kernel.add_function(function_name="sample_zero", plugin_name="sample_plugin", prompt=prompt) - result = await kernel.invoke(prompt_function, request=request) - print(result) - print("-------------------------") - - # 1.0 Make the prompt more specific - prompt = f"""What is the intent of this request? {request} - You can choose between SendEmail, SendMessage, CompleteTask, CreateDocument.""" - print("1.0 Make the prompt more specific") - print("-------------------------") - prompt_function = kernel.add_function(function_name="sample_one", plugin_name="sample_plugin", prompt=prompt) - result = await kernel.invoke(prompt_function, request=request) - print(result) - print("-------------------------") - - # 2.0 Add structure to the output with formatting - prompt = f"""Instructions: What is the intent of this request? - Choices: SendEmail, SendMessage, CompleteTask, CreateDocument. - User Input: {request} - Intent: """ - print("2.0 Add structure to the output with formatting") - print("-------------------------") - prompt_function = kernel.add_function(function_name="sample_two", plugin_name="sample_plugin", prompt=prompt) - result = await kernel.invoke(prompt_function, request=request) - print(result) - print("-------------------------") - - # 2.1 Add structure to the output with formatting (using Markdown and JSON) - prompt = f"""## Instructions - Provide the intent of the request using the following format: - ```json - {{ - "intent": {{intent}} - }} - ``` - - ## Choices - You can choose between the following intents: - ```json - ["SendEmail", "SendMessage", "CompleteTask", "CreateDocument"] - ``` - - ## User Input - The user input is: - ```json - {{ - "request": "{request}"\n' - }} - ``` - - ## Intent""" - print("2.1 Add structure to the output with formatting (using Markdown and JSON)") - print("-------------------------") - prompt_function = kernel.add_function(function_name="sample_two_one", plugin_name="sample_plugin", prompt=prompt) - result = await kernel.invoke(prompt_function, request=request) - print(result) - print("-------------------------") - - # 3.0 Provide examples with few-shot prompting - prompt = f"""Instructions: What is the intent of this request? - Choices: SendEmail, SendMessage, CompleteTask, CreateDocument. - - User Input: Can you send a very quick approval to the marketing team? - Intent: SendMessage - - User Input: Can you send the full update to the marketing team? - Intent: SendEmail - - User Input: {request} - Intent: """ - print("3.0 Provide examples with few-shot prompting") - print("-------------------------") - prompt_function = kernel.add_function(function_name="sample_three", plugin_name="sample_plugin", prompt=prompt) - result = await kernel.invoke(prompt_function, request=request) - print(result) - print("-------------------------") - - # 4.0 Tell the AI what to do to avoid doing something wrong - prompt = f"""Instructions: What is the intent of this request? - If you don't know the intent, don't guess; instead respond with "Unknown". - Choices: SendEmail, SendMessage, CompleteTask, CreateDocument, Unknown. - - User Input: Can you send a very quick approval to the marketing team? - Intent: SendMessage - - User Input: Can you send the full update to the marketing team? 
- Intent: SendEmail - - User Input: {request} - Intent: """ - print("4.0 Tell the AI what to do to avoid doing something wrong") - print("-------------------------") - prompt_function = kernel.add_function(function_name="sample_four", plugin_name="sample_plugin", prompt=prompt) - result = await kernel.invoke(prompt_function, request=request) - print(result) - print("-------------------------") - - # 5.0 Provide context to the AI through a chat history of this user - history = ( - "User input: I hate sending emails, no one ever reads them.\n" - "AI response: I'm sorry to hear that. Messages may be a better way to communicate." - ) - prompt = f"""Instructions: What is the intent of this request?\n" - If you don't know the intent, don't guess; instead respond with "Unknown". - Choices: SendEmail, SendMessage, CompleteTask, CreateDocument, Unknown. - - User Input: Can you send a very quick approval to the marketing team? - Intent: SendMessage - - User Input: Can you send the full update to the marketing team? - Intent: SendEmail - - {history} - User Input: {request} - Intent: """ - print("5.0 Provide context to the AI") - print("-------------------------") - prompt_function = kernel.add_function(function_name="sample_five", plugin_name="sample_plugin", prompt=prompt) - result = await kernel.invoke(prompt_function, request=request, history=history) - print(result) - print("-------------------------") - - -# Run the main function -if __name__ == "__main__": - asyncio.run(main()) diff --git a/python/samples/learn_resources/serializing_prompts.py b/python/samples/learn_resources/serializing_prompts.py index 9ade73ac575c..8ca96e1a8f01 100644 --- a/python/samples/learn_resources/serializing_prompts.py +++ b/python/samples/learn_resources/serializing_prompts.py @@ -2,9 +2,8 @@ import asyncio -from service_configurator import add_service - import semantic_kernel as sk +from samples.learn_resources.sk_service_configurator import add_service from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings from semantic_kernel.contents.chat_history import ChatHistory from semantic_kernel.core_plugins import ConversationSummaryPlugin diff --git a/python/samples/learn_resources/service_configurator.py b/python/samples/learn_resources/service_configurator.py deleted file mode 100644 index 8423de598df4..000000000000 --- a/python/samples/learn_resources/service_configurator.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -from dotenv import dotenv_values - -import semantic_kernel as sk -from semantic_kernel.connectors.ai.open_ai import ( - AzureChatCompletion, - AzureTextCompletion, - OpenAIChatCompletion, - OpenAITextCompletion, -) -from semantic_kernel.kernel import Kernel - - -def add_service(kernel: Kernel, use_chat: bool = True) -> Kernel: - """ - Configure the AI service for the kernel - - Args: - kernel (Kernel): The kernel to configure - use_chat (bool): Whether to use the chat completion model, or the text completion model - - Returns: - Kernel: The configured kernel - """ - config = dotenv_values(".env") - llm_service = config.get("GLOBAL_LLM_SERVICE", None) - assert llm_service, "The LLM_SERVICE environment variable is not set." - - # The service_id is used to identify the service in the kernel. - # This can be updated to a custom value if needed. - # It should match the execution setting's key in a config.json file. - service_id = "default" - - # Configure AI service used by the kernel. Load settings from the .env file. 
- if llm_service == "AzureOpenAI": - _, api_key, endpoint = sk.azure_openai_settings_from_dot_env(include_deployment=False) - deployment_name = ( - config.get("AZURE_OPEN_AI_CHAT_COMPLETION_DEPLOYMENT_NAME") - if use_chat - else config.get("AZURE_OPEN_AI_TEXT_COMPLETION_DEPLOYMENT_NAME") - ) - - if not deployment_name: - raise ValueError("Deployment name for Azure AI is not set in .env file.") - - if use_chat: - kernel.add_service( - AzureChatCompletion( - service_id=service_id, - deployment_name=deployment_name, - endpoint=endpoint, - api_key=api_key, - ), - ) - else: - kernel.add_service( - AzureTextCompletion( - service_id=service_id, - deployment_name=deployment_name, - endpoint=endpoint, - api_key=api_key, - ), - ) - else: - api_key, org_id = sk.openai_settings_from_dot_env() - model_id = ( - config.get("OPEN_AI_CHAT_COMPLETION_MODEL_ID") - if use_chat - else config.get("OPEN_AI_TEXT_COMPLETION_MODEL_ID") - ) - - if not model_id: - raise ValueError("Model ID for OpenAI is not set in .env file.") - - if use_chat: - kernel.add_service( - OpenAIChatCompletion( - service_id=service_id, - ai_model_id=model_id, - api_key=api_key, - org_id=org_id, - ), - ) - else: - kernel.add_service( - OpenAITextCompletion( - service_id=service_id, - ai_model_id=model_id, - api_key=api_key, - org_id=org_id, - ), - ) - - return kernel diff --git a/python/samples/learn_resources/sk_service_configurator.py b/python/samples/learn_resources/sk_service_configurator.py new file mode 100644 index 000000000000..31c0c6862d73 --- /dev/null +++ b/python/samples/learn_resources/sk_service_configurator.py @@ -0,0 +1,55 @@ +# Copyright (c) Microsoft. All rights reserved. + +from dotenv import dotenv_values + +from semantic_kernel import Kernel +from semantic_kernel.connectors.ai.open_ai import ( + AzureChatCompletion, + AzureTextCompletion, + OpenAIChatCompletion, + OpenAITextCompletion, +) + + +def add_service(kernel: Kernel, use_chat: bool = True) -> Kernel: + """ + Configure the AI service for the kernel + + Args: + kernel (Kernel): The kernel to configure + use_chat (bool): Whether to use the chat completion model, or the text completion model + + Returns: + Kernel: The configured kernel + """ + config = dotenv_values(".env") + llm_service = config.get("GLOBAL_LLM_SERVICE", None) + if not llm_service: + print("GLOBAL_LLM_SERVICE not set, trying to use Azure OpenAI.") + + # The service_id is used to identify the service in the kernel. + # This can be updated to a custom value if needed. + # It should match the execution setting's key in a config.json file. + service_id = "default" + + # Configure AI service used by the kernel. Load settings from the .env file. + if llm_service == "OpenAI": + if use_chat: + # + kernel.add_service(OpenAIChatCompletion(service_id=service_id)) + # + else: + # + kernel.add_service(OpenAITextCompletion(service_id=service_id)) + # + else: + if use_chat: + # + kernel.add_service(AzureChatCompletion(service_id=service_id)) + # + else: + # + kernel.add_service(AzureTextCompletion(service_id=service_id)) + # + + return kernel diff --git a/python/samples/learn_resources/templates.py b/python/samples/learn_resources/templates.py index 0c17754e1ccd..2466c1afbae5 100644 --- a/python/samples/learn_resources/templates.py +++ b/python/samples/learn_resources/templates.py @@ -1,80 +1,158 @@ # Copyright (c) Microsoft. All rights reserved. 
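+# Demonstrates prompt templates: a plain chat prompt plus a handlebars intent
+# prompt that uses few-shot examples to decide when to end the conversation.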
import asyncio - -from service_configurator import add_service - -import semantic_kernel as sk -from semantic_kernel.contents.chat_history import ChatHistory -from semantic_kernel.prompt_template.input_variable import InputVariable -from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig - - -async def main(): - # Initialize the kernel - kernel = sk.Kernel() - - # Add the service to the kernel - # use_chat: True to use chat completion, False to use text completion - kernel = add_service(kernel=kernel, use_chat=True) - - # Create the history - history = ChatHistory() - - # An ideal prompt for this is {{$history}}{{$request}} as those - # get cleanly parsed into a new chat_history object while invoking - # the function. Another possibility is create the prompt as {{$history}} - # and make sure to add the user message to the history before invoking. - prompt = "{{$history}}" - - service_id = "default" - req_settings = kernel.get_service("default").get_prompt_execution_settings_class()(service_id=service_id) - chat_prompt_template_config = PromptTemplateConfig( - template=prompt, +from functools import reduce + +from sk_service_configurator import add_service + +from semantic_kernel import Kernel +from semantic_kernel.contents import ChatHistory +from semantic_kernel.contents.author_role import AuthorRole +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.prompt_template import InputVariable, PromptTemplateConfig + +# Initialize the kernel +kernel = Kernel() + +# Add the service to the kernel +# use_chat: True to use chat completion, False to use text completion +kernel = add_service(kernel=kernel, use_chat=True) + +# An ideal prompt for this is {{$history}}{{$request}} as those +# get cleanly parsed into a new chat_history object while invoking +# the function. Another possibility is create the prompt as {{$history}} +# and make sure to add the user message to the history before invoking. +chat_function = kernel.add_function( + plugin_name="Conversation", + function_name="Chat", + description="Chat with the assistant", + prompt_template_config=PromptTemplateConfig( + template="{{$history}}{{$request}}", description="Chat with the assistant", - execution_settings={service_id: req_settings}, input_variables=[ InputVariable(name="request", description="The user input", is_required=True), - InputVariable(name="history", description="The history of the conversation", is_required=True), + InputVariable( + name="history", + description="The history of the conversation", + is_required=True, + allow_dangerously_set_content=True, + ), ], - ) - - # Run the prompt - chat_function = kernel.add_function( - prompt=prompt, - plugin_name="Summarize_Conversation", - function_name="Chat", + ), +) + +choices = ["ContinueConversation", "EndConversation"] +chat_function_intent = kernel.add_function( + plugin_name="Conversation", + function_name="getIntent", + description="Chat with the assistant", + template_format="handlebars", + prompt_template_config=PromptTemplateConfig( + template=""" + Instructions: What is the intent of this request? + Do not explain the reasoning, just reply back with the intent. If you are unsure, reply with {{choices[0]}}. + Choices: {{choices}}. 
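+            {{! The few-shot examples and chat history below are rendered as role-tagged messages. }}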
+ + {{#each few_shot_examples}} + {{#each this.messages}} + {{#message role=role}} + {{~content~}} + {{/message}} + {{/each}} + {{/each}} + + {{#each chat_history.messages}} + {{#message role=role}} + {{~content~}} + {{/message}} + {{/each}} + + {{request}} + Intent: + """, description="Chat with the assistant", - prompt_template_config=chat_prompt_template_config, - ) + template_format="handlebars", + input_variables=[ + InputVariable(name="request", description="The user input", is_required=True), + InputVariable( + name="chat_history", + description="The history of the conversation", + is_required=True, + allow_dangerously_set_content=True, + ), + InputVariable( + name="choices", + description="The choices for the user to select from", + is_required=True, + allow_dangerously_set_content=True, + ), + InputVariable( + name="few_shot_examples", + description="The few shot examples to help the user", + is_required=True, + allow_dangerously_set_content=True, + ), + ], + ), +) +few_shot_examples = [ + ChatHistory( + messages=[ + ChatMessageContent( + role=AuthorRole.USER, content="Can you send a very quick approval to the marketing team?" + ), + ChatMessageContent(role=AuthorRole.SYSTEM, content="Intent:"), + ChatMessageContent(role=AuthorRole.ASSISTANT, content="ContinueConversation"), + ] + ), + ChatHistory( + messages=[ + ChatMessageContent(role=AuthorRole.USER, content="Thanks, I'm done for now"), + ChatMessageContent(role=AuthorRole.SYSTEM, content="Intent:"), + ChatMessageContent(role=AuthorRole.ASSISTANT, content="EndConversation"), + ] + ), +] + + +async def main(): + # Create the history + history = ChatHistory() while True: try: request = input("User:> ") - except KeyboardInterrupt: - print("\n\nExiting chat...") - return False - except EOFError: - print("\n\nExiting chat...") - return False - - if request == "exit": - print("\n\nExiting chat...") - return False - - # Add the request to the history before we - # invoke the function to include it in the prompt - history.add_user_message(request) + except (KeyboardInterrupt, EOFError): + break result = await kernel.invoke( - chat_function, + plugin_name="Conversation", + function_name="getIntent", + request=request, + history=history, + choices=choices, + few_shot_examples=few_shot_examples, + ) + if str(result) == "EndConversation": + break + + result = kernel.invoke_stream( + plugin_name="Conversation", + function_name="Chat", request=request, history=history, ) + all_chunks = [] + print("Assistant:> ", end="") + async for chunk in result: + all_chunks.append(chunk[0]) + print(str(chunk[0]), end="") + print() - history.add_assistant_message(str(result)) + history.add_user_message(request) + history.add_assistant_message(str(reduce(lambda x, y: x + y, all_chunks))) - print(f"Assistant:> {result}") + print("\n\nExiting chat...") # Run the main function diff --git a/python/samples/learn_resources/using_the_kernel.py b/python/samples/learn_resources/using_the_kernel.py index 27ad67dfcd69..5b9ece8fbb50 100644 --- a/python/samples/learn_resources/using_the_kernel.py +++ b/python/samples/learn_resources/using_the_kernel.py @@ -1,40 +1,44 @@ # Copyright (c) Microsoft. All rights reserved. 
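+# Demonstrates building a kernel, registering an AI service, and invoking
+# both a native plugin (TimePlugin) and a prompt plugin (WriterPlugin).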
+# import asyncio import os -from service_configurator import add_service +from samples.learn_resources import add_service +from semantic_kernel import Kernel -import semantic_kernel as sk -from semantic_kernel.core_plugins.time_plugin import TimePlugin +# async def main(): # Initialize the kernel - kernel = sk.Kernel() - + # + kernel = Kernel() # Add the service to the kernel # use_chat: True to use chat completion, False to use text completion - kernel = add_service(kernel=kernel, use_chat=True) + kernel = add_service(kernel, use_chat=True) + # + + # + # Import the TimePlugin and add it to the kernel + from semantic_kernel.core_plugins import TimePlugin - # Import the TimePlugin time = kernel.add_plugin(TimePlugin(), "TimePlugin") + # Invoke the Today function + current_time = await kernel.invoke(time["today"]) + print(f"The current date is: {current_time}\n") + # + + # # Import the WriterPlugin from the plugins directory. script_directory = os.path.dirname(__file__) plugins_directory = os.path.join(script_directory, "plugins") - writer_plugin = kernel.import_plugin_from_prompt_directory( - parent_directory=plugins_directory, - plugin_directory_name="WriterPlugin", - ) - - # Run the current time function - currentTime = await kernel.invoke(time["today"]) - print(f"The current date is: {currentTime}\n") - + kernel.add_plugin(parent_directory=plugins_directory, plugin_name="WriterPlugin") # Run the short poem function with the Kernel Argument - poemResult = await kernel.invoke(writer_plugin["ShortPoem"], input=str(currentTime)) - print(f"The poem result:\n\n{poemResult}") + poem_result = await kernel.invoke(function_name="ShortPoem", plugin_name="WriterPlugin", input=str(current_time)) + print(f"The poem result:\n\n{poem_result}") + # # Run the main function diff --git a/python/samples/learn_resources/your_first_prompt.py b/python/samples/learn_resources/your_first_prompt.py new file mode 100644 index 000000000000..48c6f622207b --- /dev/null +++ b/python/samples/learn_resources/your_first_prompt.py @@ -0,0 +1,253 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio + +from sk_service_configurator import add_service + +from semantic_kernel import Kernel +from semantic_kernel.connectors.ai import PromptExecutionSettings +from semantic_kernel.functions import KernelArguments +from semantic_kernel.prompt_template import InputVariable, PromptTemplateConfig + + +async def main(): + # + # Initialize the kernel + kernel = Kernel() + + # Add the service to the kernel + # use_chat: True to use chat completion, False to use text completion + kernel = add_service(kernel=kernel, use_chat=True) + # + print( + "This sample uses different prompts with the same request, they are related to Emails, " + "Tasks and Documents, make sure to include that in your request." + ) + request = input("Your request: ") + arguments = KernelArguments(request=request, settings=PromptExecutionSettings(max_tokens=100)) + # 0.0 Initial prompt + prompt = "What is the intent of this request? {{$request}}" + # + # + print("0.0 Initial prompt") + print("-------------------------") + result = await kernel.invoke_prompt( + function_name="sample_zero", plugin_name="sample_plugin", prompt=prompt, arguments=arguments + ) + print(result) + print("-------------------------") + # + + # 1.0 Make the prompt more specific + prompt = """What is the intent of this request? 
{{$request}} + You can choose between SendEmail, SendMessage, CompleteTask, CreateDocument.""" + # + print("1.0 Make the prompt more specific") + print("-------------------------") + result = await kernel.invoke_prompt( + function_name="sample_one", plugin_name="sample_plugin", prompt=prompt, arguments=arguments + ) + print(result) + print("-------------------------") + + # 2.0 Add structure to the output with formatting + prompt = """Instructions: What is the intent of this request? + Choices: SendEmail, SendMessage, CompleteTask, CreateDocument. + User Input: {{$request}} + Intent: """ + # + print("2.0 Add structure to the output with formatting") + print("-------------------------") + result = await kernel.invoke_prompt( + function_name="sample_two", plugin_name="sample_plugin", prompt=prompt, arguments=arguments + ) + print(result) + print("-------------------------") + + # 2.1 Add structure to the output with formatting (using Markdown and JSON) + prompt = """## Instructions + Provide the intent of the request using the following format: + ```json + { + "intent": {intent} + } + ``` + + ## Choices + You can choose between the following intents: + ```json + ["SendEmail", "SendMessage", "CompleteTask", "CreateDocument"] + ``` + + ## User Input + The user input is: + ```json + { + "request": "{{$request}}"\n' + } + ``` + + ## Intent""" + # + print("2.1 Add structure to the output with formatting (using Markdown and JSON)") + print("-------------------------") + result = await kernel.invoke_prompt( + function_name="sample_two_one", plugin_name="sample_plugin", prompt=prompt, arguments=arguments + ) + print(result) + print("-------------------------") + + # 3.0 Provide examples with few-shot prompting + prompt = """Instructions: What is the intent of this request? + Choices: SendEmail, SendMessage, CompleteTask, CreateDocument. + + User Input: Can you send a very quick approval to the marketing team? + Intent: SendMessage + + User Input: Can you send the full update to the marketing team? + Intent: SendEmail + + User Input: {{$request}} + Intent: """ + # + print("3.0 Provide examples with few-shot prompting") + print("-------------------------") + result = await kernel.invoke_prompt( + function_name="sample_three", plugin_name="sample_plugin", prompt=prompt, arguments=arguments + ) + print(result) + print("-------------------------") + + # 4.0 Tell the AI what to do to avoid doing something wrong + prompt = """Instructions: What is the intent of this request? + If you don't know the intent, don't guess; instead respond with "Unknown". + Choices: SendEmail, SendMessage, CompleteTask, CreateDocument, Unknown. + + User Input: Can you send a very quick approval to the marketing team? + Intent: SendMessage + + User Input: Can you send the full update to the marketing team? + Intent: SendEmail + + User Input: {{$request}} + Intent: """ + # + print("4.0 Tell the AI what to do to avoid doing something wrong") + print("-------------------------") + result = await kernel.invoke_prompt( + function_name="sample_four", plugin_name="sample_plugin", prompt=prompt, arguments=arguments + ) + print(result) + print("-------------------------") + + # 5.0 Provide context to the AI through a chat history of this user + history = ( + "User input: I hate sending emails, no one ever reads them.\n" + "AI response: I'm sorry to hear that. Messages may be a better way to communicate." 
+ ) + prompt = """Instructions: What is the intent of this request?\n" + If you don't know the intent, don't guess; instead respond with "Unknown". + Choices: SendEmail, SendMessage, CompleteTask, CreateDocument, Unknown. + + User Input: Can you send a very quick approval to the marketing team? + Intent: SendMessage + + User Input: Can you send the full update to the marketing team? + Intent: SendEmail + + {{$history}} + User Input: {{$request}} + Intent: """ + # + print("5.0 Provide context to the AI") + print("-------------------------") + arguments["history"] = history + result = await kernel.invoke_prompt( + function_name="sample_five", plugin_name="sample_plugin", prompt=prompt, arguments=arguments + ) + print(result) + print("-------------------------") + + # 6.0 Using message roles in chat completion prompts + history = """ + I hate sending emails, no one ever reads them. + I'm sorry to hear that. Messages may be a better way to communicate. + """ + + prompt = """ + Instructions: What is the intent of this request? + If you don't know the intent, don't guess; instead respond with "Unknown". + Choices: SendEmail, SendMessage, CompleteTask, CreateDocument, Unknown. + + Can you send a very quick approval to the marketing team? + Intent: + SendMessage + + Can you send the full update to the marketing team? + Intent: + SendEmail + + {{$history}} + {{$request}} + Intent: + """ + # + print("6.0 Using message roles in chat completion prompts") + print("-------------------------") + arguments["history"] = history + result = await kernel.invoke_prompt( + function_name="sample_six", + plugin_name="sample_plugin", + prompt=prompt, + arguments=arguments, + prompt_template_config=PromptTemplateConfig( + input_variables=[InputVariable(name="history", allow_dangerously_set_content=True)] + ), + ) + print(result) + print("-------------------------") + + # 7.0 Give your AI words of encouragement + history = """ + I hate sending emails, no one ever reads them. + I'm sorry to hear that. Messages may be a better way to communicate. + """ + + prompt = """ + Instructions: What is the intent of this request? + If you don't know the intent, don't guess; instead respond with "Unknown". + Choices: SendEmail, SendMessage, CompleteTask, CreateDocument, Unknown. + Bonus: You'll get $20 if you get this right. + + Can you send a very quick approval to the marketing team? + Intent: + SendMessage + + Can you send the full update to the marketing team? 
+ Intent: + SendEmail + + {{$history}} + {{$request}} + Intent: + """ + # + print("7.0 Give your AI words of encouragement") + print("-------------------------") + arguments["history"] = history + result = await kernel.invoke_prompt( + function_name="sample_seven", + plugin_name="sample_plugin", + prompt=prompt, + arguments=arguments, + prompt_template_config=PromptTemplateConfig( + input_variables=[InputVariable(name="history", allow_dangerously_set_content=True)] + ), + ) + print(result) + print("-------------------------") + + +# Run the main function +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/semantic_kernel/kernel.py b/python/semantic_kernel/kernel.py index 53c84a979f4d..713009acd593 100644 --- a/python/semantic_kernel/kernel.py +++ b/python/semantic_kernel/kernel.py @@ -58,7 +58,7 @@ def __init__( Args: plugins (KernelPlugin | dict[str, KernelPlugin] | list[KernelPlugin] | None): The plugins to be used by the kernel, will be rewritten to a dict with plugin name as key - services (AIServiceClientBase | list[AIServiceClientBase] | dict[str, AIServiceClientBase] | None: + services (AIServiceClientBase | list[AIServiceClientBase] | dict[str, AIServiceClientBase] | None): The services to be used by the kernel, will be rewritten to a dict with service_id as key ai_service_selector (AIServiceSelector | None): The AI service selector to be used by the kernel, default is based on order of execution settings. From 53683f06db184090f2674fd217cb8a8d9ef54c12 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 28 May 2024 16:19:38 +0100 Subject: [PATCH 2/3] add samples tests --- python/samples/learn_resources/ai_services.py | 3 +- .../learn_resources/configuring_prompts.py | 5 +- .../learn_resources/creating_functions.py | 3 +- .../functions_within_prompts.py | 3 +- python/samples/learn_resources/planner.py | 3 +- python/samples/learn_resources/plugin.py | 3 +- python/samples/learn_resources/templates.py | 3 +- .../learn_resources/your_first_prompt.py | 13 ++- python/tests/samples/test_learn_resources.py | 93 +++++++++++++++++++ 9 files changed, 111 insertions(+), 18 deletions(-) create mode 100644 python/tests/samples/test_learn_resources.py diff --git a/python/samples/learn_resources/ai_services.py b/python/samples/learn_resources/ai_services.py index 8640cea96e6c..9b040196efd6 100644 --- a/python/samples/learn_resources/ai_services.py +++ b/python/samples/learn_resources/ai_services.py @@ -3,8 +3,7 @@ import asyncio import os -from sk_service_configurator import add_service - +from samples.learn_resources.sk_service_configurator import add_service from semantic_kernel.kernel import Kernel diff --git a/python/samples/learn_resources/configuring_prompts.py b/python/samples/learn_resources/configuring_prompts.py index 46a21e40e821..304b1c37ae09 100644 --- a/python/samples/learn_resources/configuring_prompts.py +++ b/python/samples/learn_resources/configuring_prompts.py @@ -2,8 +2,7 @@ import asyncio -from sk_service_configurator import add_service - +from samples.learn_resources.sk_service_configurator import add_service from semantic_kernel.connectors.ai import PromptExecutionSettings from semantic_kernel.contents import ChatHistory from semantic_kernel.core_plugins import ConversationSummaryPlugin @@ -25,7 +24,7 @@ ) prompt_template_config = PromptTemplateConfig( template=ConversationSummaryPlugin._summarize_conversation_prompt_template, - description="Given a section of a conversation transcript, summarize the part of" " the conversation.", + description="Given a 
section of a conversation transcript, summarize the part of the conversation.", execution_settings=execution_settings, ) diff --git a/python/samples/learn_resources/creating_functions.py b/python/samples/learn_resources/creating_functions.py index 5c143b18e756..89dea567d94a 100644 --- a/python/samples/learn_resources/creating_functions.py +++ b/python/samples/learn_resources/creating_functions.py @@ -3,8 +3,7 @@ import asyncio import os -from sk_service_configurator import add_service - +from samples.learn_resources.sk_service_configurator import add_service from semantic_kernel import Kernel from semantic_kernel.connectors.ai.function_call_behavior import FunctionCallBehavior from semantic_kernel.connectors.ai.open_ai import OpenAIChatPromptExecutionSettings diff --git a/python/samples/learn_resources/functions_within_prompts.py b/python/samples/learn_resources/functions_within_prompts.py index 505f0bb8498a..6f813742ac8a 100644 --- a/python/samples/learn_resources/functions_within_prompts.py +++ b/python/samples/learn_resources/functions_within_prompts.py @@ -2,8 +2,7 @@ import asyncio -from sk_service_configurator import add_service - +from samples.learn_resources.sk_service_configurator import add_service from semantic_kernel import Kernel from semantic_kernel.connectors.ai import PromptExecutionSettings from semantic_kernel.contents import ChatHistory diff --git a/python/samples/learn_resources/planner.py b/python/samples/learn_resources/planner.py index aa7d077aee27..0c8f3916256c 100644 --- a/python/samples/learn_resources/planner.py +++ b/python/samples/learn_resources/planner.py @@ -2,8 +2,7 @@ import asyncio import os -from sk_service_configurator import add_service - +from samples.learn_resources.sk_service_configurator import add_service from semantic_kernel import Kernel from semantic_kernel.planners.sequential_planner import SequentialPlanner diff --git a/python/samples/learn_resources/plugin.py b/python/samples/learn_resources/plugin.py index ff384c4ec73a..1f146c8b40a0 100644 --- a/python/samples/learn_resources/plugin.py +++ b/python/samples/learn_resources/plugin.py @@ -3,8 +3,7 @@ import asyncio from typing import Annotated -from sk_service_configurator import add_service - +from samples.learn_resources.sk_service_configurator import add_service from semantic_kernel import Kernel from semantic_kernel.functions import kernel_function diff --git a/python/samples/learn_resources/templates.py b/python/samples/learn_resources/templates.py index 2466c1afbae5..d87b0e1a9f3b 100644 --- a/python/samples/learn_resources/templates.py +++ b/python/samples/learn_resources/templates.py @@ -3,8 +3,7 @@ import asyncio from functools import reduce -from sk_service_configurator import add_service - +from samples.learn_resources.sk_service_configurator import add_service from semantic_kernel import Kernel from semantic_kernel.contents import ChatHistory from semantic_kernel.contents.author_role import AuthorRole diff --git a/python/samples/learn_resources/your_first_prompt.py b/python/samples/learn_resources/your_first_prompt.py index 48c6f622207b..e1d4f42d2128 100644 --- a/python/samples/learn_resources/your_first_prompt.py +++ b/python/samples/learn_resources/your_first_prompt.py @@ -2,15 +2,14 @@ import asyncio -from sk_service_configurator import add_service - +from samples.learn_resources.sk_service_configurator import add_service from semantic_kernel import Kernel from semantic_kernel.connectors.ai import PromptExecutionSettings from semantic_kernel.functions import KernelArguments from 
semantic_kernel.prompt_template import InputVariable, PromptTemplateConfig -async def main(): +async def main(delay: int = 0): # # Initialize the kernel kernel = Kernel() @@ -35,6 +34,7 @@ async def main(): function_name="sample_zero", plugin_name="sample_plugin", prompt=prompt, arguments=arguments ) print(result) + await asyncio.sleep(delay) print("-------------------------") # @@ -48,6 +48,7 @@ async def main(): function_name="sample_one", plugin_name="sample_plugin", prompt=prompt, arguments=arguments ) print(result) + await asyncio.sleep(delay) print("-------------------------") # 2.0 Add structure to the output with formatting @@ -62,6 +63,7 @@ async def main(): function_name="sample_two", plugin_name="sample_plugin", prompt=prompt, arguments=arguments ) print(result) + await asyncio.sleep(delay) print("-------------------------") # 2.1 Add structure to the output with formatting (using Markdown and JSON) @@ -95,6 +97,7 @@ async def main(): function_name="sample_two_one", plugin_name="sample_plugin", prompt=prompt, arguments=arguments ) print(result) + await asyncio.sleep(delay) print("-------------------------") # 3.0 Provide examples with few-shot prompting @@ -116,6 +119,7 @@ async def main(): function_name="sample_three", plugin_name="sample_plugin", prompt=prompt, arguments=arguments ) print(result) + await asyncio.sleep(delay) print("-------------------------") # 4.0 Tell the AI what to do to avoid doing something wrong @@ -138,6 +142,7 @@ async def main(): function_name="sample_four", plugin_name="sample_plugin", prompt=prompt, arguments=arguments ) print(result) + await asyncio.sleep(delay) print("-------------------------") # 5.0 Provide context to the AI through a chat history of this user @@ -166,6 +171,7 @@ async def main(): function_name="sample_five", plugin_name="sample_plugin", prompt=prompt, arguments=arguments ) print(result) + await asyncio.sleep(delay) print("-------------------------") # 6.0 Using message roles in chat completion prompts @@ -205,6 +211,7 @@ async def main(): ), ) print(result) + await asyncio.sleep(delay) print("-------------------------") # 7.0 Give your AI words of encouragement diff --git a/python/tests/samples/test_learn_resources.py b/python/tests/samples/test_learn_resources.py new file mode 100644 index 000000000000..49096b8b8f28 --- /dev/null +++ b/python/tests/samples/test_learn_resources.py @@ -0,0 +1,93 @@ +# Copyright (c) Microsoft. All rights reserved. 
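+# Smoke tests that run each learn_resources sample end to end; interactive
+# samples are driven by monkeypatching builtins.input with scripted replies.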
+ + +# import dotenv +from pytest import mark + +# dotenv.load_dotenv() + + +@mark.asyncio +async def test_ai_service_sample(): + from samples.learn_resources.ai_services import main + + await main() + + +@mark.asyncio +async def test_configuring_prompts(monkeypatch): + from samples.learn_resources.configuring_prompts import main + + responses = ["Hello, who are you?", "exit"] + + monkeypatch.setattr("builtins.input", lambda _: responses.pop(0)) + await main() + + +@mark.asyncio +async def test_creating_functions(monkeypatch): + from samples.learn_resources.creating_functions import main + + responses = ["What is 3+3?", "exit"] + + monkeypatch.setattr("builtins.input", lambda _: responses.pop(0)) + await main() + + +@mark.asyncio +async def test_functions_within_prompts(monkeypatch): + from samples.learn_resources.functions_within_prompts import main + + responses = ["Hello, who are you?", "exit"] + + monkeypatch.setattr("builtins.input", lambda _: responses.pop(0)) + await main() + + +@mark.asyncio +async def test_planner(): + from samples.learn_resources.planner import main + + await main() + + +@mark.asyncio +async def test_plugin(): + from samples.learn_resources.plugin import main + + await main() + + +@mark.asyncio +async def test_serializing_prompts(monkeypatch): + from samples.learn_resources.serializing_prompts import main + + responses = ["Hello, who are you?", "exit"] + + monkeypatch.setattr("builtins.input", lambda _: responses.pop(0)) + await main() + + +@mark.asyncio +async def test_templates(monkeypatch): + from samples.learn_resources.templates import main + + responses = ["Hello, who are you?", "Thanks, see you next time!"] + + monkeypatch.setattr("builtins.input", lambda _: responses.pop(0)) + await main() + + +@mark.asyncio +async def test_using_the_kernel(): + from samples.learn_resources.using_the_kernel import main + + await main() + + +@mark.asyncio +async def test_your_first_prompt(monkeypatch): + from samples.learn_resources.your_first_prompt import main + + monkeypatch.setattr("builtins.input", lambda _: "I want to send an email to my manager!") + await main(delay=10) From 55cade04f9f9093c3f776dd2a71830f8392eb204 Mon Sep 17 00:00:00 2001 From: eavanvalkenburg Date: Tue, 28 May 2024 16:30:36 +0100 Subject: [PATCH 3/3] removed commented code --- python/tests/samples/test_learn_resources.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/python/tests/samples/test_learn_resources.py b/python/tests/samples/test_learn_resources.py index 49096b8b8f28..869d710c91cb 100644 --- a/python/tests/samples/test_learn_resources.py +++ b/python/tests/samples/test_learn_resources.py @@ -1,11 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. - -# import dotenv from pytest import mark -# dotenv.load_dotenv() - @mark.asyncio async def test_ai_service_sample():