diff --git a/python/samples/learn_resources/README.md b/python/samples/learn_resources/README.md
index 8c5df651fc76..f36b03bca2b3 100644
--- a/python/samples/learn_resources/README.md
+++ b/python/samples/learn_resources/README.md
@@ -4,7 +4,11 @@ This project contains a collection of examples used in documentation on [learn.m
## Prerequisites
-- [Python](https://www.python.org/downloads/) 3.8 and above
+- [Python](https://www.python.org/downloads/) 3.10 and above
+- Install Semantic Kernel through PyPI:
+ ```bash
+ pip install semantic-kernel
+ ```
## Configuring the sample
@@ -19,13 +23,13 @@ Copy the `.env.example` file to a new file named `.env`. Then, copy those keys i
```
GLOBAL_LLM_SERVICE="OpenAI" # Toggle between "OpenAI" or "AzureOpenAI"
-OPEN_AI_CHAT_COMPLETION_MODEL_ID="gpt-3.5-turbo-0125"
-OPEN_AI_TEXT_COMPLETION_MODEL_ID="gpt-3.5-turbo-instruct"
+OPEN_AI_CHAT_MODEL_ID="gpt-3.5-turbo-0125"
+OPEN_AI_TEXT_MODEL_ID="gpt-3.5-turbo-instruct"
OPENAI_API_KEY=""
OPENAI_ORG_ID=""
-AZURE_OPEN_AI_CHAT_COMPLETION_DEPLOYMENT_NAME="gpt-35-turbo"
-AZURE_OPEN_AI_TEXT_COMPLETION_DEPLOYMENT_NAME="gpt-35-turbo-instruct"
+AZURE_OPEN_AI_CHAT_DEPLOYMENT_NAME="gpt-35-turbo"
+AZURE_OPEN_AI_TEXT_DEPLOYMENT_NAME="gpt-35-turbo-instruct"
AZURE_OPENAI_ENDPOINT=""
AZURE_OPENAI_API_KEY=""
AZURE_OPENAI_API_VERSION=""
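
For orientation, here is a minimal sketch of how a sample resolves these settings at startup. It assumes only `python-dotenv` (installed as a dependency of the samples) and the variable names from the block above.

```python
# Minimal sketch: read .env and pick the configured model or deployment.
from dotenv import dotenv_values

config = dotenv_values(".env")
if config.get("GLOBAL_LLM_SERVICE") == "OpenAI":
    model_id = config.get("OPEN_AI_CHAT_MODEL_ID")  # e.g. "gpt-3.5-turbo-0125"
else:
    deployment = config.get("AZURE_OPEN_AI_CHAT_DEPLOYMENT_NAME")  # e.g. "gpt-35-turbo"
```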
diff --git a/python/samples/learn_resources/__init__.py b/python/samples/learn_resources/__init__.py
new file mode 100644
index 000000000000..754bc0fbdc11
--- /dev/null
+++ b/python/samples/learn_resources/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from .sk_service_configurator import add_service
+
+__all__ = ["add_service"]
diff --git a/python/samples/learn_resources/ai_services.py b/python/samples/learn_resources/ai_services.py
index 792becd79d9e..87c92374bbd2 100644
--- a/python/samples/learn_resources/ai_services.py
+++ b/python/samples/learn_resources/ai_services.py
@@ -3,18 +3,18 @@
import asyncio
import os
-from service_configurator import add_service
-
-import semantic_kernel as sk
+from samples.learn_resources.sk_service_configurator import add_service
+from semantic_kernel.kernel import Kernel
async def main():
# Initialize the kernel
- kernel = sk.Kernel()
+ kernel = Kernel()
# Add the service to the kernel
# use_chat: True to use chat completion, False to use text completion
- kernel = add_service(kernel=kernel, use_chat=True)
+ kernel = add_service(kernel, use_chat=True)
script_directory = os.path.dirname(__file__)
plugins_directory = os.path.join(script_directory, "plugins")
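
The hunk ends just as the sample builds its plugins path; the rest of the file (unchanged by this PR, so not shown) presumably loads plugins from that directory. A sketch of the pattern, assuming a `WriterPlugin` prompt plugin under `plugins/` as in `using_the_kernel.py`:

```python
import asyncio
import os

from samples.learn_resources.sk_service_configurator import add_service
from semantic_kernel.kernel import Kernel

async def run() -> None:
    kernel = add_service(Kernel(), use_chat=True)
    plugins_directory = os.path.join(os.path.dirname(__file__), "plugins")
    kernel.add_plugin(parent_directory=plugins_directory, plugin_name="WriterPlugin")
    result = await kernel.invoke(function_name="ShortPoem", plugin_name="WriterPlugin", input="today")
    print(result)

if __name__ == "__main__":
    asyncio.run(run())
```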
diff --git a/python/samples/learn_resources/configuring_prompts.py b/python/samples/learn_resources/configuring_prompts.py
index d0588be8053b..304b1c37ae09 100644
--- a/python/samples/learn_resources/configuring_prompts.py
+++ b/python/samples/learn_resources/configuring_prompts.py
@@ -1,87 +1,85 @@
# Copyright (c) Microsoft. All rights reserved.
-import asyncio
-from service_configurator import add_service
+import asyncio
-import semantic_kernel as sk
-from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
-from semantic_kernel.contents.chat_history import ChatHistory
+from samples.learn_resources.sk_service_configurator import add_service
+from semantic_kernel.connectors.ai import PromptExecutionSettings
+from semantic_kernel.contents import ChatHistory
from semantic_kernel.core_plugins import ConversationSummaryPlugin
-from semantic_kernel.prompt_template.input_variable import InputVariable
-from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig
-
-
-async def main():
- # Initialize the kernel
- kernel = sk.Kernel()
-
- # Add the service to the kernel
- # use_chat: True to use chat completion, False to use text completion
- kernel = add_service(kernel=kernel, use_chat=True)
-
- service_id = "default"
-
- # The following execution settings are used for the ConversationSummaryPlugin
- execution_settings = PromptExecutionSettings(
- service_id=service_id, max_tokens=ConversationSummaryPlugin._max_tokens, temperature=0.1, top_p=0.5
- )
- prompt_template_config = PromptTemplateConfig(
- template=ConversationSummaryPlugin._summarize_conversation_prompt_template,
- description="Given a section of a conversation transcript, summarize the part of" " the conversation.",
- execution_settings=execution_settings,
- )
-
- # Import the ConversationSummaryPlugin
- kernel.add_plugin(
- ConversationSummaryPlugin(kernel=kernel, prompt_template_config=prompt_template_config),
- plugin_name="ConversationSummaryPlugin",
- )
-
- # Create the history
- history = ChatHistory()
-
- # Create the prompt with the ConversationSummaryPlugin
- prompt = """{{ConversationSummaryPlugin.SummarizeConversation $history}}
- User: {{$request}}
- Assistant: """
-
- # These execution settings are tied to the chat function, created below.
- execution_settings = kernel.get_service(service_id).instantiate_prompt_execution_settings(service_id=service_id)
- chat_prompt_template_config = PromptTemplateConfig(
- template=prompt,
+from semantic_kernel.kernel import Kernel
+from semantic_kernel.prompt_template import InputVariable, PromptTemplateConfig
+
+# Initialize the kernel
+kernel = Kernel()
+
+# Add the service to the kernel
+# use_chat: True to use chat completion, False to use text completion
+kernel = add_service(kernel=kernel, use_chat=True)
+
+service_id = "default"
+
+# The following execution settings are used for the ConversationSummaryPlugin
+execution_settings = PromptExecutionSettings(
+ service_id=service_id, max_tokens=ConversationSummaryPlugin._max_tokens, temperature=0.1, top_p=0.5
+)
+prompt_template_config = PromptTemplateConfig(
+ template=ConversationSummaryPlugin._summarize_conversation_prompt_template,
+ description="Given a section of a conversation transcript, summarize the part of the conversation.",
+ execution_settings=execution_settings,
+)
+
+# Import the ConversationSummaryPlugin
+kernel.add_plugin(
+ ConversationSummaryPlugin(kernel=kernel, prompt_template_config=prompt_template_config),
+ plugin_name="ConversationSummaryPlugin",
+)
+
+
+#
+# Create the function with the prompt
+kernel.add_function(
+ prompt_template_config=PromptTemplateConfig(
+ template="""{{ConversationSummaryPlugin.SummarizeConversation $history}}
+User: {{$request}}
+Assistant: """,
description="Chat with the assistant",
- execution_settings=execution_settings,
+ execution_settings=[
+ PromptExecutionSettings(service_id="default", temperature=0.0, max_tokens=1000),
+ PromptExecutionSettings(service_id="gpt-3.5-turbo", temperature=0.2, max_tokens=4000),
+ PromptExecutionSettings(service_id="gpt-4", temperature=0.3, max_tokens=8000),
+ ],
input_variables=[
InputVariable(name="request", description="The user input", is_required=True),
- InputVariable(name="history", description="The history of the conversation", is_required=True),
+ InputVariable(
+ name="history",
+ description="The history of the conversation",
+ is_required=True,
+ allow_dangerously_set_content=True,
+ ),
],
- )
+ ),
+ plugin_name="Summarize_Conversation",
+ function_name="Chat",
+ description="Chat with the assistant",
+)
+#
+
+# Create the history
+history = ChatHistory()
- # Create the function
- chat_function = kernel.add_function(
- prompt=prompt,
- plugin_name="Summarize_Conversation",
- function_name="Chat",
- description="Chat with the assistant",
- prompt_template_config=chat_prompt_template_config,
- )
+async def main():
while True:
try:
request = input("User:> ")
- except KeyboardInterrupt:
- print("\n\nExiting chat...")
- return False
- except EOFError:
- print("\n\nExiting chat...")
- return False
-
+ except (KeyboardInterrupt, EOFError):
+ break
if request == "exit":
- print("\n\nExiting chat...")
- return False
+ break
result = await kernel.invoke(
- chat_function,
+ plugin_name="Summarize_Conversation",
+ function_name="Chat",
request=request,
history=history,
)
@@ -92,6 +90,8 @@ async def main():
print(f"Assistant:> {result}")
+ print("\n\nExiting chat...")
+
# Run the main function
if __name__ == "__main__":
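
The list of `PromptExecutionSettings` above is keyed by `service_id`, so one function can carry tuned settings for several models and the kernel selects the entry matching the registered service. A condensed sketch of the same pattern (the service ids are illustrative):

```python
from semantic_kernel import Kernel
from semantic_kernel.connectors.ai import PromptExecutionSettings
from semantic_kernel.prompt_template import PromptTemplateConfig

kernel = Kernel()
kernel.add_function(
    plugin_name="Demo",
    function_name="Answer",
    prompt_template_config=PromptTemplateConfig(
        template="{{$request}}",
        execution_settings=[
            # One entry per service; the kernel picks the one whose
            # service_id matches the service that runs the function.
            PromptExecutionSettings(service_id="default", temperature=0.0, max_tokens=1000),
            PromptExecutionSettings(service_id="gpt-4", temperature=0.3, max_tokens=8000),
        ],
    ),
)
```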
diff --git a/python/samples/learn_resources/creating_functions.py b/python/samples/learn_resources/creating_functions.py
index 696eafbbc207..89dea567d94a 100644
--- a/python/samples/learn_resources/creating_functions.py
+++ b/python/samples/learn_resources/creating_functions.py
@@ -3,31 +3,64 @@
import asyncio
import os
-from service_configurator import add_service
-
-import semantic_kernel as sk
+from samples.learn_resources.sk_service_configurator import add_service
+from semantic_kernel import Kernel
+from semantic_kernel.connectors.ai.function_call_behavior import FunctionCallBehavior
+from semantic_kernel.connectors.ai.open_ai import OpenAIChatPromptExecutionSettings
+from semantic_kernel.contents import ChatHistory
async def main():
# Initialize the kernel
- kernel = sk.Kernel()
-
- # Add the service to the kernel
- # use_chat: True to use chat completion, False to use text completion
- kernel = add_service(kernel=kernel, use_chat=True)
+ kernel = Kernel()
# Import the MathPlugin.
- script_directory = os.path.dirname(__file__)
- plugins_directory = os.path.join(script_directory, "plugins")
- math_plugin = kernel.import_native_plugin_from_directory(plugins_directory, "MathPlugin")
+ #
+ plugins_directory = os.path.join(os.path.dirname(__file__), "plugins")
+ math_plugin = kernel.add_plugin(parent_directory=plugins_directory, plugin_name="MathPlugin")
result = await kernel.invoke(
- math_plugin["Add"],
- number1=5,
- number2=5,
+ math_plugin["Sqrt"],
+ number1=12,
)
print(result)
+ #
+
+ #
+ kernel = add_service(kernel, use_chat=True)
+ kernel.add_function(
+ prompt="""{{$chat_history}}{{$input}}""",
+ execution_settings=OpenAIChatPromptExecutionSettings(
+ service_id="default",
+ temperature=0.0,
+ max_tokens=1000,
+ function_call_behavior=FunctionCallBehavior.AutoInvokeKernelFunctions(),
+ ),
+ plugin_name="Chat",
+ function_name="Chat",
+ description="Chat with the assistant",
+ )
+ chat_history = ChatHistory()
+ while True:
+ try:
+ request = input("Your request: ")
+ except (KeyboardInterrupt, EOFError):
+ break
+ if request.lower() == "exit":
+ break
+ result = await kernel.invoke(
+ plugin_name="Chat",
+ function_name="Chat",
+ input=request,
+ chat_history=chat_history,
+ )
+ print(result)
+ chat_history.add_user_message(request)
+ chat_history.add_assistant_message(str(result))
+
+ print("\n\nExiting...")
+ #
# Run the main function
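
With `FunctionCallBehavior.AutoInvokeKernelFunctions()`, the model can call any registered `@kernel_function` on its own during the chat loop above. A sketch of a plugin shaped for that (the plugin and its data are illustrative, not part of this PR):

```python
from typing import Annotated

from semantic_kernel.functions import kernel_function

class WeatherPlugin:
    """Illustrative plugin; auto function calling reads the decorator metadata."""

    @kernel_function(name="GetTemperature", description="Gets the temperature for a city")
    def get_temperature(
        self, city: Annotated[str, "The city to look up"]
    ) -> Annotated[str, "The temperature as text"]:
        return f"It is 22 degrees in {city}."  # stubbed data for the sketch
```

Registered with `kernel.add_plugin(WeatherPlugin(), plugin_name="weather")`, the model can then answer "What's the weather in Oslo?" by invoking the function itself.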
diff --git a/python/samples/learn_resources/functions_within_prompts.py b/python/samples/learn_resources/functions_within_prompts.py
index d467e89b915d..6f813742ac8a 100644
--- a/python/samples/learn_resources/functions_within_prompts.py
+++ b/python/samples/learn_resources/functions_within_prompts.py
@@ -2,32 +2,30 @@
import asyncio
-from service_configurator import add_service
-
-import semantic_kernel as sk
-from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
-from semantic_kernel.contents.chat_history import ChatHistory
+from samples.learn_resources.sk_service_configurator import add_service
+from semantic_kernel import Kernel
+from semantic_kernel.connectors.ai import PromptExecutionSettings
+from semantic_kernel.contents import ChatHistory
from semantic_kernel.core_plugins import ConversationSummaryPlugin
-from semantic_kernel.prompt_template.input_variable import InputVariable
-from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig
+from semantic_kernel.prompt_template import InputVariable, PromptTemplateConfig
async def main():
+ #
# Initialize the kernel
- kernel = sk.Kernel()
+ kernel = Kernel()
# Add the service to the kernel
# use_chat: True to use chat completion, False to use text completion
kernel = add_service(kernel=kernel, use_chat=True)
service_id = "default"
- execution_settings = PromptExecutionSettings(
- service_id=service_id, max_tokens=ConversationSummaryPlugin._max_tokens, temperature=0.1, top_p=0.5
- )
prompt_template_config = PromptTemplateConfig(
template=ConversationSummaryPlugin._summarize_conversation_prompt_template,
description="Given a section of a conversation transcript, summarize the part of" " the conversation.",
- execution_settings=execution_settings,
+ execution_settings=PromptExecutionSettings(
+ service_id=service_id, max_tokens=ConversationSummaryPlugin._max_tokens, temperature=0.1, top_p=0.5
+ ),
)
# Import the ConversationSummaryPlugin
@@ -35,48 +33,43 @@ async def main():
ConversationSummaryPlugin(kernel=kernel, prompt_template_config=prompt_template_config),
plugin_name="ConversationSummaryPlugin",
)
+ #
- # Create the history
- history = ChatHistory()
-
- # Create the prompt with the ConversationSummaryPlugin
- prompt = """{{ConversationSummaryPlugin.SummarizeConversation $history}}
- User: {{$request}}
- Assistant: """
-
- req_settings = kernel.get_service("default").get_prompt_execution_settings_class()(service_id=service_id)
- chat_prompt_template_config = PromptTemplateConfig(
- template=prompt,
- description="Chat with the assistant",
- execution_settings={service_id: req_settings},
- input_variables=[
- InputVariable(name="request", description="The user input", is_required=True),
- InputVariable(name="history", description="The history of the conversation", is_required=True),
- ],
- )
-
- # Run the prompt
+ #
chat_function = kernel.add_function(
- prompt=prompt,
plugin_name="Summarize_Conversation",
function_name="Chat",
description="Chat with the assistant",
- prompt_template_config=chat_prompt_template_config,
+ prompt_template_config=PromptTemplateConfig(
+ template="""{{ConversationSummaryPlugin.SummarizeConversation $history}}
+ User: {{$request}}
+ Assistant: """,
+ execution_settings=kernel.get_prompt_execution_settings_from_service_id(service_id=service_id),
+ description="Chat with the assistant",
+ input_variables=[
+ InputVariable(name="request", description="The user input", is_required=True),
+ InputVariable(
+ name="history",
+ description="The history of the conversation",
+ is_required=True,
+ allow_dangerously_set_content=True,
+ ),
+ ],
+ ),
)
+ #
+
+ #
+ # Create the history
+ history = ChatHistory()
while True:
try:
request = input("User:> ")
- except KeyboardInterrupt:
- print("\n\nExiting chat...")
- return False
- except EOFError:
- print("\n\nExiting chat...")
- return False
-
+ except (KeyboardInterrupt, EOFError):
+ break
if request == "exit":
- print("\n\nExiting chat...")
- return False
+ break
result = await kernel.invoke(
chat_function,
@@ -89,6 +82,8 @@ async def main():
history.add_assistant_message(str(result))
print(f"Assistant:> {result}")
+ print("\n\nExiting chat...")
+ #
# Run the main function
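
The `{{ConversationSummaryPlugin.SummarizeConversation $history}}` syntax runs a function while the template renders, before the prompt ever reaches the model. A stripped-down sketch of the same nesting with a trivial native function (the names are illustrative):

```python
from typing import Annotated

from semantic_kernel import Kernel
from semantic_kernel.functions import kernel_function

class ShoutPlugin:
    @kernel_function(name="Shout", description="Upper-cases the input")
    def shout(self, text: Annotated[str, "Text to upper-case"]) -> Annotated[str, "Upper-cased text"]:
        return text.upper()

kernel = Kernel()
kernel.add_plugin(ShoutPlugin(), plugin_name="ShoutPlugin")
# ShoutPlugin.Shout is evaluated during rendering, so the model
# receives the upper-cased request rather than the raw template.
kernel.add_function(
    plugin_name="Demo",
    function_name="Echo",
    prompt="{{ShoutPlugin.Shout $request}}",
)
```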
diff --git a/python/samples/learn_resources/planner.py b/python/samples/learn_resources/planner.py
index d1af71686395..0c8f3916256c 100644
--- a/python/samples/learn_resources/planner.py
+++ b/python/samples/learn_resources/planner.py
@@ -2,15 +2,15 @@
import asyncio
import os
-from service_configurator import add_service
-
-import semantic_kernel as sk
+from samples.learn_resources.sk_service_configurator import add_service
+from semantic_kernel import Kernel
from semantic_kernel.planners.sequential_planner import SequentialPlanner
async def main():
+ #
# Initialize the kernel
- kernel = sk.Kernel()
+ kernel = Kernel()
# Add the service to the kernel
# use_chat: True to use chat completion, False to use text completion
@@ -18,24 +18,23 @@ async def main():
script_directory = os.path.dirname(__file__)
plugins_directory = os.path.join(script_directory, "plugins")
- kernel.import_native_plugin_from_directory(plugins_directory, "MathPlugin")
-
- planner = SequentialPlanner(
- kernel=kernel,
- service_id="default",
- )
+ kernel.add_plugin(parent_directory=plugins_directory, plugin_name="MathPlugin")
+ planner = SequentialPlanner(kernel=kernel, service_id="default")
+ #
+ #
goal = "Figure out how much I have if first, my investment of 2130.23 dollars increased by 23%, and then I spend $5 on a coffee" # noqa: E501
# Create a plan
plan = await planner.create_plan(goal)
# Execute the plan
- result = await kernel.invoke(plan)
+ result = await plan.invoke(kernel)
print(f"The goal: {goal}")
print("Plan results:")
print(f"I will have: ${result} left over.")
+ #
# Run the main function
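
Note the inverted call direction: the plan now executes via `plan.invoke(kernel)` instead of `kernel.invoke(plan)`. The full shape, as a sketch:

```python
from semantic_kernel import Kernel
from semantic_kernel.planners.sequential_planner import SequentialPlanner

async def run_plan(kernel: Kernel, goal: str) -> None:
    planner = SequentialPlanner(kernel=kernel, service_id="default")
    plan = await planner.create_plan(goal)  # the LLM decomposes the goal into steps
    result = await plan.invoke(kernel)      # the plan drives the kernel, not the reverse
    print(result)
```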
diff --git a/python/samples/learn_resources/plugin.py b/python/samples/learn_resources/plugin.py
index 3e4c4cc00a04..1f146c8b40a0 100644
--- a/python/samples/learn_resources/plugin.py
+++ b/python/samples/learn_resources/plugin.py
@@ -3,10 +3,9 @@
import asyncio
from typing import Annotated
-from service_configurator import add_service
-
-import semantic_kernel as sk
-from semantic_kernel.functions.kernel_function_decorator import kernel_function
+from samples.learn_resources.sk_service_configurator import add_service
+from semantic_kernel import Kernel
+from semantic_kernel.functions import kernel_function
# Let's define a light plugin
@@ -40,7 +39,7 @@ def change_state(
async def main():
# Initialize the kernel
- kernel = sk.Kernel()
+ kernel = Kernel()
# Add the service to the kernel
# use_chat: True to use chat completion, False to use text completion
diff --git a/python/samples/learn_resources/plugins/MathPlugin/native_function.py b/python/samples/learn_resources/plugins/MathPlugin/Math.py
similarity index 91%
rename from python/samples/learn_resources/plugins/MathPlugin/native_function.py
rename to python/samples/learn_resources/plugins/MathPlugin/Math.py
index 104ae40c649e..f85fb224233a 100644
--- a/python/samples/learn_resources/plugins/MathPlugin/native_function.py
+++ b/python/samples/learn_resources/plugins/MathPlugin/Math.py
@@ -1,3 +1,5 @@
+# Copyright (c) Microsoft. All rights reserved.
+#
import math
from typing import Annotated
@@ -5,7 +7,9 @@
class Math:
- """Description: MathPlugin provides a set of functions to make Math calculations.
+ #
+ """
+ Description: MathPlugin provides a set of functions to perform math calculations.
Usage:
kernel.add_plugin(MathPlugin(), plugin_name="math")
@@ -39,6 +43,7 @@ def multiply(
) -> Annotated[float, "The output is a float"]:
return float(number1) * float(number2)
+ #
@kernel_function(
description="Takes the square root of a number",
name="Sqrt",
@@ -49,6 +54,8 @@ def square_root(
) -> Annotated[float, "The output is a float"]:
return math.sqrt(float(number1))
+ #
+
@kernel_function(name="Add")
def add(
self,
diff --git a/python/samples/learn_resources/prompts.py b/python/samples/learn_resources/prompts.py
deleted file mode 100644
index b227b4360c03..000000000000
--- a/python/samples/learn_resources/prompts.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# Copyright (c) Microsoft. All rights reserved.
-
-import asyncio
-
-from service_configurator import add_service
-
-import semantic_kernel as sk
-
-
-async def main():
- # Initialize the kernel
- kernel = sk.Kernel()
-
- # Add the service to the kernel
- # use_chat: True to use chat completion, False to use text completion
- kernel = add_service(kernel=kernel, use_chat=True)
-
- request = input("Your request: ")
-
- # 0.0 Initial prompt
- prompt = f"What is the intent of this request? {request}"
- print("0.0 Initial prompt")
- print("-------------------------")
- prompt_function = kernel.add_function(function_name="sample_zero", plugin_name="sample_plugin", prompt=prompt)
- result = await kernel.invoke(prompt_function, request=request)
- print(result)
- print("-------------------------")
-
- # 1.0 Make the prompt more specific
- prompt = f"""What is the intent of this request? {request}
- You can choose between SendEmail, SendMessage, CompleteTask, CreateDocument."""
- print("1.0 Make the prompt more specific")
- print("-------------------------")
- prompt_function = kernel.add_function(function_name="sample_one", plugin_name="sample_plugin", prompt=prompt)
- result = await kernel.invoke(prompt_function, request=request)
- print(result)
- print("-------------------------")
-
- # 2.0 Add structure to the output with formatting
- prompt = f"""Instructions: What is the intent of this request?
- Choices: SendEmail, SendMessage, CompleteTask, CreateDocument.
- User Input: {request}
- Intent: """
- print("2.0 Add structure to the output with formatting")
- print("-------------------------")
- prompt_function = kernel.add_function(function_name="sample_two", plugin_name="sample_plugin", prompt=prompt)
- result = await kernel.invoke(prompt_function, request=request)
- print(result)
- print("-------------------------")
-
- # 2.1 Add structure to the output with formatting (using Markdown and JSON)
- prompt = f"""## Instructions
- Provide the intent of the request using the following format:
- ```json
- {{
- "intent": {{intent}}
- }}
- ```
-
- ## Choices
- You can choose between the following intents:
- ```json
- ["SendEmail", "SendMessage", "CompleteTask", "CreateDocument"]
- ```
-
- ## User Input
- The user input is:
- ```json
- {{
- "request": "{request}"\n'
- }}
- ```
-
- ## Intent"""
- print("2.1 Add structure to the output with formatting (using Markdown and JSON)")
- print("-------------------------")
- prompt_function = kernel.add_function(function_name="sample_two_one", plugin_name="sample_plugin", prompt=prompt)
- result = await kernel.invoke(prompt_function, request=request)
- print(result)
- print("-------------------------")
-
- # 3.0 Provide examples with few-shot prompting
- prompt = f"""Instructions: What is the intent of this request?
- Choices: SendEmail, SendMessage, CompleteTask, CreateDocument.
-
- User Input: Can you send a very quick approval to the marketing team?
- Intent: SendMessage
-
- User Input: Can you send the full update to the marketing team?
- Intent: SendEmail
-
- User Input: {request}
- Intent: """
- print("3.0 Provide examples with few-shot prompting")
- print("-------------------------")
- prompt_function = kernel.add_function(function_name="sample_three", plugin_name="sample_plugin", prompt=prompt)
- result = await kernel.invoke(prompt_function, request=request)
- print(result)
- print("-------------------------")
-
- # 4.0 Tell the AI what to do to avoid doing something wrong
- prompt = f"""Instructions: What is the intent of this request?
- If you don't know the intent, don't guess; instead respond with "Unknown".
- Choices: SendEmail, SendMessage, CompleteTask, CreateDocument, Unknown.
-
- User Input: Can you send a very quick approval to the marketing team?
- Intent: SendMessage
-
- User Input: Can you send the full update to the marketing team?
- Intent: SendEmail
-
- User Input: {request}
- Intent: """
- print("4.0 Tell the AI what to do to avoid doing something wrong")
- print("-------------------------")
- prompt_function = kernel.add_function(function_name="sample_four", plugin_name="sample_plugin", prompt=prompt)
- result = await kernel.invoke(prompt_function, request=request)
- print(result)
- print("-------------------------")
-
- # 5.0 Provide context to the AI through a chat history of this user
- history = (
- "User input: I hate sending emails, no one ever reads them.\n"
- "AI response: I'm sorry to hear that. Messages may be a better way to communicate."
- )
- prompt = f"""Instructions: What is the intent of this request?\n"
- If you don't know the intent, don't guess; instead respond with "Unknown".
- Choices: SendEmail, SendMessage, CompleteTask, CreateDocument, Unknown.
-
- User Input: Can you send a very quick approval to the marketing team?
- Intent: SendMessage
-
- User Input: Can you send the full update to the marketing team?
- Intent: SendEmail
-
- {history}
- User Input: {request}
- Intent: """
- print("5.0 Provide context to the AI")
- print("-------------------------")
- prompt_function = kernel.add_function(function_name="sample_five", plugin_name="sample_plugin", prompt=prompt)
- result = await kernel.invoke(prompt_function, request=request, history=history)
- print(result)
- print("-------------------------")
-
-
-# Run the main function
-if __name__ == "__main__":
- asyncio.run(main())
diff --git a/python/samples/learn_resources/serializing_prompts.py b/python/samples/learn_resources/serializing_prompts.py
index 9ade73ac575c..8ca96e1a8f01 100644
--- a/python/samples/learn_resources/serializing_prompts.py
+++ b/python/samples/learn_resources/serializing_prompts.py
@@ -2,9 +2,8 @@
import asyncio
-from service_configurator import add_service
-
import semantic_kernel as sk
+from samples.learn_resources.sk_service_configurator import add_service
from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
from semantic_kernel.contents.chat_history import ChatHistory
from semantic_kernel.core_plugins import ConversationSummaryPlugin
diff --git a/python/samples/learn_resources/service_configurator.py b/python/samples/learn_resources/service_configurator.py
deleted file mode 100644
index 4f735a368a89..000000000000
--- a/python/samples/learn_resources/service_configurator.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright (c) Microsoft. All rights reserved.
-
-from dotenv import dotenv_values
-
-import semantic_kernel as sk
-from semantic_kernel.connectors.ai.open_ai import (
- AzureChatCompletion,
- AzureTextCompletion,
- OpenAIChatCompletion,
- OpenAITextCompletion,
-)
-from semantic_kernel.kernel import Kernel
-
-
-def add_service(kernel: Kernel, use_chat: bool = True) -> Kernel:
- """Configure the AI service for the kernel
-
- Args:
- kernel (Kernel): The kernel to configure
- use_chat (bool): Whether to use the chat completion model, or the text completion model
-
- Returns:
- Kernel: The configured kernel
- """
- config = dotenv_values(".env")
- llm_service = config.get("GLOBAL_LLM_SERVICE", None)
- assert llm_service, "The LLM_SERVICE environment variable is not set." # nosec
-
- # The service_id is used to identify the service in the kernel.
- # This can be updated to a custom value if needed.
- # It should match the execution setting's key in a config.json file.
- service_id = "default"
-
- # Configure AI service used by the kernel. Load settings from the .env file.
- if llm_service == "AzureOpenAI":
- _, api_key, endpoint = sk.azure_openai_settings_from_dot_env(include_deployment=False)
- deployment_name = (
- config.get("AZURE_OPEN_AI_CHAT_COMPLETION_DEPLOYMENT_NAME")
- if use_chat
- else config.get("AZURE_OPEN_AI_TEXT_COMPLETION_DEPLOYMENT_NAME")
- )
-
- if not deployment_name:
- raise ValueError("Deployment name for Azure AI is not set in .env file.")
-
- if use_chat:
- kernel.add_service(
- AzureChatCompletion(
- service_id=service_id,
- deployment_name=deployment_name,
- endpoint=endpoint,
- api_key=api_key,
- ),
- )
- else:
- kernel.add_service(
- AzureTextCompletion(
- service_id=service_id,
- deployment_name=deployment_name,
- endpoint=endpoint,
- api_key=api_key,
- ),
- )
- else:
- api_key, org_id = sk.openai_settings_from_dot_env()
- model_id = (
- config.get("OPEN_AI_CHAT_COMPLETION_MODEL_ID")
- if use_chat
- else config.get("OPEN_AI_TEXT_COMPLETION_MODEL_ID")
- )
-
- if not model_id:
- raise ValueError("Model ID for OpenAI is not set in .env file.")
-
- if use_chat:
- kernel.add_service(
- OpenAIChatCompletion(
- service_id=service_id,
- ai_model_id=model_id,
- api_key=api_key,
- org_id=org_id,
- ),
- )
- else:
- kernel.add_service(
- OpenAITextCompletion(
- service_id=service_id,
- ai_model_id=model_id,
- api_key=api_key,
- org_id=org_id,
- ),
- )
-
- return kernel
diff --git a/python/samples/learn_resources/sk_service_configurator.py b/python/samples/learn_resources/sk_service_configurator.py
new file mode 100644
index 000000000000..31c0c6862d73
--- /dev/null
+++ b/python/samples/learn_resources/sk_service_configurator.py
@@ -0,0 +1,55 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from dotenv import dotenv_values
+
+from semantic_kernel import Kernel
+from semantic_kernel.connectors.ai.open_ai import (
+ AzureChatCompletion,
+ AzureTextCompletion,
+ OpenAIChatCompletion,
+ OpenAITextCompletion,
+)
+
+
+def add_service(kernel: Kernel, use_chat: bool = True) -> Kernel:
+ """
+ Configure the AI service for the kernel
+
+ Args:
+ kernel (Kernel): The kernel to configure
+ use_chat (bool): Whether to use the chat completion model, or the text completion model
+
+ Returns:
+ Kernel: The configured kernel
+ """
+ config = dotenv_values(".env")
+ llm_service = config.get("GLOBAL_LLM_SERVICE", None)
+ if not llm_service:
+ print("GLOBAL_LLM_SERVICE not set, trying to use Azure OpenAI.")
+
+ # The service_id is used to identify the service in the kernel.
+ # This can be updated to a custom value if needed.
+ # It should match the execution setting's key in a config.json file.
+ service_id = "default"
+
+ # Configure AI service used by the kernel. Load settings from the .env file.
+ if llm_service == "OpenAI":
+ if use_chat:
+ #
+ kernel.add_service(OpenAIChatCompletion(service_id=service_id))
+ #
+ else:
+ #
+ kernel.add_service(OpenAITextCompletion(service_id=service_id))
+ #
+ else:
+ if use_chat:
+ #
+ kernel.add_service(AzureChatCompletion(service_id=service_id))
+ #
+ else:
+ #
+ kernel.add_service(AzureTextCompletion(service_id=service_id))
+ #
+
+ return kernel
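
Unlike the deleted `service_configurator.py`, the connectors here are constructed with only a `service_id`; keys, endpoints, and model ids are resolved from the environment (or a `.env` file) by each connector's own settings class. Typical usage, as a sketch:

```python
from samples.learn_resources.sk_service_configurator import add_service
from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion

# Via the helper: picks chat vs. text and OpenAI vs. Azure from .env.
kernel = add_service(Kernel(), use_chat=True)

# Or directly: credentials come from OPENAI_API_KEY etc. in the environment.
kernel2 = Kernel()
kernel2.add_service(OpenAIChatCompletion(service_id="default"))
```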
diff --git a/python/samples/learn_resources/templates.py b/python/samples/learn_resources/templates.py
index 0c17754e1ccd..d87b0e1a9f3b 100644
--- a/python/samples/learn_resources/templates.py
+++ b/python/samples/learn_resources/templates.py
@@ -1,80 +1,157 @@
# Copyright (c) Microsoft. All rights reserved.
import asyncio
-
-from service_configurator import add_service
-
-import semantic_kernel as sk
-from semantic_kernel.contents.chat_history import ChatHistory
-from semantic_kernel.prompt_template.input_variable import InputVariable
-from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig
-
-
-async def main():
- # Initialize the kernel
- kernel = sk.Kernel()
-
- # Add the service to the kernel
- # use_chat: True to use chat completion, False to use text completion
- kernel = add_service(kernel=kernel, use_chat=True)
-
- # Create the history
- history = ChatHistory()
-
- # An ideal prompt for this is {{$history}}{{$request}} as those
- # get cleanly parsed into a new chat_history object while invoking
- # the function. Another possibility is create the prompt as {{$history}}
- # and make sure to add the user message to the history before invoking.
- prompt = "{{$history}}"
-
- service_id = "default"
- req_settings = kernel.get_service("default").get_prompt_execution_settings_class()(service_id=service_id)
- chat_prompt_template_config = PromptTemplateConfig(
- template=prompt,
+from functools import reduce
+
+from samples.learn_resources.sk_service_configurator import add_service
+from semantic_kernel import Kernel
+from semantic_kernel.contents import ChatHistory
+from semantic_kernel.contents.author_role import AuthorRole
+from semantic_kernel.contents.chat_message_content import ChatMessageContent
+from semantic_kernel.prompt_template import InputVariable, PromptTemplateConfig
+
+# Initialize the kernel
+kernel = Kernel()
+
+# Add the service to the kernel
+# use_chat: True to use chat completion, False to use text completion
+kernel = add_service(kernel=kernel, use_chat=True)
+
+# An ideal prompt for this is {{$history}}{{$request}} as those
+# get cleanly parsed into a new chat_history object while invoking
+# the function. Another possibility is to create the prompt as {{$history}}
+# and make sure to add the user message to the history before invoking.
+chat_function = kernel.add_function(
+ plugin_name="Conversation",
+ function_name="Chat",
+ description="Chat with the assistant",
+ prompt_template_config=PromptTemplateConfig(
+ template="{{$history}}{{$request}}",
description="Chat with the assistant",
- execution_settings={service_id: req_settings},
input_variables=[
InputVariable(name="request", description="The user input", is_required=True),
- InputVariable(name="history", description="The history of the conversation", is_required=True),
+ InputVariable(
+ name="history",
+ description="The history of the conversation",
+ is_required=True,
+ allow_dangerously_set_content=True,
+ ),
],
- )
-
- # Run the prompt
- chat_function = kernel.add_function(
- prompt=prompt,
- plugin_name="Summarize_Conversation",
- function_name="Chat",
+ ),
+)
+
+choices = ["ContinueConversation", "EndConversation"]
+chat_function_intent = kernel.add_function(
+ plugin_name="Conversation",
+ function_name="getIntent",
+ description="Chat with the assistant",
+ template_format="handlebars",
+ prompt_template_config=PromptTemplateConfig(
+ template="""
+ Instructions: What is the intent of this request?
+ Do not explain the reasoning, just reply back with the intent. If you are unsure, reply with {{choices[0]}}.
+ Choices: {{choices}}.
+
+ {{#each few_shot_examples}}
+ {{#each this.messages}}
+ {{#message role=role}}
+ {{~content~}}
+ {{/message}}
+ {{/each}}
+ {{/each}}
+
+ {{#each chat_history.messages}}
+ {{#message role=role}}
+ {{~content~}}
+ {{/message}}
+ {{/each}}
+
+ {{request}}
+ Intent:
+ """,
description="Chat with the assistant",
- prompt_template_config=chat_prompt_template_config,
- )
+ template_format="handlebars",
+ input_variables=[
+ InputVariable(name="request", description="The user input", is_required=True),
+ InputVariable(
+ name="chat_history",
+ description="The history of the conversation",
+ is_required=True,
+ allow_dangerously_set_content=True,
+ ),
+ InputVariable(
+ name="choices",
+ description="The choices for the user to select from",
+ is_required=True,
+ allow_dangerously_set_content=True,
+ ),
+ InputVariable(
+ name="few_shot_examples",
+ description="The few shot examples to help the user",
+ is_required=True,
+ allow_dangerously_set_content=True,
+ ),
+ ],
+ ),
+)
+few_shot_examples = [
+ ChatHistory(
+ messages=[
+ ChatMessageContent(
+ role=AuthorRole.USER, content="Can you send a very quick approval to the marketing team?"
+ ),
+ ChatMessageContent(role=AuthorRole.SYSTEM, content="Intent:"),
+ ChatMessageContent(role=AuthorRole.ASSISTANT, content="ContinueConversation"),
+ ]
+ ),
+ ChatHistory(
+ messages=[
+ ChatMessageContent(role=AuthorRole.USER, content="Thanks, I'm done for now"),
+ ChatMessageContent(role=AuthorRole.SYSTEM, content="Intent:"),
+ ChatMessageContent(role=AuthorRole.ASSISTANT, content="EndConversation"),
+ ]
+ ),
+]
+
+
+async def main():
+ # Create the history
+ history = ChatHistory()
while True:
try:
request = input("User:> ")
- except KeyboardInterrupt:
- print("\n\nExiting chat...")
- return False
- except EOFError:
- print("\n\nExiting chat...")
- return False
-
- if request == "exit":
- print("\n\nExiting chat...")
- return False
-
- # Add the request to the history before we
- # invoke the function to include it in the prompt
- history.add_user_message(request)
+ except (KeyboardInterrupt, EOFError):
+ break
result = await kernel.invoke(
- chat_function,
+ plugin_name="Conversation",
+ function_name="getIntent",
+ request=request,
+ history=history,
+ choices=choices,
+ few_shot_examples=few_shot_examples,
+ )
+ if str(result) == "EndConversation":
+ break
+
+ result = kernel.invoke_stream(
+ plugin_name="Conversation",
+ function_name="Chat",
request=request,
history=history,
)
+ all_chunks = []
+ print("Assistant:> ", end="")
+ async for chunk in result:
+ all_chunks.append(chunk[0])
+ print(str(chunk[0]), end="")
+ print()
- history.add_assistant_message(str(result))
+ history.add_user_message(request)
+ history.add_assistant_message(str(reduce(lambda x, y: x + y, all_chunks)))
- print(f"Assistant:> {result}")
+ print("\n\nExiting chat...")
# Run the main function
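
The streaming path above yields lists of content chunks; the sample prints each as it arrives and then folds them into one message for the history. Isolated as a sketch:

```python
from functools import reduce

from semantic_kernel import Kernel
from semantic_kernel.contents import ChatHistory

async def stream_reply(kernel: Kernel, history: ChatHistory, request: str) -> str:
    """Print a streamed reply chunk by chunk, then return the stitched text."""
    all_chunks = []
    async for chunk in kernel.invoke_stream(
        plugin_name="Conversation",
        function_name="Chat",
        request=request,
        history=history,
    ):
        all_chunks.append(chunk[0])   # each yielded item is a list of streaming contents
        print(str(chunk[0]), end="")
    print()
    return str(reduce(lambda x, y: x + y, all_chunks))
```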
diff --git a/python/samples/learn_resources/using_the_kernel.py b/python/samples/learn_resources/using_the_kernel.py
index 27ad67dfcd69..5b9ece8fbb50 100644
--- a/python/samples/learn_resources/using_the_kernel.py
+++ b/python/samples/learn_resources/using_the_kernel.py
@@ -1,40 +1,44 @@
# Copyright (c) Microsoft. All rights reserved.
+#
import asyncio
import os
-from service_configurator import add_service
+from samples.learn_resources import add_service
+from semantic_kernel import Kernel
-import semantic_kernel as sk
-from semantic_kernel.core_plugins.time_plugin import TimePlugin
+#
async def main():
# Initialize the kernel
- kernel = sk.Kernel()
-
+ #
+ kernel = Kernel()
# Add the service to the kernel
# use_chat: True to use chat completion, False to use text completion
- kernel = add_service(kernel=kernel, use_chat=True)
+ kernel = add_service(kernel, use_chat=True)
+ #
+
+ #
+ # Import the TimePlugin and add it to the kernel
+ from semantic_kernel.core_plugins import TimePlugin
- # Import the TimePlugin
time = kernel.add_plugin(TimePlugin(), "TimePlugin")
+ # Invoke the Today function
+ current_time = await kernel.invoke(time["today"])
+ print(f"The current date is: {current_time}\n")
+ #
+
+ #
# Import the WriterPlugin from the plugins directory.
script_directory = os.path.dirname(__file__)
plugins_directory = os.path.join(script_directory, "plugins")
- writer_plugin = kernel.import_plugin_from_prompt_directory(
- parent_directory=plugins_directory,
- plugin_directory_name="WriterPlugin",
- )
-
- # Run the current time function
- currentTime = await kernel.invoke(time["today"])
- print(f"The current date is: {currentTime}\n")
-
+ kernel.add_plugin(parent_directory=plugins_directory, plugin_name="WriterPlugin")
# Run the short poem function with the Kernel Argument
- poemResult = await kernel.invoke(writer_plugin["ShortPoem"], input=str(currentTime))
- print(f"The poem result:\n\n{poemResult}")
+ poem_result = await kernel.invoke(function_name="ShortPoem", plugin_name="WriterPlugin", input=str(current_time))
+ print(f"The poem result:\n\n{poem_result}")
+ #
# Run the main function
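
For reference, the `TimePlugin` half of this sample stands alone, since native functions need no AI service; a runnable sketch:

```python
import asyncio

from semantic_kernel import Kernel
from semantic_kernel.core_plugins import TimePlugin

async def show_date() -> None:
    kernel = Kernel()
    time = kernel.add_plugin(TimePlugin(), "TimePlugin")
    # Native functions run locally; no chat or text service is required.
    print(await kernel.invoke(time["today"]))

if __name__ == "__main__":
    asyncio.run(show_date())
```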
diff --git a/python/samples/learn_resources/your_first_prompt.py b/python/samples/learn_resources/your_first_prompt.py
new file mode 100644
index 000000000000..e1d4f42d2128
--- /dev/null
+++ b/python/samples/learn_resources/your_first_prompt.py
@@ -0,0 +1,260 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+import asyncio
+
+from samples.learn_resources.sk_service_configurator import add_service
+from semantic_kernel import Kernel
+from semantic_kernel.connectors.ai import PromptExecutionSettings
+from semantic_kernel.functions import KernelArguments
+from semantic_kernel.prompt_template import InputVariable, PromptTemplateConfig
+
+
+async def main(delay: int = 0):
+ #
+ # Initialize the kernel
+ kernel = Kernel()
+
+ # Add the service to the kernel
+ # use_chat: True to use chat completion, False to use text completion
+ kernel = add_service(kernel=kernel, use_chat=True)
+ #
+ print(
+ "This sample uses different prompts with the same request, they are related to Emails, "
+ "Tasks and Documents, make sure to include that in your request."
+ )
+ request = input("Your request: ")
+ arguments = KernelArguments(request=request, settings=PromptExecutionSettings(max_tokens=100))
+ # 0.0 Initial prompt
+ prompt = "What is the intent of this request? {{$request}}"
+ #
+ #
+ print("0.0 Initial prompt")
+ print("-------------------------")
+ result = await kernel.invoke_prompt(
+ function_name="sample_zero", plugin_name="sample_plugin", prompt=prompt, arguments=arguments
+ )
+ print(result)
+ await asyncio.sleep(delay)
+ print("-------------------------")
+ #
+
+ # 1.0 Make the prompt more specific
+ prompt = """What is the intent of this request? {{$request}}
+ You can choose between SendEmail, SendMessage, CompleteTask, CreateDocument."""
+ #
+ print("1.0 Make the prompt more specific")
+ print("-------------------------")
+ result = await kernel.invoke_prompt(
+ function_name="sample_one", plugin_name="sample_plugin", prompt=prompt, arguments=arguments
+ )
+ print(result)
+ await asyncio.sleep(delay)
+ print("-------------------------")
+
+ # 2.0 Add structure to the output with formatting
+ prompt = """Instructions: What is the intent of this request?
+ Choices: SendEmail, SendMessage, CompleteTask, CreateDocument.
+ User Input: {{$request}}
+ Intent: """
+ #
+ print("2.0 Add structure to the output with formatting")
+ print("-------------------------")
+ result = await kernel.invoke_prompt(
+ function_name="sample_two", plugin_name="sample_plugin", prompt=prompt, arguments=arguments
+ )
+ print(result)
+ await asyncio.sleep(delay)
+ print("-------------------------")
+
+ # 2.1 Add structure to the output with formatting (using Markdown and JSON)
+ prompt = """## Instructions
+ Provide the intent of the request using the following format:
+ ```json
+ {
+ "intent": {intent}
+ }
+ ```
+
+ ## Choices
+ You can choose between the following intents:
+ ```json
+ ["SendEmail", "SendMessage", "CompleteTask", "CreateDocument"]
+ ```
+
+ ## User Input
+ The user input is:
+ ```json
+ {
+ "request": "{{$request}}"\n'
+ }
+ ```
+
+ ## Intent"""
+ #
+ print("2.1 Add structure to the output with formatting (using Markdown and JSON)")
+ print("-------------------------")
+ result = await kernel.invoke_prompt(
+ function_name="sample_two_one", plugin_name="sample_plugin", prompt=prompt, arguments=arguments
+ )
+ print(result)
+ await asyncio.sleep(delay)
+ print("-------------------------")
+
+ # 3.0 Provide examples with few-shot prompting
+ prompt = """Instructions: What is the intent of this request?
+ Choices: SendEmail, SendMessage, CompleteTask, CreateDocument.
+
+ User Input: Can you send a very quick approval to the marketing team?
+ Intent: SendMessage
+
+ User Input: Can you send the full update to the marketing team?
+ Intent: SendEmail
+
+ User Input: {{$request}}
+ Intent: """
+ #
+ print("3.0 Provide examples with few-shot prompting")
+ print("-------------------------")
+ result = await kernel.invoke_prompt(
+ function_name="sample_three", plugin_name="sample_plugin", prompt=prompt, arguments=arguments
+ )
+ print(result)
+ await asyncio.sleep(delay)
+ print("-------------------------")
+
+ # 4.0 Tell the AI what to do to avoid doing something wrong
+ prompt = """Instructions: What is the intent of this request?
+ If you don't know the intent, don't guess; instead respond with "Unknown".
+ Choices: SendEmail, SendMessage, CompleteTask, CreateDocument, Unknown.
+
+ User Input: Can you send a very quick approval to the marketing team?
+ Intent: SendMessage
+
+ User Input: Can you send the full update to the marketing team?
+ Intent: SendEmail
+
+ User Input: {{$request}}
+ Intent: """
+ #
+ print("4.0 Tell the AI what to do to avoid doing something wrong")
+ print("-------------------------")
+ result = await kernel.invoke_prompt(
+ function_name="sample_four", plugin_name="sample_plugin", prompt=prompt, arguments=arguments
+ )
+ print(result)
+ await asyncio.sleep(delay)
+ print("-------------------------")
+
+ # 5.0 Provide context to the AI through a chat history of this user
+ history = (
+ "User input: I hate sending emails, no one ever reads them.\n"
+ "AI response: I'm sorry to hear that. Messages may be a better way to communicate."
+ )
+ prompt = """Instructions: What is the intent of this request?\n"
+ If you don't know the intent, don't guess; instead respond with "Unknown".
+ Choices: SendEmail, SendMessage, CompleteTask, CreateDocument, Unknown.
+
+ User Input: Can you send a very quick approval to the marketing team?
+ Intent: SendMessage
+
+ User Input: Can you send the full update to the marketing team?
+ Intent: SendEmail
+
+ {{$history}}
+ User Input: {{$request}}
+ Intent: """
+ #
+ print("5.0 Provide context to the AI")
+ print("-------------------------")
+ arguments["history"] = history
+ result = await kernel.invoke_prompt(
+ function_name="sample_five", plugin_name="sample_plugin", prompt=prompt, arguments=arguments
+ )
+ print(result)
+ await asyncio.sleep(delay)
+ print("-------------------------")
+
+ # 6.0 Using message roles in chat completion prompts
+ history = """
+ I hate sending emails, no one ever reads them.
+ I'm sorry to hear that. Messages may be a better way to communicate.
+ """
+
+ prompt = """
+ Instructions: What is the intent of this request?
+ If you don't know the intent, don't guess; instead respond with "Unknown".
+ Choices: SendEmail, SendMessage, CompleteTask, CreateDocument, Unknown.
+
+ Can you send a very quick approval to the marketing team?
+ Intent:
+ SendMessage
+
+ Can you send the full update to the marketing team?
+ Intent:
+ SendEmail
+
+ {{$history}}
+ {{$request}}
+ Intent:
+ """
+ #
+ print("6.0 Using message roles in chat completion prompts")
+ print("-------------------------")
+ arguments["history"] = history
+ result = await kernel.invoke_prompt(
+ function_name="sample_six",
+ plugin_name="sample_plugin",
+ prompt=prompt,
+ arguments=arguments,
+ prompt_template_config=PromptTemplateConfig(
+ input_variables=[InputVariable(name="history", allow_dangerously_set_content=True)]
+ ),
+ )
+ print(result)
+ await asyncio.sleep(delay)
+ print("-------------------------")
+
+ # 7.0 Give your AI words of encouragement
+ history = """
+ I hate sending emails, no one ever reads them.
+ I'm sorry to hear that. Messages may be a better way to communicate.
+ """
+
+ prompt = """
+ Instructions: What is the intent of this request?
+ If you don't know the intent, don't guess; instead respond with "Unknown".
+ Choices: SendEmail, SendMessage, CompleteTask, CreateDocument, Unknown.
+ Bonus: You'll get $20 if you get this right.
+
+ Can you send a very quick approval to the marketing team?
+ Intent:
+ SendMessage
+
+ Can you send the full update to the marketing team?
+ Intent:
+ SendEmail
+
+ {{$history}}
+ {{$request}}
+ Intent:
+ """
+ #
+ print("7.0 Give your AI words of encouragement")
+ print("-------------------------")
+ arguments["history"] = history
+ result = await kernel.invoke_prompt(
+ function_name="sample_seven",
+ plugin_name="sample_plugin",
+ prompt=prompt,
+ arguments=arguments,
+ prompt_template_config=PromptTemplateConfig(
+ input_variables=[InputVariable(name="history", allow_dangerously_set_content=True)]
+ ),
+ )
+ print(result)
+ print("-------------------------")
+
+
+# Run the main function
+if __name__ == "__main__":
+ asyncio.run(main())
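
One detail worth calling out: a single `KernelArguments` object is threaded through every `invoke_prompt` call above. It carries both the template variables and the execution settings, and supports item assignment for variables added later (as `arguments["history"] = history` shows). In isolation:

```python
from semantic_kernel.connectors.ai import PromptExecutionSettings
from semantic_kernel.functions import KernelArguments

arguments = KernelArguments(
    request="Send the summary to the team",            # template variable {{$request}}
    settings=PromptExecutionSettings(max_tokens=100),  # applies to every call using these arguments
)
arguments["history"] = "User: hi\nAssistant: hello"    # variables can be added after construction
```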
diff --git a/python/tests/samples/test_learn_resources.py b/python/tests/samples/test_learn_resources.py
new file mode 100644
index 000000000000..869d710c91cb
--- /dev/null
+++ b/python/tests/samples/test_learn_resources.py
@@ -0,0 +1,89 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from pytest import mark
+
+
+@mark.asyncio
+async def test_ai_service_sample():
+ from samples.learn_resources.ai_services import main
+
+ await main()
+
+
+@mark.asyncio
+async def test_configuring_prompts(monkeypatch):
+ from samples.learn_resources.configuring_prompts import main
+
+ responses = ["Hello, who are you?", "exit"]
+
+ monkeypatch.setattr("builtins.input", lambda _: responses.pop(0))
+ await main()
+
+
+@mark.asyncio
+async def test_creating_functions(monkeypatch):
+ from samples.learn_resources.creating_functions import main
+
+ responses = ["What is 3+3?", "exit"]
+
+ monkeypatch.setattr("builtins.input", lambda _: responses.pop(0))
+ await main()
+
+
+@mark.asyncio
+async def test_functions_within_prompts(monkeypatch):
+ from samples.learn_resources.functions_within_prompts import main
+
+ responses = ["Hello, who are you?", "exit"]
+
+ monkeypatch.setattr("builtins.input", lambda _: responses.pop(0))
+ await main()
+
+
+@mark.asyncio
+async def test_planner():
+ from samples.learn_resources.planner import main
+
+ await main()
+
+
+@mark.asyncio
+async def test_plugin():
+ from samples.learn_resources.plugin import main
+
+ await main()
+
+
+@mark.asyncio
+async def test_serializing_prompts(monkeypatch):
+ from samples.learn_resources.serializing_prompts import main
+
+ responses = ["Hello, who are you?", "exit"]
+
+ monkeypatch.setattr("builtins.input", lambda _: responses.pop(0))
+ await main()
+
+
+@mark.asyncio
+async def test_templates(monkeypatch):
+ from samples.learn_resources.templates import main
+
+ responses = ["Hello, who are you?", "Thanks, see you next time!"]
+
+ monkeypatch.setattr("builtins.input", lambda _: responses.pop(0))
+ await main()
+
+
+@mark.asyncio
+async def test_using_the_kernel():
+ from samples.learn_resources.using_the_kernel import main
+
+ await main()
+
+
+@mark.asyncio
+async def test_your_first_prompt(monkeypatch):
+ from samples.learn_resources.your_first_prompt import main
+
+ monkeypatch.setattr("builtins.input", lambda _: "I want to send an email to my manager!")
+ await main(delay=10)
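
The `monkeypatch` pattern used throughout deserves a note: replacing `builtins.input` with a closure over a scripted list lets the interactive chat loops run unattended. Reduced to its essence:

```python
# Sketch: scripted stand-in for input(), as used by the tests above.
def test_scripted_input(monkeypatch):
    responses = ["Hello, who are you?", "exit"]
    monkeypatch.setattr("builtins.input", lambda _: responses.pop(0))

    assert input("User:> ") == "Hello, who are you?"
    assert input("User:> ") == "exit"  # the sample's loop breaks on this
```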