Python: updated samples #6411

Merged · 5 commits · May 28, 2024
Changes from 2 commits
14 changes: 9 additions & 5 deletions python/samples/learn_resources/README.md
@@ -4,7 +4,11 @@ This project contains a collection of examples used in documentation on [learn.m
 
 ## Prerequisites
 
-- [Python](https://www.python.org/downloads/) 3.8 and above
+- [Python](https://www.python.org/downloads/) 3.10 and above
+- Install Semantic Kernel through PyPI:
+  ```bash
+  pip install semantic-kernel
+  ```
 
 ## Configuring the sample
 
@@ -19,13 +23,13 @@ Copy the `.env.example` file to a new file named `.env`. Then, copy those keys i
 ```
 GLOBAL_LLM_SERVICE="OpenAI" # Toggle between "OpenAI" or "AzureOpenAI"
 
-OPEN_AI_CHAT_COMPLETION_MODEL_ID="gpt-3.5-turbo-0125"
-OPEN_AI_TEXT_COMPLETION_MODEL_ID="gpt-3.5-turbo-instruct"
+OPEN_AI_CHAT_MODEL_ID="gpt-3.5-turbo-0125"
+OPEN_AI_TEXT_MODEL_ID="gpt-3.5-turbo-instruct"
 OPENAI_API_KEY=""
 OPENAI_ORG_ID=""
 
-AZURE_OPEN_AI_CHAT_COMPLETION_DEPLOYMENT_NAME="gpt-35-turbo"
-AZURE_OPEN_AI_TEXT_COMPLETION_DEPLOYMENT_NAME="gpt-35-turbo-instruct"
+AZURE_OPEN_AI_CHAT_DEPLOYMENT_NAME="gpt-35-turbo"
+AZURE_OPEN_AI_TEXT_DEPLOYMENT_NAME="gpt-35-turbo-instruct"
 AZURE_OPENAI_ENDPOINT=""
 AZURE_OPENAI_API_KEY=""
 AZURE_OPENAI_API_VERSION=""
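The samples resolve these variables through the repo's `sk_service_configurator.add_service` helper, but a minimal hand-rolled sketch of reading the same `.env` toggle looks like this (assumes the `python-dotenv` package; not part of this PR):

```python
# Minimal sketch, not part of this PR: resolve the GLOBAL_LLM_SERVICE toggle by hand.
import os

from dotenv import load_dotenv  # pip install python-dotenv

load_dotenv()  # reads .env from the current working directory

if os.getenv("GLOBAL_LLM_SERVICE", "OpenAI") == "AzureOpenAI":
    # Azure OpenAI path: deployment names rather than model ids
    deployment = os.environ["AZURE_OPEN_AI_CHAT_DEPLOYMENT_NAME"]
    endpoint = os.environ["AZURE_OPENAI_ENDPOINT"]
    print(f"Using Azure OpenAI deployment {deployment!r} at {endpoint}")
else:
    # OpenAI path: model ids from the renamed variables above
    model_id = os.environ["OPEN_AI_CHAT_MODEL_ID"]
    print(f"Using OpenAI model {model_id!r}")
```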
5 changes: 5 additions & 0 deletions python/samples/learn_resources/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from .sk_service_configurator import add_service
+
+__all__ = ["add_service"]
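With this re-export in place, callers can import the helper from the package itself rather than from the module file. A sketch, assuming `python/samples` is on `sys.path` so the package resolves:

```python
# Package-level import enabled by the new __init__.py
# (assumes python/samples is on sys.path so learn_resources resolves as a package).
from learn_resources import add_service
```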
13 changes: 7 additions & 6 deletions python/samples/learn_resources/ai_services.py
@@ -3,24 +3,25 @@
 import asyncio
 import os
 
-from service_configurator import add_service
+from sk_service_configurator import add_service
 
-import semantic_kernel as sk
+from semantic_kernel.kernel import Kernel
 
 
 async def main():
     # Initialize the kernel
-    kernel = sk.Kernel()
+    kernel = Kernel()
 
     # Add the service to the kernel
     # use_chat: True to use chat completion, False to use text completion
-    kernel = add_service(kernel=kernel, use_chat=True)
+    # use_azure: True to use Azure OpenAI, False to use OpenAI
+    kernel = add_service(kernel, use_chat=True)
 
     script_directory = os.path.dirname(__file__)
     plugins_directory = os.path.join(script_directory, "plugins")
-    writer_plugin = kernel.import_plugin_from_prompt_directory(
+    writer_plugin = kernel.add_plugin(
         parent_directory=plugins_directory,
-        plugin_directory_name="WriterPlugin",
+        plugin_name="WriterPlugin",
     )
 
     # Run the ShortPoem function with the Kernel Argument.
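The remainder of this sample is collapsed in the diff. An illustrative continuation of the final comment above, invoking the imported plugin with a kernel argument (the poem topic is made up; the collapsed code may differ):

```python
# Illustrative continuation only; the collapsed sample code may differ.
from semantic_kernel.functions import KernelArguments


async def run_short_poem(kernel, writer_plugin) -> None:
    # Invoke the prompt function loaded from plugins/WriterPlugin with a kernel argument
    result = await kernel.invoke(
        writer_plugin["ShortPoem"],
        KernelArguments(input="Write a poem about the ocean."),
    )
    print(result)
```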
135 changes: 68 additions & 67 deletions python/samples/learn_resources/configuring_prompts.py
@@ -1,87 +1,86 @@
 # Copyright (c) Microsoft. All rights reserved.
 
 import asyncio
 
-from service_configurator import add_service
+from sk_service_configurator import add_service
 
-import semantic_kernel as sk
-from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
-from semantic_kernel.contents.chat_history import ChatHistory
+from semantic_kernel.connectors.ai import PromptExecutionSettings
+from semantic_kernel.contents import ChatHistory
 from semantic_kernel.core_plugins import ConversationSummaryPlugin
-from semantic_kernel.prompt_template.input_variable import InputVariable
-from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig
-
-
-async def main():
-    # Initialize the kernel
-    kernel = sk.Kernel()
-
-    # Add the service to the kernel
-    # use_chat: True to use chat completion, False to use text completion
-    kernel = add_service(kernel=kernel, use_chat=True)
-
-    service_id = "default"
-
-    # The following execution settings are used for the ConversationSummaryPlugin
-    execution_settings = PromptExecutionSettings(
-        service_id=service_id, max_tokens=ConversationSummaryPlugin._max_tokens, temperature=0.1, top_p=0.5
-    )
-    prompt_template_config = PromptTemplateConfig(
-        template=ConversationSummaryPlugin._summarize_conversation_prompt_template,
-        description="Given a section of a conversation transcript, summarize the part of" " the conversation.",
-        execution_settings=execution_settings,
-    )
-
-    # Import the ConversationSummaryPlugin
-    kernel.add_plugin(
-        ConversationSummaryPlugin(kernel=kernel, prompt_template_config=prompt_template_config),
-        plugin_name="ConversationSummaryPlugin",
-    )
-
-    # Create the history
-    history = ChatHistory()
-
-    # Create the prompt with the ConversationSummaryPlugin
-    prompt = """{{ConversationSummaryPlugin.SummarizeConversation $history}}
-    User: {{$request}}
-    Assistant: """
-
-    # These execution settings are tied to the chat function, created below.
-    execution_settings = kernel.get_service(service_id).instantiate_prompt_execution_settings(service_id=service_id)
-    chat_prompt_template_config = PromptTemplateConfig(
-        template=prompt,
+from semantic_kernel.kernel import Kernel
+from semantic_kernel.prompt_template import InputVariable, PromptTemplateConfig
+
+# Initialize the kernel
+kernel = Kernel()
+
+# Add the service to the kernel
+# use_chat: True to use chat completion, False to use text completion
+kernel = add_service(kernel=kernel, use_chat=True)
+
+service_id = "default"
+
+# The following execution settings are used for the ConversationSummaryPlugin
+execution_settings = PromptExecutionSettings(
+    service_id=service_id, max_tokens=ConversationSummaryPlugin._max_tokens, temperature=0.1, top_p=0.5
+)
+prompt_template_config = PromptTemplateConfig(
+    template=ConversationSummaryPlugin._summarize_conversation_prompt_template,
+    description="Given a section of a conversation transcript, summarize the part of" " the conversation.",
+    execution_settings=execution_settings,
+)
+
+# Import the ConversationSummaryPlugin
+kernel.add_plugin(
+    ConversationSummaryPlugin(kernel=kernel, prompt_template_config=prompt_template_config),
+    plugin_name="ConversationSummaryPlugin",
+)
+
+
+# <FunctionFromPrompt>
+# Create the function with the prompt
+kernel.add_function(
+    prompt_template_config=PromptTemplateConfig(
+        template="""{{ConversationSummaryPlugin.SummarizeConversation $history}}
+        User: {{$request}}
+        Assistant: """,
         description="Chat with the assistant",
-        execution_settings=execution_settings,
+        execution_settings=[
+            PromptExecutionSettings(service_id="default", temperature=0.0, max_tokens=1000),
+            PromptExecutionSettings(service_id="gpt-3.5-turbo", temperature=0.2, max_tokens=4000),
+            PromptExecutionSettings(service_id="gpt-4", temperature=0.3, max_tokens=8000),
+        ],
         input_variables=[
             InputVariable(name="request", description="The user input", is_required=True),
-            InputVariable(name="history", description="The history of the conversation", is_required=True),
+            InputVariable(
+                name="history",
+                description="The history of the conversation",
+                is_required=True,
+                allow_dangerously_set_content=True,
+            ),
         ],
-    )
+    ),
+    plugin_name="Summarize_Conversation",
+    function_name="Chat",
+    description="Chat with the assistant",
+)
+# </FunctionFromPrompt>
 
+# Create the history
+history = ChatHistory()
 
-    # Create the function
-    chat_function = kernel.add_function(
-        prompt=prompt,
-        plugin_name="Summarize_Conversation",
-        function_name="Chat",
-        description="Chat with the assistant",
-        prompt_template_config=chat_prompt_template_config,
-    )
 
+async def main():
     while True:
         try:
             request = input("User:> ")
-        except KeyboardInterrupt:
-            print("\n\nExiting chat...")
-            return False
-        except EOFError:
-            print("\n\nExiting chat...")
-            return False
-
+        except (KeyboardInterrupt, EOFError):
+            break
         if request == "exit":
-            print("\n\nExiting chat...")
-            return False
+            break
 
         result = await kernel.invoke(
-            chat_function,
+            plugin_name="Summarize_Conversation",
+            function_name="Chat",
            request=request,
            history=history,
        )
@@ -92,6 +91,8 @@ async def main():
 
         print(f"Assistant:> {result}")
 
+    print("\n\nExiting chat...")
+
 
 # Run the main function
 if __name__ == "__main__":
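A detail worth noting in the rewritten sample: the chat function carries a list of `PromptExecutionSettings`, one per `service_id`, so a single prompt can hold tuned settings for whichever AI service the kernel ends up routing to. A standalone sketch of that shape (the per-service keyed lookup is a reading of the API, not something this diff shows):

```python
# Standalone sketch; the normalization of the list into per-service settings is an
# assumption about the API, not something shown in this diff.
from semantic_kernel.connectors.ai import PromptExecutionSettings
from semantic_kernel.prompt_template import PromptTemplateConfig

config = PromptTemplateConfig(
    template="{{$input}}",
    execution_settings=[
        PromptExecutionSettings(service_id="default", temperature=0.0, max_tokens=1000),
        PromptExecutionSettings(service_id="gpt-4", temperature=0.3, max_tokens=8000),
    ],
)
# Each settings entry should be retrievable by its service_id.
print(config.execution_settings)
```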
60 changes: 47 additions & 13 deletions python/samples/learn_resources/creating_functions.py
@@ -3,31 +3,65 @@
 import asyncio
 import os
 
-from service_configurator import add_service
+from sk_service_configurator import add_service
 
-import semantic_kernel as sk
+from semantic_kernel import Kernel
+from semantic_kernel.connectors.ai.function_call_behavior import FunctionCallBehavior
+from semantic_kernel.connectors.ai.open_ai import OpenAIChatPromptExecutionSettings
+from semantic_kernel.contents import ChatHistory
 
 
 async def main():
     # Initialize the kernel
-    kernel = sk.Kernel()
-
-    # Add the service to the kernel
-    # use_chat: True to use chat completion, False to use text completion
-    kernel = add_service(kernel=kernel, use_chat=True)
+    kernel = Kernel()
 
     # Import the MathPlugin.
-    script_directory = os.path.dirname(__file__)
-    plugins_directory = os.path.join(script_directory, "plugins")
-    math_plugin = kernel.import_native_plugin_from_directory(plugins_directory, "MathPlugin")
+    # <RunningNativeFunction>
+    plugins_directory = os.path.join(os.path.dirname(__file__), "plugins")
+    math_plugin = kernel.add_plugin(parent_directory=plugins_directory, plugin_name="MathPlugin")
 
     result = await kernel.invoke(
-        math_plugin["Add"],
-        number1=5,
-        number2=5,
+        math_plugin["Sqrt"],
+        number1=12,
     )
 
     print(result)
+    # </RunningNativeFunction>
+
+    # <Chat>
+    kernel = add_service(kernel, use_chat=True)
+    kernel.add_function(
+        prompt="""{{$chat_history}}{{$input}}""",
+        execution_settings=OpenAIChatPromptExecutionSettings(
+            service_id="default",
+            temperature=0.0,
+            max_tokens=1000,
+            function_call_behavior=FunctionCallBehavior.AutoInvokeKernelFunctions(),
+        ),
+        plugin_name="Chat",
+        function_name="Chat",
+        description="Chat with the assistant",
+    )
+    chat_history = ChatHistory()
+    while True:
+        try:
+            request = input("Your request: ")
+        except (KeyboardInterrupt, EOFError):
+            break
+        if request.lower() == "exit":
+            break
+        result = await kernel.invoke(
+            plugin_name="Chat",
+            function_name="Chat",
+            input=request,
+            chat_history=chat_history,
+        )
+        print(result)
+        chat_history.add_user_message(request)
+        chat_history.add_assistant_message(str(result))
+
+    print("\n\nExiting...")
+    # </Chat>
 
 
 # Run the main function
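For context on the native-function half of this sample: `math_plugin["Sqrt"]` resolves to a method defined under `plugins/MathPlugin`. A mock-up of what such a native plugin typically looks like in Semantic Kernel Python (illustrative; the real plugin in the repo may differ):

```python
# Illustrative mock-up of a native plugin such as MathPlugin; the real one
# lives under python/samples/learn_resources/plugins and may differ.
import math
from typing import Annotated

from semantic_kernel.functions import kernel_function


class MathPlugin:
    """Native math functions exposed to the kernel."""

    @kernel_function(name="Sqrt", description="Take the square root of a number")
    def square_root(
        self, number1: Annotated[float, "The number to take the square root of"]
    ) -> Annotated[float, "The square root"]:
        return math.sqrt(float(number1))
```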