
Python: Fix typos. #6381


Merged: 2 commits, May 24, 2024
@@ -82,7 +82,7 @@ def configure(
EnabledFunctions (filtered set of functions from the Kernel)
RequiredFunction (a single function)
- By default the update_settings_callback is called with FunctionCallConfiguration,
+ By default, the update_settings_callback is called with FunctionCallConfiguration,
which contains a list of available functions or a list of required functions, it also
takes the PromptExecutionSettings object.
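As a rough sketch of the callback contract this hunk describes: the update_settings_callback receives the function-call configuration plus the execution settings and mutates the latter. The class and attribute names below are illustrative stand-ins, not semantic-kernel's actual API.

```python
from dataclasses import dataclass, field
from typing import Any

@dataclass
class FunctionCallConfiguration:  # stand-in for the real configuration class
    available_functions: list[Any] = field(default_factory=list)
    required_functions: list[Any] = field(default_factory=list)

def update_settings_callback(config: FunctionCallConfiguration, settings: Any) -> None:
    """Translate the function lists into tool definitions on the settings object."""
    functions = config.required_functions or config.available_functions
    settings.tools = list(functions)  # hypothetical field; the real name varies by service
```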
@@ -78,7 +78,7 @@ async def get_streaming_chat_message_contents(
**kwargs: Any,
) -> AsyncGenerator[list[StreamingChatMessageContent], Any]:
"""
- Streams a text completion using a Ollama model.
+ Streams a text completion using an Ollama model.
Note that this method does not support multiple responses.

Arguments:
@@ -150,7 +150,7 @@ async def get_streaming_text_contents(
settings: OllamaChatPromptExecutionSettings,
) -> AsyncGenerator[list[StreamingTextContent], Any]:
"""
- Streams a text completion using a Ollama model.
+ Streams a text completion using an Ollama model.
Note that this method does not support multiple responses.

Arguments:
@@ -63,7 +63,7 @@ async def get_streaming_text_contents(
settings: OllamaTextPromptExecutionSettings,
) -> AsyncGenerator[list[StreamingTextContent], Any]:
"""
- Streams a text completion using a Ollama model.
+ Streams a text completion using an Ollama model.
Note that this method does not support multiple responses,
but the result will be a list anyway.

@@ -127,7 +127,7 @@ def from_dict(cls, settings: dict[str, str]) -> "AzureChatCompletion":

Arguments:
settings: A dictionary of settings for the service.
- should contains keys: service_id, and optionally:
+ should contain keys: service_id, and optionally:
ad_auth, ad_token_provider, default_headers
"""

@@ -151,7 +151,7 @@ def get_prompt_execution_settings_class(self) -> "PromptExecutionSettings":
def _create_chat_message_content(
self, response: ChatCompletion, choice: Choice, response_metadata: dict[str, Any]
) -> ChatMessageContent:
"""Create a Azure chat message content object from a choice."""
"""Create an Azure chat message content object from a choice."""
content = super()._create_chat_message_content(response, choice, response_metadata)
return self._add_tool_message_to_chat_message_content(content, choice)

@@ -161,7 +161,7 @@ def _create_streaming_chat_message_content(
choice: ChunkChoice,
chunk_metadata: dict[str, Any],
) -> "StreamingChatMessageContent":
"""Create a Azure streaming chat message content object from a choice."""
"""Create an Azure streaming chat message content object from a choice."""
content = super()._create_streaming_chat_message_content(chunk, choice, chunk_metadata)
return self._add_tool_message_to_chat_message_content(content, choice)

@@ -200,7 +200,7 @@ def _get_tool_message_from_chat_choice(self, choice: Choice | ChunkChoice) -> str:

@staticmethod
def split_message(message: "ChatMessageContent") -> list["ChatMessageContent"]:
"""Split a Azure On Your Data response into separate ChatMessageContents.
"""Split an Azure On Your Data response into separate ChatMessageContents.

If the message does not have three contents, and those three are one each of:
FunctionCallContent, FunctionResultContent, and TextContent,
@@ -118,7 +118,7 @@ def from_dict(cls, settings: dict[str, str]) -> "AzureTextCompletion":

Arguments:
settings: A dictionary of settings for the service.
- should contains keys: deployment_name, endpoint, api_key
+ should contain keys: deployment_name, endpoint, api_key
and optionally: api_version, ad_auth
"""

@@ -121,7 +121,7 @@ def from_dict(cls, settings: dict[str, str]) -> "AzureTextEmbedding":
Arguments:
settings: A dictionary of settings for the service.
- should contains keys: deployment_name, endpoint, api_key
+ should contain keys: deployment_name, endpoint, api_key
and optionally: api_version, ad_auth
"""
return AzureTextEmbedding(
@@ -12,7 +12,7 @@ class PromptExecutionSettings(KernelBaseModel):

Can be used by itself or as a base class for other prompt execution settings. The methods are used to create
specific prompt execution settings objects based on the keys in the extension_data field, this way you can
- create a generic PromptExecutionSettings object in your application, which get's mapped into the keys of the
+ create a generic PromptExecutionSettings object in your application, which gets mapped into the keys of the
prompt execution settings that each services returns by using the service.get_prompt_execution_settings() method.

Parameters:
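The extension_data mapping this hunk documents looks roughly like the sketch below; the from_prompt_execution_settings helper and the OpenAI settings subclass are assumptions about the surrounding API, so treat the exact names as illustrative.

```python
from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
from semantic_kernel.connectors.ai.open_ai import OpenAIChatPromptExecutionSettings

# Generic settings created in application code; service-agnostic keys live in extension_data.
generic = PromptExecutionSettings(
    service_id="chat",
    extension_data={"temperature": 0.7, "max_tokens": 256},
)

# Each service's settings class lifts the keys it recognizes into typed fields.
specific = OpenAIChatPromptExecutionSettings.from_prompt_execution_settings(generic)
```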
@@ -134,7 +134,7 @@ async def create_collection(
name=vector_search_algorithm_name,
kind="hnsw",
parameters=HnswParameters(
- m=4, # Number of bi-directional links, typically between 4 and 10
+ m=4, # Number of bidirectional links, typically between 4 and 10
ef_construction=400, # Size during indexing, range: 100-1000
ef_search=500, # Size during search, range: 100-1000
metric="cosine", # Can be "cosine", "dotProduct", or "euclidean"
@@ -75,7 +75,7 @@ async def get_collections(
return [collection.name for collection in collection_info.collections]

async def get_collection(self, collection_name: str) -> qdrant_models.CollectionInfo:
"""Gets the a collections based upon collection name.
"""Gets the collection based upon collection name.

Returns:
CollectionInfo -- Collection Information from Qdrant about collection.
python/semantic_kernel/contents/chat_message_content.py (5 additions & 5 deletions)

@@ -37,7 +37,7 @@
class ChatMessageContent(KernelContent):
"""This is the class for chat message response content.
- All Chat Completion Services should return a instance of this class as response.
+ All Chat Completion Services should return an instance of this class as response.
Or they can implement their own subclass of this class and return an instance.
Args:
@@ -73,7 +73,7 @@ def __init__(
metadata: dict[str, Any] | None = None,
**kwargs: Any,
) -> None:
"""All Chat Completion Services should return a instance of this class as response.
"""All Chat Completion Services should return an instance of this class as response.
Or they can implement their own subclass of this class and return an instance.
Args:
@@ -100,7 +100,7 @@ def __init__(
metadata: dict[str, Any] | None = None,
**kwargs: Any,
) -> None:
"""All Chat Completion Services should return a instance of this class as response.
"""All Chat Completion Services should return an instance of this class as response.
Or they can implement their own subclass of this class and return an instance.
Args:
@@ -127,7 +127,7 @@ def __init__( # type: ignore
metadata: dict[str, Any] | None = None,
**kwargs: Any,
):
"""All Chat Completion Services should return a instance of this class as response.
"""All Chat Completion Services should return an instance of this class as response.
Or they can implement their own subclass of this class and return an instance.
Args:
@@ -231,7 +231,7 @@ def to_element(self) -> "Element":

@classmethod
def from_element(cls, element: Element) -> "ChatMessageContent":
"""Create a new instance of ChatMessageContent from a XML element.
"""Create a new instance of ChatMessageContent from an XML element.
Args:
element: Element - The XML Element to create the ChatMessageContent from.
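A sketch of the call; the `<message role="...">` element shape is an assumption based on the chat-prompt XML syntax semantic-kernel uses elsewhere, not something this hunk confirms:

```python
from xml.etree.ElementTree import fromstring

from semantic_kernel.contents.chat_message_content import ChatMessageContent

element = fromstring('<message role="user">Hello!</message>')  # assumed tag and attribute
message = ChatMessageContent.from_element(element)
```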
@@ -23,7 +23,7 @@
class FunctionResultContent(KernelContent):
"""This is the base class for text response content.

- All Text Completion Services should return a instance of this class as response.
+ All Text Completion Services should return an instance of this class as response.
Or they can implement their own subclass of this class and return an instance.

Args:
@@ -20,8 +20,8 @@
class StreamingChatMessageContent(ChatMessageContent, StreamingContentMixin):
"""This is the class for streaming chat message response content.

- All Chat Completion Services should return a instance of this class as streaming response,
- where each part of the response as it is streamed is converted to a instance of this class,
+ All Chat Completion Services should return an instance of this class as streaming response,
+ where each part of the response as it is streamed is converted to an instance of this class,
the end-user will have to either do something directly or gather them and combine them into a
new instance. A service can implement their own subclass of this class and return instances of that.

@@ -55,7 +55,7 @@ def __init__(
ai_model_id: str | None = None,
metadata: dict[str, Any] | None = None,
) -> None:
"""All Chat Completion Services should return a instance of this class as response for streaming.
"""All Chat Completion Services should return an instance of this class as response for streaming.
Or they can implement their own subclass of this class and return an instance.

Args:
@@ -82,7 +82,7 @@ def __init__(
ai_model_id: str | None = None,
metadata: dict[str, Any] | None = None,
) -> None:
"""All Chat Completion Services should return a instance of this class as response for streaming.
"""All Chat Completion Services should return an instance of this class as response for streaming.
Or they can implement their own subclass of this class and return an instance.

Args:
@@ -109,7 +109,7 @@ def __init__( # type: ignore
ai_model_id: str | None = None,
metadata: dict[str, Any] | None = None,
):
"""All Chat Completion Services should return a instance of this class as response for streaming.
"""All Chat Completion Services should return an instance of this class as response for streaming.
Or they can implement their own subclass of this class and return an instance.

Args:
@@ -8,7 +8,7 @@
class StreamingTextContent(StreamingContentMixin, TextContent):
"""This is the base class for streaming text response content.

- All Text Completion Services should return a instance of this class as streaming response.
+ All Text Completion Services should return an instance of this class as streaming response.
Or they can implement their own subclass of this class and return an instance.

Args:
python/semantic_kernel/contents/text_content.py (1 addition & 1 deletion)

@@ -10,7 +10,7 @@
class TextContent(KernelContent):
"""This is the base class for text response content.
- All Text Completion Services should return a instance of this class as response.
+ All Text Completion Services should return an instance of this class as response.
Or they can implement their own subclass of this class and return an instance.
Args:
python/semantic_kernel/core_plugins/text_plugin.py (2 additions & 2 deletions)

@@ -16,10 +16,10 @@ class TextPlugin(KernelBaseModel):
{{text.trim $input}} => "hello world"

KernelArguments["input"] = " hello world "
- {{text.trimStart $input} => "hello world "
+ {{text.trimStart $input}} => "hello world "

KernelArguments["input"] = " hello world "
- {{text.trimEnd $input} => " hello world"
+ {{text.trimEnd $input}} => " hello world"

KernelArguments["input"] = "hello world"
{{text.uppercase $input}} => "HELLO WORLD"
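The trim family maps directly onto Python's built-in str methods, which is an easy way to sanity-check the expected outputs listed above:

```python
text = " hello world "
assert text.strip() == "hello world"           # {{text.trim $input}}
assert text.lstrip() == "hello world "         # {{text.trimStart $input}}
assert text.rstrip() == " hello world"         # {{text.trimEnd $input}}
assert "hello world".upper() == "HELLO WORLD"  # {{text.uppercase $input}}
```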
python/semantic_kernel/core_plugins/time_plugin.py (1 addition & 1 deletion)

@@ -74,7 +74,7 @@ def iso_date(self) -> str:
@kernel_function(description="Get the current date and time in the local time zone")
def now(self) -> str:
"""
- Get the current date and time in the local time zone"
+ Get the current date and time in the local time zone

Example:
{{time.now}} => Sunday, January 12, 2031 9:15 PM
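The example output corresponds to a strftime pattern along these lines (a sketch; the plugin's actual format string is not shown in this hunk):

```python
from datetime import datetime

# %I zero-pads the hour ("09:15 PM"); the docstring example drops the zero.
print(datetime(2031, 1, 12, 21, 15).strftime("%A, %B %d, %Y %I:%M %p"))
# Sunday, January 12, 2031 09:15 PM
```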
@@ -65,7 +65,7 @@ def __init__(self, service_id: str, options: FunctionCallingStepwisePlannerOptions
(whether it be AzureOpenAI or OpenAI), so that we can use tools.
If the options are configured to use callbacks to get the initial plan and the step prompt,
- the planner will use those provided callbacks to get that information. Otherwise it will
+ the planner will use those provided callbacks to get that information. Otherwise, it will
read from the default yaml plan file and the step prompt file.
Args:
@@ -12,7 +12,7 @@
class AIServiceClientBase(KernelBaseModel, ABC):
"""Base class for all AI Services.
- Has a ai_model_id and service_id, any other fields have to be defined by the subclasses.
+ Has an ai_model_id and service_id, any other fields have to be defined by the subclasses.
The ai_model_id can refer to a specific model, like 'gpt-35-turbo' for OpenAI,
or can just be a string that is used to identify the model in the service.
python/semantic_kernel/services/ai_service_selector.py (1 addition & 1 deletion)

@@ -25,7 +25,7 @@ def select_ai_service(
arguments: "KernelArguments",
type_: type["AI_SERVICE_CLIENT_TYPE"] | None = None,
) -> tuple["AI_SERVICE_CLIENT_TYPE", "PromptExecutionSettings"]:
"""Select a AI Service on a first come, first served basis,
"""Select an AI Service on a first come, first served basis,
starting with execution settings in the arguments,
followed by the execution settings from the function.
If the same service_id is in both, the one in the arguments will be used.
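That precedence reduces to a lookup that consults the arguments first; a minimal pure-Python sketch with dicts standing in for the settings collections:

```python
function_settings = {"default": "function-level", "gpt4": "function-level for gpt4"}
argument_settings = {"gpt4": "argument-level for gpt4"}

def select(service_id: str) -> str:
    # First come, first served: arguments win over the function's settings.
    if service_id in argument_settings:
        return argument_settings[service_id]
    return function_settings[service_id]

assert select("gpt4") == "argument-level for gpt4"
assert select("default") == "function-level"
```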
python/semantic_kernel/template_engine/blocks/code_block.py (2 additions & 2 deletions)

@@ -42,7 +42,7 @@ class CodeBlock(Block):
CodeBlockTokenError: If a token is not a named argument after the second token.
CodeBlockRenderError: If the plugin collection is not set in the kernel.
CodeBlockRenderError: If the function is not found in the plugin collection.
- CodeBlockRenderError: If the function does not take any arguments but it is being
+ CodeBlockRenderError: If the function does not take any arguments, but it is being
called in the template with arguments.
"""

@@ -104,7 +104,7 @@ async def render_code(self, kernel: "Kernel", arguments: "KernelArguments") -> str:
"""Render the code block.
If the first token is a function_id, it will call the function from the plugin collection.
- Otherwise it is a value or variable and those are then rendered directly.
+ Otherwise, it is a value or variable and those are then rendered directly.
"""
logger.debug(f"Rendering code: `{self.content}`")
if self.tokens[0].type == BlockTypes.FUNCTION_ID:
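A toy classifier for the dispatch that render_code performs on the first token; the $-prefix and quoting rules come from the template syntax described in these files, while the real logic lives in the block tokenizer:

```python
def first_token_kind(template_body: str) -> str:
    token = template_body.strip("{} ").split()[0]
    if token.startswith("$"):
        return "variable"   # rendered directly from KernelArguments
    if token.startswith(("'", '"')):
        return "value"      # a literal, also rendered directly
    return "function_id"    # looked up and invoked from the plugin collection

assert first_token_kind("{{time.now}}") == "function_id"
assert first_token_kind("{{$input}}") == "variable"
```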
@@ -27,7 +27,7 @@ class FunctionIdBlock(Block):
The content is parsed using a regex, that returns either a plugin and
function name or just a function name, depending on the content.

- Anything other then that and a ValueError is raised.
+ Anything other than that and a ValueError is raised.

Args:
content (str): The content of the block.
@@ -48,7 +48,7 @@ def parse_content(cls, fields: dict[str, Any]) -> dict[str, Any]:
"""Parse the content of the function id block and extract the plugin and function name.

If both are present in the fields, return the fields as is.
- Otherwise use the regex to extract the plugin and function name.
+ Otherwise, use the regex to extract the plugin and function name.
"""
if "plugin_name" in fields and "function_name" in fields:
return fields
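A standalone approximation of that parse step; the regex below is modeled on the plugin.function syntax the docstring describes, not copied from FunctionIdBlock:

```python
import re

FUNCTION_ID = re.compile(r"^(?:(?P<plugin_name>\w+)\.)?(?P<function_name>\w+)$")

def parse_function_id(content: str) -> dict[str, str]:
    match = FUNCTION_ID.match(content.strip())
    if match is None:
        raise ValueError(f"Invalid function id: {content!r}")  # anything else raises
    return {key: value for key, value in match.groupdict().items() if value}

assert parse_function_id("time.now") == {"plugin_name": "time", "function_name": "now"}
assert parse_function_id("now") == {"function_name": "now"}
```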
@@ -65,7 +65,7 @@ def parse_content(cls, fields: Any) -> Any:
"""Parse the content of the named argument block and extract the name and value.
If the name and either value or variable is present the parsing is skipped.
- Otherwise the content is parsed using a regex to extract the name and value.
+ Otherwise, the content is parsed using a regex to extract the name and value.
Those are then turned into Blocks.
Raises:
@@ -26,7 +26,7 @@ class VarBlock(Block):
"""Create a variable block.

A variable block is used to add a variable to a template.
- It get's rendered from KernelArguments, if the variable is not found
+ It gets rendered from KernelArguments, if the variable is not found
a warning is logged and an empty string is returned.
The variable must start with $ and be followed by a valid variable name.
A valid variable name is a string of letters, numbers and underscores.
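The behavior described here fits in a few lines; a self-contained sketch that mirrors the docstring (missing variables log a warning and render as an empty string):

```python
import logging
import re

logger = logging.getLogger(__name__)
VALID_NAME = re.compile(r"^[A-Za-z0-9_]+$")  # letters, numbers and underscores

def render_var(token: str, arguments: dict) -> str:
    if not token.startswith("$") or not VALID_NAME.match(token[1:]):
        raise ValueError(f"Invalid variable token: {token!r}")
    name = token[1:]
    if name not in arguments:
        logger.warning("Variable $%s not found in arguments", name)
        return ""
    return str(arguments[name])

assert render_var("$input", {"input": "hello"}) == "hello"
assert render_var("$missing", {}) == ""
```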
python/semantic_kernel/template_engine/code_tokenizer.py (1 addition & 1 deletion)

@@ -116,7 +116,7 @@ def tokenize(text: str) -> list[Block]:

continue

- # If we're not inside a quoted value and we're not processing a space
+ # If we're not inside a quoted value, and we're not processing a space
current_token_content.append(current_char)

if current_token_type is None:
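The quote handling this comment guards is similar in spirit to shlex's quote-aware splitting; a loose analogy only, not the tokenizer's actual behavior:

```python
import shlex

# Spaces separate tokens, except inside a quoted value.
assert shlex.split('plugin.func $arg "hello world"') == ["plugin.func", "$arg", "hello world"]
```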