diff --git a/python/samples/SAMPLE_GUIDELINES.md b/python/samples/SAMPLE_GUIDELINES.md index 9505e4d9780e..46815a464115 100644 --- a/python/samples/SAMPLE_GUIDELINES.md +++ b/python/samples/SAMPLE_GUIDELINES.md @@ -33,7 +33,7 @@ Try to do a best effort to make sure that the samples are incremental in complex ### **Documentation** -Try to over-document the samples. This includes comments in the code, README.md files, and any other documentation that is necessary to understand the sample. +Try to over-document the samples. This includes comments in the code, README.md files, and any other documentation that is necessary to understand the sample. We use the guidance from [PEP8](https://peps.python.org/pep-0008/#comments) for comments in the code, with a deviation for the initial summary comment in samples and the output of the samples. For the getting started samples and the concept samples, we should have the following: @@ -41,10 +41,12 @@ For the getting started samples and the concept samples, we should have the foll 2. A summary should be included at the top of the file that explains the purpose of the sample and required components/concepts to understand the sample. For example: ```python - # This sample shows how to create a chatbot. This sample uses the following two main components: - # - a ChatCompletionService: This component is responsible for generating responses to user messages. - # - a ChatHistory: This component is responsible for keeping track of the chat history. - # The chatbot in this sample is called Mosscap, who responds to user messages with long flowery prose. + ''' + This sample shows how to create a chatbot. This sample uses the following two main components: + - a ChatCompletionService: This component is responsible for generating responses to user messages. + - a ChatHistory: This component is responsible for keeping track of the chat history. 
+ The chatbot in this sample is called Mosscap, who responds to user messages with long flowery prose. + ''' ``` 3. Mark the code with comments to explain the purpose of each section of the code. For example: @@ -64,12 +66,11 @@ For the getting started samples and the concept samples, we should have the foll ```python ''' Sample output: - # User:> Why is the sky blue in one sentence? - # Mosscap:> The sky is blue due to the scattering of sunlight by the molecules in the Earth's atmosphere, - # a phenomenon known as Rayleigh scattering, which causes shorter blue wavelengths to become more - # prominent in our visual perception. + User:> Why is the sky blue in one sentence? + Mosscap:> The sky is blue due to the scattering of sunlight by the molecules in the Earth's atmosphere, + a phenomenon known as Rayleigh scattering, which causes shorter blue wavelengths to become more + prominent in our visual perception. ''' - ``` For the demos, a README.md file must be included that explains the purpose of the demo and how to run it. 
The README.md file should include the following: diff --git a/python/samples/concepts/README.md b/python/samples/concepts/README.md index 17377b62ec7b..72fe6258f876 100644 --- a/python/samples/concepts/README.md +++ b/python/samples/concepts/README.md @@ -44,7 +44,6 @@ - [Chat Completion Truncate History Reducer Agent Chat](./agents/chat_completion_agent/chat_completion_truncate_history_reducer_agent_chat.py) - [Chat Completion Truncate History Reducer Single Agent](./agents/chat_completion_agent/chat_completion_truncate_history_reducer_single_agent.py) - #### [Mixed Agent Group Chat](../../semantic_kernel/agents/group_chat/agent_group_chat.py) - [Mixed Chat Agents Plugins](./agents/mixed_chat/mixed_chat_agents_plugins.py) @@ -90,6 +89,7 @@ ### ChatHistory - Using and serializing the [`ChatHistory`](https://github.com/microsoft/semantic-kernel/blob/main/python/semantic_kernel/contents/chat_history.py) - [Serialize Chat History](./chat_history/serialize_chat_history.py) +- [Store Chat History in CosmosDB](./chat_history/store_chat_history_in_cosmosdb.py) ### Filtering - Creating and using Filters @@ -202,7 +202,7 @@ In Semantic Kernel for Python, we leverage Pydantic Settings to manage configura 1. **Reading Environment Variables:** - **Primary Source:** Pydantic first attempts to read the required settings from environment variables. - + 2. **Using a .env File:** - **Fallback Source:** If the required environment variables are not set, Pydantic will look for a `.env` file in the current working directory. - **Custom Path (Optional):** You can specify an alternative path for the `.env` file via `env_file_path`. This can be either a relative or an absolute path. @@ -220,4 +220,4 @@ To successfully retrieve and use the Entra Auth Token, you need the `Cognitive S - **.env File Placement:** We highly recommend placing the `.env` file in the `semantic-kernel/python` root directory. This is a common practice when developing in the Semantic Kernel repository. 
-By following these guidelines, you can ensure that your settings for various components are configured correctly, enabling seamless functionality and integration of Semantic Kernel in your Python projects. \ No newline at end of file +By following these guidelines, you can ensure that your settings for various components are configured correctly, enabling seamless functionality and integration of Semantic Kernel in your Python projects. diff --git a/python/samples/concepts/auto_function_calling/chat_completion_with_auto_function_calling.py b/python/samples/concepts/auto_function_calling/chat_completion_with_auto_function_calling.py index dd3359ce0297..27e80773645c 100644 --- a/python/samples/concepts/auto_function_calling/chat_completion_with_auto_function_calling.py +++ b/python/samples/concepts/auto_function_calling/chat_completion_with_auto_function_calling.py @@ -1,7 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. import asyncio -from typing import TYPE_CHECKING from samples.concepts.setup.chat_completion_services import Services, get_chat_completion_service_and_request_settings from semantic_kernel import Kernel @@ -11,9 +10,6 @@ from semantic_kernel.core_plugins.time_plugin import TimePlugin from semantic_kernel.functions import KernelArguments -if TYPE_CHECKING: - pass - ##################################################################### # This sample demonstrates how to build a conversational chatbot # # using Semantic Kernel, featuring auto function calling, # diff --git a/python/samples/concepts/chat_history/README.md b/python/samples/concepts/chat_history/README.md new file mode 100644 index 000000000000..1fada334e7c5 --- /dev/null +++ b/python/samples/concepts/chat_history/README.md @@ -0,0 +1,17 @@ +# Chat History manipulation samples + +This folder contains samples that demonstrate how to manipulate chat history in Semantic Kernel. 
+
+## [Serialize Chat History](./serialize_chat_history.py)
+
+This sample demonstrates how to build a conversational chatbot using Semantic Kernel, it features auto function calling, but with file-based serialization of the chat history. This sample stores and reads the chat history at every turn. This is not the best way to do it, but clearly demonstrates the mechanics.
+
+To run this sample an environment with keys for the chosen chat service is required. In line 61 you can change the model used. This sample uses a temporary file to store the chat history, so no additional setup is required.
+
+## [Store Chat History in Cosmos DB](./store_chat_history_in_cosmosdb.py)
+
+This is a more complex version of the sample above, it uses Azure CosmosDB NoSQL to store the chat messages.
+
+In order to do that a simple datamodel is defined. And then a class is created that extends ChatHistory, this class adds `store` and `read` methods, as well as a `create_collection` method that creates a collection in CosmosDB.
+
+This sample further uses the same chat service setup as the sample above, so the keys and other parameters for the chosen model should be in the environment. Next to that an AZURE_COSMOS_DB_NO_SQL_URL and optionally an AZURE_COSMOS_DB_NO_SQL_KEY should be set in the environment, you can also rely on Entra ID Auth instead of the key. The database name can also be put in the environment.
diff --git a/python/samples/concepts/chat_history/serialize_chat_history.py b/python/samples/concepts/chat_history/serialize_chat_history.py
index 331669be0906..f6c04bbd00c0 100644
--- a/python/samples/concepts/chat_history/serialize_chat_history.py
+++ b/python/samples/concepts/chat_history/serialize_chat_history.py
@@ -1,94 +1,112 @@
 # Copyright (c) Microsoft. All rights reserved.
import asyncio -import os -from typing import TYPE_CHECKING - -from semantic_kernel import Kernel -from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior -from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.azure_chat_prompt_execution_settings import ( - AzureChatPromptExecutionSettings, -) -from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion +import tempfile + +from samples.concepts.setup.chat_completion_services import Services, get_chat_completion_service_and_request_settings from semantic_kernel.contents import ChatHistory -from semantic_kernel.core_plugins.math_plugin import MathPlugin -from semantic_kernel.core_plugins.time_plugin import TimePlugin -from semantic_kernel.functions import KernelArguments - -if TYPE_CHECKING: - pass - - -system_message = """ -You are a chat bot. Your name is Mosscap and -you have one goal: figure out what people need. -Your full name, should you need to know it, is -Splendid Speckled Mosscap. You communicate -effectively, but you tend to answer with long -flowery prose. You are also a math wizard, -especially for adding and subtracting. -You also excel at joke telling, where your tone is often sarcastic. -Once you have the answer I am looking for, -you will return a full answer to me as soon as possible. + +""" +This sample demonstrates how to build a conversational chatbot +using Semantic Kernel, it features auto function calling, +but with file-based serialization of the chat history. +This sample stores and reads the chat history at every turn. +This is not the best way to do it, but clearly demonstrates the mechanics. +More optimal would for instance be to only write once when a conversation is done. +And writing to something other then a file is also usually better. """ -kernel = Kernel() - -# Note: the underlying gpt-35/gpt-4 model version needs to be at least version 0613 to support tools. 
-kernel.add_service(AzureChatCompletion(service_id="chat")) - -plugins_directory = os.path.join(__file__, "../../../../../prompt_template_samples/") -# adding plugins to the kernel -kernel.add_plugin(MathPlugin(), plugin_name="math") -kernel.add_plugin(TimePlugin(), plugin_name="time") - -# Enabling or disabling function calling is done by setting the `function_choice_behavior` attribute for the -# prompt execution settings. When the function_call parameter is set to "auto" the model will decide which -# function to use, if any. -# -# There are two ways to define the `function_choice_behavior` parameter: -# 1. Using the type string as `"auto"`, `"required"`, or `"none"`. For example: -# configure `function_choice_behavior="auto"` parameter directly in the execution settings. -# 2. Using the FunctionChoiceBehavior class. For example: -# `function_choice_behavior=FunctionChoiceBehavior.Auto()`. -# Both of these configure the `auto` tool_choice and all of the available plugins/functions -# registered on the kernel. If you want to limit the available plugins/functions, you must -# configure the `filters` dictionary attribute for each type of function choice behavior. -# For example: -# -# from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior - -# function_choice_behavior = FunctionChoiceBehavior.Auto( -# filters={"included_functions": ["time-date", "time-time", "math-Add"]} -# ) -# -# The filters attribute allows you to specify either: `included_functions`, `excluded_functions`, -# `included_plugins`, or `excluded_plugins`. - -# Note: the number of responses for auto invoking tool calls is limited to 1. -# If configured to be greater than one, this value will be overridden to 1. 
-execution_settings = AzureChatPromptExecutionSettings( - service_id="chat", - max_tokens=2000, - temperature=0.7, - top_p=0.8, - function_choice_behavior=FunctionChoiceBehavior.Auto(), -) - -arguments = KernelArguments(settings=execution_settings) +# You can select from the following chat completion services that support function calling: +# - Services.OPENAI +# - Services.AZURE_OPENAI +# - Services.AZURE_AI_INFERENCE +# - Services.ANTHROPIC +# - Services.BEDROCK +# - Services.GOOGLE_AI +# - Services.MISTRAL_AI +# - Services.OLLAMA +# - Services.ONNX +# - Services.VERTEX_AI +# - Services.DEEPSEEK +# Please make sure you have configured your environment correctly for the selected chat completion service. +chat_completion_service, request_settings = get_chat_completion_service_and_request_settings(Services.OPENAI) + + +async def chat(file) -> bool: + """ + Continuously prompt the user for input and show the assistant's response. + Type 'exit' to exit. + """ + try: + # Try to load the chat history from a file. + history = ChatHistory.load_chat_history_from_file(file_path=file) + print(f"Chat history successfully loaded {len(history.messages)} messages.") + except Exception: + # Create a new chat history to store the system message, initial messages, and the conversation. + print("Chat history file not found. Starting a new conversation.") + history = ChatHistory() + history.add_system_message( + "You are a chat bot. Your name is Mosscap and you have one goal: figure out what people need." 
+ ) + + try: + # Get the user input + user_input = input("User:> ") + except (KeyboardInterrupt, EOFError): + print("\n\nExiting chat...") + return False + + if user_input.lower().strip() == "exit": + print("\n\nExiting chat...") + return False + + # Add the user input to the chat history + history.add_user_message(user_input) + # Get a response from the chat completion service + result = await chat_completion_service.get_chat_message_content(history, request_settings) + + # Update the chat history with the user's input and the assistant's response + if result: + print(f"Mosscap:> {result}") + history.add_message(result) + + # Save the chat history to a file. + print(f"Saving {len(history.messages)} messages to the file.") + history.store_chat_history_to_file(file_path=file) + return True -async def main() -> None: - user_input = "What is the current hour plus 10?" - print(f"User:> {user_input}") - result = await kernel.invoke_prompt(prompt=user_input, arguments=arguments) +""" +Sample output: + +Welcome to the chat bot! + Type 'exit' to exit. + Try a math question to see function calling in action (e.g. 'what is 3+3?'). + Your chat history will be saved in: /tmpq1n1f6qk.json +Chat history file not found. Starting a new conversation. +User:> Hello, how are you? +Mosscap:> Hello! I'm here and ready to help. What do you need today? +Saving 3 messages to the file. +Chat history successfully loaded 3 messages. +User:> exit +""" - print(f"Mosscap:> {result}") - print("\nChat history:") - chat_history: ChatHistory = result.metadata["messages"] - print(chat_history.serialize()) +async def main() -> None: + chatting = True + with tempfile.NamedTemporaryFile(mode="w+", dir=".", suffix=".json", delete=True) as file: + print( + "Welcome to the chat bot!\n" + " Type 'exit' to exit.\n" + " Try a math question to see function calling in action (e.g. 'what is 3+3?')." 
+ f" Your chat history will be saved in: {file.name}" + ) + try: + while chatting: + chatting = await chat(file.name) + except Exception: + print("Closing and removing the file.") if __name__ == "__main__": diff --git a/python/samples/concepts/chat_history/store_chat_history_in_cosmosdb.py b/python/samples/concepts/chat_history/store_chat_history_in_cosmosdb.py new file mode 100644 index 000000000000..e6a68d354e3d --- /dev/null +++ b/python/samples/concepts/chat_history/store_chat_history_in_cosmosdb.py @@ -0,0 +1,199 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +from dataclasses import dataclass +from typing import Annotated + +from samples.concepts.setup.chat_completion_services import Services, get_chat_completion_service_and_request_settings +from semantic_kernel import Kernel +from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior +from semantic_kernel.connectors.memory.azure_cosmos_db.azure_cosmos_db_no_sql_store import AzureCosmosDBNoSQLStore +from semantic_kernel.contents import ChatHistory +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.core_plugins.math_plugin import MathPlugin +from semantic_kernel.core_plugins.time_plugin import TimePlugin +from semantic_kernel.data.record_definition.vector_store_model_decorator import vectorstoremodel +from semantic_kernel.data.record_definition.vector_store_record_fields import ( + VectorStoreRecordDataField, + VectorStoreRecordKeyField, +) +from semantic_kernel.data.vector_storage.vector_store import VectorStore +from semantic_kernel.data.vector_storage.vector_store_record_collection import VectorStoreRecordCollection + +""" +This sample demonstrates how to build a conversational chatbot +using Semantic Kernel, it features auto function calling, +but with Azure CosmosDB as storage for the chat history. +This sample stores and reads the chat history at every turn. 
+This is not the best way to do it, but clearly demonstrates the mechanics.
+
+Further refinement would be to only write once when a conversation is done.
+And there is also no logic to see if there is something to write.
+You could also enhance the ChatHistoryModel with a summary and a vector for that
+in order to search for similar conversations.
+"""
+
+
+# 1. We first create a simple datamodel for the chat history.
+# Note that this model does not contain any vectors,
+# those can be added, for instance to store a summary of the conversation.
+@vectorstoremodel
+@dataclass
+class ChatHistoryModel:
+    session_id: Annotated[str, VectorStoreRecordKeyField]
+    user_id: Annotated[str, VectorStoreRecordDataField(is_filterable=True)]
+    messages: Annotated[list[dict[str, str]], VectorStoreRecordDataField(is_filterable=True)]
+
+
+# 2. We then create a class that extends the ChatHistory class
+# and implements the methods to store and read the chat history.
+# This could also use one of the history reducers to make
+# sure the database doesn't grow too large.
+# It adds a `store` attribute and a couple of methods.
+class ChatHistoryInCosmosDB(ChatHistory):
+    """This class extends the ChatHistory class to store the chat history in a Cosmos DB."""
+
+    session_id: str
+    user_id: str
+    store: VectorStore
+    collection: VectorStoreRecordCollection[str, ChatHistoryModel] | None = None
+
+    async def create_collection(self, collection_name: str) -> None:
+        """Create a collection with the built-in data model using the vector store.
+
+        First create an instance of this class, then call this method to create the collection itself.
+        """
+        self.collection = self.store.get_collection(
+            collection_name=collection_name,
+            data_model_type=ChatHistoryModel,
+        )
+        await self.collection.create_collection_if_not_exists()
+
+    async def store_messages(self) -> None:
+        """Store the chat history in the Cosmos DB.
+
+        Note that we use model_dump to convert the chat message content into a serializable format.
+ """ + if self.collection: + await self.collection.upsert( + ChatHistoryModel( + session_id=self.session_id, + user_id=self.user_id, + messages=[msg.model_dump() for msg in self.messages], + ) + ) + + async def read_messages(self) -> None: + """Read the chat history from the Cosmos DB. + + Note that we use the model_validate method to convert the serializable format back into a ChatMessageContent. + """ + if self.collection: + record = await self.collection.get(self.session_id) + if record: + for message in record.messages: + self.messages.append(ChatMessageContent.model_validate(message)) + + +# 3. We now create a fairly standard kernel, with functions and a chat service. +# Create and configure the kernel. +kernel = Kernel() + +# Load some sample plugins (for demonstration of function calling). +kernel.add_plugin(MathPlugin(), plugin_name="math") +kernel.add_plugin(TimePlugin(), plugin_name="time") + +# You can select from the following chat completion services that support function calling: +# - Services.OPENAI +# - Services.AZURE_OPENAI +# - Services.AZURE_AI_INFERENCE +# - Services.ANTHROPIC +# - Services.BEDROCK +# - Services.GOOGLE_AI +# - Services.MISTRAL_AI +# - Services.OLLAMA +# - Services.ONNX +# - Services.VERTEX_AI +# - Services.DEEPSEEK +# Please make sure you have configured your environment correctly for the selected chat completion service. +chat_completion_service, request_settings = get_chat_completion_service_and_request_settings(Services.AZURE_OPENAI) + +# Configure the function choice behavior. Here, we set it to Auto, where auto_invoke=True by default. +# With `auto_invoke=True`, the model will automatically choose and call functions as needed. +request_settings.function_choice_behavior = FunctionChoiceBehavior.Auto(filters={"excluded_plugins": ["ChatBot"]}) + +kernel.add_service(chat_completion_service) + + +# 4. The main chat loop, which takes a history object and prompts the user for input. 
+# It then adds the user input to the history and gets a response from the chat completion service. +# Finally, it prints the response and saves the chat history to the Cosmos DB. +async def chat(history: ChatHistoryInCosmosDB) -> bool: + """ + Continuously prompt the user for input and show the assistant's response. + Type 'exit' to exit. + """ + await history.read_messages() + print(f"Chat history successfully loaded {len(history.messages)} messages.") + if len(history.messages) == 0: + # if it is a new conversation, add the system message and a couple of initial messages. + history.add_system_message( + "You are a chat bot. Your name is Mosscap and you have one goal: figure out what people need." + ) + history.add_user_message("Hi there, who are you?") + history.add_assistant_message("I am Mosscap, a chat bot. I'm trying to figure out what people need.") + + try: + user_input = input("User:> ") + except (KeyboardInterrupt, EOFError): + print("\n\nExiting chat...") + return False + + if user_input.lower().strip() == "exit": + print("\n\nExiting chat...") + return False + + # add the user input to the chat history + history.add_user_message(user_input) + + result = await chat_completion_service.get_chat_message_content(history, request_settings, kernel=kernel) + + if result: + print(f"Mosscap:> {result}") + history.add_message(result) + + # Save the chat history to CosmosDB. + print(f"Saving {len(history.messages)} messages to AzureCosmosDB.") + await history.store_messages() + return True + + +async def main() -> None: + delete_when_done = True + session_id = "session1" + chatting = True + # 5. We now create the store, ChatHistory and collection and start the chat loop. + + # First we enter the store context manager to connect. + # The create_database flag will create the database if it does not exist. + async with AzureCosmosDBNoSQLStore(create_database=True) as store: + # Then we create the chat history in CosmosDB. 
+ history = ChatHistoryInCosmosDB(store=store, session_id=session_id, user_id="user") + # Finally we create the collection. + await history.create_collection(collection_name="chat_history") + print( + "Welcome to the chat bot!\n" + " Type 'exit' to exit.\n" + " Try a math question to see function calling in action (e.g. 'what is 3+3?')." + ) + try: + while chatting: + chatting = await chat(history) + except Exception: + print("Closing chat...") + if delete_when_done and history.collection: + await history.collection.delete_collection() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/semantic_kernel/contents/chat_history.py b/python/semantic_kernel/contents/chat_history.py index e38172db02fa..53d49d2d80e4 100644 --- a/python/semantic_kernel/contents/chat_history.py +++ b/python/semantic_kernel/contents/chat_history.py @@ -1,6 +1,5 @@ # Copyright (c) Microsoft. All rights reserved. -import json import logging from collections.abc import Generator, Iterable from functools import singledispatchmethod @@ -363,26 +362,30 @@ def restore_chat_history(cls: type[_T], chat_history_json: str) -> _T: fails validation. """ try: - return cls(**json.loads(chat_history_json)) + return cls.model_validate_json(chat_history_json) except Exception as e: raise ContentInitializationError(f"Invalid JSON format: {e}") def store_chat_history_to_file(self, file_path: str) -> None: """Stores the serialized ChatHistory to a file. + Uses mode "w" which means the file is created if it does not exist and gets truncated if it does. + Args: - file_path (str): The path to the file where the serialized data will be stored. + file_path: The path to the file where the serialized data will be stored. 
""" json_str = self.serialize() - with open(file_path, "w") as file: - file.write(json_str) + with open(file_path, "w") as local_file: + local_file.write(json_str) @classmethod - def load_chat_history_from_file(cls, file_path: str) -> "ChatHistory": + def load_chat_history_from_file(cls: type[_T], file_path: str) -> _T: """Loads the ChatHistory from a file. + Uses mode "r" which means it can only be read if it exists. + Args: - file_path (str): The path to the file from which to load the ChatHistory. + file_path: The path to the file from which to load the ChatHistory. Returns: ChatHistory: The deserialized ChatHistory instance. diff --git a/python/semantic_kernel/data/record_definition/vector_store_model_decorator.py b/python/semantic_kernel/data/record_definition/vector_store_model_decorator.py index ad905e7d113a..0b6893116389 100644 --- a/python/semantic_kernel/data/record_definition/vector_store_model_decorator.py +++ b/python/semantic_kernel/data/record_definition/vector_store_model_decorator.py @@ -3,7 +3,7 @@ import logging from inspect import Parameter, _empty, signature from types import MappingProxyType, NoneType -from typing import Any +from typing import TypeVar from semantic_kernel.data.record_definition.vector_store_model_definition import VectorStoreRecordDefinition from semantic_kernel.data.record_definition.vector_store_record_fields import ( @@ -15,11 +15,13 @@ logger = logging.getLogger(__name__) +_T = TypeVar("_T") + @experimental def vectorstoremodel( - cls: Any | None = None, -): + cls: type[_T] | None = None, +) -> type[_T]: """Returns the class as a vector store model. This decorator makes a class a vector store model. @@ -44,18 +46,18 @@ def vectorstoremodel( VectorStoreModelException: If there is a ndarray field without a serialize or deserialize function. 
""" - def wrap(cls: Any): + def wrap(cls: type[_T]) -> type[_T]: # get fields and annotations cls_sig = signature(cls) setattr(cls, "__kernel_vectorstoremodel__", True) setattr(cls, "__kernel_vectorstoremodel_definition__", _parse_signature_to_definition(cls_sig.parameters)) - return cls + return cls # type: ignore # See if we're being called as @vectorstoremodel or @vectorstoremodel(). if cls is None: # We're called with parens. - return wrap + return wrap # type: ignore # We're called as @vectorstoremodel without parens. return wrap(cls)