diff --git a/code/backend/batch/utilities/plugins/ChatPlugin.py b/code/backend/batch/utilities/plugins/ChatPlugin.py index eb3e79454..2d672537a 100644 --- a/code/backend/batch/utilities/plugins/ChatPlugin.py +++ b/code/backend/batch/utilities/plugins/ChatPlugin.py @@ -21,7 +21,6 @@ def search_documents( str, "A standalone question, converted from the chat history" ], ) -> Answer: - # TODO: Use Semantic Kernel to call LLM return QuestionAnswerTool().answer_question( question=question, chat_history=self.chat_history ) @@ -37,7 +36,6 @@ def text_processing( "The operation to be performed on the text. Like Translate to Italian, Summarize, Paraphrase, etc. If a language is specified, return that as part of the operation. Preserve the operation name in the user language.", ], ) -> Answer: - # TODO: Use Semantic Kernel to call LLM return TextProcessingTool().answer_question( question=self.question, chat_history=self.chat_history, diff --git a/code/backend/batch/utilities/plugins/PostAnsweringPlugin.py b/code/backend/batch/utilities/plugins/PostAnsweringPlugin.py index b6aa6128a..1c5d87378 100644 --- a/code/backend/batch/utilities/plugins/PostAnsweringPlugin.py +++ b/code/backend/batch/utilities/plugins/PostAnsweringPlugin.py @@ -8,5 +8,4 @@ class PostAnsweringPlugin: @kernel_function(description="Run post answering prompt to validate the answer.") def validate_answer(self, arguments: KernelArguments) -> Answer: - # TODO: Use Semantic Kernel to call LLM return PostPromptTool().validate_answer(arguments["answer"]) diff --git a/infra/main.bicep b/infra/main.bicep index 64d23b87d..f19c80f28 100644 --- a/infra/main.bicep +++ b/infra/main.bicep @@ -119,9 +119,10 @@ param azureOpenAIVisionModelVersion string = 'vision-preview' @description('Azure OpenAI Vision Model Capacity - See here for more info https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/quota') param azureOpenAIVisionModelCapacity int = 10 -@description('Orchestration strategy: openai_function or 
langchain str. If you use a old version of turbo (0301), plese select langchain') +@description('Orchestration strategy: openai_function or semantic_kernel or langchain str. If you use an old version of turbo (0301), please select langchain') @allowed([ 'openai_function' + 'semantic_kernel' 'langchain' ]) param orchestrationStrategy string = 'openai_function'