diff --git a/code/backend/batch/utilities/helpers/config/config_helper.py b/code/backend/batch/utilities/helpers/config/config_helper.py
index 05549ac04..106d6b740 100644
--- a/code/backend/batch/utilities/helpers/config/config_helper.py
+++ b/code/backend/batch/utilities/helpers/config/config_helper.py
@@ -53,6 +53,10 @@ def __init__(self, config: dict):
             "enable_chat_history", self.env_helper.CHAT_HISTORY_ENABLED
         )
 
+        self.conversational_flow = config.get(
+            "conversational_flow", self.env_helper.CONVERSATION_FLOW
+        )
+
     def get_available_document_types(self) -> list[str]:
         document_types = {
             "txt",
@@ -247,6 +251,7 @@ def get_default_config():
                     Template(f.read()).substitute(
                         ORCHESTRATION_STRATEGY=env_helper.ORCHESTRATION_STRATEGY,
                         CHAT_HISTORY_ENABLED=env_helper.CHAT_HISTORY_ENABLED,
+                        CONVERSATION_FLOW=env_helper.CONVERSATION_FLOW,
                     )
                 )
                 if env_helper.USE_ADVANCED_IMAGE_PROCESSING:
diff --git a/code/backend/batch/utilities/helpers/config/default.json b/code/backend/batch/utilities/helpers/config/default.json
index be50c1a4c..eb6c5c326 100644
--- a/code/backend/batch/utilities/helpers/config/default.json
+++ b/code/backend/batch/utilities/helpers/config/default.json
@@ -9,7 +9,7 @@
         "enable_post_answering_prompt": false,
         "ai_assistant_type": "default",
         "enable_content_safety": true,
-        "conversational_flow": "custom"
+        "conversational_flow": "${CONVERSATION_FLOW}"
     },
     "example": {
        "documents": "{\n \"retrieved_documents\": [\n {\n \"[doc1]\": {\n \"content\": \"Dual Transformer Encoder (DTE) DTE (https://dev.azure.com/TScience/TSciencePublic/_wiki/wikis/TSciencePublic.wiki/82/Dual-Transformer-Encoder) DTE is a general pair-oriented sentence representation learning framework based on transformers. It provides training, inference and evaluation for sentence similarity models. Model Details DTE can be used to train a model for sentence similarity with the following features: - Build upon existing transformer-based text representations (e.g.TNLR, BERT, RoBERTa, BAG-NLR) - Apply smoothness inducing technology to improve the representation robustness - SMART (https://arxiv.org/abs/1911.03437) SMART - Apply NCE (Noise Contrastive Estimation) based similarity learning to speed up training of 100M pairs We use pretrained DTE model\"\n }\n },\n {\n \"[doc2]\": {\n \"content\": \"trained on internal data. You can find more details here - Models.md (https://dev.azure.com/TScience/_git/TSciencePublic?path=%2FDualTransformerEncoder%2FMODELS.md&version=GBmaster&_a=preview) Models.md DTE-pretrained for In-context Learning Research suggests that finetuned transformers can be used to retrieve semantically similar exemplars for e.g. KATE (https://arxiv.org/pdf/2101.06804.pdf) KATE . They show that finetuned models esp. tuned on related tasks give the maximum boost to GPT-3 in-context performance. DTE have lot of pretrained models that are trained on intent classification tasks. We can use these model embedding to find natural language utterances which are similar to our test utterances at test time. The steps are: 1. Embed\"\n }\n },\n {\n \"[doc3]\": {\n \"content\": \"train and test utterances using DTE model 2. For each test embedding, find K-nearest neighbors. 3. Prefix the prompt with nearest embeddings. The following diagram from the above paper (https://arxiv.org/pdf/2101.06804.pdf) the above paper visualizes this process: DTE-Finetuned This is an extension of DTE-pretrained method where we further finetune the embedding models for prompt crafting task. In summary, we sample random prompts from our training data and use them for GPT-3 inference for the another part of training data. Some prompts work better and lead to right results whereas other prompts lead\"\n }\n },\n {\n \"[doc4]\": {\n \"content\": \"to wrong completions. We finetune the model on the downstream task of whether a prompt is good or not based on whether it leads to right or wrong completion. This approach is similar to this paper: Learning To Retrieve Prompts for In-Context Learning (https://arxiv.org/pdf/2112.08633.pdf) this paper: Learning To Retrieve Prompts for In-Context Learning . This method is very general but it may require a lot of data to actually finetune a model to learn how to retrieve examples suitable for the downstream inference model like GPT-3.\"\n }\n }\n ]\n}",
diff --git a/code/backend/batch/utilities/helpers/env_helper.py b/code/backend/batch/utilities/helpers/env_helper.py
index 63c5d52d9..49d11a6b7 100644
--- a/code/backend/batch/utilities/helpers/env_helper.py
+++ b/code/backend/batch/utilities/helpers/env_helper.py
@@ -243,6 +243,10 @@ def __load_config(self, **kwargs) -> None:
         self.ORCHESTRATION_STRATEGY = os.getenv(
             "ORCHESTRATION_STRATEGY", "openai_function"
         )
+
+        # Conversation settings
+        self.CONVERSATION_FLOW = os.getenv("CONVERSATION_FLOW", "custom")
+
         # Speech Service
         self.AZURE_SPEECH_SERVICE_NAME = os.getenv("AZURE_SPEECH_SERVICE_NAME", "")
         self.AZURE_SPEECH_SERVICE_REGION = os.getenv("AZURE_SPEECH_SERVICE_REGION")
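
Note on how the new `${CONVERSATION_FLOW}` placeholder in default.json is resolved: `get_default_config()` runs the raw file through Python's `string.Template` before parsing it, which is why the diff adds `CONVERSATION_FLOW=env_helper.CONVERSATION_FLOW` to the `substitute(...)` call. The snippet below is a minimal illustrative sketch of that substitution path, not code from this change; the inlined JSON fragment and variable names are stand-ins.

```python
import json
import os
from string import Template

# Minimal sketch (not part of the change): how a ${CONVERSATION_FLOW}
# placeholder in default.json is filled in before the JSON is parsed.
raw_config = '{"prompts": {"conversational_flow": "${CONVERSATION_FLOW}"}}'

# Mirrors env_helper.py above: the env var falls back to "custom".
conversation_flow = os.getenv("CONVERSATION_FLOW", "custom")

resolved = Template(raw_config).substitute(CONVERSATION_FLOW=conversation_flow)
config = json.loads(resolved)

print(config["prompts"]["conversational_flow"])  # "custom" unless CONVERSATION_FLOW is set
```

With that in place, `ConfigHelper.__init__` still lets a saved config override the value via `config.get("conversational_flow", self.env_helper.CONVERSATION_FLOW)`, so the environment variable only supplies the default.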