5 | 5 | from llama_index.core.memory import ChatMemoryBuffer
6 | 6 | from llama_index.core import StorageContext, load_index_from_storage
7 | 7 | from llama_index.readers.web import SimpleWebPageReader
8 |   | -from aimon import Detect
  | 8 | +from aimon import Detect, AnalyzeProd, Application, Model
9 | 9 | from aimon import AuthenticationError
10 | 10 | import logging
11 | 11 | import os

21 | 21 |     'completeness': {'detector_name': 'default'},
22 | 22 |     'toxicity': {'detector_name': 'default'},
23 | 23 | }
24 |    | -detect = Detect(values_returned=['context', 'user_query', 'instructions', 'generated_text'], api_key=os.getenv("AIMON_API_KEY"), config=aimon_config)
25 |    | -
   | 24 | +values_returned = ['context', 'user_query', 'instructions', 'generated_text']
   | 25 | +detect = Detect(values_returned=values_returned, api_key=os.getenv("AIMON_API_KEY"), config=aimon_config)
   | 26 | +analyze_prod = AnalyzeProd(Application("paul_graham_chatbot_aug_2024"), Model("gpt_4o_model", "GPT-4o"), values_returned=values_returned, config=aimon_config)
26 | 27 |
27 | 28 | @st.cache_resource(show_spinner=False)
28 | 29 | def load_data():

@@ -88,10 +89,11 @@ def split_into_paragraphs(text):
88 | 89 |     return paragraphs
89 | 90 |
90 | 91 |
   | 92 | +@analyze_prod
91 | 93 | @detect
92 | 94 | def am_chat(usr_prompt, instructions):
93 | 95 |     response = st.session_state.chat_engine.chat(usr_prompt)
94 |    | -    context = get_source_docs(response)
   | 96 | +    context, relevance_scores = get_source_docs(response)
95 | 97 |     return context, usr_prompt, instructions, response.response
96 | 98 |
97 | 99 |
@@ -145,7 +147,7 @@ def execute():
145 | 147 |     if st.session_state.messages[-1]["role"] != "assistant":
146 | 148 |         with st.chat_message("assistant"):
147 | 149 |             if cprompt:
148 |     | -                context, usr_prompt, instructions, response, am_res = am_chat(cprompt, instructions)
    | 150 | +                context, usr_prompt, instructions, response, am_res, am_analyze_res = am_chat(cprompt, instructions)
149 | 151 |                 message = {"role": "assistant", "content": response}
150 | 152 |                 am_res_json = am_res.to_json()
151 | 153 |                 st.write(response)