 from langchain.memory import ConversationBufferMemory
 from langchain.prompts import PromptTemplate
 from langchain.vectorstores import FAISS
+from streamlit import logger

 openai.api_key = os.getenv('OPENAI_API_KEY')
+module_logger = logger.get_logger(__name__)


 class BaseQaChain(ABC):
@@ -159,7 +161,7 @@ def _embeddings(self):
             documents=self.docs,
             embedding=embeddings
         )
-        print('embedded...')
+        module_logger.info('embedded...')
         return db

     def create_qa_chain(
@@ -190,7 +192,7 @@ def run(self, query: str) -> str:
         with get_openai_callback() as callback:
             if isinstance(self.qa_chain, RetrievalQA):
                 response = self.qa_chain.run(query)
-            print(callback)
+            module_logger.info(callback)
         return response


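The hunks above all make the same substitution: ad-hoc print() calls become calls on a module-level logger obtained from Streamlit. A minimal standalone sketch of that pattern follows; streamlit.logger.get_logger is the real Streamlit helper and returns a standard logging.Logger, while embed_demo is a hypothetical stand-in for the methods touched above.

from streamlit import logger

# get_logger() returns a standard logging.Logger configured by Streamlit,
# so the usual .info()/.error() levels and handlers apply.
module_logger = logger.get_logger(__name__)

def embed_demo() -> None:
    # Hypothetical stand-in for _embeddings(); replaces: print('embedded...')
    module_logger.info('embedded...')
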
@@ -221,7 +223,7 @@ class GPT4Free(LLM):
         'g4f.Provider.You': g4f.Provider.You,
         'g4f.Provider.Yqcloud': g4f.Provider.Yqcloud,
     }
-    provider: str = 'g4f.Provider.ChatgptAi'
+    provider: str = 'g4f.Provider.DeepAi'

     @property
     def _llm_type(self) -> str:
@@ -235,11 +237,13 @@ def _call(
     ) -> str:
         try:
             # print(f'\033[36mPrompt: {prompt}\033[0m')
-            print(f'\033[36mProvider: {self.PROVIDER_MAPPING[self.provider]}\033[0m')
+            module_logger.info(
+                f'\033[36mProvider: {self.PROVIDER_MAPPING[self.provider]}\033[0m'
+            )
             return g4f.ChatCompletion.create(
                 model="gpt-3.5-turbo",
                 messages=[{"role": "user", "content": prompt}],
                 provider=self.PROVIDER_MAPPING[self.provider]
             )
         except Exception as e:
-            print(f'{__file__}: {e}')
+            module_logger.info(f'{__file__}: {e}')
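For reference, a hedged usage sketch of the GPT4Free wrapper whose default provider the commit changes. The import path qa_chains is an assumption; GPT4Free, its provider field, and the g4f provider keys come from the diff itself.

from qa_chains import GPT4Free  # hypothetical module path for the file under edit

llm = GPT4Free(provider='g4f.Provider.DeepAi')  # the new default set by this commit
# LangChain's LLM.__call__ dispatches to _call(), which logs the provider
# and forwards the prompt to g4f.ChatCompletion.create().
print(llm('What does FAISS.from_documents() do?'))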