Skip to content

Commit 66dd34a

Browse files
committed
convert print to logger
1 parent aa1f744 commit 66dd34a

File tree

2 files changed

+9
-6
lines changed

2 files changed

+9
-6
lines changed

app.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
import os
22
import tempfile
3-
from functools import lru_cache
43

54
os.chdir(os.path.dirname(os.path.abspath(__file__)))
65
os.environ['SERPAPI_API_KEY'] = ''

docGPT/docGPT.py

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,10 @@
1313
from langchain.memory import ConversationBufferMemory
1414
from langchain.prompts import PromptTemplate
1515
from langchain.vectorstores import FAISS
16+
from streamlit import logger
1617

1718
openai.api_key = os.getenv('OPENAI_API_KEY')
19+
module_logger = logger.get_logger(__name__)
1820

1921

2022
class BaseQaChain(ABC):
@@ -159,7 +161,7 @@ def _embeddings(self):
159161
documents=self.docs,
160162
embedding=embeddings
161163
)
162-
print('embedded...')
164+
module_logger.info('embedded...')
163165
return db
164166

165167
def create_qa_chain(
@@ -190,7 +192,7 @@ def run(self, query: str) -> str:
190192
with get_openai_callback() as callback:
191193
if isinstance(self.qa_chain, RetrievalQA):
192194
response = self.qa_chain.run(query)
193-
print(callback)
195+
module_logger.info(callback)
194196
return response
195197

196198

@@ -221,7 +223,7 @@ class GPT4Free(LLM):
221223
'g4f.Provider.You': g4f.Provider.You,
222224
'g4f.Provider.Yqcloud': g4f.Provider.Yqcloud,
223225
}
224-
provider: str = 'g4f.Provider.ChatgptAi'
226+
provider: str = 'g4f.Provider.DeepAi'
225227

226228
@property
227229
def _llm_type(self) -> str:
@@ -235,11 +237,13 @@ def _call(
235237
) -> str:
236238
try:
237239
# print(f'\033[36mPromopt: {prompt}\033[0m')
238-
print(f'\033[36mProvider: {self.PROVIDER_MAPPING[self.provider]}\033[0m')
240+
module_logger.info(
241+
f'\033[36mProvider: {self.PROVIDER_MAPPING[self.provider]}\033[0m'
242+
)
239243
return g4f.ChatCompletion.create(
240244
model="gpt-3.5-turbo",
241245
messages=[{"role": "user", "content": prompt}],
242246
provider=self.PROVIDER_MAPPING[self.provider]
243247
)
244248
except Exception as e:
245-
print(f'{__file__}: {e}')
249+
module_logger.info(f'{__file__}: {e}')

0 commit comments

Comments (0)