Skip to content

Commit 33f2b5a

Browse files
committed
Reduce memory cost: remove unnecessary 沒有要列印的檔案 ("files that are not meant to be printed")
1 parent 0cc3560 commit 33f2b5a

File tree

3 files changed

+9
-8
lines changed

3 files changed

+9
-8
lines changed

app.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -126,12 +126,12 @@ def upload_and_process_pdf() -> list:
126126
return docs
127127

128128

129-
@lru_cache(maxsize=20)
130129
def get_response(query: str) -> str:
130+
app_logger.info(f'\033[36mUser Query: {query}\033[0m')
131131
try:
132132
if model is not None:
133133
response = model.run(query)
134-
app_logger.info(f'llm response: {response}')
134+
app_logger.info(f'\033[36mLLM Response: {response}\033[0m')
135135
return response
136136
except Exception as e:
137137
app_logger.info(f'{__file__}: {e}')

docGPT/__init__.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -36,7 +36,7 @@ def create_doc_gpt(
3636
docGPT.llm = llm_model
3737
agent_.llm = llm_model
3838

39-
docGPT.create_qa_chain(chain_type='refine')
39+
docGPT.create_qa_chain(chain_type='refine', verbose=False)
4040
docGPT_tool = agent_.create_doc_chat(docGPT)
4141
calculate_tool = agent_.get_calculate_chain
4242
llm_tool = agent_.create_llm_chain()
@@ -58,7 +58,7 @@ def create_doc_gpt(
5858
# Use gpt4free llm model without agent
5959
llm_model = GPT4Free(provider=g4f_provider)
6060
docGPT.llm = llm_model
61-
docGPT.create_qa_chain(chain_type='refine')
61+
docGPT.create_qa_chain(chain_type='refine', verbose=False)
6262
return docGPT
6363
except Exception as e:
6464
module_logger.info(f'{__file__}: {e}')

docGPT/docGPT.py

Lines changed: 5 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -164,13 +164,14 @@ def _embeddings(self):
164164

165165
def create_qa_chain(
166166
self,
167-
chain_type: str='stuff',
167+
chain_type: str ='stuff',
168+
verbose: bool = True
168169
) -> BaseQaChain:
169170
# TODO: Bug helper
170171
self._helper_prompt(chain_type)
171172
chain_type_kwargs = {
172173
'question_prompt': self.prompt,
173-
'verbose': True,
174+
'verbose': verbose,
174175
'refine_prompt': self.refine_prompt
175176
}
176177

@@ -233,8 +234,8 @@ def _call(
233234
run_manager: Optional[CallbackManagerForLLMRun] = None,
234235
) -> str:
235236
try:
236-
print(f'Promopt: {prompt}')
237-
print(f'Provider: {self.PROVIDER_MAPPING[self.provider]}')
237+
# print(f'\033[36mPromopt: {prompt}\033[0m')
238+
print(f'\033[36mProvider: {self.PROVIDER_MAPPING[self.provider]}\033[0m')
238239
return g4f.ChatCompletion.create(
239240
model="gpt-3.5-turbo",
240241
messages=[{"role": "user", "content": prompt}],

0 commit comments

Comments
 (0)