
Commit f93cd92

When docGPT builds embeddings it still has to use OpenAIEmbeddings, so the app cannot rely on gpt4free alone.
1 parent 41df5b5 commit f93cd92
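
The constraint named in the commit message is structural: swapping the chat model for gpt4free does not remove the OpenAI dependency, because the document index is still embedded with OpenAIEmbeddings. A minimal sketch of that dependency, assuming the index is built from the imports visible in docGPT/docGPT.py (OpenAIEmbeddings plus FAISS); the helper name build_retriever is hypothetical, not from the repository:

# Sketch only: the embeddings step keeps needing an OpenAI key even when a
# gpt4free provider answers the questions.
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS

def build_retriever(docs):
    embeddings = OpenAIEmbeddings()  # calls the OpenAI embeddings endpoint, so OPENAI_API_KEY is required
    vector_store = FAISS.from_documents(docs, embeddings)
    return vector_store.as_retriever()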

8 files changed: +160 / -32 lines changed


.gitignore

Lines changed: 2 additions & 1 deletion
@@ -100,7 +100,8 @@ ipython_config.py
 # However, in case of collaboration, if having platform-specific dependencies or dependencies
 # having no cross-platform support, pipenv may install dependencies that don't work, or not
 # install all needed dependencies.
-#Pipfile.lock
+Pipfile.lock
+
 
 # poetry
 # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.

Pipfile

Lines changed: 22 additions & 0 deletions
@@ -0,0 +1,22 @@
+[[source]]
+url = "https://pypi.org/simple"
+verify_ssl = true
+name = "pypi"
+
+[packages]
+g4f = "==0.0.2.2"
+langchain = "==0.0.218"
+openai = "==0.27.8"
+streamlit = "==1.22.0"
+streamlit-chat = "==0.1.1"
+pymupdf = "==1.22.5"
+faiss-cpu = "==1.7.4"
+tiktoken = "==0.4.0"
+tenacity = "==8.1.0"
+google-search-results = "==2.4.2"
+
+[dev-packages]
+
+[requires]
+python_version = "3.10"
+python_full_version = "3.10.11"

agent/agent.py

Lines changed: 9 additions & 5 deletions
@@ -16,14 +16,18 @@
 class AgentHelper:
     """Add agent to help docGPT can be perfonm better."""
     def __init__(self) -> None:
-        self.llm = ChatOpenAI(
-            temperature=0.2,
-            max_tokens=6000,
-            model_name='gpt-3.5-turbo-16k'
-        )
+        self._llm = None
         self.agent_ = None
         self.tools = []
 
+    @property
+    def llm(self):
+        return self._llm
+
+    @llm.setter
+    def llm(self, llm) -> None:
+        self._llm = llm
+
     @property
     def get_calculate_chain(self) -> Tool:
         llm_math_chain = LLMMathChain.from_llm(llm=self.llm, verbose=True)
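
The hard-coded ChatOpenAI in AgentHelper.__init__ is replaced by an injectable llm property, so the caller decides which backend drives the agent's chains. A hedged usage sketch mirroring how app.py wires it up later in this commit (constructor arguments taken from the diff):

# Either backend can now be injected after construction.
agent_ = AgentHelper()
agent_.llm = ChatOpenAI(temperature=0.2, max_tokens=6000, model_name='gpt-3.5-turbo-16k')
# ...or, with no OpenAI key available:
# agent_.llm = GPT4Free(provider=g4f.Provider.ChatgptAi)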

app.py

Lines changed: 57 additions & 17 deletions
@@ -8,12 +8,15 @@
 import langchain
 import streamlit as st
 from langchain.cache import InMemoryCache
+from langchain.chat_models import ChatOpenAI
 from streamlit import logger
 from streamlit_chat import message
 
 from agent import AgentHelper
-from docGPT import DocGPT, OpenAiAPI, SerpAPI
+from docGPT import DocGPT, OpenAiAPI, SerpAPI, GPT4Free
 from model import PDFLoader
+import g4f
+from tenacity import retry, stop_after_attempt
 
 langchain.llm_cache = InMemoryCache()
 
@@ -23,6 +26,7 @@
 
 st.session_state.openai_api_key = None
 st.session_state.serpapi_api_key = None
+st.session_state.g4f_provider = None
 app_logger = logger.get_logger(__name__)
 
 
@@ -66,6 +70,15 @@ def load_api_key() -> None:
 
     os.environ['SERPAPI_API_KEY'] = SERPAPI_API_KEY
 
+    with st.sidebar:
+        st.session_state.g4f_provider = st.selectbox(
+            (
+                "#### Select a provider if you want to use free model. "
+                "([details](https://github.com/xtekky/gpt4free#models))"
+            ),
+            (GPT4Free().PROVIDER_MAPPING.keys())
+        )
+
 
 def upload_and_process_pdf():
     upload_file = st.file_uploader('#### Upload a PDF file:', type='pdf')
@@ -103,12 +116,23 @@ def get_response(query: str):
 with doc_container:
     docs = upload_and_process_pdf()
 
-    agent_, docGPT_tool, calculate_tool, search_tool, llm_tool = [None]*5
-    if OpenAiAPI.is_valid():
+    if docs:
+        docGPT = DocGPT(docs=docs)
+        docGPT_tool, calculate_tool, search_tool, llm_tool = [None]*4
         agent_ = AgentHelper()
 
-        if docs:
-            docGPT = DocGPT(docs=docs)
+        if OpenAiAPI.is_valid():
+            # Use openai llm model
+            docGPT.llm = ChatOpenAI(
+                temperature=0.2,
+                max_tokens=6000,
+                model_name='gpt-3.5-turbo-16k'
+            )
+            agent_.llm = ChatOpenAI(
+                temperature=0.2,
+                max_tokens=6000,
+                model_name='gpt-3.5-turbo-16k'
+            )
             docGPT.create_qa_chain(
                 chain_type='refine',
             )
@@ -117,19 +141,35 @@
             calculate_tool = agent_.get_calculate_chain
             llm_tool = agent_.create_llm_chain()
 
-        if SerpAPI.is_valid():
-            search_tool = agent_.get_searp_chain
+            if SerpAPI.is_valid():
+                search_tool = agent_.get_searp_chain
+        else:
+            # Use gpt4free llm model
+            docGPT.llm = GPT4Free(
+                provider=GPT4Free().PROVIDER_MAPPING[
+                    st.session_state.g4f_provider
+                ]
+            )
+            agent_.llm = GPT4Free(
+                provider=GPT4Free().PROVIDER_MAPPING[
+                    st.session_state.g4f_provider
+                ]
+            )
+            docGPT.create_qa_chain(
+                chain_type='refine',
+            )
+            docGPT_tool = agent_.create_doc_chat(docGPT)
+        try:
+            tools = [
+                docGPT_tool,
+                search_tool,
+                # llm_tool, # This will cause agent confuse
+                calculate_tool
+            ]
+            agent_.initialize(tools)
+        except Exception as e:
+            app_logger.info(e)
 
-        try:
-            tools = [
-                docGPT_tool,
-                search_tool,
-                # llm_tool, # This will cause agent confuse
-                calculate_tool
-            ]
-            agent_.initialize(tools)
-        except Exception as e:
-            app_logger.info(e)
 
 st.write('---')
 
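Condensed, the new flow in app.py is: once a document is uploaded, build DocGPT, then give both DocGPT and AgentHelper an LLM backend depending on whether an OpenAI key is present. A simplified restatement (not verbatim; the diff constructs two identical LLM instances and also sets up the agent tools and error handling):

provider_name = st.session_state.g4f_provider  # chosen in the sidebar selectbox
if OpenAiAPI.is_valid():
    llm = ChatOpenAI(temperature=0.2, max_tokens=6000, model_name='gpt-3.5-turbo-16k')
else:
    llm = GPT4Free(provider=GPT4Free().PROVIDER_MAPPING[provider_name])
docGPT.llm = llm   # a single instance is reused here only for brevity
agent_.llm = llm
docGPT.create_qa_chain(chain_type='refine')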

docGPT/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -1,2 +1,2 @@
-from .docGPT import DocGPT
+from .docGPT import DocGPT, GPT4Free
 from .check_api_key import OpenAiAPI, SerpAPI

docGPT/check_api_key.py

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@ class OpenAiAPI(ApiKey):
     @classmethod
     def is_valid(cls) -> str:
         if not st.session_state['openai_api_key']:
-            st.error('⚠️ :red[You have not pass OpenAI API key.] Necessary Pass')
+            st.error('⚠️ :red[You have not pass OpenAI API key.] Use default key')
             return
 
         openai.api_key = os.getenv('OPENAI_API_KEY')

docGPT/docGPT.py

Lines changed: 66 additions & 7 deletions
@@ -1,14 +1,19 @@
 import os
 from abc import ABC, abstractmethod
+from typing import List, Optional
 
+import g4f
 import openai
 from langchain.callbacks import get_openai_callback
+from langchain.callbacks.manager import CallbackManagerForLLMRun
 from langchain.chains import ConversationalRetrievalChain, RetrievalQA
 from langchain.chat_models import ChatOpenAI
 from langchain.embeddings.openai import OpenAIEmbeddings
+from langchain.llms.base import LLM
 from langchain.memory import ConversationBufferMemory
 from langchain.prompts import PromptTemplate
 from langchain.vectorstores import FAISS
+from tenacity import retry, stop_after_attempt
 
 openai.api_key = os.getenv('OPENAI_API_KEY')
 
@@ -80,11 +85,7 @@ class DocGPT:
     def __init__(self, docs):
         self.docs = docs
         self.qa_chain = None
-        self.llm = ChatOpenAI(
-            temperature=0.2,
-            max_tokens=6000,
-            model_name='gpt-3.5-turbo-16k'
-        )
+        self._llm = None
 
         self.prompt_template = """
         Only answer what is asked. Answer step-by-step.
@@ -102,6 +103,14 @@ def __init__(self, docs):
             input_variables=['context', 'question']
         )
 
+    @property
+    def llm(self):
+        return self._llm
+
+    @llm.setter
+    def llm(self, llm) -> None:
+        self._llm = llm
+
     def _helper_prompt(self, chain_type: str) -> None:
         # TODO: Bug helper
         if chain_type == 'refine':
@@ -151,14 +160,14 @@ def create_qa_chain(
             self.qa_chain = RChain(
                 chain_type=chain_type,
                 retriever=retriever,
-                llm=self.llm,
+                llm=self._llm,
                 chain_type_kwargs=chain_type_kwargs
             ).create_qa_chain
         else:
             self.qa_chain = CRChain(
                 chain_type=chain_type,
                 retriever=retriever,
-                llm=self.llm
+                llm=self._llm
             ).create_qa_chain
 
     def run(self, query: str) -> str:
@@ -171,3 +180,53 @@ def run(self, query: str) -> str:
             response = self.qa_chain({'question': query, 'chat_history': chat_history})
             print(callback)
         return response
+
+
+class GPT4Free(LLM):
+    PROVIDER_MAPPING = {
+        'g4f.Provider.ChatgptAi': g4f.Provider.ChatgptAi,
+        'g4f.Provider.AItianhu': g4f.Provider.AItianhu,
+        'g4f.Provider.Acytoo': g4f.Provider.Acytoo,
+        'g4f.Provider.AiService': g4f.Provider.AiService,
+        'g4f.Provider.Aichat': g4f.Provider.Aichat,
+        'g4f.Provider.Ails': g4f.Provider.Ails,
+        'g4f.Provider.Bard': g4f.Provider.Bard,
+        'g4f.Provider.Bing': g4f.Provider.Bing,
+        'g4f.Provider.ChatgptLogin': g4f.Provider.ChatgptLogin,
+        'g4f.Provider.DeepAi': g4f.Provider.DeepAi,
+        'g4f.Provider.DfeHub': g4f.Provider.DfeHub,
+        'g4f.Provider.EasyChat': g4f.Provider.EasyChat,
+        'g4f.Provider.Forefront': g4f.Provider.Forefront,
+        'g4f.Provider.GetGpt': g4f.Provider.GetGpt,
+        'g4f.Provider.H2o': g4f.Provider.H2o,
+        'g4f.Provider.Liaobots': g4f.Provider.Liaobots,
+        'g4f.Provider.Lockchat': g4f.Provider.Lockchat,
+        'g4f.Provider.Opchatgpts': g4f.Provider.Opchatgpts,
+        'g4f.Provider.Raycast': g4f.Provider.Raycast,
+        'g4f.Provider.Theb': g4f.Provider.Theb,
+        'g4f.Provider.Vercel': g4f.Provider.Vercel,
+        'g4f.Provider.Wewordle': g4f.Provider.Wewordle,
+        'g4f.Provider.You': g4f.Provider.You,
+        'g4f.Provider.Yqcloud': g4f.Provider.Yqcloud,
+    }
+    provider = PROVIDER_MAPPING['g4f.Provider.ChatgptAi']
+
+    @property
+    def _llm_type(self) -> str:
+        return 'gpt4free model'
+
+    # @retry(stop=stop_after_attempt(20))
+    def _call(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+    ) -> str:
+        try:
+            return g4f.ChatCompletion.create(
+                model="gpt-3.5-turbo",
+                messages=[{"role": "user", "content": prompt}],
+                provider=self.provider
+            )
+        except Exception as e:
+            print(self.provider)
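
Because GPT4Free subclasses LangChain's LLM base class, anything that accepts an LLM (the retrieval QA chain, the agent tools) can take it in place of ChatOpenAI. A hypothetical standalone call, assuming the chosen g4f provider is reachable at runtime:

llm = GPT4Free(provider=g4f.Provider.DeepAi)  # provider picked arbitrarily for this example
print(llm('Answer in one sentence: what does FAISS do?'))

Note that _call swallows exceptions and only prints the provider, so a failed provider yields None instead of raising; together with the commented-out @retry decorator, this suggests flaky providers are expected and callers should tolerate empty responses.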

requirements.txt

Lines changed: 2 additions & 0 deletions
@@ -1,8 +1,10 @@
+g4f==0.0.2.2
 langchain==0.0.218
 openai==0.27.8
 streamlit==1.22.0
 streamlit_chat==0.1.1
 pymupdf==1.22.5
 faiss-cpu==1.7.4
 tiktoken==0.4.0
+tenacity==8.1.0
 google-search-results==2.4.2
