Skip to content

Commit 216bea6

Browse files
committed
Fix LlamaIndex bugs
1 parent 7f2deb1 commit 216bea6

File tree

3 files changed

+28
-51
lines changed

3 files changed

+28
-51
lines changed

pyqt_openai/chat_widget/llamaOpenAIThread.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
11
from llama_index.core.base.response.schema import StreamingResponse
22
from PySide6.QtCore import QThread, Signal
33

4-
import pyqt_openai.util.script
54
from pyqt_openai.models import ChatMessageContainer
65

76

7+
88
# TODO
99
# Should combine with ChatThread
1010
class LlamaOpenAIThread(QThread):
@@ -27,16 +27,16 @@ def stop(self):
2727

2828
def run(self):
2929
try:
30-
resp = pyqt_openai.util.script.get_response(self.__query_text)
30+
resp = self.__wrapper.get_response(self.__query_text)
3131
f = isinstance(resp, StreamingResponse)
3232
if f:
33-
for response_text in resp.response_gen:
33+
for chunk in resp.response_gen:
3434
if self.__stop:
3535
self.__info.finish_reason = "stopped by user"
36+
self.streamFinished.emit(self.__info)
3637
break
3738
else:
38-
self.replyGenerated.emit(response_text, True, self.__info)
39-
self.streamFinished.emit(self.__info)
39+
self.replyGenerated.emit(chunk, True, self.__info)
4040
else:
4141
self.__info.content = resp.response
4242
self.replyGenerated.emit(self.__info.content, False, self.__info)

pyqt_openai/globals.py

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2,12 +2,9 @@
22
This is the file that contains the global variables that are used, or possibly used, throughout the application.
33
"""
44

5-
import anthropic
6-
import google.generativeai as genai
7-
from openai import OpenAI
85
from g4f.client import Client
6+
from openai import OpenAI
97

10-
from pyqt_openai import DEFAULT_GEMINI_MODEL
118
from pyqt_openai.sqlite import SqliteDatabase
129
from pyqt_openai.util.llamapage_script import GPTLLamaIndexWrapper
1310
from pyqt_openai.util.replicate_script import ReplicateWrapper
@@ -18,8 +15,7 @@
1815

1916
G4F_CLIENT = Client()
2017

18+
# For Whisper
2119
OPENAI_CLIENT = OpenAI(api_key="")
22-
GEMINI_CLIENT = genai.GenerativeModel(DEFAULT_GEMINI_MODEL)
23-
ANTHROPIC_CLIENT = anthropic.Anthropic(api_key="")
2420

2521
REPLICATE_CLIENT = ReplicateWrapper(api_key="")

pyqt_openai/util/script.py

Lines changed: 21 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,6 @@
7373
from pyqt_openai.globals import (
7474
DB,
7575
OPENAI_CLIENT,
76-
ANTHROPIC_CLIENT,
7776
G4F_CLIENT,
7877
LLAMAINDEX_WRAPPER,
7978
REPLICATE_CLIENT,
@@ -718,13 +717,10 @@ def get_claude_argument(model, system, messages, cur_text, stream, images):
718717
def set_api_key(env_var_name, api_key):
719718
api_key = api_key.strip() if api_key else ""
720719
if env_var_name == "OPENAI_API_KEY":
721-
OPENAI_CLIENT.api_key = api_key
722720
os.environ["OPENAI_API_KEY"] = api_key
723721
if env_var_name == "GEMINI_API_KEY":
724-
genai.configure(api_key=api_key)
725722
os.environ["GEMINI_API_KEY"] = api_key
726723
if env_var_name == "CLAUDE_API_KEY":
727-
ANTHROPIC_CLIENT.api_key = api_key
728724
os.environ["ANTHROPIC_API_KEY"] = api_key
729725
if env_var_name == "REPLICATE_API_KEY":
730726
REPLICATE_CLIENT.api_key = api_key
@@ -898,36 +894,23 @@ def get_api_argument(
898894
json_content=None,
899895
):
900896
try:
901-
provider = get_provider_from_model(model)
902-
if provider == "OpenAI":
903-
args = get_gpt_argument(
904-
model,
905-
system,
906-
messages,
907-
cur_text,
908-
temperature,
909-
top_p,
910-
frequency_penalty,
911-
presence_penalty,
912-
stream,
913-
use_max_tokens,
914-
max_tokens,
915-
images,
916-
is_llama_available=is_llama_available,
917-
is_json_response_available=is_json_response_available,
918-
json_content=json_content,
919-
)
920-
elif provider == "Gemini":
921-
args = get_gemini_argument(
922-
model, system, messages, cur_text, stream, images
923-
)
924-
925-
elif provider == "Anthropic":
926-
args = get_claude_argument(
927-
model, system, messages, cur_text, stream, images
928-
)
929-
else:
930-
raise Exception(f"Provider not found for model {model}")
897+
args = get_gpt_argument(
898+
model,
899+
system,
900+
messages,
901+
cur_text,
902+
temperature,
903+
top_p,
904+
frequency_penalty,
905+
presence_penalty,
906+
stream,
907+
use_max_tokens,
908+
max_tokens,
909+
images,
910+
is_llama_available=is_llama_available,
911+
is_json_response_available=is_json_response_available,
912+
json_content=json_content,
913+
)
931914
return args
932915
except Exception as e:
933916
print(e)
@@ -979,7 +962,7 @@ def get_argument(
979962
raise e
980963

981964

982-
def stream_response(provider, response, is_g4f=False, get_content_only=True):
965+
def stream_response(response, is_g4f=False, get_content_only=True):
983966
if is_g4f:
984967
if get_content_only:
985968
for chunk in response:
@@ -994,10 +977,10 @@ def stream_response(provider, response, is_g4f=False, get_content_only=True):
994977

995978
def get_api_response(args, get_content_only=True):
996979
try:
997-
provider = get_provider_from_model(args["model"])
980+
print(args)
998981
response = completion(drop_params=True, **args)
999982
if args["stream"]:
1000-
return stream_response(provider, response)
983+
return stream_response(response)
1001984
else:
1002985
return response.choices[0].message.content or ""
1003986
except Exception as e:
@@ -1010,7 +993,6 @@ def get_g4f_response(args, get_content_only=True):
1010993
response = G4F_CLIENT.chat.completions.create(**args)
1011994
if args["stream"]:
1012995
return stream_response(
1013-
provider="",
1014996
response=response,
1015997
is_g4f=True,
1016998
get_content_only=get_content_only,
@@ -1174,8 +1156,7 @@ class RecorderThread(QThread):
11741156
recording_finished = Signal(str)
11751157
errorGenerated = Signal(str)
11761158

1177-
# Silence detection 사용 여부
1178-
1159+
# Silence detection parameters
11791160
def __init__(
11801161
self, is_silence_detection=False, silence_duration=3, silence_threshold=500
11811162
):

0 commit comments

Comments (0)