Skip to content

Commit bb768c1

Browse files
Openai compatability (#231)
* fix: ๐Ÿ› fix OPENAI key setting issue and update readme * feat: ๐ŸŽธ update gpt4o * style: format code with Black This commit fixes the style issues introduced in 99581a8 according to the output from Black. Details: #229 * fix: ๐Ÿ› fix OPENAI_KEY typo * style: format code with Black This commit fixes the style issues introduced in 8f9091c according to the output from Black. Details: #230 * feat: ๐ŸŽธ update openai python sdk --------- Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com>
1 parent f072f90 commit bb768c1

File tree

6 files changed

+27
-18
lines changed

6 files changed

+27
-18
lines changed

pentestgpt/_version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = "0.13.1"
1+
__version__ = "0.13.3"

pentestgpt/utils/APIs/chatgpt_api.py

Lines changed: 15 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
import openai
1010
import tiktoken
1111
from langfuse.model import InitialGeneration, Usage
12+
from openai import OpenAI
1213
from tenacity import *
1314

1415
from pentestgpt.utils.llm_api import LLMAPI
@@ -46,6 +47,8 @@ def __eq__(self, other):
4647
class ChatGPTAPI(LLMAPI):
4748
def __init__(self, config_class, use_langfuse_logging=False):
4849
self.name = str(config_class.model)
50+
api_key = os.getenv("OPENAI_API_KEY", None)
51+
self.client = OpenAI(api_key=api_key, base_url=config_class.api_base)
4952

5053
if use_langfuse_logging:
5154
# use langfuse.openai to shadow the default openai library
@@ -58,9 +61,7 @@ def __init__(self, config_class, use_langfuse_logging=False):
5861
from langfuse import Langfuse
5962

6063
self.langfuse = Langfuse()
61-
62-
openai.api_key = os.getenv("OPENAI_API_KEY", None)
63-
openai.api_base = config_class.api_base
64+
6465
self.model = config_class.model
6566
self.log_dir = config_class.log_dir
6667
self.history_length = 5 # maintain 5 messages in the history. (5 chat memory)
@@ -69,7 +70,9 @@ def __init__(self, config_class, use_langfuse_logging=False):
6970

7071
logger.add(sink=os.path.join(self.log_dir, "chatgpt.log"), level="WARNING")
7172

72-
def _chat_completion(self, history: List, model=None, temperature=0.5) -> str:
73+
def _chat_completion(
74+
self, history: List, model=None, temperature=0.5, image_url: str = None
75+
) -> str:
7376
generationStartTime = datetime.now()
7477
# use model if provided, otherwise use self.model; if self.model is None, use gpt-4-1106-preview
7578
if model is None:
@@ -78,12 +81,12 @@ def _chat_completion(self, history: List, model=None, temperature=0.5) -> str:
7881
else:
7982
model = self.model
8083
try:
81-
response = openai.ChatCompletion.create(
84+
response = self.client.chat.completions.create(
8285
model=model,
8386
messages=history,
8487
temperature=temperature,
8588
)
86-
except openai.error.APIConnectionError as e: # give one more try
89+
except openai._exceptions.APIConnectionError as e: # give one more try
8790
logger.warning(
8891
"API Connection Error. Waiting for {} seconds".format(
8992
self.error_wait_time
@@ -96,7 +99,7 @@ def _chat_completion(self, history: List, model=None, temperature=0.5) -> str:
9699
messages=history,
97100
temperature=temperature,
98101
)
99-
except openai.error.RateLimitError as e: # give one more try
102+
except openai._exceptions.RateLimitError as e: # give one more try
100103
logger.warning("Rate limit reached. Waiting for 5 seconds")
101104
logger.error("Rate Limit Error: ", e)
102105
time.sleep(5)
@@ -105,7 +108,7 @@ def _chat_completion(self, history: List, model=None, temperature=0.5) -> str:
105108
messages=history,
106109
temperature=temperature,
107110
)
108-
except openai.error.InvalidRequestError as e: # token limit reached
111+
except openai._exceptions.RateLimitError as e: # token limit reached
109112
logger.warning("Token size limit reached. The recent message is compressed")
110113
logger.error("Token size error; will retry with compressed message ", e)
111114
# compress the message in two ways.
@@ -151,14 +154,14 @@ def _chat_completion(self, history: List, model=None, temperature=0.5) -> str:
151154
model=self.model,
152155
modelParameters={"temperature": str(temperature)},
153156
prompt=history,
154-
completion=response["choices"][0]["message"]["content"],
157+
completion=response.choices[0].message.content,
155158
usage=Usage(
156-
promptTokens=response["usage"]["prompt_tokens"],
157-
completionTokens=response["usage"]["completion_tokens"],
159+
promptTokens=response.usage.prompt_tokens,
160+
completionTokens=response.usage.completion_tokens,
158161
),
159162
)
160163
)
161-
return response["choices"][0]["message"]["content"]
164+
return response.choices[0].message.content
162165

163166

164167
if __name__ == "__main__":

pentestgpt/utils/APIs/chatgpt_vision_api.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,12 @@ def __init__(self, config_class, use_langfuse_logging=False):
6969

7070
logger.add(sink=os.path.join(self.log_dir, "chatgpt.log"), level="WARNING")
7171

72-
def _chat_completion(self, history: List, model=None, temperature=0.5) -> str:
72+
def _chat_completion(
73+
self,
74+
history: List,
75+
model=None,
76+
temperature=0.5,
77+
) -> str:
7378
generationStartTime = datetime.now()
7479
# use model if provided, otherwise use self.model; if self.model is None, use gpt-4-1106-preview
7580
if model is None:

pyproject.toml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[tool.poetry]
22
name = "pentestgpt"
3-
version = "0.13.1"
3+
version = "0.13.3"
44
description = "PentestGPT is an LLM-powered penetration testing tool."
55
authors = ["Gelei Deng <GELEI.DENG@ntu.edu.sg>"]
66
license = "MIT"
@@ -19,7 +19,7 @@ rich = "^13.7.1"
1919
prompt-toolkit = "^3.0.43"
2020
google = "^3.0.0"
2121
pytest = "^8.1.1"
22-
openai = ">=0.27.8,<0.28.0"
22+
openai = "^1.29.0"
2323
langchain = "^0.1.13"
2424
tiktoken = "^0.6.0"
2525
pycookiecheat = "^0.6.0"

requirements.txt

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ colorama==0.4.6 ; python_version >= "3.10" and python_version < "4.0"
1616
cryptography==41.0.4 ; python_version >= "3.10" and python_version < "4.0"
1717
cssselect==1.2.0 ; python_version >= "3.10" and python_version < "4.0"
1818
dataclasses-json==0.6.6 ; python_version >= "3.10" and python_version < "4.0"
19+
distro==1.9.0 ; python_version >= "3.10" and python_version < "4.0"
1920
exceptiongroup==1.2.1 ; python_version >= "3.10" and python_version < "3.11"
2021
feedfinder2==0.0.4 ; python_version >= "3.10" and python_version < "4.0"
2122
feedparser==6.0.11 ; python_version >= "3.10" and python_version < "4.0"
@@ -64,7 +65,7 @@ mypy-extensions==1.0.0 ; python_version >= "3.10" and python_version < "4.0"
6465
newspaper3k==0.2.8 ; python_version >= "3.10" and python_version < "4.0"
6566
nltk==3.8.1 ; python_version >= "3.10" and python_version < "4.0"
6667
numpy==1.26.4 ; python_version >= "3.10" and python_version < "4.0"
67-
openai==0.27.10 ; python_version >= "3.10" and python_version < "4.0"
68+
openai==1.29.0 ; python_version >= "3.10" and python_version < "4.0"
6869
orjson==3.10.3 ; python_version >= "3.10" and python_version < "4.0"
6970
packaging==23.2 ; python_version >= "3.10" and python_version < "4.0"
7071
pathspec==0.12.1 ; python_version >= "3.10" and python_version < "4.0"

setup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77

88
setup(
99
name="pentestgpt",
10-
version="0.13.1",
10+
version="0.13.3",
1111
description="PentestGPT, a GPT-empowered penetration testing tool",
1212
long_description="""
1313
PentestGPT is a penetration testing tool empowered by ChatGPT.

0 commit comments

Comments
(0)