Skip to content

Commit 36e343b

Browse files
committed
refactors
1 parent 0b20f42 commit 36e343b

File tree

10 files changed

+461
-142
lines changed

10 files changed

+461
-142
lines changed

ngwidgets/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = "0.23.0"
1+
__version__ = "0.24.0"

ngwidgets/base_llm.py

Lines changed: 97 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,97 @@
1+
"""
2+
Created on 2025-04-07
3+
4+
@author: wf
5+
"""
6+
7+
import time
8+
from dataclasses import field
9+
from datetime import datetime
10+
from typing import List, Optional
11+
12+
from ngwidgets.yamlable import lod_storable
13+
14+
15+
@lod_storable
class Prompt:
    """
    A single prompt with its response and usage metadata
    (token count, timing, model used, optional image).
    """

    prompt: str
    response: str
    tokens: int
    temperature: float
    timestamp: datetime
    duration: float
    model: Optional[str] = None
    model_details: Optional[str] = None
    image_path: Optional[str] = None

    def append_to_file(self, filepath: str):
        """
        Append this prompt as a YAML document to the given file.

        Args:
            filepath: path of the YAML file to append to
        """
        # open in append mode so existing history is preserved
        with open(filepath, "a") as file:
            yaml_str = self.to_yaml()
            # ensure the new prompt record starts on a fresh line
            file.write(f"\n{yaml_str}")

    @classmethod
    def from_chat(
        cls,
        prompt_text: str,
        result: str,
        chat_completion,
        start_time: datetime,
        model: Optional[str] = None,
        image_path: Optional[str] = None,
        temperature: float = 0.7,
    ) -> "Prompt":
        """
        Create a Prompt instance with timing and usage info.

        Args:
            prompt_text: the text that was sent to the model
            result: the model's response text
            chat_completion: API completion object providing
                ``usage.total_tokens`` and ``model``
            start_time: when the request was issued, used to compute duration
            model: the logical model name that was requested
            image_path: optional path/URL of an image that was analyzed
            temperature: sampling temperature that was used

        Returns:
            Prompt: the populated prompt record
        """
        total_tokens = chat_completion.usage.total_tokens
        model_details = chat_completion.model
        duration = (datetime.now() - start_time).total_seconds()
        # fix: use cls (not a hard-coded Prompt) so subclasses of Prompt
        # get instances of their own type; the parameter was also
        # misleadingly named "self" despite the @classmethod decorator
        prompt = cls(
            prompt=prompt_text,
            response=result,
            model=model,
            model_details=model_details,
            temperature=temperature,
            tokens=total_tokens,
            timestamp=datetime.now(),
            duration=duration,
            image_path=image_path,
        )
        return prompt
68+
69+
70+
@lod_storable
class Prompts:
    """
    Container that keeps track of a series of prompts.
    """

    prompts: List[Prompt] = field(default_factory=list)

    def add_to_filepath(
        self, new_prompt: Prompt, prompts_filepath, debug: bool = False
    ):
        """
        Record a new prompt and persist it to the given file.

        Args:
            new_prompt: the prompt record to add
            prompts_filepath: file the prompt is appended to
            debug: if True, print how long the append took
        """
        started = time.time()

        # keep the in-memory list and the on-disk history in sync
        self.prompts.append(new_prompt)
        new_prompt.append_to_file(prompts_filepath)

        elapsed = time.time() - started
        if debug:
            print(f"Time taken to append to prompts: {elapsed} seconds")
92+
93+
94+
class BaseLLM:
    """
    Base access wrapper for Large Language Models.

    Concrete providers (e.g. Claude, OpenAI) derive from this class.
    """

ngwidgets/claude_llm.py

Lines changed: 289 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,289 @@
1+
"""
2+
Created on 2025-04-07
3+
4+
@author: wf
5+
"""
6+
7+
import base64
8+
import json
9+
import os
10+
from datetime import datetime
11+
from pathlib import Path
12+
13+
import anthropic
14+
15+
from ngwidgets.base_llm import BaseLLM
16+
from ngwidgets.base_llm import Prompt, Prompts
17+
18+
19+
class ClaudeLLM(BaseLLM):
    """
    Claude Large Language Model access wrapper.

    To get a key visit
    https://console.anthropic.com/
    """

    # Map of model identifiers to their context-window token limits.
    # NOTE(fix): Anthropic model IDs are fully hyphenated — e.g.
    # "claude-3-5-sonnet-20240620", not "claude-3.5-sonnet-20240620";
    # the dotted forms previously used here are rejected by the API.
    MODEL_SIZE_LIMITS = {
        "claude-3-opus-20240229": 200000,
        "claude-3-sonnet-20240229": 200000,
        "claude-3-haiku-20240307": 200000,
        "claude-3-5-sonnet-20240620": 200000,
        "claude-3-7-sonnet-20250219": 200000,
    }

    # Default model (hyphenated per Anthropic naming — see note above)
    DEFAULT_MODEL = "claude-3-7-sonnet-20250219"

    def __init__(
        self,
        api_key=None,
        model=DEFAULT_MODEL,
        force_key=False,
        prompts_filepath=None,
        max_tokens=4000
    ):
        """
        constructor

        Args:
            api_key: The access key for Claude API
            model: The model name to use
            force_key: If True, raise error when key is missing
            prompts_filepath: Path for storing prompt history
            max_tokens: Maximum tokens in the response
        """
        self.client = None
        self.model = model
        self.max_tokens = max_tokens
        # fall back to 200k — current default for all Claude 3.x models
        self.token_size_limit = self.MODEL_SIZE_LIMITS.get(model, 200000)

        anthropic_api_key = self._resolve_api_key(api_key)

        # Initialize client if key is available
        if anthropic_api_key:
            self.client = anthropic.Anthropic(api_key=anthropic_api_key)
        elif force_key:
            raise ValueError(
                "No Anthropic API key found. Please set the 'ANTHROPIC_API_KEY' environment variable or store it in `~/.anthropic/anthropic_api_key.json`."
            )

        if prompts_filepath is None:
            prompts_filepath = self._default_prompts_filepath()
        self.prompts_filepath = prompts_filepath

        # Load existing prompt history if present, else start fresh
        if os.path.isfile(prompts_filepath):
            self.prompts = Prompts.load_from_yaml_file(str(prompts_filepath))
        else:
            self.prompts = Prompts()

    def _resolve_api_key(self, api_key):
        """
        Resolve the API key from argument, environment, or key file.

        Args:
            api_key: explicitly supplied key or None

        Returns:
            the resolved key string, or None if none was found
        """
        anthropic_api_key = api_key or os.getenv("ANTHROPIC_API_KEY")
        if not anthropic_api_key:
            # fallback: ~/.anthropic/anthropic_api_key.json
            json_file = Path.home() / ".anthropic" / "anthropic_api_key.json"
            if json_file.is_file():
                with open(json_file, "r") as file:
                    data = json.load(file)
                anthropic_api_key = data.get("ANTHROPIC_API_KEY")
        return anthropic_api_key

    def _default_prompts_filepath(self):
        """
        Build the default date-stamped prompt-history path,
        creating ~/.anthropic if necessary.

        Returns:
            Path: the default prompts file path
        """
        date_str = datetime.now().strftime("%Y-%m-%d")
        default_filename = f"claude_prompts_{date_str}.yaml"
        anthropic_dir = Path.home() / ".anthropic"
        if not anthropic_dir.exists():
            anthropic_dir.mkdir(parents=True, exist_ok=True)
        return anthropic_dir / default_filename

    def available(self):
        """
        check availability of API by making sure there is an api_key

        Returns:
            bool: True if the API client is available
        """
        return self.client is not None

    def ask(self, prompt_text, model=None, temperature=0.7, system_prompt=None):
        """
        ask a prompt

        Args:
            prompt_text: The text of the prompt to send to the model
            model: The model to use (defaults to self.model)
            temperature: Controls randomness in the response
            system_prompt: Optional system prompt for context

        Returns:
            str: The model's response
        """
        if not self.available():
            return "Claude API not available"

        if model is None:
            model = self.model

        # Start timing so the prompt record can store the duration
        start_time = datetime.now()

        message_kwargs = {
            "model": model,
            "max_tokens": self.max_tokens,
            "temperature": temperature,
            "messages": [
                {"role": "user", "content": prompt_text}
            ]
        }
        # system prompt is a top-level parameter in the Messages API,
        # not a message role
        if system_prompt:
            message_kwargs["system"] = system_prompt

        message_completion = self.client.messages.create(**message_kwargs)

        # first content block carries the text response
        result = message_completion.content[0].text

        # Create and persist the prompt record
        new_prompt = Prompt.from_chat(
            prompt_text,
            result,
            message_completion,
            start_time,
            model,
            temperature=temperature,
        )
        self.prompts.add_to_filepath(new_prompt, self.prompts_filepath)

        return result
163+
164+
165+
class ClaudeVisionLLM(ClaudeLLM):
    """
    Extension of ClaudeLLM class to handle image analysis.
    """

    def __init__(
        self,
        api_key=None,
        model="claude-3-opus-20240229",
        force_key=False,
        prompts_filepath=None,
        max_tokens=4000
    ):
        """
        Initialize with vision-capable model as default.

        Args:
            api_key: The access key for Claude API
            model: The model name to use (should be vision-capable)
            force_key: If True, raise error when key is missing
            prompts_filepath: Path for storing prompt history
                (fix: previously not forwarded to the base class)
            max_tokens: Maximum tokens in the response
                (fix: previously not forwarded to the base class)
        """
        super().__init__(
            api_key=api_key,
            model=model,
            force_key=force_key,
            prompts_filepath=prompts_filepath,
            max_tokens=max_tokens,
        )

    def analyze_image(self, image_path, prompt_text, system_prompt=None):
        """
        Analyze an image with a given prompt.

        Args:
            image_path: Path to local file or URL of the image
            prompt_text: The text prompt to guide the image analysis
            system_prompt: Optional system prompt for context

        Returns:
            str: The model's response
        """
        if not self.available():
            return "Claude Vision API not available"

        # Start timing so the prompt record can store the duration
        start_time = datetime.now()

        media_content = self._build_media_content(image_path)

        # message combines the text instruction with the image block
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt_text},
                    media_content
                ]
            }
        ]

        message_kwargs = {
            "model": self.model,
            "max_tokens": self.max_tokens,
            "messages": messages
        }
        # system prompt is a top-level parameter in the Messages API
        if system_prompt:
            message_kwargs["system"] = system_prompt

        message_completion = self.client.messages.create(**message_kwargs)

        # first content block carries the text response
        result = message_completion.content[0].text

        # Create and persist the prompt record
        new_prompt = Prompt.from_chat(
            prompt_text,
            result,
            message_completion,
            start_time,
            self.model,
            image_path=image_path,
        )
        self.prompts.add_to_filepath(new_prompt, self.prompts_filepath)

        return result

    def _build_media_content(self, image_path):
        """
        Build the image content block for the Messages API.

        Args:
            image_path: local file path or http(s) URL of the image

        Returns:
            dict: an "image" content block with a url or base64 source
        """
        if image_path.startswith("http"):
            media_content = {
                "type": "image",
                "source": {
                    "type": "url",
                    "url": image_path
                }
            }
        else:
            # local file: read and embed as base64
            with open(image_path, "rb") as image_file:
                image_data = image_file.read()
            media_content = {
                "type": "image",
                "source": {
                    "type": "base64",
                    "media_type": self._get_media_type(image_path),
                    "data": base64.b64encode(image_data).decode("utf-8")
                }
            }
        return media_content

    def _get_media_type(self, image_path):
        """
        Determine the media type (MIME type) based on file extension.

        Args:
            image_path: Path to the image file

        Returns:
            str: MIME type of the image
        """
        extension = image_path.lower().split('.')[-1]

        mime_types = {
            'jpg': 'image/jpeg',
            'jpeg': 'image/jpeg',
            'png': 'image/png',
            'gif': 'image/gif',
            'webp': 'image/webp',
            'svg': 'image/svg+xml',
            'heic': 'image/heic'
        }

        # NOTE(review): 'application/octet-stream' is not an image media
        # type the API accepts — unknown extensions will likely fail
        # server-side; kept for backward compatibility
        return mime_types.get(extension, 'application/octet-stream')

0 commit comments

Comments
 (0)