Skip to content

Commit 97e1d34

Browse files
committed
get example working, iron out kinks
1 parent 9211c6f commit 97e1d34

File tree

9 files changed

+2005
-26
lines changed

9 files changed

+2005
-26
lines changed

examples/0-simple/poetry.lock

Lines changed: 1949 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

examples/0-simple/pyproject.toml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,8 @@ packages = [
 
 [tool.poetry.dependencies]
 python = "^3.11"
-ada = "^0.1.0"
+# ada-python = "^0.2.0"
+ada-python = {path = "../../", develop = true}
 
 [tool.poetry.dev-dependencies]
 pytest = "^8.1.1"

examples/0-simple/src/main.py

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,9 @@
-from ada.abcs import AnthropicLLM
-from ada.agents import Ada
+from abcs.anthropic import AnthropicLLM
+from agents.ada import Ada
 
 agent = Ada(client=AnthropicLLM())
-response = agent.generate_text("Name five fruit that start with the letter a.")
+prompt = "Name five fruit that start with the letter a."
+
+print(prompt)
+response = agent.generate_text(prompt)
 print(response.content)

poetry.lock

Lines changed: 20 additions & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

pyproject.toml

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,14 @@
11
[tool.poetry]
22
name = "ada-python"
3-
version = "0.2.1"
4-
description = ""
3+
version = "0.2.2"
4+
description = "Ada, making LLMs easier to work with."
55
authors = ["Will Beebe"]
66
packages = [
77
{include = "abcs", from="src"},
88
{include = "agents", from="src"},
99
{include = "tools", from="src"},
1010
{include = "storage", from="src"},
11+
# {include = "metrics", from="src"},
1112
{include = "data", from="."}
1213
]
1314

@@ -23,6 +24,7 @@ groq = "^0.9.0"
2324
yfinance = "^0.2.38"
2425
openai-multi-tool-use-parallel-patch = "^0.2.0"
2526
ollama = "^0.2.1"
27+
neo4j = "^5.22.0"
2628

2729
[tool.poetry.dev-dependencies]
2830
pytest = "^8.1.1"

src/metrics/main.py renamed to snippets/metrics.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,10 @@
11

22
from opentelemetry import metrics
33
from opentelemetry.exporter.prometheus import PrometheusMetricReader
4-
from opentelemetry.sdk.metrics import Counter, MeterProvider, ObservableGauge
5-
from opentelemetry.sdk.metrics.export import InMemoryMetricReader
6-
from prometheus_client import start_http_server
4+
from opentelemetry.sdk.metrics import MeterProvider
5+
6+
# from opentelemetry.sdk.metrics.export import InMemoryMetricReader
7+
# from prometheus_client import start_http_server
78

89
prometheus_exporter = PrometheusMetricReader()
910
metrics.set_meter_provider(MeterProvider(metric_readers=[prometheus_exporter]))

src/abcs/anthropic.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -132,6 +132,7 @@ def _translate_response(self, response) -> PromptResponse:
         content = response.content[0].text
         return PromptResponse(
             content=content,
+            raw_response=response,
             error={},
             usage=UsageStats(
                 input_tokens=response.usage.input_tokens,

src/agents/agent.py

Lines changed: 19 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,8 @@
 
 from abcs.llm import LLM
 from abcs.models import PromptResponse
-from metrics.main import call_tool_counter, generate_text_counter
+
+# from metrics.main import call_tool_counter, generate_text_counter
 from storage.storage_manager import StorageManager
 from tools.tool_manager import ToolManager
 
@@ -32,7 +33,7 @@ def get_history(self):
         return []
 
     def generate_text(self, prompt: str) -> PromptResponse:
-        generate_text_counter.add(1)
+        # generate_text_counter.add(1)
         logger.debug("Generating text for prompt: '%s'", prompt)
         past_messages = []
         if self.storage_manager is not None:
@@ -41,11 +42,11 @@ def generate_text(self, prompt: str) -> PromptResponse:
             # past_messages = self.storage_manager.get_past_messages_callback()
             # else:
             past_messages = self.storage_manager.get_past_messages()
-            logger.info("Fetched %d past messages", len(past_messages))
+            logger.debug("Fetched %d past messages", len(past_messages))
         # todo: push down to core llm class, leave for now while scripting
 
         try:
-            logger.info("passing %d past messages", len(past_messages))
+            logger.debug("passing %d past messages", len(past_messages))
             if self.storage_manager is not None:
                 self.storage_manager.store_message("user", prompt)
             response = self.client.generate_text(prompt, past_messages, self.tools)
@@ -56,17 +57,18 @@ def generate_text(self, prompt: str) -> PromptResponse:
 
         if self.storage_manager is not None:
             try:
-                translated = self.translate_response(response)
-                self.storage_manager.store_message("assistant", translated.content)
+                # translated = self._translate_response(response)
+                self.storage_manager.store_message("assistant", response.content)
             except Exception as e:
                 logger.error("Error storing messages: %s", e, exc_info=True)
                 raise e
 
-        logger.debug("Generated response: %s", response)
-        return self.translate_response(response)
+        # logger.debug("Generated response: %s", response)
+        # return self._translate_response(response)
+        return response
 
     def call_tool(self, past_messages, tool_msg, tools) -> str:
-        call_tool_counter.add(1)
+        # call_tool_counter.add(1)
         logger.debug("Calling tool with message: %s", tool_msg)
         try:
             if len(tools) == 0:
@@ -79,10 +81,11 @@ def call_tool(self, past_messages, tool_msg, tools) -> str:
             logger.error("Error calling tool: %s", e, exc_info=True)
             raise e
 
-    def translate_response(self, response) -> PromptResponse:
-        try:
-            translated_response = self.client.translate_response(response)
-            return translated_response
-        except Exception as e:
-            logger.error("Error translating response: %s", e, exc_info=True)
-            raise e
+    def _translate_response(self, response) -> PromptResponse:
+        pass
+        # try:
+        #     translated_response = self.client._translate_response(response)
+        #     return translated_response
+        # except Exception as e:
+        #     logger.error("Error translating response: %s", e, exc_info=True)
+        #     raise e

src/metrics/.gitkeep

Whitespace-only changes.

0 commit comments

Comments (0)