
Commit 50bc2c8

update anthropic version (#25)
* update anthropic version
* fix import in test_anthropic_client.py
* fix import in test_anthropic_client.py
* Update anthropic version
1 parent: f417c5b

5 files changed, +66 −21 lines

llm_client/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-__version__ = "0.6.1"
+__version__ = "0.6.2"
 
 from llm_client.base_llm_client import BaseLLMClient
 

llm_client/llm_api_client/anthropic_client.py

Lines changed: 6 additions & 2 deletions
@@ -1,6 +1,6 @@
 from typing import Optional
 
-from anthropic import count_tokens
+from anthropic import AsyncAnthropic
 
 from llm_client.llm_api_client.base_llm_api_client import BaseLLMAPIClient, LLMAPIClientConfig
 from llm_client.consts import PROMPT_KEY
@@ -10,6 +10,7 @@
 COMPLETIONS_KEY = "completion"
 AUTH_HEADER = "x-api-key"
 ACCEPT_HEADER = "Accept"
+VERSION_HEADER = "anthropic-version"
 ACCEPT_VALUE = "application/json"
 MAX_TOKENS_KEY = "max_tokens_to_sample"
 
@@ -19,6 +20,9 @@ def __init__(self, config: LLMAPIClientConfig):
         super().__init__(config)
         if self._base_url is None:
             self._base_url = BASE_URL
+        self._anthropic = AsyncAnthropic()
+        if self._headers.get(VERSION_HEADER) is None:
+            self._headers[VERSION_HEADER] = self._anthropic.default_headers[VERSION_HEADER]
         self._headers[ACCEPT_HEADER] = ACCEPT_VALUE
         self._headers[AUTH_HEADER] = self._api_key
 
@@ -40,4 +44,4 @@ async def text_completion(self, prompt: str, model: Optional[str] = None, max_to
         return [response_json[COMPLETIONS_KEY]]
 
     async def get_tokens_count(self, text: str, **kwargs) -> int:
-        return count_tokens(text)
+        return await self._anthropic.count_tokens(text)
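
The upshot of the `__init__` change above: the `anthropic-version` header is resolved by a simple fallback chain, where a value the caller pinned explicitly wins and the installed SDK's own default is copied in otherwise. A minimal standalone sketch of that resolution, assuming ANTHROPIC_API_KEY is set in the environment (the SDK client reads it at construction); `caller_headers` and `resolve_version_header` are illustrative names, not part of the library:

from anthropic import AsyncAnthropic

VERSION_HEADER = "anthropic-version"

def resolve_version_header(caller_headers: dict) -> dict:
    # A caller-pinned version wins; otherwise copy the default header
    # shipped with the installed anthropic SDK.
    if caller_headers.get(VERSION_HEADER) is None:
        caller_headers[VERSION_HEADER] = AsyncAnthropic().default_headers[VERSION_HEADER]
    return caller_headers

print(resolve_version_header({}))                         # SDK default, e.g. {"anthropic-version": "2023-06-01"}
print(resolve_version_header({VERSION_HEADER: "1.0.0"}))  # caller override kept as-is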

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -43,7 +43,7 @@ huggingface = [
     "transformers >= 4.0.0"
 ]
 anthropic = [
-    "anthropic >= 0.2.0"
+    "anthropic >= 0.3.2"
 ]
 google = [
     "google-generativeai >= 0.1.0"

tests/llm_api_client/anthropic_client/conftest.py

Lines changed: 21 additions & 2 deletions
@@ -1,7 +1,8 @@
+from unittest.mock import patch, AsyncMock
+
 import pytest
 
-from llm_client import AnthropicClient
-from llm_client.llm_api_client.anthropic_client import BASE_URL, COMPLETE_PATH
+from llm_client.llm_api_client.anthropic_client import BASE_URL, COMPLETE_PATH, VERSION_HEADER, AnthropicClient
 from llm_client.llm_api_client.base_llm_api_client import LLMAPIClientConfig
 
 
@@ -23,3 +24,21 @@ def llm_client(config):
 @pytest.fixture
 def complete_url():
     return BASE_URL + COMPLETE_PATH
+
+
+@pytest.fixture
+def number_of_tokens():
+    return 10
+
+
+@pytest.fixture
+def anthropic_version():
+    return "2023-06-01"
+
+
+@pytest.fixture(autouse=True)
+def mock_anthropic(number_of_tokens, anthropic_version):
+    with patch("llm_client.llm_api_client.anthropic_client.AsyncAnthropic") as mock_anthropic:
+        mock_anthropic.return_value.count_tokens = AsyncMock(return_value=number_of_tokens)
+        mock_anthropic.return_value.default_headers = {VERSION_HEADER: anthropic_version}
+        yield mock_anthropic
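
Two details of this fixture are worth noting: `autouse=True` means every test in the directory runs against the patched AsyncAnthropic without requesting it, and `count_tokens` must be an AsyncMock because the client awaits it; a plain MagicMock is not awaitable. A tiny self-contained sketch of that second point:

import asyncio
from unittest.mock import AsyncMock, MagicMock

async def main():
    awaitable = AsyncMock(return_value=10)
    print(await awaitable())      # 10 -- calling an AsyncMock returns an awaitable
    try:
        await MagicMock()()       # a MagicMock's return value cannot be awaited
    except TypeError as err:
        print(err)

asyncio.run(main())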

tests/llm_api_client/anthropic_client/test_anthropic_client.py

Lines changed: 37 additions & 15 deletions
@@ -1,11 +1,9 @@
-from unittest.mock import patch
-
 import pytest
 
-from llm_client import LLMAPIClientFactory, LLMAPIClientType, AnthropicClient
+from llm_client import LLMAPIClientFactory, LLMAPIClientType
 from llm_client.consts import PROMPT_KEY, MODEL_KEY
 from llm_client.llm_api_client.anthropic_client import AUTH_HEADER, COMPLETIONS_KEY, MAX_TOKENS_KEY, ACCEPT_HEADER, \
-    ACCEPT_VALUE
+    ACCEPT_VALUE, VERSION_HEADER, AnthropicClient
 
 
 @pytest.mark.asyncio
@@ -18,18 +16,41 @@ async def test_get_llm_api_client__with_anthropic(config):
 
 
 @pytest.mark.asyncio
-async def test_text_completion__sanity(mock_aioresponse, llm_client, complete_url):
+async def test_text_completion__sanity(mock_aioresponse, llm_client, complete_url, anthropic_version):
+    mock_aioresponse.post(
+        complete_url,
+        payload={COMPLETIONS_KEY: "completion text"}
+    )
+
+    actual = await llm_client.text_completion(prompt="These are a few of my favorite", max_tokens=10)
+
+    assert actual == ["completion text"]
+    mock_aioresponse.assert_called_once_with(complete_url, method='POST',
+                                             headers={AUTH_HEADER: llm_client._api_key,
+                                                      ACCEPT_HEADER: ACCEPT_VALUE,
+                                                      VERSION_HEADER: anthropic_version},
+                                             json={PROMPT_KEY: 'These are a few of my favorite',
+                                                   MAX_TOKENS_KEY: 10, "temperature": 1,
+                                                   MODEL_KEY: llm_client._default_model},
+                                             raise_for_status=True)
+
+
+@pytest.mark.asyncio
+async def test_text_completion__with_version_header(mock_aioresponse, config, complete_url):
     mock_aioresponse.post(
         complete_url,
         payload={COMPLETIONS_KEY: "completion text"}
     )
+    config.headers[VERSION_HEADER] = "1.0.0"
+    llm_client = AnthropicClient(config)
 
     actual = await llm_client.text_completion(prompt="These are a few of my favorite", max_tokens=10)
 
     assert actual == ["completion text"]
     mock_aioresponse.assert_called_once_with(complete_url, method='POST',
                                              headers={AUTH_HEADER: llm_client._api_key,
-                                                      ACCEPT_HEADER: ACCEPT_VALUE},
+                                                      ACCEPT_HEADER: ACCEPT_VALUE,
+                                                      VERSION_HEADER: "1.0.0"},
                                              json={PROMPT_KEY: 'These are a few of my favorite',
                                                    MAX_TOKENS_KEY: 10, "temperature": 1,
                                                    MODEL_KEY: llm_client._default_model},
@@ -43,7 +64,7 @@ async def test_text_completion__without_max_tokens_raise_value_error(mock_aiores
 
 
 @pytest.mark.asyncio
-async def test_text_completion__override_model(mock_aioresponse, llm_client, complete_url):
+async def test_text_completion__override_model(mock_aioresponse, llm_client, complete_url, anthropic_version):
     new_model_name = "claude-instant"
     mock_aioresponse.post(
         complete_url,
@@ -56,15 +77,16 @@ async def test_text_completion__override_model(mock_aioresponse, llm_client, com
     assert actual == ["completion text"]
     mock_aioresponse.assert_called_once_with(complete_url, method='POST',
                                              headers={AUTH_HEADER: llm_client._api_key,
-                                                      ACCEPT_HEADER: ACCEPT_VALUE},
+                                                      ACCEPT_HEADER: ACCEPT_VALUE,
+                                                      VERSION_HEADER: anthropic_version},
                                              json={PROMPT_KEY: 'These are a few of my favorite',
                                                    MAX_TOKENS_KEY: 10, "temperature": 1,
                                                    MODEL_KEY: new_model_name},
                                              raise_for_status=True)
 
 
 @pytest.mark.asyncio
-async def test_text_completion__with_kwargs(mock_aioresponse, llm_client, complete_url):
+async def test_text_completion__with_kwargs(mock_aioresponse, llm_client, complete_url, anthropic_version):
     mock_aioresponse.post(
         complete_url,
         payload={COMPLETIONS_KEY: "completion text"}
@@ -75,7 +97,8 @@ async def test_text_completion__with_kwargs(mock_aioresponse, llm_client, comple
     assert actual == ["completion text"]
     mock_aioresponse.assert_called_once_with(complete_url, method='POST',
                                              headers={AUTH_HEADER: llm_client._api_key,
-                                                      ACCEPT_HEADER: ACCEPT_VALUE},
+                                                      ACCEPT_HEADER: ACCEPT_VALUE,
+                                                      VERSION_HEADER: anthropic_version},
                                              json={PROMPT_KEY: 'These are a few of my favorite',
                                                    MAX_TOKENS_KEY: 10,
                                                    MODEL_KEY: llm_client._default_model,
@@ -84,9 +107,8 @@ async def test_text_completion__with_kwargs(mock_aioresponse, llm_client, comple
 
 
 @pytest.mark.asyncio
-async def test_get_tokens_count__sanity(llm_client):
-    with patch("llm_client.llm_api_client.anthropic_client.count_tokens") as mock_count_tokens:
-        actual = await llm_client.get_tokens_count(text="These are a few of my favorite things!")
+async def test_get_tokens_count__sanity(llm_client, number_of_tokens, mock_anthropic):
+    actual = await llm_client.get_tokens_count(text="These are a few of my favorite things!")
 
-    assert actual == mock_count_tokens.return_value
-    mock_count_tokens.assert_called_once_with("These are a few of my favorite things!")
+    assert actual == 10
+    mock_anthropic.return_value.count_tokens.assert_awaited_once_with("These are a few of my favorite things!")
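
For orientation, the behavior these tests exercise corresponds roughly to the following direct usage. This is a hedged sketch: the LLMAPIClientConfig constructor arguments are inferred from the test fixtures, the API key and model name are placeholders, and ANTHROPIC_API_KEY must be set in the environment because the client builds an AsyncAnthropic() internally:

import asyncio
from aiohttp import ClientSession

from llm_client.llm_api_client.anthropic_client import AnthropicClient
from llm_client.llm_api_client.base_llm_api_client import LLMAPIClientConfig

async def main():
    async with ClientSession() as session:
        # Constructor arguments inferred from the test fixtures; check them
        # against the real LLMAPIClientConfig signature in your checkout.
        config = LLMAPIClientConfig(api_key="sk-placeholder", session=session,
                                    default_model="claude-instant")
        client = AnthropicClient(config)
        print(await client.get_tokens_count("These are a few of my favorite things!"))
        print(await client.text_completion(prompt="These are a few of my favorite",
                                           max_tokens=10))

asyncio.run(main())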
