-from unittest.mock import patch
-
 import pytest
 
-from llm_client import LLMAPIClientFactory, LLMAPIClientType, AnthropicClient
+from llm_client import LLMAPIClientFactory, LLMAPIClientType
 from llm_client.consts import PROMPT_KEY, MODEL_KEY
 from llm_client.llm_api_client.anthropic_client import AUTH_HEADER, COMPLETIONS_KEY, MAX_TOKENS_KEY, ACCEPT_HEADER, \
-    ACCEPT_VALUE
+    ACCEPT_VALUE, VERSION_HEADER, AnthropicClient
 
 
 @pytest.mark.asyncio
@@ -18,18 +16,41 @@ async def test_get_llm_api_client__with_anthropic(config):
 
 
 @pytest.mark.asyncio
-async def test_text_completion__sanity(mock_aioresponse, llm_client, complete_url):
+async def test_text_completion__sanity(mock_aioresponse, llm_client, complete_url, anthropic_version):
+    mock_aioresponse.post(
+        complete_url,
+        payload={COMPLETIONS_KEY: "completion text"}
+    )
+
+    actual = await llm_client.text_completion(prompt="These are a few of my favorite", max_tokens=10)
+
+    assert actual == ["completion text"]
+    mock_aioresponse.assert_called_once_with(complete_url, method='POST',
+                                             headers={AUTH_HEADER: llm_client._api_key,
+                                                      ACCEPT_HEADER: ACCEPT_VALUE,
+                                                      VERSION_HEADER: anthropic_version},
+                                             json={PROMPT_KEY: 'These are a few of my favorite',
+                                                   MAX_TOKENS_KEY: 10, "temperature": 1,
+                                                   MODEL_KEY: llm_client._default_model},
+                                             raise_for_status=True)
+
+
+@pytest.mark.asyncio
+async def test_text_completion__with_version_header(mock_aioresponse, config, complete_url):
     mock_aioresponse.post(
         complete_url,
         payload={COMPLETIONS_KEY: "completion text"}
     )
+    config.headers[VERSION_HEADER] = "1.0.0"
+    llm_client = AnthropicClient(config)
 
     actual = await llm_client.text_completion(prompt="These are a few of my favorite", max_tokens=10)
 
     assert actual == ["completion text"]
     mock_aioresponse.assert_called_once_with(complete_url, method='POST',
                                              headers={AUTH_HEADER: llm_client._api_key,
-                                                      ACCEPT_HEADER: ACCEPT_VALUE},
+                                                      ACCEPT_HEADER: ACCEPT_VALUE,
+                                                      VERSION_HEADER: "1.0.0"},
                                              json={PROMPT_KEY: 'These are a few of my favorite',
                                                    MAX_TOKENS_KEY: 10, "temperature": 1,
                                                    MODEL_KEY: llm_client._default_model},
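The two tests above pin down the new header behavior: every completion request now carries VERSION_HEADER next to the auth and accept headers, with a library default that a caller can override through config.headers. A minimal sketch of that merge logic follows, assuming constant values and a config shape the tests only imply; it is illustrative, not the actual llm_client implementation.

# Hypothetical sketch of the header assembly exercised above; the constant
# values and the build_headers helper are assumptions, not llm_client's API.
AUTH_HEADER = "x-api-key"
ACCEPT_HEADER = "accept"
ACCEPT_VALUE = "application/json"
VERSION_HEADER = "anthropic-version"
DEFAULT_VERSION = "2023-06-01"  # assumed default; the anthropic_version fixture would hold the real one

def build_headers(api_key: str, config_headers: dict) -> dict:
    # Mandatory headers first; caller-supplied headers (e.g.
    # config.headers[VERSION_HEADER] = "1.0.0") override the default version.
    headers = {AUTH_HEADER: api_key,
               ACCEPT_HEADER: ACCEPT_VALUE,
               VERSION_HEADER: DEFAULT_VERSION}
    headers.update(config_headers)
    return headers

With that merge order, test_text_completion__sanity sees the default version while test_text_completion__with_version_header sees "1.0.0", matching both assertions.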
@@ -43,7 +64,7 @@ async def test_text_completion__without_max_tokens_raise_value_error(mock_aiores
 
 
 @pytest.mark.asyncio
-async def test_text_completion__override_model(mock_aioresponse, llm_client, complete_url):
+async def test_text_completion__override_model(mock_aioresponse, llm_client, complete_url, anthropic_version):
     new_model_name = "claude-instant"
     mock_aioresponse.post(
         complete_url,
@@ -56,15 +77,16 @@ async def test_text_completion__override_model(mock_aioresponse, llm_client, com
     assert actual == ["completion text"]
     mock_aioresponse.assert_called_once_with(complete_url, method='POST',
                                              headers={AUTH_HEADER: llm_client._api_key,
-                                                      ACCEPT_HEADER: ACCEPT_VALUE},
+                                                      ACCEPT_HEADER: ACCEPT_VALUE,
+                                                      VERSION_HEADER: anthropic_version},
                                              json={PROMPT_KEY: 'These are a few of my favorite',
                                                    MAX_TOKENS_KEY: 10, "temperature": 1,
                                                    MODEL_KEY: new_model_name},
                                              raise_for_status=True)
 
 
 @pytest.mark.asyncio
-async def test_text_completion__with_kwargs(mock_aioresponse, llm_client, complete_url):
+async def test_text_completion__with_kwargs(mock_aioresponse, llm_client, complete_url, anthropic_version):
     mock_aioresponse.post(
         complete_url,
         payload={COMPLETIONS_KEY: "completion text"}
@@ -75,7 +97,8 @@ async def test_text_completion__with_kwargs(mock_aioresponse, llm_client, comple
     assert actual == ["completion text"]
     mock_aioresponse.assert_called_once_with(complete_url, method='POST',
                                              headers={AUTH_HEADER: llm_client._api_key,
-                                                      ACCEPT_HEADER: ACCEPT_VALUE},
+                                                      ACCEPT_HEADER: ACCEPT_VALUE,
+                                                      VERSION_HEADER: anthropic_version},
                                              json={PROMPT_KEY: 'These are a few of my favorite',
                                                    MAX_TOKENS_KEY: 10,
                                                    MODEL_KEY: llm_client._default_model,
@@ -84,9 +107,8 @@ async def test_text_completion__with_kwargs(mock_aioresponse, llm_client, comple
 
 
 @pytest.mark.asyncio
-async def test_get_tokens_count__sanity(llm_client):
-    with patch("llm_client.llm_api_client.anthropic_client.count_tokens") as mock_count_tokens:
-        actual = await llm_client.get_tokens_count(text="These are a few of my favorite things!")
+async def test_get_tokens_count__sanity(llm_client, number_of_tokens, mock_anthropic):
+    actual = await llm_client.get_tokens_count(text="These are a few of my favorite things!")
 
-    assert actual == mock_count_tokens.return_value
-    mock_count_tokens.assert_called_once_with("These are a few of my favorite things!")
+    assert actual == 10
+    mock_anthropic.return_value.count_tokens.assert_awaited_once_with("These are a few of my favorite things!")
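The new test signatures reference fixtures (anthropic_version, number_of_tokens, mock_anthropic) that live in the suite's conftest.py, which this diff does not show. A plausible sketch of those fixtures follows; the patch target and every value here are assumptions, not the repository's actual conftest.

# conftest.py -- hypothetical fixtures matching what the tests above assume;
# the patch target and default values are guesses, not the real conftest.
from unittest.mock import AsyncMock, patch

import pytest

@pytest.fixture
def anthropic_version():
    # Default anthropic-version header value the client is expected to send.
    return "2023-06-01"

@pytest.fixture
def number_of_tokens():
    # Canned token count; test_get_tokens_count__sanity asserts this value.
    return 10

@pytest.fixture
def mock_anthropic(number_of_tokens):
    # Replace the Anthropic SDK client used inside anthropic_client so that
    # count_tokens is awaitable and returns the canned count.
    with patch("llm_client.llm_api_client.anthropic_client.AsyncAnthropic") as mock_client:
        mock_client.return_value.count_tokens = AsyncMock(return_value=number_of_tokens)
        yield mock_client

Because count_tokens is an AsyncMock here, the test's assert_awaited_once_with call can verify both that the coroutine was awaited and which text it received.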