 with try_import() as imports_successful:
     from openai import NOT_GIVEN, APIStatusError, AsyncOpenAI
     from openai.types import chat
-    from openai.types.chat.chat_completion import Choice
+    from openai.types.chat.chat_completion import Choice, ChoiceLogprobs
     from openai.types.chat.chat_completion_chunk import (
         Choice as ChunkChoice,
         ChoiceDelta,
     )
     from openai.types.chat.chat_completion_message import ChatCompletionMessage
     from openai.types.chat.chat_completion_message_tool_call import Function
+    from openai.types.chat.chat_completion_token_logprob import ChatCompletionTokenLogprob
     from openai.types.completion_usage import CompletionUsage, PromptTokensDetails

     from pydantic_ai.models.openai import (
@@ -129,10 +130,15 @@ def get_mock_chat_completion_kwargs(async_open_ai: AsyncOpenAI) -> list[dict[str
         raise RuntimeError('Not a MockOpenAI instance')


-def completion_message(message: ChatCompletionMessage, *, usage: CompletionUsage | None = None) -> chat.ChatCompletion:
+def completion_message(
+    message: ChatCompletionMessage, *, usage: CompletionUsage | None = None, logprobs: ChoiceLogprobs | None = None
+) -> chat.ChatCompletion:
+    choices = [Choice(finish_reason='stop', index=0, message=message)]
+    if logprobs:
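+        # attach the mocked logprobs to the choice so they flow through to the response's vendor_details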
+        choices = [Choice(finish_reason='stop', index=0, message=message, logprobs=logprobs)]
     return chat.ChatCompletion(
         id='123',
-        choices=[Choice(finish_reason='stop', index=0, message=message)],
+        choices=choices,
         created=1704067200,  # 2024-01-01
         model='gpt-4o-123',
         object='chat.completion',
@@ -141,7 +147,9 @@ def completion_message(message: ChatCompletionMessage, *, usage: CompletionUsage


 async def test_request_simple_success(allow_model_requests: None):
-    c = completion_message(ChatCompletionMessage(content='world', role='assistant'))
+    c = completion_message(
+        ChatCompletionMessage(content='world', role='assistant'),
+    )
     mock_client = MockOpenAI.create_mock(c)
     m = OpenAIModel('gpt-4o', provider=OpenAIProvider(openai_client=mock_client))
     agent = Agent(m)
@@ -1543,3 +1551,43 @@ async def get_temperature(city: str) -> float:
             ),
         ]
     )
+
+
+@pytest.mark.vcr()
+async def test_openai_instructions_with_logprobs(allow_model_requests: None):
+    # Create a mock response with logprobs
+    c = completion_message(
+        ChatCompletionMessage(content='world', role='assistant'),
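+        # logprob -0.6931 ≈ ln(0.5), i.e. ~50% token probability; bytes spell 'world' in UTF-8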
+        logprobs=ChoiceLogprobs(
+            content=[
+                ChatCompletionTokenLogprob(
+                    token='world', logprob=-0.6931, top_logprobs=[], bytes=[119, 111, 114, 108, 100]
+                )
+            ],
+        ),
+    )
+
+    mock_client = MockOpenAI.create_mock(c)
+    m = OpenAIModel(
+        'gpt-4o',
+        provider=OpenAIProvider(openai_client=mock_client),
+    )
+    agent = Agent(
+        m,
+        instructions='You are a helpful assistant.',
+    )
+    result = await agent.run(
+        'What is the capital of Minas Gerais?',
+        model_settings=OpenAIModelSettings(openai_logprobs=True),
+    )
+    messages = result.all_messages()
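+    # messages[1] is the ModelResponse built from the mocked completion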
+    response = cast(Any, messages[1])
+    assert response.vendor_details is not None
+    assert response.vendor_details['logprobs'] == [
+        {
+            'token': 'world',
+            'logprob': -0.6931,
+            'bytes': [119, 111, 114, 108, 100],
+            'top_logprobs': [],
+        }
+    ]