@@ -87,9 +87,9 @@ def test_openai_llm_with_message_history_happy_path(mock_import: Mock) -> None:
     assert res.content == "openai chat response"
     message_history.append({"role": "user", "content": question})
     # Use assert_called_once() instead of assert_called_once_with() to avoid issues with overloaded functions
-    llm.client.chat.completions.create.assert_called_once()
+    llm.client.chat.completions.create.assert_called_once()  # type: ignore
     # Check call arguments individually
-    call_args = llm.client.chat.completions.create.call_args[
+    call_args = llm.client.chat.completions.create.call_args[  # type: ignore
         1
     ]  # Get the keyword arguments
     assert call_args["messages"] == message_history
@@ -123,9 +123,9 @@ def test_openai_llm_with_message_history_and_system_instruction(
     messages.extend(message_history)
     messages.append({"role": "user", "content": question})
     # Use assert_called_once() instead of assert_called_once_with() to avoid issues with overloaded functions
-    llm.client.chat.completions.create.assert_called_once()
+    llm.client.chat.completions.create.assert_called_once()  # type: ignore
     # Check call arguments individually
-    call_args = llm.client.chat.completions.create.call_args[
+    call_args = llm.client.chat.completions.create.call_args[  # type: ignore
         1
     ]  # Get the keyword arguments
     assert call_args["messages"] == messages
@@ -240,9 +240,9 @@ def test_openai_llm_invoke_with_tools_with_message_history(
     # Verify the correct messages were passed
     message_history.append({"role": "user", "content": question})
     # Use assert_called_once() instead of assert_called_once_with() to avoid issues with overloaded functions
-    llm.client.chat.completions.create.assert_called_once()
+    llm.client.chat.completions.create.assert_called_once()  # type: ignore
     # Check call arguments individually
-    call_args = llm.client.chat.completions.create.call_args[
+    call_args = llm.client.chat.completions.create.call_args[  # type: ignore
         1
     ]  # Get the keyword arguments
     assert call_args["messages"] == message_history
@@ -297,9 +297,9 @@ def test_openai_llm_invoke_with_tools_with_system_instruction(
     messages = [{"role": "system", "content": system_instruction}]
     messages.append({"role": "user", "content": "my text"})
     # Use assert_called_once() instead of assert_called_once_with() to avoid issues with overloaded functions
-    llm.client.chat.completions.create.assert_called_once()
+    llm.client.chat.completions.create.assert_called_once()  # type: ignore
     # Check call arguments individually
-    call_args = llm.client.chat.completions.create.call_args[
+    call_args = llm.client.chat.completions.create.call_args[  # type: ignore
         1
     ]  # Get the keyword arguments
     assert call_args["messages"] == messages
@@ -384,9 +384,9 @@ def test_azure_openai_llm_with_message_history_happy_path(mock_import: Mock) ->
     assert res.content == "openai chat response"
     message_history.append({"role": "user", "content": question})
     # Use assert_called_once() instead of assert_called_once_with() to avoid issues with overloaded functions
-    llm.client.chat.completions.create.assert_called_once()
+    llm.client.chat.completions.create.assert_called_once()  # type: ignore
     # Check call arguments individually
-    call_args = llm.client.chat.completions.create.call_args[
+    call_args = llm.client.chat.completions.create.call_args[  # type: ignore
         1
     ]  # Get the keyword arguments
     assert call_args["messages"] == message_history
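For context on the repeated change: these tests replace the OpenAI client with a mock, assert the call happened once, and then inspect `call_args[1]` (the keyword arguments) one by one instead of using `assert_called_once_with()`, because mypy cannot reconcile the mock assertion with the overloaded signature of the real `create` method. Below is a minimal, self-contained sketch of that assertion pattern; the names are illustrative and not taken from the repository.

```python
from unittest.mock import MagicMock


def test_create_called_with_expected_messages() -> None:
    # Stand-in for an LLM wrapper whose OpenAI client has been mocked out.
    llm = MagicMock()
    llm.client.chat.completions.create.return_value = "openai chat response"

    message_history = [{"role": "user", "content": "hello"}]
    llm.client.chat.completions.create(messages=message_history, model="gpt-4o")

    # assert_called_once() sidesteps type-checker complaints about the
    # overloaded signature of the real `create`; kwargs are checked individually.
    llm.client.chat.completions.create.assert_called_once()
    call_args = llm.client.chat.completions.create.call_args[1]  # keyword arguments
    assert call_args["messages"] == message_history
    assert call_args["model"] == "gpt-4o"
```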