@@ -446,7 +446,7 @@ def test_create_schema_model_missing_relations(
 @pytest.fixture
 def mock_llm() -> AsyncMock:
     mock = AsyncMock()
-    mock.invoke = AsyncMock()
+    mock.ainvoke = AsyncMock()
     return mock
 
 
@@ -507,14 +507,14 @@ async def test_schema_from_text_run_valid_response(
     schema_from_text: SchemaFromText, mock_llm: AsyncMock, valid_schema_json: str
 ) -> None:
     # configure the mock LLM to return a valid schema JSON
-    mock_llm.invoke.return_value = valid_schema_json
+    mock_llm.ainvoke.return_value = valid_schema_json
 
     # run the schema extraction
     schema_config = await schema_from_text.run(text="Sample text for extraction")
 
     # verify the LLM was called with a prompt
-    mock_llm.invoke.assert_called_once()
-    prompt_arg = mock_llm.invoke.call_args[0][0]
+    mock_llm.ainvoke.assert_called_once()
+    prompt_arg = mock_llm.ainvoke.call_args[0][0]
     assert isinstance(prompt_arg, str)
     assert "Sample text for extraction" in prompt_arg
 
@@ -536,7 +536,7 @@ async def test_schema_from_text_run_invalid_json(
     schema_from_text: SchemaFromText, mock_llm: AsyncMock, invalid_schema_json: str
 ) -> None:
     # configure the mock LLM to return invalid JSON
-    mock_llm.invoke.return_value = invalid_schema_json
+    mock_llm.ainvoke.return_value = invalid_schema_json
 
     # verify that running with invalid JSON raises a ValueError
     with pytest.raises(ValueError) as exc_info:
@@ -557,13 +557,13 @@ async def test_schema_from_text_custom_template(
     schema_from_text = SchemaFromText(llm=mock_llm, prompt_template=custom_template)
 
     # configure mock LLM to return valid JSON and capture the prompt that was sent to it
-    mock_llm.invoke.return_value = valid_schema_json
+    mock_llm.ainvoke.return_value = valid_schema_json
 
     # run the schema extraction
     await schema_from_text.run(text="Sample text")
 
     # verify the custom prompt was passed to the LLM
-    prompt_sent_to_llm = mock_llm.invoke.call_args[0][0]
+    prompt_sent_to_llm = mock_llm.ainvoke.call_args[0][0]
     assert "This is a custom prompt with text" in prompt_sent_to_llm
 
 
@@ -578,14 +578,14 @@ async def test_schema_from_text_llm_params(
     schema_from_text = SchemaFromText(llm=mock_llm, llm_params=llm_params)
 
     # configure the mock LLM to return a valid schema JSON
-    mock_llm.invoke.return_value = valid_schema_json
+    mock_llm.ainvoke.return_value = valid_schema_json
 
     # run the schema extraction
     await schema_from_text.run(text="Sample text")
 
     # verify the LLM was called with the custom parameters
-    mock_llm.invoke.assert_called_once()
-    call_kwargs = mock_llm.invoke.call_args[1]
+    mock_llm.ainvoke.assert_called_once()
+    call_kwargs = mock_llm.ainvoke.call_args[1]
     assert call_kwargs["temperature"] == 0.1
     assert call_kwargs["max_tokens"] == 500
 
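For context on why this rename matters: the tests configure `return_value` on, and assert against, one mock attribute, while the async code path under test awaits `llm.ainvoke(...)`. Expectations set on `invoke` therefore never touched the method actually called. The sketch below is a minimal, self-contained illustration of the `AsyncMock` pattern these tests rely on; the direct `ainvoke` call is a stand-in for `SchemaFromText.run`, which is not reproduced here.

```python
import asyncio
from unittest.mock import AsyncMock


async def main() -> None:
    # Attributes of an AsyncMock are created lazily as AsyncMocks, so
    # awaiting mock_llm.ainvoke(...) works and records the call.
    mock_llm = AsyncMock()
    mock_llm.ainvoke.return_value = '{"entities": [], "relations": []}'

    # Stand-in for the component under test, which awaits llm.ainvoke(...).
    result = await mock_llm.ainvoke("Sample text for extraction", temperature=0.1)

    assert result == '{"entities": [], "relations": []}'
    mock_llm.ainvoke.assert_called_once()
    # call_args[0] holds positional args and call_args[1] holds kwargs,
    # mirroring the assertions in the tests above.
    assert mock_llm.ainvoke.call_args[0][0] == "Sample text for extraction"
    assert mock_llm.ainvoke.call_args[1]["temperature"] == 0.1

    # A mock configured only on .invoke would leave .ainvoke returning a
    # fresh AsyncMock instead of the schema JSON, which is what the
    # invoke -> ainvoke rename fixes.


asyncio.run(main())
```

Because attributes of an `AsyncMock` are themselves `AsyncMock`s, the explicit `mock.ainvoke = AsyncMock()` in the fixture mostly documents intent; the renames in the `return_value` assignments and assertions are what make the tests exercise the awaited method.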