Commit 9d05c76

Fix unit tests
1 parent 7088286 · commit 9d05c76

File tree: 1 file changed (+10, -10)


tests/unit/experimental/components/test_schema.py

Lines changed: 10 additions & 10 deletions
@@ -446,7 +446,7 @@ def test_create_schema_model_missing_relations(
 @pytest.fixture
 def mock_llm() -> AsyncMock:
     mock = AsyncMock()
-    mock.invoke = AsyncMock()
+    mock.ainvoke = AsyncMock()
     return mock


@@ -507,14 +507,14 @@ async def test_schema_from_text_run_valid_response(
     schema_from_text: SchemaFromText, mock_llm: AsyncMock, valid_schema_json: str
 ) -> None:
     # configure the mock LLM to return a valid schema JSON
-    mock_llm.invoke.return_value = valid_schema_json
+    mock_llm.ainvoke.return_value = valid_schema_json

     # run the schema extraction
     schema_config = await schema_from_text.run(text="Sample text for extraction")

     # verify the LLM was called with a prompt
-    mock_llm.invoke.assert_called_once()
-    prompt_arg = mock_llm.invoke.call_args[0][0]
+    mock_llm.ainvoke.assert_called_once()
+    prompt_arg = mock_llm.ainvoke.call_args[0][0]
     assert isinstance(prompt_arg, str)
     assert "Sample text for extraction" in prompt_arg

@@ -536,7 +536,7 @@ async def test_schema_from_text_run_invalid_json(
     schema_from_text: SchemaFromText, mock_llm: AsyncMock, invalid_schema_json: str
 ) -> None:
     # configure the mock LLM to return invalid JSON
-    mock_llm.invoke.return_value = invalid_schema_json
+    mock_llm.ainvoke.return_value = invalid_schema_json

     # verify that running with invalid JSON raises a ValueError
     with pytest.raises(ValueError) as exc_info:
@@ -557,13 +557,13 @@ async def test_schema_from_text_custom_template(
     schema_from_text = SchemaFromText(llm=mock_llm, prompt_template=custom_template)

     # configure mock LLM to return valid JSON and capture the prompt that was sent to it
-    mock_llm.invoke.return_value = valid_schema_json
+    mock_llm.ainvoke.return_value = valid_schema_json

     # run the schema extraction
     await schema_from_text.run(text="Sample text")

     # verify the custom prompt was passed to the LLM
-    prompt_sent_to_llm = mock_llm.invoke.call_args[0][0]
+    prompt_sent_to_llm = mock_llm.ainvoke.call_args[0][0]
     assert "This is a custom prompt with text" in prompt_sent_to_llm


@@ -578,14 +578,14 @@ async def test_schema_from_text_llm_params(
     schema_from_text = SchemaFromText(llm=mock_llm, llm_params=llm_params)

     # configure the mock LLM to return a valid schema JSON
-    mock_llm.invoke.return_value = valid_schema_json
+    mock_llm.ainvoke.return_value = valid_schema_json

     # run the schema extraction
     await schema_from_text.run(text="Sample text")

     # verify the LLM was called with the custom parameters
-    mock_llm.invoke.assert_called_once()
-    call_kwargs = mock_llm.invoke.call_args[1]
+    mock_llm.ainvoke.assert_called_once()
+    call_kwargs = mock_llm.ainvoke.call_args[1]
     assert call_kwargs["temperature"] == 0.1
     assert call_kwargs["max_tokens"] == 500

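The change swaps the mocked method from invoke to ainvoke, matching an async LLM interface that the component awaits. Below is a minimal, runnable sketch of the mocking pattern the updated tests rely on. FakeSchemaExtractor is a hypothetical stand-in for SchemaFromText (whose implementation is not shown in this diff), under the assumption that the real component awaits llm.ainvoke(prompt, **llm_params).

# Minimal sketch of the AsyncMock pattern used by these tests.
# FakeSchemaExtractor is hypothetical; the real SchemaFromText is only
# assumed here to await llm.ainvoke(prompt, **llm_params).
import asyncio
from unittest.mock import AsyncMock


class FakeSchemaExtractor:
    def __init__(self, llm, llm_params=None):
        self._llm = llm
        self._llm_params = llm_params or {}

    async def run(self, text: str) -> str:
        prompt = f"Extract a schema from: {text}"
        # AsyncMock.ainvoke returns a coroutine, so it must be awaited
        return await self._llm.ainvoke(prompt, **self._llm_params)


async def main() -> None:
    mock_llm = AsyncMock()
    mock_llm.ainvoke = AsyncMock()        # mirrors the fixture in this commit
    mock_llm.ainvoke.return_value = "{}"  # canned LLM response

    extractor = FakeSchemaExtractor(mock_llm, llm_params={"temperature": 0.1})
    result = await extractor.run("Sample text")

    mock_llm.ainvoke.assert_called_once()
    assert "Sample text" in mock_llm.ainvoke.call_args[0][0]    # prompt positional arg
    assert mock_llm.ainvoke.call_args[1]["temperature"] == 0.1  # llm_params kwargs
    assert result == "{}"


asyncio.run(main())

Note that if the component awaits ainvoke while the fixture still configures invoke, the awaited call silently hits an auto-created AsyncMock attribute: the configured return_value never applies and assert_called_once() on invoke fails, which is exactly the mismatch this commit resolves.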