diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index d40a4ad4..90c88c4d 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -10,6 +10,7 @@ on:
       - synchronize
       - reopened
       - ready_for_review
+  workflow_dispatch:
 
 jobs:
   lint:
diff --git a/integrations/langchain/src/databricks_langchain/chat_models.py b/integrations/langchain/src/databricks_langchain/chat_models.py
index 0704b523..c714686d 100644
--- a/integrations/langchain/src/databricks_langchain/chat_models.py
+++ b/integrations/langchain/src/databricks_langchain/chat_models.py
@@ -307,8 +307,13 @@ def _convert_response_to_chat_result(self, response: Mapping[str, Any]) -> ChatR
             )
             for choice in response["choices"]
         ]
-        usage = response.get("usage", {})
-        return ChatResult(generations=generations, llm_output=usage)
+        llm_output = {
+            k: v for k, v in response.items() if k not in ("choices", "content", "role", "type")
+        }
+        if "model" in llm_output and "model_name" not in llm_output:
+            llm_output["model_name"] = llm_output["model"]
+
+        return ChatResult(generations=generations, llm_output=llm_output)
 
     def _stream(
         self,
diff --git a/integrations/langchain/tests/unit_tests/test_chat_models.py b/integrations/langchain/tests/unit_tests/test_chat_models.py
index df281d51..451a845a 100644
--- a/integrations/langchain/tests/unit_tests/test_chat_models.py
+++ b/integrations/langchain/tests/unit_tests/test_chat_models.py
@@ -345,3 +345,23 @@ def test_convert_tool_message_chunk() -> None:
 def test_convert_message_to_dict_function() -> None:
     with pytest.raises(ValueError, match="Function messages are not supported"):
         _convert_message_to_dict(FunctionMessage(content="", name="name"))
+
+
+def test_convert_response_to_chat_result_llm_output(llm: ChatDatabricks) -> None:
+    """Test that _convert_response_to_chat_result correctly sets llm_output."""
+
+    result = llm._convert_response_to_chat_result(_MOCK_CHAT_RESPONSE)
+
+    # Verify that llm_output contains the full response metadata
+    assert "model_name" in result.llm_output
+    assert "usage" in result.llm_output
+    assert result.llm_output["model_name"] == _MOCK_CHAT_RESPONSE["model"]
+
+    # Verify that usage information is included directly in llm_output
+    assert result.llm_output["usage"] == _MOCK_CHAT_RESPONSE["usage"]
+
+    # Verify that choices, content, role, and type are excluded from llm_output
+    assert "choices" not in result.llm_output
+    assert "content" not in result.llm_output
+    assert "role" not in result.llm_output
+    assert "type" not in result.llm_output