Add Model Name and System Fingerprint to llm_output in _convert_response_to_chat_result #84

Merged: 7 commits, Mar 20, 2025
Changes from 5 commits

integrations/langchain/src/databricks_langchain/chat_models.py (12 changes: 8 additions & 4 deletions)
@@ -18,6 +18,7 @@
     Union,
 )
 
+from databricks_langchain.utils import get_deployment_client
 from langchain_core.callbacks import CallbackManagerForLLMRun
 from langchain_core.language_models import BaseChatModel
 from langchain_core.language_models.base import LanguageModelInput
@@ -54,8 +55,6 @@
 from mlflow.deployments import BaseDeploymentClient  # type: ignore
 from pydantic import BaseModel, ConfigDict, Field
 
-from databricks_langchain.utils import get_deployment_client
-
 logger = logging.getLogger(__name__)


@@ -307,8 +306,13 @@ def _convert_response_to_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
             )
             for choice in response["choices"]
         ]
-        usage = response.get("usage", {})
-        return ChatResult(generations=generations, llm_output=usage)
+        llm_output = {
+            k: v for k, v in response.items() if k not in ("choices", "content", "role", "type")
+        }
+        if "model" in llm_output and "model_name" not in llm_output:
+            llm_output["model_name"] = llm_output["model"]
+
+        return ChatResult(generations=generations, llm_output=llm_output)
 
     def _stream(
         self,
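
For context, here is a minimal standalone sketch of what the new llm_output carries, assuming a hypothetical OpenAI-style response payload (the field values below are illustrative, not taken from this PR):

# Sketch only (not library code): reproduces the filtering that
# _convert_response_to_chat_result now applies to the raw response.
sample_response = {
    "id": "chatcmpl-123",  # illustrative values
    "model": "databricks-meta-llama-3-1-70b-instruct",
    "system_fingerprint": "fp_abc123",
    "choices": [{"message": {"role": "assistant", "content": "Hello!"}}],
    "usage": {"prompt_tokens": 10, "completion_tokens": 3, "total_tokens": 13},
}

llm_output = {
    k: v for k, v in sample_response.items()
    if k not in ("choices", "content", "role", "type")
}
if "model" in llm_output and "model_name" not in llm_output:
    llm_output["model_name"] = llm_output["model"]

# llm_output now keeps id, model, model_name, system_fingerprint, and usage,
# while the per-choice message content stays on the ChatGeneration objects.

Keeping everything except the message fields means any additional metadata the serving endpoint returns flows through to llm_output without further code changes.
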
integrations/langchain/tests/unit_tests/test_chat_models.py (32 changes: 26 additions & 6 deletions)
@@ -4,6 +4,12 @@
 
 import mlflow  # type: ignore # noqa: F401
 import pytest
+from databricks_langchain.chat_models import (
+    ChatDatabricks,
+    _convert_dict_to_message,
+    _convert_dict_to_message_chunk,
+    _convert_message_to_dict,
+)
 from langchain_core.messages import (
     AIMessage,
     AIMessageChunk,
@@ -21,12 +27,6 @@
 from langchain_core.runnables import RunnableMap
 from pydantic import BaseModel, Field
 
-from databricks_langchain.chat_models import (
-    ChatDatabricks,
-    _convert_dict_to_message,
-    _convert_dict_to_message_chunk,
-    _convert_message_to_dict,
-)
 from tests.utils.chat_models import (  # noqa: F401
     _MOCK_CHAT_RESPONSE,
     _MOCK_STREAM_RESPONSE,
@@ -345,3 +345,23 @@ def test_convert_tool_message_chunk() -> None:
 def test_convert_message_to_dict_function() -> None:
     with pytest.raises(ValueError, match="Function messages are not supported"):
         _convert_message_to_dict(FunctionMessage(content="", name="name"))
+
+
+def test_convert_response_to_chat_result_llm_output(llm: ChatDatabricks) -> None:
+    """Test that _convert_response_to_chat_result correctly sets llm_output."""
+
+    result = llm._convert_response_to_chat_result(_MOCK_CHAT_RESPONSE)
+
+    # Verify that llm_output contains the full response metadata
+    assert "model_name" in result.llm_output
+    assert "usage" in result.llm_output
+    assert result.llm_output["model_name"] == _MOCK_CHAT_RESPONSE["model"]
+
+    # Verify that usage information is included directly in llm_output
+    assert result.llm_output["usage"] == _MOCK_CHAT_RESPONSE["usage"]
+
+    # Verify that choices, content, role, and type are excluded from llm_output
+    assert "choices" not in result.llm_output
+    assert "content" not in result.llm_output
+    assert "role" not in result.llm_output
+    assert "type" not in result.llm_output
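
As a usage-level illustration (not part of the diff above), the extra metadata is expected to surface on the message returned by ChatDatabricks, since recent langchain-core versions merge a single generation's llm_output into the message's response_metadata. The endpoint name below is hypothetical and the call assumes valid Databricks credentials:

from databricks_langchain import ChatDatabricks

# Hypothetical endpoint name; requires a reachable Databricks serving endpoint.
llm = ChatDatabricks(endpoint="databricks-meta-llama-3-1-70b-instruct")
msg = llm.invoke("Hello")

# With this PR, metadata such as model_name and usage should be visible here
# (system_fingerprint too, when the endpoint returns it).
print(msg.response_metadata.get("model_name"))
print(msg.response_metadata.get("usage"))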