Example Code

from typing import Sequence
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import BaseTool, tool
from langchain_openai import ChatOpenAI
from my_package.chat_models.aleph_alpha import ChatAlephAlpha # See description
@tool
def multiply(first_int: int, second_int: int) -> int:
"""Multiply two integers together."""
return first_int * second_int
tools: Sequence[BaseTool] = [multiply] # type: ignore
print(multiply.name)
print(multiply.description)
print(multiply.args)
# model = ChatOpenAI(model="gpt-4o")
model = ChatAlephAlpha(model="luminous-extended")
query = "what is the value of magic_function(3)?"
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
("human", "{input}"),
# Placeholders fill up a **list** of messages
("placeholder", "{agent_scratchpad}"),
]
)
agent = create_tool_calling_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools) # type: ignore
> NotImplementedError: llm.bind_tools(tools)

Description

This is essentially this question but in its own thread (as asked here), and it is very specific to Aleph Alpha. Aleph Alpha does not provide a chat interface by default, so I had to create a custom chat model, and it works decently enough. How should I go about solving this issue? Please find my custom classes implementing the custom chat model below.

AlephAlphaCommon.py

from typing import Any, Dict, List, Optional, Sequence
from aleph_alpha_client import Client
from langchain_core.language_models import BaseLanguageModel
from langchain_core.pydantic_v1 import Extra, SecretStr
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
class AlephAlphaCommon(BaseLanguageModel):
client: Client | None = None #: :meta private:
model: Optional[str] = "luminous-base"
"""Model name to use."""
maximum_tokens: int = 64
"""The maximum number of tokens to be generated."""
temperature: float = 0.0
"""A non-negative float that tunes the degree of randomness in generation."""
top_k: int = 0
"""Number of most likely tokens to consider at each step."""
top_p: float = 0.0
"""Total probability mass of tokens to consider at each step."""
presence_penalty: float = 0.0
"""Penalizes repeated tokens."""
frequency_penalty: float = 0.0
"""Penalizes repeated tokens according to frequency."""
repetition_penalties_include_prompt: Optional[bool] = False
"""Flag deciding whether presence penalty or frequency penalty are
updated from the prompt."""
use_multiplicative_presence_penalty: Optional[bool] = False
"""Flag deciding whether presence penalty is applied
multiplicatively (True) or additively (False)."""
penalty_bias: Optional[str] = None
"""Penalty bias for the completion."""
penalty_exceptions: Optional[List[str]] = None
"""List of strings that may be generated without penalty,
regardless of other penalty settings"""
penalty_exceptions_include_stop_sequences: Optional[bool] = None
"""Should stop_sequences be included in penalty_exceptions."""
best_of: Optional[int] = None
"""returns the one with the "best of" results
(highest log probability per token)
"""
n: int = 1
"""How many completions to generate for each prompt."""
logit_bias: Optional[Dict[int, float]] = None
"""The logit bias allows to influence the likelihood of generating tokens."""
log_probs: Optional[int] = None
"""Number of top log probabilities to be returned for each generated token."""
tokens: Optional[bool] = False
"""return tokens of completion."""
disable_optimizations: Optional[bool] = False
minimum_tokens: Optional[int] = 0
"""Generate at least this number of tokens."""
echo: bool = False
"""Echo the prompt in the completion."""
use_multiplicative_frequency_penalty: bool = False
sequence_penalty: float = 0.0
sequence_penalty_min_length: int = 2
use_multiplicative_sequence_penalty: bool = False
completion_bias_inclusion: Optional[Sequence[str]] = None
completion_bias_inclusion_first_token_only: bool = False
completion_bias_exclusion: Optional[Sequence[str]] = None
completion_bias_exclusion_first_token_only: bool = False
"""Only consider the first token for the completion_bias_exclusion."""
contextual_control_threshold: Optional[float] = None
"""If set to None, attention control parameters only apply to those tokens that have
explicitly been set in the request.
If set to a non-None value, control parameters are also applied to similar tokens.
"""
control_log_additive: Optional[bool] = True
"""True: apply control by adding the log(control_factor) to attention scores.
False: (attention_scores - - attention_scores.min(-1)) * control_factor
"""
repetition_penalties_include_completion: bool = True
"""Flag deciding whether presence penalty or frequency penalty
are updated from the completion."""
raw_completion: bool = False
"""Force the raw completion of the model to be returned."""
stop_sequences: Optional[List[str]] = None
"""Stop sequences to use."""
# Client params
aleph_alpha_api_key: Optional[SecretStr] = None
"""API key for Aleph Alpha API."""
host: str = "https://api.aleph-alpha.com"
"""The hostname of the API host.
The default is "https://api.aleph-alpha.com"."""
hosting: Optional[str] = None
"""Determines in which datacenters the request may be processed.
You can either set the parameter to "aleph-alpha" or omit it (defaulting to None).
Not setting this value, or setting it to None, gives us maximal
flexibility in processing your request in our
own datacenters and on servers hosted with other providers.
Choose this option for maximal availability.
Setting it to "aleph-alpha" allows us to only process the
request in our own datacenters.
Choose this option for maximal data privacy."""
request_timeout_seconds: int = 305
"""Client timeout that will be set for HTTP requests in the
`requests` library's API calls.
Server will close all requests after 300 seconds with an internal server error."""
total_retries: int = 8
"""The number of retries made in case requests fail with certain retryable
status codes. If the last
retry fails a corresponding exception is raised. Note, that between retries
an exponential backoff
is applied, starting with 0.5 s after the first retry and doubling for
each retry made. So with the
default setting of 8 retries a total wait time of 63.5 s is added
between the retries."""
nice: bool = False
"""Setting this to True, will signal to the API that you intend to be
nice to other users
by de-prioritizing your request below concurrent ones."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
allow_population_by_field_name = True
arbitrary_types_allowed = True
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["aleph_alpha_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY")
)
values["client"] = Client(
token=values["aleph_alpha_api_key"].get_secret_value(),
host=values["host"],
hosting=values["hosting"],
request_timeout_seconds=values["request_timeout_seconds"],
total_retries=values["total_retries"],
nice=values["nice"],
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling the Aleph Alpha API."""
return {
"maximum_tokens": self.maximum_tokens,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
"n": self.n,
"repetition_penalties_include_prompt": self.repetition_penalties_include_prompt, # noqa: E501
"use_multiplicative_presence_penalty": self.use_multiplicative_presence_penalty, # noqa: E501
"penalty_bias": self.penalty_bias,
"penalty_exceptions": self.penalty_exceptions,
"penalty_exceptions_include_stop_sequences": self.penalty_exceptions_include_stop_sequences, # noqa: E501
"best_of": self.best_of,
"logit_bias": self.logit_bias,
"log_probs": self.log_probs,
"tokens": self.tokens,
"disable_optimizations": self.disable_optimizations,
"minimum_tokens": self.minimum_tokens,
"echo": self.echo,
"use_multiplicative_frequency_penalty": self.use_multiplicative_frequency_penalty, # noqa: E501
"sequence_penalty": self.sequence_penalty,
"sequence_penalty_min_length": self.sequence_penalty_min_length,
"use_multiplicative_sequence_penalty": self.use_multiplicative_sequence_penalty, # noqa: E501
"completion_bias_inclusion": self.completion_bias_inclusion,
"completion_bias_inclusion_first_token_only": self.completion_bias_inclusion_first_token_only, # noqa: E501
"completion_bias_exclusion": self.completion_bias_exclusion,
"completion_bias_exclusion_first_token_only": self.completion_bias_exclusion_first_token_only, # noqa: E501
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
"repetition_penalties_include_completion": self.repetition_penalties_include_completion, # noqa: E501
"raw_completion": self.raw_completion,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "aleph-alpha" ChatAlephAlpha.py# from langchain_community.llms.aleph_alpha import AlephAlpha
from typing import Any, Dict, List, Optional, cast
from langchain_community.llms.utils import enforce_stop_tokens
from langchain_core.callbacks import (
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
)
from langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatResult
from pydantic.v1 import root_validator
from aiservice.chat_models.aleph_alpha_common import AlephAlphaCommon
def _convert_one_message_to_text(
message: BaseMessage,
human_prompt: str,
ai_prompt: str,
) -> str:
content = cast(str, message.content)
if isinstance(message, ChatMessage):
message_text = f"\n\n{message.role.capitalize()}: {content}"
elif isinstance(message, HumanMessage):
message_text = f"{human_prompt} {content}"
elif isinstance(message, AIMessage):
message_text = f"{ai_prompt} {content}"
elif isinstance(message, SystemMessage):
message_text = content
else:
raise ValueError(f"Got unknown type {message}")
return message_text
def convert_messages_to_prompt_aleph_alpha(
messages: List[BaseMessage],
*,
human_prompt: str = "\n\nHuman:",
ai_prompt: str = "\n\nAssistant:",
) -> str:
"""Format a list of messages into a full prompt for the AlephAlpha model
Args:
messages (List[BaseMessage]): List of BaseMessage to combine.
human_prompt (str, optional): Human prompt tag. Defaults to "\n\nHuman:".
ai_prompt (str, optional): AI prompt tag. Defaults to "\n\nAssistant:".
Returns:
str: Combined string with necessary human_prompt and ai_prompt tags.
"""
messages = messages.copy() # don't mutate the original list
if not isinstance(messages[-1], AIMessage):
messages.append(AIMessage(content=""))
text = "".join(
_convert_one_message_to_text(message, human_prompt, ai_prompt)
for message in messages
)
# trim off the trailing ' ' that might come from the "Assistant: "
return text.rstrip()
class ChatAlephAlpha(BaseChatModel, AlephAlphaCommon):
"""`AlephAlpha` chat large language models.
To use, you should have the `aleph-alpha-client` package installed, and the
environment variable `ALEPH_ALPHA_API_KEY` set to your API key or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from aiservice.chat_models.aleph_alpha import ChatAlephAlpha
model = ChatAlephAlpha(model="<model>", aleph_alpha_api_key="my-api-key")
"""
HUMAN_PROMPT: Optional[str] = "\n\nHuman:"
AI_PROMPT: Optional[str] = "\n\nAssistant:"
@property
def lc_secrets(self) -> Dict[str, str]:
return {"aleph_alpha_api_key": "ALEPH_ALPHA_API_KEY"}
@property
def _llm_type(self) -> str:
return "aleph-alpha-chat"
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Return a dictionary of identifying parameters.
This information is used by the LangChain callback system, which
is used for tracing purposes and makes it possible to monitor LLMs.
"""
return {
"model_name": self.model,
}
@root_validator(pre=True)
def set_default_stop_sequences(cls, values):
"""Set default stop sequences if not provided."""
if values.get("stop_sequences") is None:
values["stop_sequences"] = [values["HUMAN_PROMPT"]]
return values
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "chat_models", "aleph-alpha"]
def _convert_messages_to_prompt(self, messages: List[BaseMessage]) -> str:
"""Format a list of messages into a full prompt for the AlephAlpha model
Args:
messages (List[BaseMessage]): List of BaseMessage to combine.
Returns:
str: Combined string with necessary HUMAN_PROMPT and AI_PROMPT tags.
"""
prompt_params = {}
if self.HUMAN_PROMPT:
prompt_params["human_prompt"] = self.HUMAN_PROMPT
if self.AI_PROMPT:
prompt_params["ai_prompt"] = self.AI_PROMPT
return convert_messages_to_prompt_aleph_alpha(
messages=messages, **prompt_params
)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.client is None or self.model is None:
raise ValueError("Client is not initialized")
from aleph_alpha_client import CompletionRequest, Prompt
prompt = self._convert_messages_to_prompt(messages)
params = self._default_params
if self.stop_sequences is not None and stop is not None:
raise ValueError(
"stop sequences found in both the input and default params."
)
elif self.stop_sequences is not None:
params["stop_sequences"] = self.stop_sequences
else:
params["stop_sequences"] = stop
params = {**params, **kwargs}
request = CompletionRequest(prompt=Prompt.from_text(prompt), **params)
response = self.client.complete(model=self.model, request=request)
text = response.completions[0].completion
if text is None:
text = ""
# enforce_stop_tokens truncates the completion at the first stop token found
if stop is not None or self.stop_sequences is not None:
text = enforce_stop_tokens(text, params["stop_sequences"])
# Trim off leading whitespace
message = AIMessage(content=text.lstrip())
return ChatResult(generations=[ChatGeneration(message=message)])

System Info

langchain = "0.2.7"
langchain-core = "0.2.18"
langchain-openai = "0.1.16"
langchain-community = "0.2.7"
langsmith = "0.1.85"
langgraph = "0.1.8"
aleph-alpha-client = "7.1.0"
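For reference, the NotImplementedError above is raised because create_tool_calling_agent calls llm.bind_tools(tools) internally, and BaseChatModel does not implement bind_tools by default, so any chat model that does not override it fails the same way. Below is a minimal sketch that reproduces the error outside of the custom model; the _EchoChat class is hypothetical and exists only for illustration.

from typing import Any, List, Optional

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult


class _EchoChat(BaseChatModel):
    """Toy chat model that deliberately does NOT override bind_tools."""

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        # Echo the last message back; the body is irrelevant here, only bind_tools matters.
        return ChatResult(
            generations=[ChatGeneration(message=AIMessage(content=str(messages[-1].content)))]
        )

    @property
    def _llm_type(self) -> str:
        return "echo-chat"


_EchoChat().bind_tools([])  # raises NotImplementedError, just like the agent construction above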
Replies: 1 comment 11 replies
To implement the bind_tools method for your custom ChatAlephAlpha model, you need to override it so that the tools are converted to the OpenAI tool schema and bound to the model as kwargs.
Here is an example implementation of the bind_tools method:

from typing import Any, Callable, Dict, Literal, Optional, Sequence, Type, Union
from langchain_core.language_models import LanguageModelInput
from langchain_core.messages import BaseMessage
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import Runnable
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
# plus the existing imports in your ChatAlephAlpha.py (BaseChatModel, AlephAlphaCommon, ...)
class ChatAlephAlpha(BaseChatModel, AlephAlphaCommon):
# Existing properties and methods...
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
*,
tool_choice: Optional[
Union[dict, str, Literal["auto", "any", "none"], bool]
] = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind tool-like objects to this chat model.
Args:
tools: A list of tool definitions to bind to this chat model.
Can be a dictionary, pydantic model, callable, or BaseTool. Pydantic
models, callables, and BaseTools will be automatically converted to
their schema dictionary representation.
tool_choice: Which tool to require the model to call.
Must be the name of the single provided function,
"auto" to automatically determine which function to call
with the option to not call any function, "any" to enforce that some
function is called, or a dict of the form:
{"type": "function", "function": {"name": <<tool_name>>}}.
**kwargs: Any additional parameters to pass to the
:class:`~langchain.runnable.Runnable` constructor.
"""
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
if tool_choice is not None and tool_choice:
if isinstance(tool_choice, str) and (
tool_choice not in ("auto", "any", "none")
):
tool_choice = {"type": "function", "function": {"name": tool_choice}}
if isinstance(tool_choice, dict) and (len(formatted_tools) != 1):
raise ValueError(
"When specifying `tool_choice`, you must provide exactly one "
f"tool. Received {len(formatted_tools)} tools."
)
if isinstance(tool_choice, dict) and (
formatted_tools[0]["function"]["name"]
!= tool_choice["function"]["name"]
):
raise ValueError(
f"Tool choice {tool_choice} was specified, but the only "
f"provided tool was {formatted_tools[0]['function']['name']}."
)
if isinstance(tool_choice, bool):
if len(tools) > 1:
raise ValueError(
"tool_choice can only be True when there is one tool. Received "
f"{len(tools)} tools."
)
tool_name = formatted_tools[0]["function"]["name"]
tool_choice = {
"type": "function",
"function": {"name": tool_name},
}
kwargs["tool_choice"] = tool_choice
return super().bind(tools=formatted_tools, **kwargs)

Example Usage

from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool
from my_package.chat_models.aleph_alpha import ChatAlephAlpha
@tool
def multiply(first_int: int, second_int: int) -> int:
"""Multiply two integers together."""
return first_int * second_int
tools = [multiply]
model = ChatAlephAlpha(model="luminous-extended")
llm_with_tools = model.bind_tools(tools)
query = "what is the value of magic_function(3)?"
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
("human", "{input}"),
("placeholder", "{agent_scratchpad}"),
]
)
agent = create_tool_calling_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)

This implementation ensures that your ChatAlephAlpha model exposes a working bind_tools method, so create_tool_calling_agent can bind the tools without raising NotImplementedError.
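One hedged design note: the bind_tools() sketch above only converts the tools to the OpenAI tool schema and attaches them as bound kwargs, and at invoke time those kwargs are forwarded into _generate(**kwargs). Since the custom _generate merges kwargs into the CompletionRequest parameters, and as far as I can tell the Aleph Alpha CompletionRequest has no tool-calling field, the schemas still need to be separated out there (for example, rendered into the prompt text). A hypothetical helper illustrating that split; the function name is made up:

from typing import Any, Dict, List, Optional, Tuple, Union


def split_bound_tool_kwargs(
    kwargs: Dict[str, Any],
) -> Tuple[Optional[List[dict]], Optional[Union[str, dict]], Dict[str, Any]]:
    """Separate the kwargs added by bind_tools() from the plain completion parameters."""
    tools = kwargs.pop("tools", None)              # OpenAI-style tool schemas, if bound
    tool_choice = kwargs.pop("tool_choice", None)  # forced tool selection, if any
    return tools, tool_choice, kwargs

Inside _generate, the returned schemas could then be serialized into the prompt so the luminous model sees the available functions, while the remaining kwargs keep flowing into CompletionRequest as before.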
The last one was a good suggestion; here's the big picture, for posterity: