Replies: 1 comment
-
ChatOpenAI implementation (for reference):
# ChatOpenAI:
# def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
# if sys.version_info[1] <= 7:
# return super().get_num_tokens_from_messages(messages)
# model, encoding = self._get_encoding_model()
# if model.startswith("gpt-3.5-turbo-0301"):
# # every message follows <im_start>{role/name}\n{content}<im_end>\n
# tokens_per_message = 4
# # if there's a name, the role is omitted
# tokens_per_name = -1
# elif model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4"):
# tokens_per_message = 3
# tokens_per_name = 1
# else:
# raise NotImplementedError(
# f"get_num_tokens_from_messages() is not presently implemented "
# f"for model {model}. See "
# "https://platform.openai.com/docs/guides/text-generation/managing-tokens" # noqa: E501
# " for information on how messages are converted to tokens."
# )
# def _get_encoding_model(self) -> Tuple[str, tiktoken.Encoding]:
# if self.tiktoken_model_name is not None:
# model = self.tiktoken_model_name
# else:
# model = self.model_name
# try:
# encoding = tiktoken.encoding_for_model(model)
# except KeyError:
# model = "cl100k_base"
# encoding = tiktoken.get_encoding(model)
# return model, encoding
# UPDATE: updated your code
class ChatOpenAIIn05(ChatOpenAI):
def _get_encoding_model(self) -> Tuple[str, tiktoken.Encoding]:
    """Return a fixed, tiktoken-recognized model name and its encoding.

    Overrides the parent lookup so token counting never resolves to a
    name tiktoken cannot map (e.g. ``cl100k_base``), which would make
    ``get_num_tokens_from_messages`` raise ``NotImplementedError``.
    """
    # Pin a model name that tiktoken is guaranteed to recognize.
    pinned_model = "gpt-3.5-turbo"
    encoding = tiktoken.encoding_for_model(pinned_model)
    return pinned_model, encoding
# Updated code (avoids errors)
# Instantiate the patched subclass so summary-memory token counting
# uses the pinned tiktoken encoding instead of failing.
llm = ChatOpenAIIn05(
    temperature=0.5,
    model=os.environ.get("LLM_MODELEND"),  # Environment variable is still used
)
Beta Was this translation helpful? Give feedback.
0 replies
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Uh oh!
There was an error while loading. Please reload this page.
Uh oh!
There was an error while loading. Please reload this page.
-
Checked other resources
Commit to Help
Example Code
Description
我正在用langchain中的ConversationSummaryBufferMemory学习langchain的记忆功能,但是在运行后会有以下错误提示,NotImplementedError: get_num_tokens_from_messages() is not presently implemented for model cl100k_base
System Info
python 3.11,windows11,langchain0.3
Beta Was this translation helpful? Give feedback.
All reactions