Replies: 4 comments
-
The error suggests that the LangChain library expects a 'request' key in the response object, which might not be present under certain conditions, such as when the query length exceeds a certain threshold. However, there is no explicit mention in the LangChain documentation or codebase that directly links the presence of the 'request' key to the length of the query. The issue more likely stems from how the DashScope API's responses are structured, or from the specific conditions under which the LangChain library processes these responses. To address it, you might want to inspect the raw DashScope response for a failing query and confirm whether a 'request' attribute is present on it (see the sketch below).
Unfortunately, without more detailed information about the query you're executing and the specific structure of the data you're working with, it's challenging to provide a more precise solution. If possible, provide more details about the query and the context in which you're using the LangChain library and the DashScope API. This would help identify the root cause of the issue and suggest a more targeted fix.
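Reading the traceback at the bottom of this thread, the KeyError: 'request' looks like a secondary failure: check_response in langchain_community raises requests.HTTPError with the DashScope response object, and RequestException.__init__ then calls hasattr(response, "request"). DashScope's DictMixin forwards attribute access to dict lookup and raises KeyError rather than AttributeError for missing keys, and hasattr only swallows AttributeError, so the original HTTP error gets masked. A minimal sketch of that mechanism (DictMixinLike is a stand-in for illustration, not the real DashScope class, and the field values are placeholders):

class DictMixinLike(dict):
    def __getattr__(self, attr):
        # Like DashScope's DictMixin: a missing key raises KeyError,
        # not AttributeError.
        return self[attr]

resp = DictMixinLike(status_code=400, code="...", message="...")

# requests.exceptions.RequestException.__init__ effectively runs
# hasattr(response, "request"); hasattr() only suppresses AttributeError,
# so the KeyError('request') escapes and hides the real HTTP error.
try:
    hasattr(resp, "request")
except KeyError as exc:
    print("leaked:", exc)  # -> leaked: 'request'

So the most useful debugging step is to log resp.status_code, resp.code and resp.message just before check_response raises, to see which underlying API error actually triggered this path.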
-
me too. My setup looks like this:

import dashscope
from typing import List
from fastapi import FastAPI
from langchain_community.chat_models import ChatTongyi
from langchain_core.messages import BaseMessage, HumanMessage
from langchain_core.runnables import RunnableLambda, RunnableParallel
from langserve import CustomUserType, add_routes

app = FastAPI()

model = ChatTongyi(
    model_name="qwen-vl-chat-v1",
)
model.client = dashscope.MultiModalConversation

class FileProcessingRequest(CustomUserType):
    file: str
    location: str

def process_file(request: FileProcessingRequest) -> List[BaseMessage]:
    # Map the request's file id to a local image path.
    file_obj = {
        '1': '**.png',
        '2': '**.png',
    }
    # history = request.chat_history
    user_input = request.location
    user_file = file_obj[request.file]
    messages = []
    messages.append(HumanMessage(content=user_file))
    messages.append(HumanMessage(content=user_input))
    return messages

chat_model = RunnableParallel({"answer": (RunnableLambda(process_file) | model)})
chat_model.get_graph().print_ascii()

add_routes(
    app,
    chat_model.with_types(input_type=FileProcessingRequest),
    config_keys=["configurable"],
    path="/pdf",
)
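One thing worth checking in this setup: dashscope.MultiModalConversation generally expects a message's content to be a list of parts rather than a plain string, so sending the image path and the text as two separate plain-text HumanMessages may not reach qwen-vl in the intended shape. A minimal sketch of the alternative, inside process_file, assuming ChatTongyi passes the content list through to DashScope unchanged (the file:// URI is a hypothetical local path):

    # One multimodal message combining image and text, in DashScope's
    # MultiModalConversation content format (a list of {"image"} / {"text"} parts).
    messages = [
        HumanMessage(content=[
            {"image": "file://" + user_file},  # hypothetical absolute local path
            {"text": user_input},
        ])
    ]
    return messages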
-
I also encountered this issue while using ChatTongyi. Stepping through the source with a breakpoint, the feedback was: "Multiple tool_calls are not supported in message. This feature will be supported in the future." Perhaps Tongyi does not currently support scheduling multiple tool calls in a single message?
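If the failure really is this "multiple tool_calls" limitation, one possible stopgap, purely a sketch and not an official fix, is to drop all but the first tool call from an AIMessage before feeding the history back to ChatTongyi; keep_first_tool_call below is a hypothetical helper, not part of LangChain:

def keep_first_tool_call(msg):
    # Only touch messages that actually carry more than one tool call.
    tool_calls = getattr(msg, "tool_calls", None)
    if tool_calls and len(tool_calls) > 1:
        msg = msg.copy(update={"tool_calls": tool_calls[:1]})
    return msg

Whether the model then behaves sensibly with only the remaining call is untested, so treat this as a diagnostic aid rather than a fix.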
-
So how should this be dealt with?
-
Checked other resources
Commit to Help
Example Code
Description
If len(query) <= 5, the chain runs successfully; if len(query) > 5, an error is reported (a possible mitigation is sketched after the traceback below).
The error message is as follows:
KeyError Traceback (most recent call last)
Cell In[113], line 1
----> 1 chain.batch(query)
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\langchain_core\runnables\base.py:2643, in RunnableSequence.batch(self, inputs, config, return_exceptions, **kwargs)
2641 else:
2642 for i, step in enumerate(self.steps):
-> 2643 inputs = step.batch(
2644 inputs,
2645 [
2646 # each step a child run of the corresponding root run
2647 patch_config(
2648 config, callbacks=rm.get_child(f"seq:step:{i+1}")
2649 )
2650 for rm, config in zip(run_managers, configs)
2651 ],
2652 )
2654 # finish the root runs
2655 except BaseException as e:
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\langchain_core\runnables\base.py:633, in Runnable.batch(self, inputs, config, return_exceptions, **kwargs)
630 return cast(List[Output], [invoke(inputs[0], configs[0])])
632 with get_executor_for_config(configs[0]) as executor:
--> 633 return cast(List[Output], list(executor.map(invoke, inputs, configs)))
File d:\anaconda\anaconda3\envs\langchain_1\Lib\concurrent\futures\_base.py:619, in Executor.map.<locals>.result_iterator()
616 while fs:
617 # Careful not to keep a reference to the popped future
618 if timeout is None:
--> 619 yield _result_or_cancel(fs.pop())
620 else:
621 yield _result_or_cancel(fs.pop(), end_time - time.monotonic())
File d:\anaconda\anaconda3\envs\langchain_1\Lib\concurrent\futures\_base.py:317, in _result_or_cancel(failed resolving arguments)
315 try:
316 try:
--> 317 return fut.result(timeout)
318 finally:
319 fut.cancel()
File d:\anaconda\anaconda3\envs\langchain_1\Lib\concurrent\futures\_base.py:449, in Future.result(self, timeout)
447 raise CancelledError()
448 elif self._state == FINISHED:
--> 449 return self.__get_result()
451 self._condition.wait(timeout)
453 if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
File d:\anaconda\anaconda3\envs\langchain_1\Lib\concurrent\futures\_base.py:401, in Future.__get_result(self)
399 if self._exception:
400 try:
--> 401 raise self._exception
402 finally:
403 # Break a reference cycle with the exception in self._exception
404 self = None
File d:\anaconda\anaconda3\envs\langchain_1\Lib\concurrent\futures\thread.py:58, in _WorkItem.run(self)
55 return
57 try:
---> 58 result = self.fn(*self.args, **self.kwargs)
59 except BaseException as exc:
60 self.future.set_exception(exc)
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\langchain_core\runnables\config.py:466, in ContextThreadPoolExecutor.map.<locals>._wrapped_fn(*args)
465 def _wrapped_fn(*args: Any) -> T:
--> 466 return contexts.pop().run(fn, *args)
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\langchain_core\runnables\base.py:626, in Runnable.batch.<locals>.invoke(input, config)
624 return e
625 else:
--> 626 return self.invoke(input, config, **kwargs)
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\langchain_core\language_models\chat_models.py:158, in BaseChatModel.invoke(self, input, config, stop, **kwargs)
147 def invoke(
148 self,
149 input: LanguageModelInput,
(...)
153 **kwargs: Any,
154 ) -> BaseMessage:
155 config = ensure_config(config)
156 return cast(
157 ChatGeneration,
--> 158 self.generate_prompt(
159 [self._convert_input(input)],
160 stop=stop,
161 callbacks=config.get("callbacks"),
162 tags=config.get("tags"),
163 metadata=config.get("metadata"),
164 run_name=config.get("run_name"),
165 run_id=config.pop("run_id", None),
166 **kwargs,
167 ).generations[0][0],
168 ).message
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\langchain_core\language_models\chat_models.py:560, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs)
552 def generate_prompt(
553 self,
554 prompts: List[PromptValue],
(...)
557 **kwargs: Any,
558 ) -> LLMResult:
559 prompt_messages = [p.to_messages() for p in prompts]
--> 560 return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\langchain_core\language_models\chat_models.py:421, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
419 if run_managers:
420 run_managers[i].on_llm_error(e, response=LLMResult(generations=[]))
--> 421 raise e
422 flattened_outputs = [
423 LLMResult(generations=[res.generations], llm_output=res.llm_output) # type: ignore[list-item]
424 for res in results
425 ]
426 llm_output = self._combine_llm_outputs([res.llm_output for res in results])
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\langchain_core\language_models\chat_models.py:411, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
408 for i, m in enumerate(messages):
409 try:
410 results.append(
--> 411 self._generate_with_cache(
412 m,
413 stop=stop,
414 run_manager=run_managers[i] if run_managers else None,
415 **kwargs,
416 )
417 )
418 except BaseException as e:
419 if run_managers:
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\langchain_core\language_models\chat_models.py:632, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs)
630 else:
631 if inspect.signature(self._generate).parameters.get("run_manager"):
--> 632 result = self._generate(
633 messages, stop=stop, run_manager=run_manager, **kwargs
634 )
635 else:
636 result = self._generate(messages, stop=stop, **kwargs)
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\langchain_community\chat_models\tongyi.py:318, in ChatTongyi._generate(self, messages, stop, run_manager, **kwargs)
314 else:
315 params: Dict[str, Any] = self._invocation_params(
316 messages=messages, stop=stop, **kwargs
317 )
--> 318 resp = self.completion_with_retry(**params)
319 generations.append(
320 ChatGeneration(**self._chat_generation_from_qwen_resp(resp))
321 )
322 return ChatResult(
323 generations=generations,
324 llm_output={
325 "model_name": self.model_name,
326 },
327 )
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\langchain_community\chat_models\tongyi.py:250, in ChatTongyi.completion_with_retry(self, **kwargs)
247 resp = self.client.call(**_kwargs)
248 return check_response(resp)
--> 250 return _completion_with_retry(**kwargs)
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\tenacity\__init__.py:289, in BaseRetrying.wraps.<locals>.wrapped_f(*args, **kw)
287 @functools.wraps(f)
288 def wrapped_f(*args: t.Any, **kw: t.Any) -> t.Any:
--> 289 return self(f, *args, **kw)
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\tenacity\__init__.py:379, in Retrying.__call__(self, fn, *args, **kwargs)
377 retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
378 while True:
--> 379 do = self.iter(retry_state=retry_state)
380 if isinstance(do, DoAttempt):
381 try:
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\tenacity\__init__.py:314, in BaseRetrying.iter(self, retry_state)
312 is_explicit_retry = fut.failed and isinstance(fut.exception(), TryAgain)
313 if not (is_explicit_retry or self.retry(retry_state)):
--> 314 return fut.result()
316 if self.after is not None:
317 self.after(retry_state)
File d:\anaconda\anaconda3\envs\langchain_1\Lib\concurrent\futures\_base.py:449, in Future.result(self, timeout)
447 raise CancelledError()
448 elif self._state == FINISHED:
--> 449 return self.__get_result()
451 self._condition.wait(timeout)
453 if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
File d:\anaconda\anaconda3\envs\langchain_1\Lib\concurrent\futures\_base.py:401, in Future.__get_result(self)
399 if self._exception:
400 try:
--> 401 raise self._exception
402 finally:
403 # Break a reference cycle with the exception in self._exception
404 self = None
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\tenacity\__init__.py:382, in Retrying.__call__(self, fn, *args, **kwargs)
380 if isinstance(do, DoAttempt):
381 try:
--> 382 result = fn(*args, **kwargs)
383 except BaseException: # noqa: B902
384 retry_state.set_exception(sys.exc_info()) # type: ignore[arg-type]
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\langchain_community\chat_models\tongyi.py:248, in ChatTongyi.completion_with_retry.<locals>._completion_with_retry(_kwargs)
245 @retry_decorator
246 def _completion_with_retry(_kwargs: Any) -> Any:
247 resp = self.client.call(**_kwargs)
--> 248 return check_response(resp)
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\langchain_community\llms\tongyi.py:66, in check_response(resp)
61 raise ValueError(
62 f"status_code: {resp.status_code} \n "
63 f"code: {resp.code} \n message: {resp.message}"
64 )
65 else:
---> 66 raise HTTPError(
67 f"HTTP error occurred: status_code: {resp.status_code} \n "
68 f"code: {resp.code} \n message: {resp.message}",
69 response=resp,
70 )
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\requests\exceptions.py:22, in RequestException.__init__(self, *args, **kwargs)
20 self.response = response
21 self.request = kwargs.pop("request", None)
---> 22 if response is not None and not self.request and hasattr(response, "request"):
23 self.request = self.response.request
24 super().__init__(*args, **kwargs)
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\dashscope\api_entities\dashscope_response.py:59, in DictMixin.__getattr__(self, attr)
58 def __getattr__(self, attr):
---> 59 return self[attr]
File d:\anaconda\anaconda3\envs\langchain_1\Lib\site-packages\dashscope\api_entities\dashscope_response.py:15, in DictMixin.__getitem__(self, key)
14 def __getitem__(self, key):
---> 15 return super().__getitem__(key)
KeyError: 'request'
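Since query here is the list passed to chain.batch(), the threshold of 5 suggests the underlying DashScope error only shows up once more than a handful of requests run in parallel (for example, API-side throttling), and the KeyError above then masks the real message. A hedged mitigation sketch, assuming concurrency is the trigger, is to cap the number of parallel calls through the runnable config:

# Keep at most 5 DashScope requests in flight at once.
results = chain.batch(query, config={"max_concurrency": 5})

If the error persists even with low concurrency, logging resp.status_code, resp.code and resp.message just before check_response raises would reveal the actual API failure.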
System Info
My Python version: 3.11
LangChain: 0.1.18