|
# Generic type variable for decorators: bounding it to Callable lets a
# decorator declare `(func: F) -> F` so the wrapped function keeps its
# original signature for type checkers.
F = TypeVar('F', bound=Callable[..., Any])
|
31 | 31 |
|
32 | 32 |
|
33 |
# Set lazy import
def get_lazy_imported_functions_module():
    r"""Collect the default tool set, importing `camel.functions` lazily.

    The import happens inside the function body so that merely importing
    this module does not pull in the (potentially heavy) functions package.

    Returns:
        list: The math, search, weather, map, twitter and OpenAPI function
            collections, flattened into a single list in that order.
    """
    from camel.functions import (
        MAP_FUNCS,
        MATH_FUNCS,
        OPENAPI_FUNCS,
        SEARCH_FUNCS,
        TWITTER_FUNCS,
        WEATHER_FUNCS,
    )

    # Flatten the per-domain collections, preserving the documented order.
    combined = []
    for funcs in (
        MATH_FUNCS,
        SEARCH_FUNCS,
        WEATHER_FUNCS,
        MAP_FUNCS,
        TWITTER_FUNCS,
        OPENAPI_FUNCS,
    ):
        combined.extend(funcs)
    return combined
52 |
| - |
53 |
| - |
54 |
# Set lazy import
def get_lazy_imported_types_module():
    r"""Return the default model type, importing `camel.types` lazily.

    Deferring the import keeps this module cheap to import when the
    default model type is never needed.

    Returns:
        ModelType: The `ModelType.GPT_3_5_TURBO` member.
    """
    from camel.types import ModelType as _ModelType

    default_model = _ModelType.GPT_3_5_TURBO
    return default_model
59 |
| - |
60 |
| - |
61 | 33 | def api_key_required(func: F) -> F:
|
62 | 34 | r"""Decorator that checks if the API key is available either as an environment variable or passed directly.
|
63 | 35 |
|
@@ -354,145 +326,3 @@ def to_pascal(snake: str) -> str:
|
354 | 326 |
|
355 | 327 |
|
# True when the installed pydantic is a 2.x release; used to branch between
# the incompatible pydantic v1/v2 APIs.
PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
|
357 |
| - |
358 |
| - |
359 |
def role_playing_with_function(
    task_prompt: str = (
        "Assume now is 2024 in the Gregorian calendar, "
        "estimate the current age of University of Oxford "
        "and then add 10 more years to this age, "
        "and get the current weather of the city where "
        "the University is located. And tell me what time "
        "zone University of Oxford is in. And use my twitter "
        "account infomation to create a tweet. Search basketball"
        "course from coursera And help me to choose a basketball by klarna."
    ),
    tools: Optional[List] = None,
    model_type=None,
    chat_turn_limit: int = 10,
    assistant_role_name: str = "Searcher",
    user_role_name: str = "Professor",
) -> None:
    r"""Initializes and conducts a `RolePlaying` with `ChatGPTConfig`
    session. The function creates an interactive and dynamic role-play
    session where the AI Assistant and User engage based on the given task,
    roles, and available functions. It demonstrates the versatility of AI in
    handling diverse tasks and user interactions within a structured
    `RolePlaying` framework.

    Args:
        task_prompt (str): The initial task or scenario description to start
            the `RolePlaying` session. Defaults to a multi-part prompt about
            the University of Oxford (age estimation, weather and time zone
            lookup, tweet creation, and course/product search).
        tools (list): A list of functions that the agent can utilize
            during the session. Defaults to the full lazily imported set:
            math, search, weather, map, twitter and OpenAPI functions.
        model_type (ModelType): The type of chatbot model used for both the
            assistant and the user. Defaults to the lazily imported default,
            `ModelType.GPT_3_5_TURBO`.
        chat_turn_limit (int): The maximum number of turns (exchanges) in the
            chat session. Defaults to 10.
        assistant_role_name (str): The role name assigned to the AI Assistant.
            Defaults to 'Searcher'.
        user_role_name (str): The role name assigned to the User. Defaults to
            'Professor'.

    Returns:
        None: This function does not return any value but prints out the
        session's dialogues and outputs.
    """

    # Run lazy import
    # Mutable/heavy defaults are resolved here rather than in the signature,
    # so importing this module stays cheap and no list default is shared.
    if tools is None:
        tools = get_lazy_imported_functions_module()
    if model_type is None:
        model_type = get_lazy_imported_types_module()

    from colorama import Fore

    from camel.agents.chat_agent import FunctionCallingRecord
    from camel.configs import ChatGPTConfig
    from camel.societies import RolePlaying

    # NOTE(review): no-op self-assignment — left as-is to preserve behavior.
    task_prompt = task_prompt
    # Deterministic sampling for both agents; only the assistant gets tools.
    user_model_config = ChatGPTConfig(temperature=0.0)

    assistant_model_config = ChatGPTConfig(
        tools=tools,
        temperature=0.0,
    )

    role_play_session = RolePlaying(
        assistant_role_name=assistant_role_name,
        user_role_name=user_role_name,
        assistant_agent_kwargs=dict(
            model_type=model_type,
            model_config=assistant_model_config,
            tools=tools,
        ),
        user_agent_kwargs=dict(
            model_type=model_type,
            model_config=user_model_config,
        ),
        task_prompt=task_prompt,
        with_task_specify=False,
    )

    # Echo the system messages and the various task-prompt stages, each in a
    # distinct colorama color for readability.
    print(
        Fore.GREEN
        + f"AI Assistant sys message:\n{role_play_session.assistant_sys_msg}\n"
    )
    print(
        Fore.BLUE + f"AI User sys message:\n{role_play_session.user_sys_msg}\n"
    )

    print(Fore.YELLOW + f"Original task prompt:\n{task_prompt}\n")
    print(
        Fore.CYAN
        + f"Specified task prompt:\n{role_play_session.specified_task_prompt}\n"
    )
    print(Fore.RED + f"Final task prompt:\n{role_play_session.task_prompt}\n")

    # Main conversation loop: alternate assistant/user steps until either
    # side terminates, the task-done marker appears, or the turn cap is hit.
    n = 0
    input_msg = role_play_session.init_chat()
    while n < chat_turn_limit:
        n += 1
        assistant_response, user_response = role_play_session.step(input_msg)

        if assistant_response.terminated:
            print(
                Fore.GREEN
                + (
                    "AI Assistant terminated. Reason: "
                    f"{assistant_response.info['termination_reasons']}."
                )
            )
            break
        if user_response.terminated:
            print(
                Fore.GREEN
                + (
                    "AI User terminated. "
                    f"Reason: {user_response.info['termination_reasons']}."
                )
            )
            break

        # Print output from the user
        print_text_animated(
            Fore.BLUE + f"AI User:\n\n{user_response.msg.content}\n"
        )

        # Print output from the assistant, including any function
        # execution information
        print_text_animated(Fore.GREEN + "AI Assistant:")
        tool_calls: List[FunctionCallingRecord] = assistant_response.info[
            'tool_calls'
        ]
        for func_record in tool_calls:
            print_text_animated(f"{func_record}")
        print_text_animated(f"{assistant_response.msg.content}\n")

        # The user signals task completion by embedding this marker.
        if "CAMEL_TASK_DONE" in user_response.msg.content:
            break

        # Feed the assistant's message back in as the next turn's input.
        input_msg = assistant_response.msg
0 commit comments