import os
from typing import Iterator

from agno.agent import Agent
from agno.models.nebius import Nebius

# from agno.tools.scrapegraph import ScrapeGraphTools
from agno.tools.duckduckgo import DuckDuckGoTools
from agno.utils.log import logger
from agno.workflow import RunResponse, Workflow


class DeepResearcherAgent(Workflow):
    """
    A multi-stage research workflow that:
    1. Gathers information from the web using web search tools.
    2. Analyzes and synthesizes the findings.
    3. Produces a clear, well-structured report.
    """

    # Searcher: Gathers information and sources from the web
    searcher: Agent = Agent(
        tools=[DuckDuckGoTools()],
        model=Nebius(
            id="deepseek-ai/DeepSeek-V3-0324", api_key=os.getenv("NEBIUS_API_KEY")
        ),
        show_tool_calls=True,
        markdown=True,
        description=(
            "You are ResearchBot-X, an expert at finding and extracting high-quality, "
            "up-to-date information from the web. Your job is to gather comprehensive, "
            "reliable, and diverse sources on the given topic."
        ),
        instructions=(
            "1. Search for the most recent and authoritative sources (news, blogs, official docs, research papers, forums, etc.) on the topic.\n"
            "2. Extract key facts, statistics, and expert opinions.\n"
            "3. Cover multiple perspectives and highlight any disagreements or controversies.\n"
            "4. Include relevant statistics, data, and expert opinions where possible.\n"
            "5. Organize your findings in a clear, structured format (e.g., markdown table or sections by source type).\n"
            "6. If the topic is ambiguous, clarify with the user before proceeding.\n"
            "7. Be as comprehensive and verbose as possible; err on the side of including more detail.\n"
            "8. Always include a References & Sources list with the links you used; this is required."
        ),
    )

    # Analyst: Synthesizes and interprets the research findings
    analyst: Agent = Agent(
        model=Nebius(
            id="deepseek-ai/DeepSeek-V3-0324", api_key=os.getenv("NEBIUS_API_KEY")
        ),
        markdown=True,
        description=(
            "You are AnalystBot-X, a critical thinker who synthesizes research findings "
            "into actionable insights. Your job is to analyze, compare, and interpret the "
            "information provided by the researcher."
        ),
        instructions=(
            "1. Identify key themes, trends, and contradictions in the research.\n"
            "2. Highlight the most important findings and their implications.\n"
            "3. Suggest areas for further investigation if gaps are found.\n"
            "4. Present your analysis in a structured, easy-to-read format.\n"
            "5. Extract and list ONLY the reference links or sources that were ACTUALLY found and provided by the researcher in their findings. Do NOT create, invent, or hallucinate any links.\n"
            "6. If no links were provided by the researcher, do not include a References section.\n"
            "7. Do not hallucinate or make up information. Use ONLY the links that were explicitly passed to you by the researcher.\n"
            "8. Verify that each link you include was actually present in the researcher's findings before listing it.\n"
            "9. If the researcher provided no links, simply state that no references were found."
        ),
    )

    # Writer: Produces a final, polished report
    writer: Agent = Agent(
        model=Nebius(
            id="deepseek-ai/DeepSeek-V3-0324", api_key=os.getenv("NEBIUS_API_KEY")
        ),
        markdown=True,
        description=(
            "You are WriterBot-X, a professional technical writer. Your job is to craft "
            "a clear, engaging, and well-structured report based on the analyst's summary."
        ),
        instructions=(
            "1. Write an engaging introduction that sets the context.\n"
            "2. Organize the main findings into logical sections with headings.\n"
            "3. Use bullet points, tables, or lists for clarity where appropriate.\n"
            "4. Conclude with a summary and actionable recommendations.\n"
            "5. Include a References & Sources section ONLY if the analyst provided actual links from their analysis.\n"
            "6. Use ONLY the reference links that were explicitly provided by the analyst in their analysis. Do NOT create, invent, or hallucinate any links.\n"
            "7. If the analyst provided links, format them as clickable markdown links in the References section.\n"
            "8. If no links were provided by the analyst, do not include a References section at all.\n"
            "9. Never add fake or made-up links; only use links that were actually found and passed through the research chain."
        ),
    )

    def run(self, topic: str) -> Iterator[RunResponse]:
        """
        Orchestrates the research, analysis, and report-writing process for a given topic.
        """
        logger.info(f"Running deep researcher agent for topic: {topic}")

        # Step 1: Research
        research_content = self.searcher.run(topic)
        # logger.info(f"Searcher content: {research_content.content}")

        logger.info("Analysis started")
        # Step 2: Analysis
        analysis = self.analyst.run(research_content.content)
        # logger.info(f"Analyst analysis: {analysis.content}")

        logger.info("Report writing started")
        # Step 3: Report writing (streamed so response chunks can be yielded as they arrive)
        report = self.writer.run(analysis.content, stream=True)
        yield from report


def run_research(query: str) -> str:
    """Runs the DeepResearcherAgent workflow for a query and returns the full report as a string."""
    agent = DeepResearcherAgent()
    final_report_iterator = agent.run(
        topic=query,
    )

    full_report = ""
    for chunk in final_report_iterator:
        if chunk.content:
            full_report += chunk.content

    logger.info("Report generated")
    return full_report
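

# Example usage: a minimal sketch, not part of the workflow module itself.
# It assumes the NEBIUS_API_KEY environment variable is set and the agno
# package (with DuckDuckGo tool support) is installed; the topic string
# below is purely illustrative.
if __name__ == "__main__":
    sample_topic = "Current state of open-source LLM agents"  # hypothetical topic
    report = run_research(sample_topic)
    print(report)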