@@ -8,7 +8,7 @@ AG2 v0.10 introduces native support for the [Agent2Agent (A2A) Protocol](https:/
88
99This article walks through implementing A2A in AG2, with a focus on practical patterns for building distributed agent systems.
1010
11- \ <
11+ <!-- more -->
1212
1313## What is A2A?
1414
@@ -87,7 +87,7 @@ reviewer = ConversableAgent(
8787
8888    Be concise but thorough.""",
8989    llm_config={
90-         "model": "gpt-4",
90+         "model": "gpt-4.1",
9191        "temperature": 0.1,  # Low temperature for consistent reviews
9292    },
9393)
@@ -108,7 +108,7 @@ server = A2aServer(
108108) 
109109``` 
110110
111- Start the server with any ASGI server:
111+ Start the server with any ASGI server, like [uvicorn](https://www.uvicorn.org/):
112112
113113```bash 
114114uvicorn server:server --host 0.0.0.0 --port 8000
@@ -133,7 +133,7 @@ async def generate_and_review():
133133        name="Coder",
134134        system_message="""Generate Python code with type hints.
135135        After generation, ask the reviewer to check it.""",
136-         llm_config={"model": "gpt-4"},
136+         llm_config={"model": "gpt-4.1"},
137137    )
138138
139139    #  Remote  reviewer  via  A2A 
@@ -144,13 +144,15 @@ async def generate_and_review():
144144    ) 
145145
146146    #  Two-agent  conversation 
147-     result = await coder.a_initiate_chat(
148-         reviewer,
147+     response = await coder.a_run(
148+         recipient=reviewer,
149149        message="Generate a function to compute fibonacci numbers with full type hints",
150150        max_turns=3,
151+         summary_method="reflection_with_llm",
151152    )
152153
153-     print(result.summary)
154+     await response.process()
155+     print(response.summary)
154156
155157asyncio.run(generate_and_review()) 
156158``` 
@@ -181,20 +183,22 @@ async def review_code(request: CodeReviewRequest):
181183        submitter = ConversableAgent(
182184            name="Submitter",
183185            system_message="Submit code for review",
184-             llm_config={"model": "gpt-4"},
186+             llm_config={"model": "gpt-4.1"},
185187            max_consecutive_auto_reply=0,  # Don't auto-reply
186188        )
187189
188190        message = f"Review this code:\n\n```python\n{request.code}\n```"
189191        if request.context:
190192            message += f"\n\nContext: {request.context}"
191193
192-         result = await submitter.a_initiate_chat(
193-             reviewer,
194+         response = await submitter.a_run(
195+             recipient=reviewer,
194196            message=message,
197+             summary_method="reflection_with_llm",
195198        )
196199
197-         return {"review": result.summary}
200+         await response.process()
201+         return {"review": response.summary}
198202
199203    except  Exception  as  e: 
200204        raise  HTTPException(status_code=500,  detail =str(e)) 
@@ -236,7 +240,7 @@ from pydantic_ai import Agent
236240from  pydantic_ai.models.openai  import  OpenAIModel 
237241
238242agent  =  Agent( 
239-     model=OpenAIModel('gpt-4'),
243+     model=OpenAIModel('gpt-4.1'),
240244    system_prompt = ' You are a data analysis expert.' , 
241245) 
242246
@@ -258,10 +262,13 @@ data_analyst = A2aRemoteAgent(
258262) 
259263
260264#  Use  it  like  any  AG2  agent 
261- result = await my_ag2_agent.a_initiate_chat(
262-     data_analyst,
265+ response = await my_ag2_agent.a_run(
266+     recipient=data_analyst,
263267    message="Analyze quarterly sales trends",
268+     summary_method="reflection_with_llm",
264269)
270+ 
271+ await response.process()
265272``` 
266273
267274The implementation framework is completely abstracted—your AG2 agent doesn't know or care that it's talking to a Pydantic AI agent.
@@ -306,21 +313,29 @@ report_generator = A2aRemoteAgent(
306313
307314```python 
308315#  This  works 
309- result = await agent.a_initiate_chat(remote_agent, message="...")
316+ response = await agent.a_run(recipient=remote_agent, message="...")
317+ await response.process()
310318
311319# This will fail
312- result = agent.initiate_chat(remote_agent, message="...")  # ❌
320+ result = agent.run(recipient=remote_agent, message="...")  # ❌
313321``` 
314322
315323If  you  need  synchronous  interfaces,  wrap  async  calls: 
316324
317325```python 
318326import  asyncio 
319327
320- def sync_wrapper(agent, remote_agent, message):
321-     return asyncio.run(
322-         agent.a_initiate_chat(remote_agent, message=message)
328+ async def async_chat(agent, remote_agent, message):
329+     response = await agent.a_run(
330+         recipient=remote_agent,
331+         message=message,
332+         summary_method="reflection_with_llm"
323333    )
334+     await response.process()
335+     return response
336+ 
337+ def sync_wrapper(agent, remote_agent, message):
338+     return asyncio.run(async_chat(agent, remote_agent, message))
324339``` 
325340
326341###  Error  Handling 
@@ -333,12 +348,14 @@ from httpx import HTTPError, TimeoutException
334349async def robust_a2a_call(agent, remote_agent, message, max_retries=3):
335350    for attempt in range(max_retries):
336351        try:
336-             result = await agent.a_initiate_chat(
337-                 remote_agent,
351+             response = await agent.a_run(
352+                 recipient=remote_agent,
338353                message=message,
354+                 summary_method="reflection_with_llm",
339355                timeout=30,  # Set appropriate timeouts
340356            )
341-             return result
357+             await response.process()
358+             return response
342359
343360        except  TimeoutException: 
344361            if  attempt  ==  max_retries  -  1: 
@@ -446,10 +463,15 @@ logger = logging.getLogger(__name__)
446463async def monitored_a2a_call(agent, remote_agent, message):
447464    start = time.time()
448465    try:
449-         result = await agent.a_initiate_chat(remote_agent, message=message)
466+         response = await agent.a_run(
467+             recipient=remote_agent,
468+             message=message,
469+             summary_method="reflection_with_llm"
470+         )
471+         await response.process()
450472        duration = time.time() - start
451473        logger.info(f"A2A call succeeded in {duration:.2f}s")
452-         return result
474+         return response
453475    except Exception as e:
454476        duration = time.time() - start
455477        logger.error(f"A2A call failed after {duration:.2f}s: {e}")
@@ -495,7 +517,7 @@ uvicorn server:server \
495517
4965181 .  ** Install AG2 v0.10+** :
497519   ``` bash 
498-    pip install autogen>=0.10.0
520+    pip install ag2
499521   ``` 
500522
5015232 .  ** Review the documentation** : [ AG2 A2A Guide] ( https://docs.ag2.ai/latest/docs/user-guide/a2a/ ) 
0 commit comments