Step-by-step guide to integrating Agent-to-Agent (A2A) Protocol v0.3.0 with Tyler agents
This guide walks you through integrating the Agent-to-Agent (A2A) Protocol v0.3.0 with your Tyler agents, enabling multi-agent coordination and delegation across different platforms.

💻 Code Examples
import asyncio


async def connect_to_agents():
    """Connect this adapter to two remote specialist agents and report on them."""
    # Connect to a research specialist agent
    research_ok = await adapter.connect(
        name="research_agent",
        base_url="https://research-ai.example.com",
    )

    # Connect to an analysis specialist agent
    analysis_ok = await adapter.connect(
        name="analysis_agent",
        base_url="https://analysis-ai.example.com",
    )

    if not (research_ok and analysis_ok):
        print("Failed to connect to one or more agents")
        return

    print("Connected to both remote agents")

    # Check agent capabilities reported by each remote peer
    for remote_name in ["research_agent", "analysis_agent"]:
        info = adapter.client.get_connection_info(remote_name)
        print(f"{remote_name}:")
        print(f" Protocol version: {info['protocol_version']}")
        print(f" Capabilities: {info['capabilities']}")
        print(f" Push notifications: {info['push_notifications_supported']}")


asyncio.run(connect_to_agents())
from tyler import Agent

# Collect one delegation tool per remote agent the adapter is connected to.
delegation_tools = adapter.get_tools_for_agent()

# A Tyler agent whose only job is routing work to the remote specialists.
coordinator = Agent(
    name="Project Coordinator",
    model_name="gpt-4.1",
    purpose="""You coordinate complex projects by delegating specialized tasks to remote agents.

    You have access to:
    - Research agent: For web research, fact-checking, and information gathering
    - Analysis agent: For data analysis, insights, and strategic recommendations

    Use these agents strategically to break down complex requests.""",
    tools=delegation_tools,
)
import asyncio

from tyler import Thread, Message


async def coordinate_project():
    """Run a multi-step request through the coordinator agent.

    The coordinator (created earlier in the guide) decides on its own which
    remote agents to delegate each phase to via its delegation tools.
    """
    # Create a complex request
    thread = Thread()
    thread.add_message(Message(
        role="user",
        content="""I need a comprehensive market analysis for electric vehicle charging stations.

        Please:
        1. Research current market size, key players, and growth trends
        2. Analyze competitive landscape and identify opportunities
        3. Provide strategic recommendations for market entry
        """,
    ))

    # The coordinator will automatically delegate to appropriate agents
    result = await coordinator.run(thread)

    # Print the coordinated response
    for message in result.thread.messages:
        if message.role == "assistant":
            print(f"Coordinator: {message.content}")


# `import asyncio` was missing from the original snippet even though
# asyncio.run is called below; it is now imported at the top.
asyncio.run(coordinate_project())
from tyler import Agent
from lye import WEB_TOOLS, FILES_TOOLS

# A standalone research specialist: web search plus document processing.
research_agent = Agent(
    model_name="gpt-4.1",
    name="Research Specialist",
    tools=[*WEB_TOOLS, *FILES_TOOLS],
    purpose="""You are an expert research specialist with web search and document processing capabilities.

    Your expertise includes:
    - Comprehensive web research and fact-finding
    - Academic and market research
    - Document analysis and summarization
    - Competitive intelligence gathering

    Always provide well-sourced, accurate information.""",
)
import asyncio


async def start_research_service():
    """Announce the service endpoints, then serve A2A requests forever.

    `server` is the A2A server wrapping the research agent, created earlier
    in the guide — presumably a tyler.a2a server object; confirm against the
    preceding section.
    """
    print("Starting Tyler Research Specialist A2A Server...")
    print("Other agents can connect at: http://localhost:8000")
    print("Agent Card available at: http://localhost:8000/.well-known/agent-card.json")

    # Start the server (this will run indefinitely)
    await server.start_server(host="0.0.0.0", port=8000)


# Run the server. The original snippet called asyncio.run without importing
# asyncio; the import is now at the top of the file.
if __name__ == "__main__":
    try:
        asyncio.run(start_research_service())
    except KeyboardInterrupt:
        print("\nServer stopped by user")
import asyncio

from tyler.a2a import A2AAdapter, FilePart


async def main():
    """Send a file to a remote document-processing agent.

    The original snippet used `await` at module level, which is a
    SyntaxError in a regular Python file; the coroutine wrapper fixes that.
    """
    adapter = A2AAdapter()
    await adapter.connect("document_processor", "https://docs.example.com")

    # Send a file for processing
    task_id = await adapter.create_task_with_files(
        "document_processor",
        "Summarize the key points from this document",
        files=["./reports/annual_report_2024.pdf"],
        context_id="annual-review",
    )

    # Or create FilePart manually for more control
    file_part = FilePart.from_path("./data/analysis.xlsx")
    # NOTE(review): `file_with_bytes` is assumed to hold the raw file bytes —
    # confirm the attribute name against the tyler.a2a FilePart API.
    print(f"File: {file_part.name}, Size: {len(file_part.file_with_bytes)} bytes")


asyncio.run(main())
import asyncio

from tyler.a2a import A2AClient, TextPart, DataPart


async def main():
    """Create a task, poll until it settles, then print its artifacts.

    Fixes over the original snippet:
    - `await` was used at module level (a SyntaxError in a .py file);
      everything now lives in a coroutine run by asyncio.run.
    - `import asyncio` was buried mid-script; it is now at the top.
    - The `while True` poll loop was unbounded and would spin forever if the
      remote task hung; polling is now capped with an explicit timeout.
    """
    client = A2AClient()
    await client.connect("agent", "https://agent.example.com")

    # Create a task
    task_id = await client.create_task("agent", "Generate a comprehensive report")

    # Wait for completion (in production, use push notifications instead)
    for _ in range(300):  # ~5 minutes at one poll per second
        status = await client.get_task_status("agent", task_id)
        if status["status"] in ["completed", "error"]:
            break
        await asyncio.sleep(1)
    else:
        raise TimeoutError(f"Task {task_id} did not finish within the polling window")

    # Get artifacts
    artifacts = await client.get_task_artifacts("agent", task_id)
    for artifact in artifacts:
        print(f"Artifact: {artifact.name}")
        print(f" ID: {artifact.artifact_id}")
        print(f" Created: {artifact.created_at}")
        for part in artifact.parts:
            if isinstance(part, TextPart):
                print(f" Text content: {part.text[:200]}...")
            elif isinstance(part, DataPart):
                print(f" Data: {part.data}")


asyncio.run(main())
import asyncio

from tyler.a2a import A2AClient


async def main():
    """Create several related tasks under one context and list them.

    The original snippet used `await` at module level, which is a
    SyntaxError in a regular Python file; the coroutine wrapper fixes that.
    """
    client = A2AClient()
    await client.connect("agent", "https://agent.example.com")

    # Define a context for related tasks
    context_id = "market-research-project-q4"

    # Create multiple related tasks
    tasks = [
        ("Research competitor pricing", "phase1-research"),
        ("Analyze market trends", "phase2-analysis"),
        ("Generate recommendations", "phase3-synthesis"),
    ]
    task_ids = []
    for description, tag in tasks:
        task_id = await client.create_task(
            "agent",
            f"[{tag}] {description}",
            context_id=context_id,
        )
        task_ids.append(task_id)

    # Get all tasks in the context
    related_task_ids = client.get_tasks_by_context(context_id)
    print(f"Tasks in context: {len(related_task_ids)}")


asyncio.run(main())
For simple requests where you want to wait for the complete response:
Copy
from tyler.a2a import A2AClient


async def send_remote_task():
    """Send one task to a remote agent and block until the full reply arrives."""
    remote = A2AClient()
    await remote.connect("agent", "https://agent.example.com")

    # Blocking call: returns only once the remote agent has finished
    response = await remote.send_task(
        "agent",
        "Create a brief executive summary",
    )

    # Inspect the completed response: status plus the text of every part
    print(f"Status: {response.status}")
    for produced in response.artifacts:
        for piece in produced.parts:
            print(piece.text)
When clients call the `message/stream` RPC method, they receive response tokens as they're generated by the LLM via Server-Sent Events (SSE):
Copy
from tyler.a2a import A2AClient


async def stream_remote_task():
    """Stream responses from a remote A2A agent."""
    remote = A2AClient()
    await remote.connect("agent", "https://agent.example.com")

    # Kick off the work on the remote side
    task_id = await remote.create_task(
        "agent",
        "Create a comprehensive business plan for a new AI startup",
    )

    print("Streaming response...")
    print("=" * 50)

    # Consume the SSE stream, printing each token as it lands
    async for chunk in remote.stream_task_messages("agent", task_id):
        print(chunk.get("content", ""), end="", flush=True)

    print("\n\nTask complete!")
For local coordinating agents, you can also stream using Tyler’s native streaming:
Copy
from tyler.models.execution import EventTypeasync def stream_coordinated_task(): """Example of streaming responses from coordinated agents.""" thread = Thread() thread.add_message(Message( role="user", content="Create a comprehensive business plan for a new AI startup" )) print("Starting coordinated task execution...") print("=" * 50) async for update in coordinator.stream(thread): if update.type == EventType.LLM_STREAM_CHUNK: print(update.data.get("content_chunk", ""), end="", flush=True) elif update.type == EventType.TOOL_SELECTED: tool_name = update.data.get("tool_name", "") if "delegate_to_" in tool_name: agent_name = tool_name.replace("delegate_to_", "") print(f"\n\nDelegating to {agent_name}...") print() elif update.type == EventType.EXECUTION_COMPLETE: print("\n\nTask coordination complete!")