The Agent-to-Agent (A2A) Protocol is an open standard that enables AI agents from different platforms and frameworks to communicate and collaborate effectively. Slide’s Tyler package includes comprehensive A2A support, allowing your agents to:
Delegate tasks to remote A2A-compatible agents
Expose Tyler agents as A2A endpoints for other systems
Participate in multi-agent ecosystems across platforms
Enable seamless interoperability between different agent frameworks
import asyncio

from tyler import Agent
from tyler.a2a import A2AServer
from lye import WEB_TOOLS, FILES_TOOLS


async def main():
    """Expose a Tyler agent as an A2A endpoint on port 8000."""
    # Create a Tyler agent with specific capabilities
    tyler_agent = Agent(
        name="Research Assistant",
        model_name="gpt-4o-mini",
        purpose="Advanced research specialist with web search and document processing",
        tools=[*WEB_TOOLS, *FILES_TOOLS],
    )

    # Create A2A server to expose the agent; the agent_card is the metadata
    # other A2A clients see when they discover this endpoint.
    server = A2AServer(
        tyler_agent=tyler_agent,
        agent_card={
            "name": "Tyler Research Assistant",
            "description": "AI research specialist with web and document capabilities",
            "capabilities": ["web_research", "document_processing", "data_analysis"],
            "version": "1.0.0",
        },
    )

    # Start the A2A server (binds all interfaces).
    # Agent is now accessible at http://localhost:8000
    await server.start_server(host="0.0.0.0", port=8000)


if __name__ == "__main__":
    asyncio.run(main())
The A2A adapter automatically creates delegation tools for each connected agent:
For example:
# Delegation tools are automatically named and configured
delegation_tools = adapter.get_tools_for_agent()

# Each tool allows delegating specific tasks; inspect the generated
# OpenAI-style function definitions.
for tool in delegation_tools:
    print(f"Tool: {tool['definition']['function']['name']}")
    print(f"Description: {tool['definition']['function']['description']}")
    print(f"Parameters: {tool['definition']['function']['parameters']}")
# List active connectionsconnections = adapter.list_connected_agents()for conn in connections: print(f"Agent: {conn['agent_name']} at {conn['base_url']}") print(f"Capabilities: {', '.join(conn['capabilities'])}")# Get detailed agent statusstatus = await adapter.get_agent_status("research_agent")print(f"Active tasks: {status['active_tasks']}")
from tyler.models.execution import EventType# Enable streaming for real-time responsesthread = Thread()thread.add_message(Message( role="user", content="Delegate to research_agent with streaming: Analyze AI market trends"))async for update in agent.go(thread, stream=True): if update.type == EventType.LLM_STREAM_CHUNK: print(update.data.get("content_chunk", ""), end="", flush=True) elif update.type == EventType.TOOL_SELECTED: print(f"\n[Delegating to: {update.data.get('tool_name', '')}]")
# Check agent availabilitytry: await adapter.connect("test_agent", "http://agent-service.com")except Exception as e: if "connection refused" in str(e).lower(): print("Agent service is not running or unreachable") elif "timeout" in str(e).lower(): print("Connection timeout - check network or increase timeout") else: print(f"Unexpected error: {e}")
# Multi-step research coordination
async def research_workflow(topic: str):
    """Run a coordinator-driven, multi-agent research pass on *topic*.

    The coordinator delegates research and analysis to connected
    specialist agents and returns the synthesized result of
    ``coordinator.go``.
    """
    coordinator = await create_research_coordinator()

    thread = Thread()
    thread.add_message(Message(
        role="user",
        content=f"""
        Conduct comprehensive research on: {topic}

        Please:
        1. Delegate initial research to the research specialist
        2. Have the analysis specialist process the findings
        3. Synthesize results into actionable insights
        """,
    ))

    return await coordinator.go(thread)