Skip to main content

Overview

The AgentResult class encapsulates the complete result of an agent’s execution in non-streaming mode. It provides access to the updated thread, new messages, and the final output.

Class Definition

@dataclass
class AgentResult:
    """Complete result of an agent's execution in non-streaming mode.

    Bundles the updated thread, the messages produced during this run,
    the final assistant text, and (optionally) validated structured output.
    """
    thread: Thread                    # Updated thread with new messages
    new_messages: List[Message]       # New messages added during execution
    content: Optional[str]            # Final assistant response content
    structured_data: Optional[BaseModel]  # Validated Pydantic model (if response_type used)

Properties

thread
Thread
The updated thread containing all messages, including those added during this execution.
new_messages
List[Message]
List of new messages added during this execution (excludes pre-existing messages).
content
Optional[str]
The final assistant response content. None if no assistant message was generated.
structured_data
Optional[BaseModel]
When response_type is provided to agent.run(), this field contains a validated instance of the Pydantic model. Internally, the agent uses the output-tool pattern: your schema becomes a special tool, and when the LLM calls it, the arguments are validated against your model.
from pydantic import BaseModel

# Schema the agent's final answer must conform to
class Invoice(BaseModel):
    vendor: str
    total: float

# Passing response_type makes the agent validate its final output
# against the Invoice schema (via the output-tool pattern)
result = await agent.run(thread, response_type=Invoice)
invoice: Invoice = result.structured_data
print(f"Vendor: {invoice.vendor}, Total: ${invoice.total}")
Returns None if response_type was not provided. See the Structured Output Guide for complete usage.

Usage Examples

Basic Usage

from tyler import Agent, Thread, Message

# Create an agent and seed a thread with a user message
agent = Agent(name="MyAgent", purpose="To help users")
thread = Thread()
thread.add_message(Message(role="user", content="Hello!"))

# Execute and get result
result = await agent.run(thread)

# Access the response (None if no assistant message was generated)
print(f"Response: {result.content}")

Accessing Metrics

# Get token usage from thread
token_stats = result.thread.get_total_tokens()
print(f"Tokens used: {token_stats['overall']['total_tokens']}")

# Calculate duration from message timestamps
if result.new_messages:
    start_time = min(msg.timestamp for msg in result.new_messages)
    end_time = max(msg.timestamp for msg in result.new_messages)
    duration_ms = (end_time - start_time).total_seconds() * 1000
    print(f"Duration: {duration_ms:.0f}ms")

# Check tool usage
tool_usage = result.thread.get_tool_usage()
if tool_usage['total_calls'] > 0:
    # Plain string: no placeholders, so no f-prefix needed (same output)
    print("\nTools used:")
    for tool_name, count in tool_usage['tools'].items():
        print(f"  {tool_name}: {count} calls")

Working with Messages

# Access all new messages (excludes messages that existed before this run)
for message in result.new_messages:
    print(f"{message.role}: {message.content}")

    # Check for tool calls
    if message.tool_calls:
        print(f"  Tool calls: {len(message.tool_calls)}")

    # Check metrics; .get with defaults tolerates missing usage data
    if message.metrics:
        tokens = message.metrics.get("usage", {})
        print(f"  Tokens: {tokens.get('total_tokens', 0)}")

Error Handling

try:
    result = await agent.run(thread)
    print(f"Response: {result.content}")
except Exception as e:
    # Broad catch is for demonstration; prefer catching specific errors
    print(f"Error: {e}")
    # The thread may still have partial messages
    if thread.messages:
        last_message = thread.messages[-1]
        print(f"Last message: {last_message.content}")

Thread Management

# The thread is updated in-place
original_message_count = len(thread.messages)
result = await agent.run(thread)
new_message_count = len(result.thread.messages)

print(f"Added {new_message_count - original_message_count} messages")

# You can also access the thread directly:
# run() mutates the thread you pass in rather than copying it
assert result.thread is thread  # Same object, modified in-place

Common Patterns

Conversation Loop

async def chat_loop(agent: Agent, thread: Thread):
    """Run an interactive chat loop: read input, run the agent, print replies.

    Exits when the user types 'quit' (case-insensitive).
    """
    while (text := input("You: ")).lower() != 'quit':
        thread.add_message(Message(role="user", content=text))
        result = await agent.run(thread)

        print(f"Assistant: {result.content}")

        # Report the wall-clock span covered by this turn's new messages
        if result.new_messages:
            stamps = [m.timestamp for m in result.new_messages]
            elapsed_ms = (max(stamps) - min(stamps)).total_seconds() * 1000
            print(f"(Took {elapsed_ms:.0f}ms)")

Result Analysis

def analyze_result(result: AgentResult):
    """Summarize an agent execution as a flat stats dict.

    Returns duration in ms, total tokens, tool-call count, number of new
    messages, plus flags for content presence and tool usage.
    """
    token_stats = result.thread.get_total_tokens()
    tool_usage = result.thread.get_tool_usage()

    # Duration spans the earliest to latest new-message timestamps
    timestamps = [msg.timestamp for msg in result.new_messages]
    if timestamps:
        duration_ms = (max(timestamps) - min(timestamps)).total_seconds() * 1000
    else:
        duration_ms = 0

    return {
        "duration_ms": duration_ms,
        "tokens": token_stats['overall']['total_tokens'],
        "tool_calls": tool_usage['total_calls'],
        "messages_added": len(result.new_messages),
        "has_content": result.content is not None,
        "used_tools": tool_usage['total_calls'] > 0
    }

Persisting Results

from narrator import ThreadStore

store = ThreadStore()

# Execute agent
result = await agent.run(thread)

# Save the updated thread
await store.save(result.thread)

# Store execution metadata
token_stats = result.thread.get_total_tokens()
duration_ms = 0
if result.new_messages:
    start = min(msg.timestamp for msg in result.new_messages)
    end = max(msg.timestamp for msg in result.new_messages)
    duration_ms = (end - start).total_seconds() * 1000

# Attach per-run metrics to the stored thread for later inspection
await store.update_metadata(
    thread_id=result.thread.id,
    metadata={
        "last_execution_ms": duration_ms,
        "last_tokens_used": token_stats['overall']['total_tokens'],
        "last_response": result.content
    }
)

Structured Output Usage

When using structured output, access the validated data through structured_data:
from pydantic import BaseModel, Field
from typing import List, Literal

# Schema the agent's structured output must validate against
class SupportTicket(BaseModel):
    priority: Literal["low", "medium", "high"]
    category: str
    summary: str = Field(max_length=500)
    requires_escalation: bool

# Run with response_type
result = await agent.run(thread, response_type=SupportTicket)

# Access structured data (None if response_type was not used)
if result.structured_data:
    ticket: SupportTicket = result.structured_data
    print(f"Priority: {ticket.priority}")
    print(f"Category: {ticket.category}")
    print(f"Needs escalation: {ticket.requires_escalation}")

# Raw content is still available
print(f"Raw JSON: {result.content}")

Handling Errors

When structured output validation fails after all retries, StructuredOutputError is raised:
from tyler import StructuredOutputError

try:
    result = await agent.run(thread, response_type=SupportTicket)
except StructuredOutputError as e:
    # Raised only after all validation retries are exhausted
    print(f"Failed: {e.message}")
    print(f"Validation errors: {e.validation_errors}")
    print(f"Last response: {e.last_response}")
See the Structured Output Guide for complete documentation.

See Also