Introduction: AI agents go beyond simple LLM calls by combining reasoning, planning, tool use, and memory to accomplish complex tasks autonomously. Different architectures suit different problems: ReAct agents interleave reasoning and action, plan-and-execute agents create an upfront plan and work through it, and multi-agent systems coordinate specialized agents. This guide covers building robust agent systems: the ReAct pattern for reasoning and acting, planning architectures for complex tasks, tool integration, memory systems for context, and multi-agent orchestration. These patterns enable AI systems that can break down problems, use tools, and iterate toward solutions.

ReAct Agent Pattern
from openai import OpenAI
from typing import Callable, Any
import json

client = OpenAI()


class Tool:
    """Represents a tool the agent can use."""

    def __init__(
        self,
        name: str,
        description: str,
        parameters: dict,
        function: Callable
    ):
        self.name = name
        self.description = description
        self.parameters = parameters
        self.function = function

    def to_openai_format(self) -> dict:
        """Convert to OpenAI function calling format."""
        return {
            "type": "function",
            "function": {
                "name": self.name,
                "description": self.description,
                "parameters": self.parameters
            }
        }

    def execute(self, **kwargs) -> Any:
        """Execute the tool."""
        return self.function(**kwargs)


class ReActAgent:
    """Agent using ReAct (Reasoning + Acting) pattern."""

    def __init__(
        self,
        tools: list[Tool],
        model: str = "gpt-4o",
        max_iterations: int = 10
    ):
        self.tools = {t.name: t for t in tools}
        self.model = model
        self.max_iterations = max_iterations
        self.messages: list[dict] = []

    def _get_system_prompt(self) -> str:
        return """You are a helpful AI assistant that can use tools to accomplish tasks.

For each step:
1. Think about what you need to do next
2. Decide if you need to use a tool
3. If yes, call the appropriate tool
4. Analyze the result and decide next steps
5. Continue until the task is complete

Always explain your reasoning before taking actions."""

    def run(self, task: str) -> str:
        """Run the agent on a task."""
        self.messages = [
            {"role": "system", "content": self._get_system_prompt()},
            {"role": "user", "content": task}
        ]
        tools_format = [t.to_openai_format() for t in self.tools.values()]

        for iteration in range(self.max_iterations):
            response = client.chat.completions.create(
                model=self.model,
                messages=self.messages,
                tools=tools_format if tools_format else None,
                tool_choice="auto"
            )
            message = response.choices[0].message
            self.messages.append(message.model_dump())

            # Check if done (no tool calls)
            if not message.tool_calls:
                return message.content

            # Execute tool calls and append each result as a tool message
            for tool_call in message.tool_calls:
                tool_name = tool_call.function.name
                tool_args = json.loads(tool_call.function.arguments)
                if tool_name in self.tools:
                    result = self.tools[tool_name].execute(**tool_args)
                else:
                    result = f"Error: Unknown tool {tool_name}"
                self.messages.append({
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "content": str(result)
                })

        return "Max iterations reached without completing the task."


# Define tools
def search_web(query: str) -> str:
    """Simulate web search."""
    return f"Search results for '{query}': [Result 1, Result 2, Result 3]"


def calculate(expression: str) -> str:
    """Evaluate a math expression."""
    try:
        # Note: eval is unsafe for untrusted input; use a restricted parser in production.
        result = eval(expression)
        return str(result)
    except Exception:
        return "Error evaluating expression"


tools = [
    Tool(
        name="search_web",
        description="Search the web for information",
        parameters={
            "type": "object",
            "properties": {
                "query": {"type": "string", "description": "Search query"}
            },
            "required": ["query"]
        },
        function=search_web
    ),
    Tool(
        name="calculate",
        description="Evaluate a mathematical expression",
        parameters={
            "type": "object",
            "properties": {
                "expression": {"type": "string", "description": "Math expression to evaluate"}
            },
            "required": ["expression"]
        },
        function=calculate
    )
]

# Usage
agent = ReActAgent(tools)
result = agent.run("What is 15% of 847?")
print(result)
Plan-and-Execute Agent
from dataclasses import dataclass, field
from typing import Optional
from enum import Enum


class StepStatus(str, Enum):
    PENDING = "pending"
    IN_PROGRESS = "in_progress"
    COMPLETED = "completed"
    FAILED = "failed"


@dataclass
class PlanStep:
    id: int
    description: str
    status: StepStatus = StepStatus.PENDING
    result: Optional[str] = None
    dependencies: list[int] = field(default_factory=list)


class PlanAndExecuteAgent:
    """Agent that creates a plan then executes it."""

    def __init__(self, tools: list[Tool], model: str = "gpt-4o"):
        self.tools = {t.name: t for t in tools}
        self.model = model
        self.plan: list[PlanStep] = []

    def create_plan(self, task: str) -> list[PlanStep]:
        """Create a plan for the task."""
        tools_desc = "\n".join([
            f"- {t.name}: {t.description}"
            for t in self.tools.values()
        ])
        prompt = f"""Create a step-by-step plan to accomplish this task.

Available tools:
{tools_desc}

Task: {task}

Return JSON:
{{
  "steps": [
    {{"id": 1, "description": "step description", "dependencies": []}},
    {{"id": 2, "description": "step description", "dependencies": [1]}}
  ]
}}

Each step should be atomic and achievable with the available tools."""
        response = client.chat.completions.create(
            model=self.model,
            messages=[{"role": "user", "content": prompt}],
            response_format={"type": "json_object"}
        )
        data = json.loads(response.choices[0].message.content)
        self.plan = [
            PlanStep(
                id=s["id"],
                description=s["description"],
                dependencies=s.get("dependencies", [])
            )
            for s in data["steps"]
        ]
        return self.plan

    def execute_step(self, step: PlanStep, context: dict) -> str:
        """Execute a single step."""
        tools_format = [t.to_openai_format() for t in self.tools.values()]

        # Build context from completed steps
        context_str = "\n".join([
            f"Step {s.id} result: {s.result}"
            for s in self.plan
            if s.status == StepStatus.COMPLETED
        ])
        prompt = f"""Execute this step of the plan.

Previous results:
{context_str}

Current step: {step.description}

Use the available tools to complete this step."""
        messages = [{"role": "user", "content": prompt}]
        response = client.chat.completions.create(
            model=self.model,
            messages=messages,
            tools=tools_format
        )
        message = response.choices[0].message

        # Execute any tool calls
        if message.tool_calls:
            results = []
            for tool_call in message.tool_calls:
                tool_name = tool_call.function.name
                tool_args = json.loads(tool_call.function.arguments)
                if tool_name in self.tools:
                    result = self.tools[tool_name].execute(**tool_args)
                    results.append(f"{tool_name}: {result}")
            return "\n".join(results)
        return message.content

    def can_execute(self, step: PlanStep) -> bool:
        """Check if step dependencies are met."""
        if not step.dependencies:
            return True
        for dep_id in step.dependencies:
            dep_step = next((s for s in self.plan if s.id == dep_id), None)
            if dep_step and dep_step.status != StepStatus.COMPLETED:
                return False
        return True

    def run(self, task: str) -> str:
        """Create plan and execute it."""
        # Create plan
        self.create_plan(task)
        print(f"Created plan with {len(self.plan)} steps")

        context = {}

        # Execute steps until none are pending or no further progress is possible
        while any(s.status == StepStatus.PENDING for s in self.plan):
            progressed = False
            for step in self.plan:
                if step.status == StepStatus.PENDING and self.can_execute(step):
                    step.status = StepStatus.IN_PROGRESS
                    progressed = True
                    print(f"Executing step {step.id}: {step.description}")
                    try:
                        result = self.execute_step(step, context)
                        step.result = result
                        step.status = StepStatus.COMPLETED
                        print(f"Step {step.id} completed: {result[:100]}...")
                    except Exception as e:
                        step.status = StepStatus.FAILED
                        step.result = str(e)
                        print(f"Step {step.id} failed: {e}")
            if not progressed:
                # Remaining steps are blocked by failed dependencies; stop instead of looping forever
                break

        # Summarize results
        return self._summarize_results()

    def _summarize_results(self) -> str:
        """Summarize the execution results."""
        results = "\n".join([
            f"Step {s.id}: {s.result}"
            for s in self.plan
            if s.status == StepStatus.COMPLETED
        ])
        prompt = f"""Summarize the results of this plan execution.

Results:
{results}

Provide a concise summary of what was accomplished."""
        response = client.chat.completions.create(
            model=self.model,
            messages=[{"role": "user", "content": prompt}]
        )
        return response.choices[0].message.content


# Usage
planner = PlanAndExecuteAgent(tools)
result = planner.run("Research the current weather in New York and calculate what temperature that is in Celsius")
Agent Memory Systems
from dataclasses import dataclass, field
from datetime import datetime
from typing import Optional
import numpy as np


@dataclass
class Memory:
    content: str
    timestamp: datetime
    importance: float = 1.0
    embedding: Optional[list[float]] = None
    metadata: dict = field(default_factory=dict)


class AgentMemory:
    """Memory system for agents."""

    def __init__(self, max_memories: int = 1000):
        self.short_term: list[Memory] = []  # Recent conversation
        self.long_term: list[Memory] = []   # Important memories
        self.max_memories = max_memories

    def _get_embedding(self, text: str) -> list[float]:
        """Get embedding for text."""
        response = client.embeddings.create(
            model="text-embedding-3-small",
            input=text
        )
        return response.data[0].embedding

    def add_short_term(self, content: str, metadata: dict = None):
        """Add to short-term memory."""
        memory = Memory(
            content=content,
            timestamp=datetime.now(),
            metadata=metadata or {}
        )
        self.short_term.append(memory)
        # Keep only recent memories
        if len(self.short_term) > 20:
            self.short_term = self.short_term[-20:]

    def add_long_term(self, content: str, importance: float = 1.0, metadata: dict = None):
        """Add to long-term memory with embedding."""
        embedding = self._get_embedding(content)
        memory = Memory(
            content=content,
            timestamp=datetime.now(),
            importance=importance,
            embedding=embedding,
            metadata=metadata or {}
        )
        self.long_term.append(memory)
        # Prune if too many memories
        if len(self.long_term) > self.max_memories:
            self._prune_memories()

    def _prune_memories(self):
        """Remove least important memories."""
        # Score by importance and recency
        now = datetime.now()

        def score(m: Memory) -> float:
            age_hours = (now - m.timestamp).total_seconds() / 3600
            recency_score = 1 / (1 + age_hours / 24)  # Decay over days
            return m.importance * recency_score

        self.long_term.sort(key=score, reverse=True)
        self.long_term = self.long_term[:self.max_memories]

    def search(self, query: str, k: int = 5) -> list[Memory]:
        """Search long-term memory by similarity."""
        if not self.long_term:
            return []
        query_embedding = self._get_embedding(query)

        # Calculate cosine similarities
        similarities = []
        for memory in self.long_term:
            if memory.embedding:
                sim = np.dot(query_embedding, memory.embedding) / (
                    np.linalg.norm(query_embedding) * np.linalg.norm(memory.embedding)
                )
                similarities.append((memory, sim))

        # Sort by similarity
        similarities.sort(key=lambda x: x[1], reverse=True)
        return [m for m, _ in similarities[:k]]

    def get_context(self, query: str = None) -> str:
        """Get relevant context for the agent."""
        context_parts = []

        # Add recent short-term memories
        if self.short_term:
            recent = self.short_term[-5:]
            context_parts.append("Recent conversation:")
            for m in recent:
                context_parts.append(f" - {m.content}")

        # Add relevant long-term memories
        if query and self.long_term:
            relevant = self.search(query, k=3)
            if relevant:
                context_parts.append("\nRelevant memories:")
                for m in relevant:
                    context_parts.append(f" - {m.content}")

        return "\n".join(context_parts)

    def reflect(self) -> Optional[str]:
        """Generate reflection on recent memories."""
        if len(self.short_term) < 5:
            return None
        recent_content = "\n".join([m.content for m in self.short_term[-10:]])
        prompt = f"""Reflect on these recent interactions and extract key insights.

Recent interactions:
{recent_content}

What are the most important things to remember? What patterns do you notice?"""
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": prompt}]
        )
        reflection = response.choices[0].message.content
        # Store reflection as important long-term memory
        self.add_long_term(reflection, importance=2.0, metadata={"type": "reflection"})
        return reflection


# Usage
memory = AgentMemory()

# Add memories
memory.add_short_term("User asked about Python programming")
memory.add_long_term("User prefers concise code examples", importance=1.5)

# Get context for new query
context = memory.get_context("How do I write a function?")
Multi-Agent Systems
from abc import ABC, abstractmethod


class BaseAgent(ABC):
    """Base class for specialized agents."""

    def __init__(self, name: str, model: str = "gpt-4o"):
        self.name = name
        self.model = model

    @abstractmethod
    def get_system_prompt(self) -> str:
        pass

    def run(self, task: str, context: str = "") -> str:
        """Run the agent on a task."""
        messages = [
            {"role": "system", "content": self.get_system_prompt()},
            {"role": "user", "content": f"Context:\n{context}\n\nTask: {task}"}
        ]
        response = client.chat.completions.create(
            model=self.model,
            messages=messages
        )
        return response.choices[0].message.content


class ResearchAgent(BaseAgent):
    """Agent specialized in research and information gathering."""

    def get_system_prompt(self) -> str:
        return """You are a research specialist. Your job is to:
- Gather relevant information
- Synthesize findings
- Identify key facts and sources
- Highlight uncertainties or gaps
Be thorough and cite your reasoning."""


class WriterAgent(BaseAgent):
    """Agent specialized in writing and content creation."""

    def get_system_prompt(self) -> str:
        return """You are a professional writer. Your job is to:
- Create clear, engaging content
- Structure information logically
- Use appropriate tone and style
- Edit for clarity and conciseness
Focus on quality and readability."""


class CriticAgent(BaseAgent):
    """Agent specialized in reviewing and critiquing."""

    def get_system_prompt(self) -> str:
        return """You are a critical reviewer. Your job is to:
- Identify weaknesses and errors
- Suggest improvements
- Check for accuracy and completeness
- Provide constructive feedback
Be thorough but fair in your critique."""


class MultiAgentOrchestrator:
    """Orchestrate multiple agents to complete tasks."""

    def __init__(self):
        self.agents: dict[str, BaseAgent] = {}
        self.conversation_history: list[dict] = []

    def add_agent(self, agent: BaseAgent):
        """Add an agent to the system."""
        self.agents[agent.name] = agent

    def delegate(self, task: str, agent_name: str, context: str = "") -> str:
        """Delegate a task to a specific agent."""
        if agent_name not in self.agents:
            raise ValueError(f"Unknown agent: {agent_name}")
        agent = self.agents[agent_name]
        result = agent.run(task, context)
        self.conversation_history.append({
            "agent": agent_name,
            "task": task,
            "result": result
        })
        return result

    def run_pipeline(self, task: str, pipeline: list[tuple[str, str]]) -> str:
        """Run a pipeline of agent tasks."""
        context = f"Original task: {task}\n\n"
        for agent_name, sub_task in pipeline:
            print(f"Running {agent_name}: {sub_task}")
            result = self.delegate(sub_task, agent_name, context)
            context += f"\n{agent_name} output:\n{result}\n"
        return context

    def collaborative_solve(self, task: str, max_rounds: int = 3) -> str:
        """Agents collaborate to solve a task."""
        # Research phase
        research = self.delegate(
            f"Research this topic: {task}",
            "researcher"
        )

        # Writing phase
        draft = self.delegate(
            f"Write content based on this research: {research}",
            "writer",
            context=research
        )

        # Review and iterate
        for _ in range(max_rounds):
            critique = self.delegate(
                f"Review this draft and suggest improvements: {draft}",
                "critic",
                context=draft
            )
            # Check if good enough
            if "no major issues" in critique.lower() or "looks good" in critique.lower():
                break
            # Revise
            draft = self.delegate(
                f"Revise based on this feedback: {critique}",
                "writer",
                context=f"Original draft:\n{draft}\n\nFeedback:\n{critique}"
            )

        return draft


# Usage
orchestrator = MultiAgentOrchestrator()
orchestrator.add_agent(ResearchAgent("researcher"))
orchestrator.add_agent(WriterAgent("writer"))
orchestrator.add_agent(CriticAgent("critic"))
result = orchestrator.collaborative_solve("Explain the benefits of microservices architecture")
Production Agent Service
from fastapi import FastAPI
from pydantic import BaseModel
from typing import Optional
import uuid

app = FastAPI()

# Store agent sessions (in-memory; use a persistent store in production)
sessions: dict[str, dict] = {}


class AgentRequest(BaseModel):
    task: str
    session_id: Optional[str] = None
    agent_type: str = "react"  # react, plan_execute, multi_agent


class AgentResponse(BaseModel):
    session_id: str
    result: str
    steps_taken: int
    tools_used: list[str]


@app.post("/agent/run", response_model=AgentResponse)
async def run_agent(request: AgentRequest):
    """Run an agent on a task."""
    # Get or create session
    session_id = request.session_id or str(uuid.uuid4())
    if session_id not in sessions:
        sessions[session_id] = {
            "memory": AgentMemory(),
            "history": []
        }
    session = sessions[session_id]
    memory = session["memory"]

    # Get context from memory and fold it into the task
    context = memory.get_context(request.task)
    task = f"{context}\n\n{request.task}" if context else request.task

    # Select agent type
    if request.agent_type == "react":
        agent = ReActAgent(tools)
        result = agent.run(task)
        steps = len(agent.messages)
        # Collect tool names from the assistant messages' tool calls
        tools_used = list({
            tc["function"]["name"]
            for m in agent.messages
            if m.get("role") == "assistant" and m.get("tool_calls")
            for tc in m["tool_calls"]
        })
    elif request.agent_type == "plan_execute":
        agent = PlanAndExecuteAgent(tools)
        result = agent.run(task)
        steps = len(agent.plan)
        tools_used = []
    else:
        orchestrator = MultiAgentOrchestrator()
        orchestrator.add_agent(ResearchAgent("researcher"))
        orchestrator.add_agent(WriterAgent("writer"))
        orchestrator.add_agent(CriticAgent("critic"))
        result = orchestrator.collaborative_solve(task)
        steps = len(orchestrator.conversation_history)
        tools_used = ["researcher", "writer", "critic"]

    # Update memory
    memory.add_short_term(f"Task: {request.task}")
    memory.add_short_term(f"Result: {result[:200]}...")
    session["history"].append({
        "task": request.task,
        "result": result
    })

    return AgentResponse(
        session_id=session_id,
        result=result,
        steps_taken=steps,
        tools_used=tools_used
    )


@app.get("/agent/session/{session_id}")
async def get_session(session_id: str):
    """Get session history."""
    if session_id not in sessions:
        return {"error": "Session not found"}
    return {
        "session_id": session_id,
        "history": sessions[session_id]["history"]
    }
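For a quick sanity check of the service, the endpoints above can be exercised with a small client script. This is a minimal sketch assuming the app is served locally on port 8000 (for example via uvicorn) and uses the requests library; the URL and tasks are placeholders, so adjust them for your deployment.

import requests

BASE_URL = "http://localhost:8000"  # assumed local dev address

# Start a new session by omitting session_id
resp = requests.post(f"{BASE_URL}/agent/run", json={
    "task": "What is 15% of 847?",
    "agent_type": "react"
})
data = resp.json()
print(data["result"], data["tools_used"])

# Reuse the returned session_id so the session's memory carries over
resp = requests.post(f"{BASE_URL}/agent/run", json={
    "task": "Express that result as a fraction of 1000",
    "session_id": data["session_id"],
    "agent_type": "react"
})
print(resp.json()["result"])

# Inspect the stored session history
history = requests.get(f"{BASE_URL}/agent/session/{data['session_id']}").json()
print(history)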
References
- ReAct Paper: https://arxiv.org/abs/2210.03629
- LangChain Agents: https://python.langchain.com/docs/modules/agents/
- AutoGPT: https://github.com/Significant-Gravitas/AutoGPT
- CrewAI: https://docs.crewai.com/
Conclusion
Agent architectures enable AI systems to tackle complex, multi-step tasks autonomously. ReAct agents work well for tasks requiring interleaved reasoning and tool use. Plan-and-execute agents excel at complex tasks that benefit from upfront planning. Memory systems give agents context and learning capabilities across interactions. Multi-agent systems leverage specialized agents for different aspects of a problem. Start with simple ReAct agents for most use cases, add planning for complex tasks, and consider multi-agent systems when you need diverse expertise. Always include appropriate guardrails and human oversight for production agent deployments.
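To make the guardrail point concrete, here is one minimal sketch: wrapping tool execution with a human-approval hook so sensitive tools never run unattended. The GuardedTool class, the requires_approval flag, and the run_shell tool are hypothetical illustrations built on the Tool and ReActAgent classes above, not part of any library; real deployments typically add argument validation, allowlists, rate limits, and audit logging as well.

class GuardedTool(Tool):
    """Tool wrapper that adds a simple human-in-the-loop guardrail."""

    def __init__(self, *args, requires_approval: bool = False, **kwargs):
        super().__init__(*args, **kwargs)
        self.requires_approval = requires_approval

    def execute(self, **kwargs):
        # Require explicit human approval before running flagged tools
        if self.requires_approval:
            answer = input(f"Approve call to '{self.name}' with {kwargs}? [y/N] ")
            if answer.strip().lower() != "y":
                return "Tool call rejected by human reviewer."
        return super().execute(**kwargs)


# Example: the calculator stays automatic, while a hypothetical shell tool
# requires approval before every call (and here only echoes the command).
guarded_tools = [
    GuardedTool(
        name="calculate",
        description="Evaluate a mathematical expression",
        parameters=tools[1].parameters,
        function=calculate,
    ),
    GuardedTool(
        name="run_shell",
        description="Run a shell command (sensitive)",
        parameters={
            "type": "object",
            "properties": {"command": {"type": "string"}},
            "required": ["command"],
        },
        function=lambda command: f"(not executed in this sketch: {command})",
        requires_approval=True,
    ),
]

guarded_agent = ReActAgent(guarded_tools, max_iterations=5)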