Building AI Agents with LangGraph

Tags: ai, python

LangChain chains are linear. Real-world AI agents need loops, branches, and state. LangGraph provides the graph-based primitives for building robust agents.

Why Graphs Over Chains

Chains Are Linear

Input → Step1 → Step2 → Step3 → Output

Agents Need Loops

Input → Plan → Execute → Evaluate → 
        ↑                     ↓
        └─── Revise ←────────┘

LangGraph enables this.

Core Concepts

State

import operator
from typing import Annotated, TypedDict

from langgraph.graph import StateGraph

class AgentState(TypedDict):
    """Shared state threaded through every node of the graph.

    The second argument of ``Annotated`` must be a *callable* reducer
    that merges a node's partial update into the existing value; a bare
    string like ``"append"`` is silently ignored by type checkers and
    rejected by LangGraph. ``operator.add`` concatenates lists, giving
    append semantics for the message history.
    """
    # Conversation history; node updates are appended, not overwritten.
    messages: Annotated[list, operator.add]
    # Name of the phase the agent is currently in.
    current_step: str
    # How many plan/execute/revise loops have run so far.
    iteration_count: int

State is passed through the graph, modified by nodes.

Nodes

def process_input(state: AgentState) -> AgentState:
    """Advance the workflow marker after ingesting the input.

    Returns a *partial* state update: LangGraph merges the returned
    keys into the full state rather than replacing it wholesale.
    """
    update = {"current_step": "processed"}
    return update

def generate_response(state: AgentState) -> AgentState:
    """Ask the model for the next message given the conversation so far."""
    # `llm` is the module-level model client configured elsewhere.
    reply = llm.invoke(state["messages"])
    # Only the returned key is merged back into the graph state.
    return {"messages": [reply]}

Nodes are functions that transform state.

Edges

# Assemble the graph: nodes are the functions above, edges define flow.
graph = StateGraph(AgentState)

graph.add_node("process", process_input)
graph.add_node("generate", generate_response)

graph.add_edge("process", "generate")  # Always flows
# decide_next (defined elsewhere) returns the name of the next node to run.
graph.add_conditional_edges("generate", decide_next)  # Conditional

Simple Agent Example

from langgraph.graph import StateGraph, END
from langchain_openai import ChatOpenAI
from langchain.schema import HumanMessage, AIMessage

# Minimal agent state: just the running message list.
State = TypedDict("State", {"messages": list})
    
llm = ChatOpenAI(model="gpt-4")

def call_model(state: State) -> State:
    """Invoke the model and extend the message history with its reply."""
    reply = llm.invoke(state["messages"])
    # Build a fresh list rather than mutating the incoming state.
    return {"messages": [*state["messages"], reply]}

def should_continue(state: State) -> str:
    """Route to "end" once the model has produced a final answer."""
    latest = state["messages"][-1]
    # The agent signals completion with a "FINAL ANSWER:" marker in its text.
    return "end" if "FINAL ANSWER:" in latest.content else "continue"

# Build graph
graph = StateGraph(State)
graph.add_node("agent", call_model)

graph.set_entry_point("agent")
graph.add_conditional_edges(
    "agent",
    should_continue,
    {"continue": "agent", "end": END}
)

# Compile and run
app = graph.compile()
result = app.invoke({"messages": [HumanMessage(content="Solve this step by step...")]})

ReAct Agent Pattern

Reasoning + Acting:

from langgraph.prebuilt import create_react_agent
from langchain_openai import ChatOpenAI
from langchain.tools import tool

@tool
def search(query: str) -> str:
    """Search the web for information."""
    # Stub implementation. NOTE: the docstring above doubles as the tool
    # description shown to the model, so keep it accurate.
    return f"Results for: {query}"

@tool  
def calculate(expression: str) -> str:
    """Calculate a math expression."""
    # SECURITY: eval() on model-generated text is arbitrary code execution.
    # Walk the parsed AST instead and allow only plain arithmetic.
    import ast
    import operator as _op

    # Whitelisted operator nodes -> their numeric implementations.
    _ops = {
        ast.Add: _op.add, ast.Sub: _op.sub, ast.Mult: _op.mul,
        ast.Div: _op.truediv, ast.FloorDiv: _op.floordiv,
        ast.Mod: _op.mod, ast.Pow: _op.pow,
        ast.USub: _op.neg, ast.UAdd: _op.pos,
    }

    def _eval(node):
        # Numeric literal.
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        # Binary arithmetic (a + b, a * b, ...).
        if isinstance(node, ast.BinOp) and type(node.op) in _ops:
            return _ops[type(node.op)](_eval(node.left), _eval(node.right))
        # Unary sign (-a, +a).
        if isinstance(node, ast.UnaryOp) and type(node.op) in _ops:
            return _ops[type(node.op)](_eval(node.operand))
        # Anything else (names, calls, attributes...) is rejected.
        raise ValueError(f"Unsupported expression: {expression!r}")

    return str(_eval(ast.parse(expression, mode="eval").body))

llm = ChatOpenAI(model="gpt-4")
tools = [search, calculate]

# Prebuilt ReAct loop: the model reasons, optionally calls a tool, repeats.
agent = create_react_agent(llm, tools)

# Run
result = agent.invoke({
    "messages": [("user", "What is 25 * 4, and who invented Python?")]
})

Tool Calling with State

from langgraph.prebuilt import ToolNode

# Tool-calling agent state: the accumulated message list.
State = TypedDict("State", {"messages": list})

def call_model(state: State) -> State:
    """Run the tool-enabled model over the conversation so far."""
    # bind_tools advertises the tool schemas so the model can emit
    # structured tool calls instead of plain text.
    model_with_tools = llm.bind_tools(tools)
    return {"messages": [model_with_tools.invoke(state["messages"])]}

def should_call_tools(state: State) -> str:
    """Send the run to the tool node when the model requested tools."""
    newest = state["messages"][-1]
    # An empty tool_calls list means the model answered directly.
    return "tools" if newest.tool_calls else "end"

graph = StateGraph(State)
graph.add_node("agent", call_model)
# ToolNode executes whichever tool calls the last AI message requested.
graph.add_node("tools", ToolNode(tools))

graph.set_entry_point("agent")
graph.add_conditional_edges("agent", should_call_tools, {
    "tools": "tools",
    "end": END
})
graph.add_edge("tools", "agent")  # Loop back after tools

app = graph.compile()

Human-in-the-Loop

from langgraph.checkpoint.memory import MemorySaver

# Checkpointer persists graph state between invocations so a paused run
# can be resumed later under the same thread_id.
memory = MemorySaver()

def human_approval(state: State) -> State:
    """Placeholder node: the graph is interrupted *before* this node
    runs, giving a human a chance to inspect and update the state."""
    pass

graph.add_node("approval", human_approval)

# Interrupts are declared at compile time — StateGraph has no
# `add_interrupt_before` method; pass `interrupt_before` to compile().
app = graph.compile(checkpointer=memory, interrupt_before=["approval"])

# Run until the interrupt fires (just before "approval").
config = {"configurable": {"thread_id": "1"}}
result = app.invoke({"messages": [...]}, config)

# Record the human's decision, then resume from the checkpoint by
# invoking with input=None.
app.update_state(config, {"approved": True})
result = app.invoke(None, config)

Subgraphs

Compose complex workflows:

# Research subgraph
research = StateGraph(ResearchState)
research.add_node("search", search_web)
research.add_node("summarize", summarize_results)
# ...
research_app = research.compile()

# Main graph uses subgraph
main = StateGraph(MainState)
main.add_node("research", research_app)  # Subgraph as node
main.add_node("write", write_report)
# ...

Branching

Handle multiple paths:

def router(state: State) -> str:
    """Pick the specialist agent for this query."""
    # classify_query (defined elsewhere) labels the incoming query.
    label = classify_query(state["query"])
    # Known labels map to specialist agents; everything else falls
    # through to the general-purpose agent.
    routes = {"code": "code_agent", "research": "research_agent"}
    return routes.get(label, "general_agent")

# Map each label router() can return to the node that handles it.
graph.add_conditional_edges(
    "classify",
    router,
    {
        "code_agent": "code",
        "research_agent": "research", 
        "general_agent": "general"
    }
)

Streaming

app = graph.compile()

# stream() yields one event per node execution as the graph runs,
# keyed by the node name that produced the update.
for event in app.stream({"messages": [...]}):
    print(event)
    # {'agent': {'messages': [...]}}
    # {'tools': {'messages': [...]}}

Error Handling

def with_retry(func, max_retries=3):
    """Wrap a node function so transient failures are retried.

    Retries ``func`` up to ``max_retries`` times. If the final attempt
    also raises, the error is surfaced in state as ``{"error": ...}``
    instead of crashing the whole graph run.
    """
    import functools

    # functools.wraps preserves func's name/docstring so traces and
    # graph visualizations show the real node, not "wrapped".
    @functools.wraps(func)
    def wrapped(state):
        for attempt in range(max_retries):
            try:
                return func(state)
            except Exception as e:
                # Intermediate failures are retried silently; only the
                # last one is reported.
                if attempt == max_retries - 1:
                    return {"error": str(e)}
        # Only reachable when max_retries <= 0: pass state through.
        return state
    return wrapped

graph.add_node("risky_operation", with_retry(risky_function))

Practical Example: Research Agent

from langgraph.graph import StateGraph, END
from typing import TypedDict

# State carried through the research workflow.
ResearchState = TypedDict(
    "ResearchState",
    {
        "topic": str,           # subject being researched
        "search_results": list, # raw hits accumulated across passes
        "summary": str,         # final condensed write-up
        "sources": list,        # citations extracted from the results
        "iteration": int,       # number of search passes performed
    },
)

def search(state: ResearchState) -> dict:
    """Run one web-search pass and bump the iteration counter."""
    hits = web_search(state["topic"])
    # First pass may arrive without an iteration key; default to 0.
    passes = state.get("iteration", 0) + 1
    return {"search_results": hits, "iteration": passes}

def evaluate(state: ResearchState) -> str:
    """Decide whether to keep searching or move on to summarizing."""
    enough_material = len(state["search_results"]) >= 5
    out_of_budget = state["iteration"] >= 3
    # Stop when we have enough results, or cap the loop at 3 passes.
    return "summarize" if (enough_material or out_of_budget) else "search"

def summarize(state: ResearchState) -> dict:
    """Condense the collected results and record their sources."""
    digest = llm.invoke(f"Summarize: {state['search_results']}")
    cited = extract_sources(state["search_results"])
    return {"summary": digest, "sources": cited}

graph = StateGraph(ResearchState)
graph.add_node("search", search)
graph.add_node("summarize", summarize)

graph.set_entry_point("search")
# evaluate() returns a node name ("search" or "summarize") directly,
# so no path mapping is needed here.
graph.add_conditional_edges("search", evaluate)
graph.add_edge("summarize", END)

researcher = graph.compile()
result = researcher.invoke({"topic": "LangGraph best practices"})

When to Use LangGraph

Use LangGraph When

Your workflow needs loops, branching, persistent state, human-in-the-loop pauses, or multiple cooperating agents.

Use Simple Chains When

The task is a fixed, linear sequence of steps — prompt in, answer out — with no need to revisit earlier stages.

Final Thoughts

LangGraph brings software engineering patterns (state machines, graphs) to AI agents. It’s more complex than chains but enables more robust, controllable agents.

Start simple, add complexity as needed.


Agents that loop, branch, and remember.

All posts