from typing import TypedDict, Annotated, List, Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.messages import BaseMessage, HumanMessage
import operator
from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolExecutor  # available in older langgraph releases; newer ones replace it with ToolNode
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_community.tools import DuckDuckGoSearchRun, WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain.agents import create_openai_functions_agent
# Define the agent state
class AgentState(TypedDict):
messages: Annotated[List[BaseMessage], operator.add]
agent_outcome: Union[AgentAction, AgentFinish, None]
intermediate_steps: Annotated[List[tuple], operator.add]
# Initialize tools
search_tool = DuckDuckGoSearchRun()
wikipedia_tool = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
tools = [search_tool, wikipedia_tool]
tool_executor = ToolExecutor(tools)
# Create the agent
def create_agent():
llm = ChatOpenAI(temperature=0, model="gpt-4")
prompt = ChatPromptTemplate.from_messages([
("system", """You are a helpful assistant that can search for information and answer questions.
You have access to the following tools:
- search: Search the internet for current information
- wikipedia: Search Wikipedia for encyclopedic information
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [search, wikipedia]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Begin!"""),
MessagesPlaceholder(variable_name="messages"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
])
    # An OpenAI functions agent selects tools via function calling, so the
    # Thought/Action text format above serves only as guidance to the model.
    agent = create_openai_functions_agent(llm, tools, prompt)
    return agent
# Define agent node
def run_agent(state):
    # Rebuilding the agent on every step is simple but wasteful; in practice,
    # create it once at module scope and reuse it.
    agent = create_agent()
agent_outcome = agent.invoke(state)
return {"agent_outcome": agent_outcome}
# Define tool execution node
def execute_tools(state):
agent_action = state["agent_outcome"]
output = tool_executor.invoke(agent_action)
return {"intermediate_steps": [(agent_action, str(output))]}
# Define conditional logic
def should_continue(state):
if isinstance(state["agent_outcome"], AgentFinish):
return "end"
else:
return "continue"
# Build the graph
workflow = StateGraph(AgentState)
# Add nodes
workflow.add_node("agent", run_agent)
workflow.add_node("action", execute_tools)
# Set entry point
workflow.set_entry_point("agent")
# Add conditional edges
workflow.add_conditional_edges(
"agent",
should_continue,
{
"continue": "action",
"end": END
}
)
# Add edge from action back to agent
workflow.add_edge("action", "agent")
# Compile the graph
app = workflow.compile()
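# A minimal invocation sketch for the compiled graph: it is a Runnable, so it
# can be called with an initial state matching AgentState. The query below is
# illustrative, and an OPENAI_API_KEY must be set in the environment.
def run_basic_example():
    initial_state = {
        "messages": [HumanMessage(content="Who founded Wikipedia?")],
        "agent_outcome": None,
        "intermediate_steps": [],
    }
    final_state = app.invoke(initial_state)
    print(final_state["agent_outcome"].return_values["output"])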
# Advanced LangGraph implementation with memory and error handling
class AdvancedLangGraphAgent:
def __init__(self, llm_model="gpt-4", memory_type="buffer"):
self.llm = ChatOpenAI(temperature=0, model=llm_model)
self.tools = self._initialize_tools()
self.memory = self._initialize_memory(memory_type)
self.graph = self._build_graph()
def _initialize_tools(self):
"""Initialize and configure tools"""
return [
DuckDuckGoSearchRun(name="search"),
WikipediaQueryRun(
name="wikipedia",
api_wrapper=WikipediaAPIWrapper()
),
# Add more tools as needed
]
def _initialize_memory(self, memory_type):
"""Initialize memory system"""
if memory_type == "buffer":
from langchain.memory import ConversationBufferMemory
return ConversationBufferMemory(return_messages=True)
elif memory_type == "summary":
from langchain.memory import ConversationSummaryMemory
return ConversationSummaryMemory(llm=self.llm, return_messages=True)
else:
return None
def _build_graph(self):
"""Build the LangGraph workflow"""
class State(TypedDict):
messages: Annotated[List[BaseMessage], operator.add]
agent_outcome: Union[AgentAction, AgentFinish, None]
intermediate_steps: Annotated[List[tuple], operator.add]
error_count: int
max_iterations: int
current_iteration: int
def agent_node(state: State):
"""Agent reasoning node"""
try:
# Create agent with current context
agent = self._create_agent_with_memory(state)
outcome = agent.invoke({
"messages": state["messages"],
"intermediate_steps": state["intermediate_steps"]
})
return {
"agent_outcome": outcome,
"current_iteration": state.get("current_iteration", 0) + 1
}
except Exception as e:
return {
"agent_outcome": AgentFinish(
return_values={"output": f"Error in agent reasoning: {str(e)}"},
log=f"Agent error: {str(e)}"
),
"error_count": state.get("error_count", 0) + 1
}
def tool_node(state: State):
"""Tool execution node"""
try:
action = state["agent_outcome"]
tool_executor = ToolExecutor(self.tools)
result = tool_executor.invoke(action)
return {
"intermediate_steps": [(action, str(result))]
}
except Exception as e:
return {
"intermediate_steps": [(
state["agent_outcome"],
f"Tool execution error: {str(e)}"
)],
"error_count": state.get("error_count", 0) + 1
}
def should_continue(state: State):
"""Determine next step in workflow"""
# Check for errors
if state.get("error_count", 0) > 3:
return "error_handler"
# Check iteration limit
if state.get("current_iteration", 0) > state.get("max_iterations", 10):
return "end"
# Check agent outcome
if isinstance(state.get("agent_outcome"), AgentFinish):
return "end"
else:
return "continue"
def error_handler(state: State):
"""Handle errors and recovery"""
return {
"agent_outcome": AgentFinish(
return_values={
"output": "I encountered too many errors and cannot complete this task. Please try rephrasing your question."
},
log="Error limit exceeded"
)
}
# Build the workflow graph
workflow = StateGraph(State)
# Add nodes
workflow.add_node("agent", agent_node)
workflow.add_node("tools", tool_node)
workflow.add_node("error_handler", error_handler)
# Set entry point
workflow.set_entry_point("agent")
# Add conditional edges
workflow.add_conditional_edges(
"agent",
should_continue,
{
"continue": "tools",
"end": END,
"error_handler": "error_handler"
}
)
# Add edges
workflow.add_edge("tools", "agent")
workflow.add_edge("error_handler", END)
return workflow.compile()
    def _create_agent_with_memory(self, state):
        """Create agent with memory context.

        Simplified: a fuller version would prepend stored history (see the
        _load_chat_history sketch below) to the "messages" placeholder.
        """
        # Placeholder names must match the keys agent_node supplies: "messages"
        # comes from state, and "agent_scratchpad" is built by the functions
        # agent from "intermediate_steps".
        prompt = ChatPromptTemplate.from_messages([
            ("system", "You are a helpful assistant with access to tools."),
            MessagesPlaceholder(variable_name="messages"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ])
        return create_openai_functions_agent(self.llm, self.tools, prompt)
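    def _load_chat_history(self):
        """A hedged sketch of memory integration: assuming one of the LangChain
        memory objects from _initialize_memory (return_messages=True), return
        the stored messages, or [] when memory is disabled."""
        if self.memory is None:
            return []
        return self.memory.load_memory_variables({}).get("history", [])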
async def run(self, query: str, max_iterations: int = 10):
"""Run the agent with a query"""
initial_state = {
"messages": [("human", query)],
"intermediate_steps": [],
"max_iterations": max_iterations,
"current_iteration": 0,
"error_count": 0
}
        result = await self.graph.ainvoke(initial_state)
        outcome = result["agent_outcome"]
        if isinstance(outcome, AgentFinish):
            return outcome.return_values["output"]
        # The iteration limit can end the run while the outcome is still
        # an AgentAction, which has no return_values.
        return f"Stopped at the iteration limit before finishing: {outcome.log}"
# Usage example
async def main():
# Create advanced agent
agent = AdvancedLangGraphAgent()
# Run query
result = await agent.run(
"What are the latest developments in AI agents and how do they compare to traditional approaches?"
)
print(f"Agent Response: {result}")
# Streaming implementation
class StreamingLangGraphAgent(AdvancedLangGraphAgent):
"""LangGraph agent with streaming capabilities"""
async def stream_run(self, query: str):
"""Stream agent execution in real-time"""
initial_state = {
"messages": [("human", query)],
"intermediate_steps": [],
"max_iterations": 10,
"current_iteration": 0,
"error_count": 0
}
async for event in self.graph.astream(initial_state):
# Stream intermediate results
if "agent" in event:
yield f"🤔 Thinking: {event['agent'].get('agent_outcome', {}).get('log', '')}"
elif "tools" in event:
yield f"🔧 Using tool: {event['tools']}"
elif "error_handler" in event:
yield f"❌ Error: {event['error_handler']}"
# Production deployment with LangServe
from langserve import add_routes
from fastapi import FastAPI
def create_production_app():
"""Create production FastAPI app with LangServe"""
app = FastAPI(
title="LangGraph Agent API",
version="1.0",
description="Production AI Agent API using LangGraph"
)
# Create agent
agent = AdvancedLangGraphAgent()
# Add routes
add_routes(
app,
agent.graph,
path="/agent",
enable_feedback_endpoint=True,
enable_public_trace_link_endpoint=True,
)
return app
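# To serve the API locally, hand the app to uvicorn (assumes uvicorn is
# installed; the host and port below are illustrative):
def serve_production_app():
    import uvicorn
    uvicorn.run(create_production_app(), host="0.0.0.0", port=8000)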
if __name__ == "__main__":
import asyncio
asyncio.run(main())