create_deep_agent has the following core configuration options:
create_deep_agent(
    name: str | None = None,
    model: str | BaseChatModel | None = None,
    tools: Sequence[BaseTool | Callable | dict[str, Any]] | None = None,
    *,
    system_prompt: str | SystemMessage | None = None
) -> CompiledStateGraph
For the full parameter reference, see create_deep_agent.
Connection resilience
LangChain chat models automatically retry failed API requests with exponential backoff. By default, a model retries up to 6 times on network errors, rate limits (429), and server errors (5xx). Client errors (such as 401 Unauthorized or 404) are not retried. You can adjust the max_retries parameter when creating the model to fit your environment:
from langchain.chat_models import init_chat_model
from deepagents import create_deep_agent
agent = create_deep_agent(
    model=init_chat_model(
        model="claude-sonnet-4-6",
        max_retries=10,  # Increase for unreliable networks (default: 6)
        timeout=120,     # Increase timeout for slow connections
    ),
)
For long-running agent tasks on unstable networks, consider increasing max_retries to 10–15 and pairing it with a checkpointer so that progress is preserved on failure, as sketched below.
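A minimal sketch of that combination (the MemorySaver checkpointer, thread_id, and query are illustrative assumptions, not required choices):
from langchain.chat_models import init_chat_model
from langgraph.checkpoint.memory import MemorySaver
from deepagents import create_deep_agent

# Higher retry budget plus a checkpointer so progress survives transient failures.
checkpointer = MemorySaver()
agent = create_deep_agent(
    model=init_chat_model(
        model="claude-sonnet-4-6",
        max_retries=12,  # within the suggested 10-15 range
        timeout=120,
    ),
    checkpointer=checkpointer,
)

# Reusing the same thread_id lets you resume the conversation after a failure.
result = agent.invoke(
    {"messages": [{"role": "user", "content": "Summarize the quarterly report."}]},
    config={"configurable": {"thread_id": "long-running-task"}},
)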
Model
By default, deepagents uses claude-sonnet-4-6. You can customize the model by passing any supported model identifier string or LangChain model object.
Use the provider:model format (for example, openai:gpt-5) to switch models quickly.
- OpenAI
- Anthropic
- Azure
- Google Gemini
- AWS Bedrock
- HuggingFace
👉 Read the OpenAI chat model integration docs
pip install -U "langchain[openai]"
import os
from deepagents import create_deep_agent
os.environ["OPENAI_API_KEY"] = "sk-..."
agent = create_deep_agent(model="openai:gpt-5.2")
# This calls init_chat_model for the specified model with default parameters.
# To use specific model parameters, use init_chat_model directly.
👉 Read the Anthropic chat model integration docs
pip install -U "langchain[anthropic]"
import os
from deepagents import create_deep_agent
os.environ["ANTHROPIC_API_KEY"] = "sk-..."
agent = create_deep_agent(model="claude-sonnet-4-6")
# This calls init_chat_model for the specified model with default parameters.
# To use specific model parameters, use init_chat_model directly.
👉 Read the Azure chat model integration docs
pip install -U "langchain[openai]"
import os
from deepagents import create_deep_agent
os.environ["AZURE_OPENAI_API_KEY"] = "..."
os.environ["AZURE_OPENAI_ENDPOINT"] = "..."
os.environ["OPENAI_API_VERSION"] = "2025-03-01-preview"
agent = create_deep_agent(model="azure_openai:gpt-5.2")
# This calls init_chat_model for the specified model with default parameters.
# To use specific model parameters, use init_chat_model directly.
👉 Read the Google GenAI chat model integration docs
pip install -U "langchain[google-genai]"
import os
from deepagents import create_deep_agent
os.environ["GOOGLE_API_KEY"] = "..."
agent = create_deep_agent(model="google_genai:gemini-2.5-flash-lite")
# This calls init_chat_model for the specified model with default parameters.
# To use specific model parameters, use init_chat_model directly.
👉 Read the AWS Bedrock chat model integration docs
pip install -U "langchain[aws]"
from deepagents import create_deep_agent
# Follow the steps here to configure your credentials:
# https://docs.aws.amazon.com/bedrock/latest/userguide/getting-started.html
agent = create_deep_agent(
    model="anthropic.claude-3-5-sonnet-20240620-v1:0",
    model_provider="bedrock_converse",
)
# This calls init_chat_model for the specified model with default parameters.
# To use specific model parameters, use init_chat_model directly.
👉 Read the HuggingFace chat model integration docs
pip install -U "langchain[huggingface]"
import os
from deepagents import create_deep_agent
os.environ["HUGGINGFACEHUB_API_TOKEN"] = "hf_..."
agent = create_deep_agent(
    model="microsoft/Phi-3-mini-4k-instruct",
    model_provider="huggingface",
    temperature=0.7,
    max_tokens=1024,
)
# This calls init_chat_model for the specified model with default parameters.
# To use specific model parameters, use init_chat_model directly.
Tools
In addition to the built-in tools for planning, file management, and spawning subagents, you can provide custom tools:
import os
from typing import Literal
from tavily import TavilyClient
from deepagents import create_deep_agent
tavily_client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
def internet_search(
    query: str,
    max_results: int = 5,
    topic: Literal["general", "news", "finance"] = "general",
    include_raw_content: bool = False,
):
    """Run a web search"""
    return tavily_client.search(
        query,
        max_results=max_results,
        include_raw_content=include_raw_content,
        topic=topic,
    )

agent = create_deep_agent(
    tools=[internet_search]
)
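Once created, the agent can be invoked like any other LangGraph graph; the query below is just an illustrative example:
result = agent.invoke(
    {"messages": [{"role": "user", "content": "What's the latest news about LangChain?"}]}
)
print(result["messages"][-1].content)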
System prompt
Deep agents ship with a built-in system prompt. The default system prompt includes detailed instructions for using the built-in planning tools, filesystem tools, and subagents. When middleware adds special tools (such as the filesystem tools), their instructions are appended to the system prompt. Each deep agent should also include a custom system prompt tailored to its specific purpose:
from deepagents import create_deep_agent
research_instructions = """\
You are an expert researcher. Your job is to conduct \
thorough research, and then write a polished report. \
"""
agent = create_deep_agent(
    system_prompt=research_instructions,
)
Middleware
By default, deep agents have access to the following middleware:
- TodoListMiddleware: tracks and manages a todo list for organizing agent tasks
- FilesystemMiddleware: handles filesystem operations such as reading, writing, and browsing directories
- SubAgentMiddleware: spawns and coordinates subagents to delegate tasks to specialized agents
- SummarizationMiddleware: compresses message history when conversations grow too long, to stay within context limits
- AnthropicPromptCachingMiddleware: automatically reduces redundant token processing when using Anthropic models
- PatchToolCallsMiddleware: automatically repairs message history when tool calls are interrupted or cancelled (no result received)
- MemoryMiddleware: persists and retrieves conversational context across sessions when the memory parameter is provided
- SkillsMiddleware: enables custom skills when the skills parameter is provided
- HumanInTheLoopMiddleware: pauses at the specified points for human approval or input when the interrupt_on parameter is provided
You can also add your own middleware via the middleware parameter:
from langchain.tools import tool
from langchain.agents.middleware import wrap_tool_call
from deepagents import create_deep_agent
@tool
def get_weather(city: str) -> str:
    """Get the weather in a city."""
    return f"The weather in {city} is sunny."

call_count = [0]  # Use list to allow modification in nested function

@wrap_tool_call
def log_tool_calls(request, handler):
    """Intercept and log every tool call - demonstrates cross-cutting concern."""
    call_count[0] += 1
    tool_name = request.name if hasattr(request, 'name') else str(request)
    print(f"[Middleware] Tool call #{call_count[0]}: {tool_name}")
    print(f"[Middleware] Arguments: {request.args if hasattr(request, 'args') else 'N/A'}")
    # Execute the tool call
    result = handler(request)
    # Log the result
    print(f"[Middleware] Tool call #{call_count[0]} completed")
    return result

agent = create_deep_agent(
    tools=[get_weather],
    middleware=[log_tool_calls],
)
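A quick illustrative invocation (the city is arbitrary); each tool call the agent makes is logged by the middleware before and after execution:
result = agent.invoke(
    {"messages": [{"role": "user", "content": "What's the weather in Paris?"}]}
)
print(result["messages"][-1].content)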
Do not mutate instance attributes after initialization. If you need to track values across hook invocations (for example, counters or accumulated data), use graph state instead. Graph state is thread-scoped by design, so updates are safe in concurrent scenarios.
Recommended:
from langchain.agents.middleware import AgentMiddleware

class CustomMiddleware(AgentMiddleware):
    def __init__(self):
        pass

    def before_agent(self, state, runtime):
        return {"x": state.get("x", 0) + 1}  # Update graph state instead
Not recommended:
class CustomMiddleware(AgentMiddleware):
    def __init__(self):
        self.x = 1

    def before_agent(self, state, runtime):
        self.x += 1  # Mutation causes race conditions
In-place mutation (for example, modifying self.x in before_agent or other hooks) can cause subtle bugs and race conditions, because many operations run concurrently (subagents, parallel tool calls, and parallel invocations on different threads). For full details on extending state with custom attributes, see Custom middleware - Custom state schema.
If you must mutate state in place inside custom middleware, consider what happens when subagents, parallel tools, or concurrent agent invocations run at the same time.
Subagents
To isolate detailed work and avoid context bloat, use subagents:
import os
from typing import Literal
from tavily import TavilyClient
from deepagents import create_deep_agent
tavily_client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
def internet_search(
    query: str,
    max_results: int = 5,
    topic: Literal["general", "news", "finance"] = "general",
    include_raw_content: bool = False,
):
    """Run a web search"""
    return tavily_client.search(
        query,
        max_results=max_results,
        include_raw_content=include_raw_content,
        topic=topic,
    )

research_subagent = {
    "name": "research-agent",
    "description": "Used to research more in depth questions",
    "system_prompt": "You are a great researcher",
    "tools": [internet_search],
    "model": "openai:gpt-5.2",  # Optional override, defaults to main agent model
}
subagents = [research_subagent]

agent = create_deep_agent(
    model="claude-sonnet-4-6",
    subagents=subagents
)
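With that configuration, the main agent can delegate in-depth questions to the research-agent subagent. A simple illustrative invocation:
result = agent.invoke(
    {"messages": [{"role": "user", "content": "Research recent developments in solid-state batteries."}]}
)
print(result["messages"][-1].content)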
Backends
Deep agent tools can use a virtual filesystem to store, access, and edit files. By default, deep agents use the StateBackend.
If you use skills or memory, you must add the expected skill or memory files to the backend before creating the agent.
- StateBackend
- FilesystemBackend
- LocalShellBackend
- StoreBackend
- CompositeBackend
An ephemeral filesystem backend stored in LangGraph state. This filesystem only persists _within a single thread_.
# By default we provide a StateBackend
agent = create_deep_agent()
# Under the hood, it looks like
from deepagents.backends import StateBackend
agent = create_deep_agent(
    backend=(lambda rt: StateBackend(rt))  # Note that the tools access state through runtime.state
)
The local machine's filesystem.
This backend grants the agent direct read and write access to the filesystem.
Use with caution, and only in appropriate environments.
See FilesystemBackend for more information.
from deepagents.backends import FilesystemBackend
agent = create_deep_agent(
    backend=FilesystemBackend(root_dir=".", virtual_mode=True)
)
A backend that provides filesystem access and shell execution directly on the host machine. It exposes the filesystem tools plus an execute tool for running commands. This backend grants the agent direct read and write access to the filesystem and unrestricted shell execution on the host.
Use with extreme caution, and only in appropriate environments.
See LocalShellBackend for more information.
from deepagents.backends import LocalShellBackend
agent = create_deep_agent(
    backend=LocalShellBackend(root_dir=".", env={"PATH": "/usr/bin:/bin"})
)
A filesystem backend that provides long-term storage that _persists across threads_.
from langgraph.store.memory import InMemoryStore
from deepagents.backends import StoreBackend
agent = create_deep_agent(
    backend=(lambda rt: StoreBackend(rt)),
    store=InMemoryStore()  # Good for local dev; omit for LangSmith Deployment
)
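A hypothetical sketch of the cross-thread behavior: because files live in the store rather than in per-thread state, a file written during one run is visible from later runs on other threads (the paths and thread_ids below are illustrative):
agent.invoke(
    {"messages": [{"role": "user", "content": "Write 'hello' to /notes.txt"}]},
    config={"configurable": {"thread_id": "thread-a"}},
)
result = agent.invoke(
    {"messages": [{"role": "user", "content": "Read /notes.txt and repeat its contents"}]},
    config={"configurable": {"thread_id": "thread-b"}},  # Store contents are shared regardless of thread
)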
When deploying to LangSmith Deployment, omit the store parameter. The platform automatically provisions a store for your agent.
A flexible backend that lets you specify different backends for different routes within the filesystem.
from deepagents import create_deep_agent
from deepagents.backends import CompositeBackend, StateBackend, StoreBackend
from langgraph.store.memory import InMemoryStore
composite_backend = lambda rt: CompositeBackend(
    default=StateBackend(rt),
    routes={
        "/memories/": StoreBackend(rt),
    }
)
agent = create_deep_agent(
    backend=composite_backend,
    store=InMemoryStore()  # Store passed to create_deep_agent, not backend
)
Sandboxes
Sandboxes are specialized backends that run agent code in a separate, isolated environment with its own filesystem and an execute tool for running shell commands.
Use a sandbox backend when you want a deep agent to write files, install dependencies, and run commands without affecting your local machine.
Configure a sandbox by passing the sandbox backend to the backend parameter when creating your deep agent:
- Modal
- Runloop
- Daytona
pip install langchain-modal
import modal
from langchain_anthropic import ChatAnthropic
from deepagents import create_deep_agent
from langchain_modal import ModalSandbox
app = modal.App.lookup("your-app")
modal_sandbox = modal.Sandbox.create(app=app)
backend = ModalSandbox(sandbox=modal_sandbox)
agent = create_deep_agent(
    model=ChatAnthropic(model="claude-sonnet-4-20250514"),
    system_prompt="You are a Python coding assistant with sandbox access.",
    backend=backend,
)
try:
    result = agent.invoke(
        {
            "messages": [
                {
                    "role": "user",
                    "content": "Create a small Python package and run pytest",
                }
            ]
        }
    )
finally:
    modal_sandbox.terminate()
pip install langchain-runloop
import os
from runloop_api_client import RunloopSDK
from langchain_anthropic import ChatAnthropic
from deepagents import create_deep_agent
from langchain_runloop import RunloopSandbox
client = RunloopSDK(bearer_token=os.environ["RUNLOOP_API_KEY"])
devbox = client.devbox.create()
backend = RunloopSandbox(devbox=devbox)
agent = create_deep_agent(
    model=ChatAnthropic(model="claude-sonnet-4-20250514"),
    system_prompt="You are a Python coding assistant with sandbox access.",
    backend=backend,
)
try:
    result = agent.invoke(
        {
            "messages": [
                {
                    "role": "user",
                    "content": "Create a small Python package and run pytest",
                }
            ]
        }
    )
finally:
    devbox.shutdown()
pip install langchain-daytona
from daytona import Daytona
from langchain_anthropic import ChatAnthropic
from deepagents import create_deep_agent
from langchain_daytona import DaytonaSandbox
sandbox = Daytona().create()
backend = DaytonaSandbox(sandbox=sandbox)
agent = create_deep_agent(
    model=ChatAnthropic(model="claude-sonnet-4-20250514"),
    system_prompt="You are a Python coding assistant with sandbox access.",
    backend=backend,
)
try:
    result = agent.invoke(
        {
            "messages": [
                {
                    "role": "user",
                    "content": "Create a small Python package and run pytest",
                }
            ]
        }
    )
finally:
    sandbox.stop()
Human-in-the-loop
Some tool operations are sensitive and require human approval before execution. You can configure how approval works on a per-tool basis:
from langchain.tools import tool
from deepagents import create_deep_agent
from langgraph.checkpoint.memory import MemorySaver
@tool
def delete_file(path: str) -> str:
    """Delete a file from the filesystem."""
    return f"Deleted {path}"

@tool
def read_file(path: str) -> str:
    """Read a file from the filesystem."""
    return f"Contents of {path}"

@tool
def send_email(to: str, subject: str, body: str) -> str:
    """Send an email."""
    return f"Sent email to {to}"

# Checkpointer is REQUIRED for human-in-the-loop
checkpointer = MemorySaver()

agent = create_deep_agent(
    model="claude-sonnet-4-6",
    tools=[delete_file, read_file, send_email],
    interrupt_on={
        "delete_file": True,  # Default: approve, edit, reject
        "read_file": False,  # No interrupts needed
        "send_email": {"allowed_decisions": ["approve", "reject"]},  # No editing
    },
    checkpointer=checkpointer  # Required!
)
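A hypothetical invocation sketch showing an interrupt surfacing (the thread_id and request are illustrative; see the human-in-the-loop docs for the exact payload used to resume with a decision):
config = {"configurable": {"thread_id": "hitl-demo"}}
result = agent.invoke(
    {"messages": [{"role": "user", "content": "Delete the file /tmp/old.log"}]},
    config=config,
)
# When the agent calls delete_file, execution pauses and the pending
# approval request is surfaced under the "__interrupt__" key.
print(result.get("__interrupt__"))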
Skills
You can use skills to give deep agents new capabilities and expertise. Whereas tools usually cover low-level functionality (such as raw filesystem operations or planning), skills can include detailed instructions for completing a task, reference information, and other assets (such as templates). These files are loaded only when the agent decides the skill is useful for the current prompt. This progressive disclosure reduces the number of tokens and the amount of context the agent has to process at startup. For example skills, see the Deep Agent example skills. To add skills to a deep agent, pass them as an argument to create_deep_agent:
- StateBackend
- StoreBackend
- FilesystemBackend
from urllib.request import urlopen
from deepagents import create_deep_agent
from deepagents.backends.utils import create_file_data
from langgraph.checkpoint.memory import MemorySaver
checkpointer = MemorySaver()
skill_url = "https://raw.githubusercontent.com/langchain-ai/deepagents/refs/heads/main/libs/cli/examples/skills/langgraph-docs/SKILL.md"
with urlopen(skill_url) as response:
    skill_content = response.read().decode('utf-8')

skills_files = {
    "/skills/langgraph-docs/SKILL.md": create_file_data(skill_content)
}

agent = create_deep_agent(
    skills=["/skills/"],
    checkpointer=checkpointer,
)

result = agent.invoke(
    {
        "messages": [
            {
                "role": "user",
                "content": "What is langgraph?",
            }
        ],
        # Seed the default StateBackend's in-state filesystem (virtual paths must start with "/").
        "files": skills_files
    },
    config={"configurable": {"thread_id": "12345"}},
)
from urllib.request import urlopen
from deepagents import create_deep_agent
from deepagents.backends import StoreBackend
from deepagents.backends.utils import create_file_data
from langgraph.store.memory import InMemoryStore
store = InMemoryStore()
skill_url = "https://raw.githubusercontent.com/langchain-ai/deepagents/refs/heads/main/libs/cli/examples/skills/langgraph-docs/SKILL.md"
with urlopen(skill_url) as response:
    skill_content = response.read().decode('utf-8')

store.put(
    namespace=("filesystem",),
    key="/skills/langgraph-docs/SKILL.md",
    value=create_file_data(skill_content)
)

agent = create_deep_agent(
    backend=(lambda rt: StoreBackend(rt)),
    store=store,
    skills=["/skills/"]
)

result = agent.invoke(
    {
        "messages": [
            {
                "role": "user",
                "content": "What is langgraph?",
            }
        ]
    },
    config={"configurable": {"thread_id": "12345"}},
)
from deepagents import create_deep_agent
from langgraph.checkpoint.memory import MemorySaver
from deepagents.backends.filesystem import FilesystemBackend
# Checkpointer is REQUIRED for human-in-the-loop
checkpointer = MemorySaver()
agent = create_deep_agent(
    backend=FilesystemBackend(root_dir="/Users/user/{project}"),
    skills=["/Users/user/{project}/skills/"],
    interrupt_on={
        "write_file": True,  # Default: approve, edit, reject
        "read_file": False,  # No interrupts needed
        "edit_file": True  # Default: approve, edit, reject
    },
    checkpointer=checkpointer,  # Required!
)

result = agent.invoke(
    {
        "messages": [
            {
                "role": "user",
                "content": "What is langgraph?",
            }
        ]
    },
    config={"configurable": {"thread_id": "12345"}},
)
Memory
Use AGENTS.md files to give deep agents additional context.
When creating a deep agent, pass one or more file paths to the memory parameter:
- StateBackend
- StoreBackend
- FilesystemBackend
from urllib.request import urlopen
from deepagents import create_deep_agent
from deepagents.backends.utils import create_file_data
from langgraph.checkpoint.memory import MemorySaver
with urlopen("https://raw.githubusercontent.com/langchain-ai/deepagents/refs/heads/main/examples/text-to-sql-agent/AGENTS.md") as response:
agents_md = response.read().decode("utf-8")
checkpointer = MemorySaver()
agent = create_deep_agent(
memory=[
"/AGENTS.md"
],
checkpointer=checkpointer,
)
result = agent.invoke(
{
"messages": [
{
"role": "user",
"content": "Please tell me what's in your memory files.",
}
],
# Seed the default StateBackend's in-state filesystem (virtual paths must start with "/").
"files": {"/AGENTS.md": create_file_data(agents_md)},
},
config={"configurable": {"thread_id": "123456"}},
)
from urllib.request import urlopen
from deepagents import create_deep_agent
from deepagents.backends import StoreBackend
from deepagents.backends.utils import create_file_data
from langgraph.store.memory import InMemoryStore
with urlopen("https://raw.githubusercontent.com/langchain-ai/deepagents/refs/heads/main/examples/text-to-sql-agent/AGENTS.md") as response:
agents_md = response.read().decode("utf-8")
# Create the store and add the file to it
store = InMemoryStore()
file_data = create_file_data(agents_md)
store.put(
namespace=("filesystem",),
key="/AGENTS.md",
value=file_data
)
agent = create_deep_agent(
backend=(lambda rt: StoreBackend(rt)),
store=store,
memory=[
"/AGENTS.md"
]
)
result = agent.invoke(
{
"messages": [
{
"role": "user",
"content": "Please tell me what's in your memory files.",
}
],
"files": {"/AGENTS.md": create_file_data(agents_md)},
},
config={"configurable": {"thread_id": "12345"}},
)
from deepagents import create_deep_agent
from langgraph.checkpoint.memory import MemorySaver
from deepagents.backends import FilesystemBackend
# Checkpointer is REQUIRED for human-in-the-loop
checkpointer = MemorySaver()
agent = create_deep_agent(
    backend=FilesystemBackend(root_dir="/Users/user/{project}"),
    memory=[
        "./AGENTS.md"
    ],
    interrupt_on={
        "write_file": True,  # Default: approve, edit, reject
        "read_file": False,  # No interrupts needed
        "edit_file": True  # Default: approve, edit, reject
    },
    checkpointer=checkpointer,  # Required!
)
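As in the other tabs, you can then invoke the agent; because interrupt_on is configured, a thread_id is required (the value below is illustrative):
result = agent.invoke(
    {
        "messages": [
            {
                "role": "user",
                "content": "Please tell me what's in your memory files.",
            }
        ]
    },
    config={"configurable": {"thread_id": "12345"}},
)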
Structured output
Deep agents support structured output. You can specify the desired output structure by passing a structured output schema as the response_format argument to create_deep_agent().
When the model generates structured data, it is captured, validated, and returned under the structured_response key in the deep agent's state.
import os
from typing import Literal
from pydantic import BaseModel, Field
from tavily import TavilyClient
from deepagents import create_deep_agent
tavily_client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
def internet_search(
    query: str,
    max_results: int = 5,
    topic: Literal["general", "news", "finance"] = "general",
    include_raw_content: bool = False,
):
    """Run a web search"""
    return tavily_client.search(
        query,
        max_results=max_results,
        include_raw_content=include_raw_content,
        topic=topic,
    )

class WeatherReport(BaseModel):
    """A structured weather report with current conditions and forecast."""
    location: str = Field(description="The location for this weather report")
    temperature: float = Field(description="Current temperature in Celsius")
    condition: str = Field(description="Current weather condition (e.g., sunny, cloudy, rainy)")
    humidity: int = Field(description="Humidity percentage")
    wind_speed: float = Field(description="Wind speed in km/h")
    forecast: str = Field(description="Brief forecast for the next 24 hours")

agent = create_deep_agent(
    response_format=WeatherReport,
    tools=[internet_search]
)

result = agent.invoke({
    "messages": [{
        "role": "user",
        "content": "What's the weather like in San Francisco?"
    }]
})
print(result["structured_response"])
# location='San Francisco, California' temperature=18.3 condition='Sunny' humidity=48 wind_speed=7.6 forecast='Pleasant sunny conditions expected to continue with temperatures around 64°F (18°C) during the day, dropping to around 52°F (11°C) at night. Clear skies with minimal precipitation expected.'