Agent 开发的推荐模式、技巧和常见问题解决方案
每个 Agent 应该专注于一个特定任务。
# ✅ Good design: focused agents — each agent owns one specific task.
researcher = Agent(
    name="Researcher",
    instructions=["专注于信息搜索和整理"],
    tools=[DuckDuckGoTool(), ArxivTool()],
)
writer = Agent(
    name="Writer",
    instructions=["专注于内容写作"],
)
# ❌ Avoid: one agent responsible for everything.
do_everything_agent = Agent(
    name="DoEverything",
    instructions=["搜索、写作、编程、分析..."],
    tools=[...20个工具...],
)
提供明确、具体的指令。
# ✅ Good instructions: concrete role, focus areas, language, and expected behavior.
agent = Agent(
    instructions=[
        "你是一个 Python 代码审查专家",
        "检查代码时关注:安全性、性能、可读性",
        "使用中文回复",
        "发现问题时提供修复建议",
    ],
)
# ❌ Vague instructions give the model too little to work with.
agent = Agent(
    instructions=["你是一个助手"],
)
工具过多会降低准确性。
# ✅ Recommended: 3-7 related tools per agent.
agent = Agent(
    tools=[
        DuckDuckGoTool(),
        UrlCrawlerTool(),
        FileTool(),
    ],
)
# ❌ Avoid: too many tools.
agent = Agent(
    tools=[...15个工具...],  # the model may confuse similar tools
)
# A well-structured instruction list: role, capabilities, constraints, output format.
agent = Agent(
    instructions=[
        # Role definition
        "你是一个专业的数据分析师",
        # Capability description
        "你可以:分析数据、生成图表、撰写报告",
        # Behavioral constraints
        "分析时始终验证数据质量",
        "使用 Python 进行数据处理",
        # Output format
        "报告格式:摘要 -> 详细分析 -> 结论",
    ],
)
from agentica.agent.config import PromptConfig
# Static system prompt: a fixed string supplied at construction time.
agent = Agent(
    prompt_config=PromptConfig(
        system_prompt="你是一个友好的助手,使用简洁的语言回答问题。",
    ),
)
# Dynamic system prompt: pass a callable instead of a string.
def get_system_prompt(agent):
    """Build a dynamic system prompt that embeds the current timestamp."""
    from datetime import datetime
    now = datetime.now()
    return f"当前时间: {now}\n你是一个智能助手。"
# Register the callable; it is invoked to build the prompt at run time.
agent = Agent(
    prompt_config=PromptConfig(system_prompt=get_system_prompt),
)
def get_instructions(agent):
    """Return instructions adapted to the session's "mode" state."""
    # Expert mode switches the tone; the default is plain language.
    is_expert = agent.session_state.get("mode") == "expert"
    tone = "使用专业术语回答" if is_expert else "使用简单易懂的语言"
    return ["你是一个助手", tone]
# Instructions can also be a callable, resolved per run.
agent = Agent(instructions=get_instructions)
# Few-shot examples inside instructions teach the output format by example.
agent = Agent(
    instructions=[
        "将用户输入转换为 SQL 查询",
        "",
        "示例:",
        "输入:查找所有年龄大于 30 的用户",
        "输出:SELECT * FROM users WHERE age > 30",
        "",
        "输入:统计每个城市的用户数",
        "输出:SELECT city, COUNT(*) FROM users GROUP BY city",
    ],
)
def search_products(
    query: str,
    category: str = None,  # NOTE(review): annotation should arguably be Optional[str]
    max_price: float = None,  # NOTE(review): likewise Optional[float]
) -> str:
    """Search the product catalog.

    Searches the product database for matching items.

    Args:
        query: Search keywords, e.g. "iPhone" or "笔记本电脑".
        category: Product category; one of: electronics, clothing, books.
        max_price: Upper price limit (CNY).

    Returns:
        A JSON-formatted product list with name, price and description.

    Example:
        search_products("手机", category="electronics", max_price=5000)
    """
    ...
# ✅ Structured return: JSON is easy for the model to parse reliably.
def get_weather(city: str) -> str:
    """Return weather for *city* as a JSON string (city, temperature, condition, humidity)."""
    data = fetch_weather(city)
    payload = {
        "city": city,
        "temperature": data["temp"],
        "condition": data["condition"],
        "humidity": data["humidity"],
    }
    # ensure_ascii=False keeps non-ASCII (e.g. Chinese) text readable.
    return json.dumps(payload, ensure_ascii=False)
# ❌ Avoid: unstructured free-text returns (anti-pattern kept for illustration).
def get_weather(city: str) -> str:
    return f"天气很好,温度25度"  # prose output is hard for downstream code to parse
def safe_api_call(url: str) -> str:
    """Fetch *url* via HTTP GET and return the body text.

    Never raises: network and HTTP failures are converted into
    human-readable error strings so the calling agent can surface them.
    """
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()  # turn 4xx/5xx responses into HTTPError
        return response.text
    except requests.Timeout:
        return "错误:请求超时,请稍后重试"
    except requests.HTTPError as e:
        return f"错误:HTTP {e.response.status_code}"
    except Exception as e:  # last-resort catch; message is shown to the user
        return f"错误:{str(e)}"
from agentica.agent.config import ToolConfig
agent = Agent(
    tools=[...],
    tool_config=ToolConfig(tool_call_limit=10),  # cap tool calls per run
)
from agentica import Agent, SqliteDb, AgentMemory
# Persist sessions in a database.
db = SqliteDb(table_name="sessions", db_file="agent.db")
agent = Agent(
    session_id="user-123-session",
    memory=AgentMemory.with_db(db=db),
)
# Sessions are saved and restored automatically.
agent = Agent(
    user_id="user-123",
    memory=AgentMemory.with_db(
        db=db,
        create_user_memories=True,  # enable long-term user memory
    ),
)
# The agent remembers user preferences automatically,
# e.g. "记住我喜欢 Python" is stored in long-term memory.
agent = Agent(
    add_history_to_messages=True,  # add chat history to the context
    history_window=5,  # last 5 turns
)
agent = Agent(
    memory=AgentMemory(
        create_session_summary=True,  # generate a session summary
    ),
)
# Long conversations are summarized automatically to reduce token usage.
from agentica import Knowledge
from agentica.vectordb import LanceDb
from agentica.emb import OpenAIEmbedder
knowledge = Knowledge(
    data_path="./documents",
    vector_db=LanceDb(
        table_name="docs",
        uri="./lancedb",
        embedder=OpenAIEmbedder(),
    ),
    chunk_size=1000,  # a reasonable chunk size
    num_documents=5,  # number of documents to retrieve per query
)
# Load (index) the knowledge base; upsert avoids duplicate entries.
knowledge.load(recreate=False, upsert=True)
from agentica.agent.config import ToolConfig
agent = Agent(
    knowledge=knowledge,
    tool_config=ToolConfig(add_references=True),  # append source references to responses
    instructions=[
        "基于知识库回答问题",
        "如果知识库中没有相关信息,明确告知用户",
        "引用来源时注明文档名称",
    ],
)
让 Agent 主动搜索知识库。
from agentica.agent.config import ToolConfig
agent = Agent(
    knowledge=knowledge,
    tool_config=ToolConfig(search_knowledge=True),  # the agent may search proactively
    instructions=[
        "遇到专业问题时,先搜索知识库",
        "综合多个来源的信息回答",
    ],
)
from agentica.vectordb import LanceDb
db = LanceDb(
    search_type="hybrid",  # hybrid retrieval: vector + keyword
    reranker=CohereReranker(),  # re-rank results; NOTE(review): CohereReranker import not shown
)
agent = Agent(
    enable_multi_round=True,
    max_rounds=50,  # maximum number of rounds
    max_tokens=100000,  # token budget
)
# NOTE(review): `async for` must run inside an async function.
async for response in agent.run_stream("复杂任务"):
    if response.event == "MultiRoundTurn":
        print(f"轮次 {response.extra_data.round}")
    elif response.event == "MultiRoundToolCall":
        print(f"调用工具: {response.content}")
    elif response.event == "MultiRoundCompleted":
        print("任务完成")
from agentica import CompressionManager
from agentica.agent.config import ToolConfig
agent = Agent(
    tool_config=ToolConfig(
        compress_tool_results=True,  # compress long tool outputs
        compression_manager=CompressionManager(
            compress_token_limit=50000,  # compress once results exceed this many tokens
        ),
    ),
)
researcher = Agent(
    name="Researcher",
    role="研究员",
    instructions=["负责信息搜索"],
    tools=[DuckDuckGoTool()],
)
writer = Agent(
    name="Writer",
    role="写手",
    instructions=["负责内容创作"],
)
# A leader agent coordinates the team and delegates by role.
leader = Agent(
    name="Leader",
    team=[researcher, writer],
    instructions=[
        "协调团队完成任务",
        "研究任务交给 Researcher",
        "写作任务交给 Writer",
    ],
)
leader.print_response("写一篇关于 AI 的文章")
# Expose an agent as a callable tool for another agent.
research_tool = researcher.as_tool(
    tool_name="research",
    tool_description="进行深度研究",
)
main_agent = Agent(
    # Fixed: `other_tools...` was not valid Python; unpack the remaining tools instead.
    tools=[research_tool, *other_tools],
)
from agentica import Workflow, RunResponse
class ArticleWorkflow(Workflow):
    """Three-stage article pipeline: research -> draft -> review."""
    # Collaborating agents, provided as workflow attributes.
    researcher: Agent
    writer: Agent
    reviewer: Agent
    def run(self, topic: str) -> RunResponse:
        """Produce a reviewed article on *topic*."""
        # 1. Research the topic.
        research = self.researcher.run(f"研究: {topic}")
        # 2. Draft an article from the research notes.
        draft = self.writer.run(f"基于研究写文章:\n{research.content}")
        # 3. Review and polish the draft.
        final = self.reviewer.run(f"审核并改进:\n{draft.content}")
        return RunResponse(content=final.content)
# Synchronous streaming improves perceived latency for the user.
for chunk in agent.run_stream_sync("问题"):
    print(chunk.content, end="", flush=True)
# Asynchronous streaming (NOTE(review): must run inside an async function).
async for chunk in agent.run_stream("问题"):
    print(chunk.content, end="", flush=True)
import asyncio
async def process_queries(queries):
    """Run all queries concurrently and gather responses in input order."""
    tasks = [agent.run(q) for q in queries]
    return await asyncio.gather(*tasks)
results = asyncio.run(process_queries(["问题1", "问题2", "问题3"]))
# Use a small model for simple tasks...
simple_agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
)
# ...and a large model for complex ones.
complex_agent = Agent(
    model=OpenAIChat(id="gpt-4o"),
)
from functools import lru_cache
@lru_cache(maxsize=100)
def cached_search(query: str) -> str:
    """Cache search results for repeated queries."""
    return do_search(query)
from agentica import count_tokens
# Check token usage before sending.
tokens = count_tokens(messages, model_id="gpt-4o")
if tokens > 100000:
    # Compress or truncate the context.
    ...
from tenacity import retry, stop_after_attempt, wait_exponential
@retry(
    stop=stop_after_attempt(3),  # up to 3 attempts
    wait=wait_exponential(min=1, max=10),  # exponential backoff between retries
)
def robust_agent_call(agent, message):
    """Run the agent, retrying transient failures with backoff."""
    return agent.run(message)
agent = Agent(
    model=OpenAIChat(
        timeout=60,  # request timeout in seconds
        max_retries=3,
    ),
)
# Fall back to a friendly message when the agent itself fails.
try:
    response = agent.run("问题")
except Exception as e:
    logger.error(f"Agent 错误: {e}")
    response = RunResponse(content="抱歉,我遇到了一些问题,请稍后重试。")
from agentica.tools import ToolCallException
def risky_tool(data: str) -> str:
    """Wrap a fallible operation, reporting failures via ToolCallException."""
    try:
        return process(data)
    except ValueError as e:
        raise ToolCallException(
            user_message=f"处理失败: {e}",
            stop_execution=False,  # let the agent continue with other steps
        )
import os
# Read credentials from environment variables; never hard-code them.
agent = Agent(
    model=OpenAIChat(
        api_key=os.getenv("OPENAI_API_KEY"),
        base_url=os.getenv("OPENAI_BASE_URL"),
    ),
)
from agentica.utils.log import logger, set_log_level_to_debug
# Development: verbose logging.
set_log_level_to_debug()
# Production: warnings and above only.
import logging
logger.setLevel(logging.WARNING)
# Responses carry usage metrics.
response = agent.run("问题")
print(response.metrics)
# Example metrics payload:
# {
#     "input_tokens": 100,
#     "output_tokens": 200,
#     "time_to_first_token": 0.5,
#     "total_time": 2.3,
# }
from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()
agent = Agent(...)
class Query(BaseModel):
    """Chat request payload."""
    message: str
    session_id: str = None  # NOTE(review): should likely be Optional[str] (pydantic v2 rejects this)
@app.post("/chat")
async def chat(query: Query):
    """Chat endpoint: forwards the message to the shared agent."""
    # NOTE(review): mutating the shared agent's session_id is racy under
    # concurrent requests — confirm whether a per-request agent is intended.
    agent.session_id = query.session_id
    response = await agent.run(query.message)
    return {"content": response.content}
# Limit tool permissions to reduce blast radius.
agent = Agent(
    tools=[
        ShellTool(allowed_commands=["ls", "cat"]),  # command whitelist
        FileTool(base_dir="./safe_dir"),  # confine file access to one directory
    ],
)
# Input validation: reject oversized messages before they reach the model.
def validate_input(message: str) -> str:
    """Return *message* unchanged; raise ValueError if it exceeds 10000 chars."""
    max_len = 10000
    if len(message) > max_len:
        raise ValueError("输入过长")
    # Further checks (encoding, injection patterns, ...) would go here.
    return message
Q: Agent 不调用工具怎么办? A: 检查以下几点:
# Make sure the tool is registered AND the instructions mention it by name.
agent = Agent(
    tools=[my_tool],
    instructions=["使用 my_tool 工具完成任务"],
)
A: 优化建议:
A: 解决方案:
A: 改进方法:
文档最后更新: 2026-02-16