knowledge.py 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114
  1. """
  2. 原子知识保存工具
  3. 提供便捷的 API 让 Agent 快速保存结构化的原子知识
  4. """
  5. import os
  6. import re
  7. import json
  8. import yaml
  9. import logging
  10. from datetime import datetime
  11. from pathlib import Path
  12. from typing import List, Dict, Optional, Any
  13. from agent.tools import tool, ToolResult, ToolContext
  14. from ...llm.openrouter import openrouter_llm_call
  15. logger = logging.getLogger(__name__)
  16. def _generate_knowledge_id() -> str:
  17. """生成知识原子 ID"""
  18. return f"research-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
  19. def _format_yaml_list(items: List[str], indent: int = 2) -> str:
  20. """格式化 YAML 列表"""
  21. if not items:
  22. return "[]"
  23. indent_str = " " * indent
  24. return "\n" + "\n".join(f"{indent_str}- {item}" for item in items)
  25. @tool()
  26. async def save_knowledge(
  27. scenario: str,
  28. content: str,
  29. tags_type: List[str],
  30. urls: List[str] = None,
  31. agent_id: str = "research_agent",
  32. score: int = 3,
  33. trace_id: str = "",
  34. ) -> ToolResult:
  35. """
  36. 保存原子知识到本地文件(JSON 格式)
  37. Args:
  38. scenario: 任务描述(在什么情景下 + 要完成什么目标 + 得到能达成一个什么结果)
  39. content: 核心内容
  40. tags_type: 知识类型标签,可选:tool, usercase, definition, plan, strategy
  41. urls: 参考来源链接列表(论文/GitHub/博客等)
  42. agent_id: 执行此调研的 agent ID
  43. score: 初始评分 1-5(默认 3)
  44. trace_id: 当前 trace ID(可选)
  45. Returns:
  46. 保存结果
  47. """
  48. try:
  49. # 生成 ID
  50. knowledge_id = _generate_knowledge_id()
  51. # 准备目录
  52. knowledge_dir = Path(".cache/knowledge_atoms")
  53. knowledge_dir.mkdir(parents=True, exist_ok=True)
  54. # 构建文件路径(使用 .json 扩展名)
  55. file_path = knowledge_dir / f"{knowledge_id}.json"
  56. # 构建 JSON 数据结构
  57. knowledge_data = {
  58. "id": knowledge_id,
  59. "trace_id": trace_id or "N/A",
  60. "tags": {
  61. "type": tags_type
  62. },
  63. "scenario": scenario,
  64. "content": content,
  65. "trace": {
  66. "urls": urls or [],
  67. "agent_id": agent_id,
  68. "timestamp": datetime.now().isoformat()
  69. },
  70. "eval": {
  71. "score": score,
  72. "helpful": 0,
  73. "harmful": 0,
  74. "helpful_history": [],
  75. "harmful_history": []
  76. },
  77. "metrics": {
  78. "helpful": 1,
  79. "harmful": 0
  80. },
  81. "created_at": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
  82. }
  83. # 保存为 JSON 文件
  84. with open(file_path, "w", encoding="utf-8") as f:
  85. json.dump(knowledge_data, f, ensure_ascii=False, indent=2)
  86. return ToolResult(
  87. title="✅ 原子知识已保存",
  88. output=f"知识 ID: {knowledge_id}\n文件路径: {file_path}\n\n场景:\n{scenario[:100]}...",
  89. long_term_memory=f"保存原子知识: {knowledge_id} - {scenario[:50]}",
  90. metadata={"knowledge_id": knowledge_id, "file_path": str(file_path)}
  91. )
  92. except Exception as e:
  93. return ToolResult(
  94. title="❌ 保存失败",
  95. output=f"错误: {str(e)}",
  96. error=str(e)
  97. )
  98. @tool()
  99. async def update_knowledge(
  100. knowledge_id: str,
  101. add_helpful_case: Optional[Dict[str, str]] = None,
  102. add_harmful_case: Optional[Dict[str, str]] = None,
  103. update_score: Optional[int] = None,
  104. evolve_feedback: Optional[str] = None,
  105. ) -> ToolResult:
  106. """
  107. 更新已有的原子知识的评估反馈
  108. Args:
  109. knowledge_id: 知识 ID(如 research-20260302-001)
  110. add_helpful_case: 添加好用的案例 {"case_id": "...", "scenario": "...", "result": "...", "timestamp": "..."}
  111. add_harmful_case: 添加不好用的案例 {"case_id": "...", "scenario": "...", "result": "...", "timestamp": "..."}
  112. update_score: 更新评分(1-5)
  113. evolve_feedback: 经验进化反馈(当提供时,会使用 LLM 重写知识内容)
  114. Returns:
  115. 更新结果
  116. """
  117. try:
  118. # 查找文件(支持 JSON 和 MD 格式)
  119. knowledge_dir = Path(".cache/knowledge_atoms")
  120. json_path = knowledge_dir / f"{knowledge_id}.json"
  121. md_path = knowledge_dir / f"{knowledge_id}.md"
  122. file_path = None
  123. if json_path.exists():
  124. file_path = json_path
  125. is_json = True
  126. elif md_path.exists():
  127. file_path = md_path
  128. is_json = False
  129. else:
  130. return ToolResult(
  131. title="❌ 文件不存在",
  132. output=f"未找到知识文件: {knowledge_id}",
  133. error="文件不存在"
  134. )
  135. # 读取现有内容
  136. with open(file_path, "r", encoding="utf-8") as f:
  137. content = f.read()
  138. # 解析数据
  139. if is_json:
  140. data = json.loads(content)
  141. else:
  142. # 解析 YAML frontmatter
  143. yaml_match = re.search(r'^---\n(.*?)\n---', content, re.DOTALL)
  144. if not yaml_match:
  145. return ToolResult(
  146. title="❌ 格式错误",
  147. output=f"无法解析知识文件格式: {file_path}",
  148. error="格式错误"
  149. )
  150. data = yaml.safe_load(yaml_match.group(1))
  151. # 更新内容
  152. updated = False
  153. summary = []
  154. if add_helpful_case:
  155. data["eval"]["helpful"] += 1
  156. data["eval"]["helpful_history"].append(add_helpful_case)
  157. data["metrics"]["helpful"] += 1
  158. summary.append(f"添加 helpful 案例: {add_helpful_case.get('case_id')}")
  159. updated = True
  160. if add_harmful_case:
  161. data["eval"]["harmful"] += 1
  162. data["eval"]["harmful_history"].append(add_harmful_case)
  163. data["metrics"]["harmful"] += 1
  164. summary.append(f"添加 harmful 案例: {add_harmful_case.get('case_id')}")
  165. updated = True
  166. if update_score is not None:
  167. data["eval"]["score"] = update_score
  168. summary.append(f"更新评分: {update_score}")
  169. updated = True
  170. # 经验进化机制
  171. if evolve_feedback:
  172. old_content = data.get("content", "")
  173. evolved_content = await _evolve_knowledge_with_llm(old_content, evolve_feedback)
  174. data["content"] = evolved_content
  175. data["metrics"]["helpful"] += 1
  176. summary.append(f"知识进化: 基于反馈重写内容")
  177. updated = True
  178. if not updated:
  179. return ToolResult(
  180. title="⚠️ 无更新",
  181. output="未指定任何更新内容",
  182. long_term_memory="尝试更新原子知识但未指定更新内容"
  183. )
  184. # 更新时间戳
  185. data["updated_at"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
  186. # 保存更新
  187. if is_json:
  188. with open(file_path, "w", encoding="utf-8") as f:
  189. json.dump(data, f, ensure_ascii=False, indent=2)
  190. else:
  191. # 重新生成 YAML frontmatter
  192. meta_str = yaml.dump(data, allow_unicode=True).strip()
  193. with open(file_path, "w", encoding="utf-8") as f:
  194. f.write(f"---\n{meta_str}\n---\n")
  195. return ToolResult(
  196. title="✅ 原子知识已更新",
  197. output=f"知识 ID: {knowledge_id}\n文件路径: {file_path}\n\n更新内容:\n" + "\n".join(f"- {s}" for s in summary),
  198. long_term_memory=f"更新原子知识: {knowledge_id}"
  199. )
  200. except Exception as e:
  201. return ToolResult(
  202. title="❌ 更新失败",
  203. output=f"错误: {str(e)}",
  204. error=str(e)
  205. )
  206. @tool()
  207. async def list_knowledge(
  208. limit: int = 10,
  209. tags_type: Optional[List[str]] = None,
  210. ) -> ToolResult:
  211. """
  212. 列出已保存的原子知识
  213. Args:
  214. limit: 返回数量限制(默认 10)
  215. tags_type: 按类型过滤(可选)
  216. Returns:
  217. 知识列表
  218. """
  219. try:
  220. knowledge_dir = Path(".cache/knowledge_atoms")
  221. if not knowledge_dir.exists():
  222. return ToolResult(
  223. title="📂 知识库为空",
  224. output="还没有保存任何原子知识",
  225. long_term_memory="知识库为空"
  226. )
  227. # 获取所有文件
  228. files = sorted(knowledge_dir.glob("*.md"), key=lambda x: x.stat().st_mtime, reverse=True)
  229. if not files:
  230. return ToolResult(
  231. title="📂 知识库为空",
  232. output="还没有保存任何原子知识",
  233. long_term_memory="知识库为空"
  234. )
  235. # 读取并过滤
  236. results = []
  237. for file_path in files[:limit]:
  238. with open(file_path, "r", encoding="utf-8") as f:
  239. content = f.read()
  240. # 提取关键信息
  241. import re
  242. id_match = re.search(r"id: (.+)", content)
  243. scenario_match = re.search(r"scenario: \|\n (.+)", content)
  244. score_match = re.search(r"score: (\d+)", content)
  245. knowledge_id = id_match.group(1) if id_match else "unknown"
  246. scenario = scenario_match.group(1) if scenario_match else "N/A"
  247. score = score_match.group(1) if score_match else "N/A"
  248. results.append(f"- [{knowledge_id}] (⭐{score}) {scenario[:60]}...")
  249. output = f"共找到 {len(files)} 条原子知识,显示最近 {len(results)} 条:\n\n" + "\n".join(results)
  250. return ToolResult(
  251. title="📚 原子知识列表",
  252. output=output,
  253. long_term_memory=f"列出 {len(results)} 条原子知识"
  254. )
  255. except Exception as e:
  256. return ToolResult(
  257. title="❌ 列表失败",
  258. output=f"错误: {str(e)}",
  259. error=str(e)
  260. )
  261. # ===== 语义检索功能 =====
async def _route_knowledge_by_llm(query_text: str, metadata_list: List[Dict], k: int = 5) -> List[str]:
    """
    Stage 1: semantic routing.

    Ask the LLM to pick up to 2*k semantically relevant knowledge IDs from
    the lightweight metadata list; quality re-ranking happens elsewhere.

    Args:
        query_text: The current task description to match against.
        metadata_list: Items with "id", "tags" and "scenario" keys.
        k: Target final count; the router over-selects 2*k candidates.

    Returns:
        List of selected IDs (those starting with "research-"); empty list
        when there is nothing to route or the LLM call fails.
    """
    if not metadata_list:
        return []
    # Over-select: ask for 2*k so the later quality filter has slack.
    routing_k = k * 2
    routing_data = [
        {
            "id": m["id"],
            "tags": m["tags"],
            "scenario": m["scenario"][:100]  # only the first 100 chars
        } for m in metadata_list
    ]
    prompt = f"""
你是一个知识检索专家。根据用户的当前任务需求,从下列原子知识元数据中挑选出最相关的最多 {routing_k} 个知识 ID。
任务需求:"{query_text}"
可选知识列表:
{json.dumps(routing_data, ensure_ascii=False, indent=1)}
请直接输出 ID 列表,用逗号分隔(例如: research-20260302-001, research-20260302-002)。若无相关项请输出 "None"。
"""
    try:
        print(f"\n[Step 1: 知识语义路由] 任务: '{query_text}' | 候选总数: {len(metadata_list)} | 目标提取数: {routing_k}")
        response = await openrouter_llm_call(
            messages=[{"role": "user", "content": prompt}],
            model="google/gemini-2.0-flash-001"
        )
        content = response.get("content", "").strip()
        # Split on commas/whitespace; keeping only "research-" prefixed
        # tokens also discards the literal "None" answer.
        selected_ids = [idx.strip() for idx in re.split(r'[,\s]+', content) if idx.strip().startswith("research-")]
        print(f"[Step 1: 知识语义路由] LLM 初选 ID ({len(selected_ids)}个): {selected_ids}")
        return selected_ids
    except Exception as e:
        # Routing is best-effort: on any failure fall back to no candidates.
        logger.error(f"LLM 知识路由失败: {e}")
        return []
  298. async def _evolve_knowledge_with_llm(old_content: str, feedback: str) -> str:
  299. """
  300. 使用 LLM 进行知识进化重写(类似经验进化机制)
  301. """
  302. prompt = f"""你是一个 AI Agent 知识库管理员。请根据反馈建议,对现有的知识内容进行重写进化。
  303. 【原知识内容】:
  304. {old_content}
  305. 【实战反馈建议】:
  306. {feedback}
  307. 【重写要求】:
  308. 1. 融合知识:将反馈中的避坑指南、新参数或修正后的选择逻辑融入原知识,使其更具通用性和准确性。
  309. 2. 保持结构:如果原内容有特定格式(如 Markdown、代码示例等),请保持该格式。
  310. 3. 语言:简洁直接,使用中文。
  311. 4. 禁止:严禁输出任何开场白、解释语或额外的 Markdown 标题,直接返回重写后的正文。
  312. """
  313. try:
  314. response = await openrouter_llm_call(
  315. messages=[{"role": "user", "content": prompt}],
  316. model="google/gemini-2.0-flash-001"
  317. )
  318. evolved_content = response.get("content", "").strip()
  319. # 简单安全校验:如果 LLM 返回太短或为空,回退到原内容+追加
  320. if len(evolved_content) < 5:
  321. raise ValueError("LLM output too short")
  322. return evolved_content
  323. except Exception as e:
  324. logger.warning(f"知识进化失败,采用追加模式回退: {e}")
  325. timestamp = datetime.now().strftime('%Y-%m-%d')
  326. return f"{old_content}\n\n---\n[Update {timestamp}]: {feedback}"
  327. async def _route_knowledge_by_llm(query_text: str, metadata_list: List[Dict], k: int = 5) -> List[str]:
  328. """
  329. 第一阶段:语义路由。
  330. 让 LLM 挑选出 2*k 个语义相关的 ID。
  331. """
  332. if not metadata_list:
  333. return []
  334. # 扩大筛选范围到 2*k
  335. routing_k = k * 2
  336. routing_data = [
  337. {
  338. "id": m["id"],
  339. "tags": m["tags"],
  340. "scenario": m["scenario"][:100] # 只取前100字符
  341. } for m in metadata_list
  342. ]
  343. prompt = f"""
  344. 你是一个知识检索专家。根据用户的当前任务需求,从下列原子知识元数据中挑选出最相关的最多 {routing_k} 个知识 ID。
  345. 任务需求:"{query_text}"
  346. 可选知识列表:
  347. {json.dumps(routing_data, ensure_ascii=False, indent=1)}
  348. 请直接输出 ID 列表,用逗号分隔(例如: research-20260302-001, research-20260302-002)。若无相关项请输出 "None"。
  349. """
  350. try:
  351. print(f"\n[Step 1: 知识语义路由] 任务: '{query_text}' | 候选总数: {len(metadata_list)} | 目标提取数: {routing_k}")
  352. response = await openrouter_llm_call(
  353. messages=[{"role": "user", "content": prompt}],
  354. model="google/gemini-2.0-flash-001"
  355. )
  356. content = response.get("content", "").strip()
  357. selected_ids = [idx.strip() for idx in re.split(r'[,\s]+', content) if idx.strip().startswith("research-")]
  358. print(f"[Step 1: 知识语义路由] LLM 初选 ID ({len(selected_ids)}个): {selected_ids}")
  359. return selected_ids
  360. except Exception as e:
  361. logger.error(f"LLM 知识路由失败: {e}")
  362. return []
async def _get_structured_knowledge(
    query_text: str,
    top_k: int = 5,
    min_score: int = 3,
    context: Optional[Any] = None,
    tags_filter: Optional[List[str]] = None
) -> List[Dict]:
    """
    Semantic retrieval of knowledge atoms (including experience).

    Pipeline:
        1. Parse every knowledge file (JSON and legacy YAML front matter).
        2. Semantic routing: have the LLM shortlist ~2*top_k candidate IDs.
        3. Quality re-ranking: score candidates from eval/metrics and keep
           the top_k best.

    Args:
        query_text: Query text (task description).
        top_k: Number of items to return.
        min_score: Minimum eval score; lower-scored atoms are dropped.
        context: Context object (kept for experience-interface
            compatibility; unused here).
        tags_filter: Tag filter, e.g. ["strategy"] to return experience only.

    Returns:
        Up to top_k dicts with id/scenario/content/score/quality_score,
        sorted by quality_score descending; empty list when nothing matches.
    """
    knowledge_dir = Path(".cache/knowledge_atoms")
    if not knowledge_dir.exists():
        print(f"[Knowledge System] 警告: 知识库目录不存在 ({knowledge_dir})")
        return []
    # Support both .json (current) and .md (legacy) storage formats.
    json_files = list(knowledge_dir.glob("*.json"))
    md_files = list(knowledge_dir.glob("*.md"))
    files = json_files + md_files
    if not files:
        print(f"[Knowledge System] 警告: 知识库为空")
        return []
    # --- Stage 1: parse all knowledge files ---
    content_map = {}    # id -> full record used for ranking and the result
    metadata_list = []  # lightweight metadata handed to the LLM router
    for file_path in files:
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                content = f.read()
            # Choose the parser by file extension.
            if file_path.suffix == ".json":
                # JSON format (current).
                metadata = json.loads(content)
            else:
                # Legacy format: YAML front matter between --- delimiters.
                yaml_match = re.search(r'^---\n(.*?)\n---', content, re.DOTALL)
                if not yaml_match:
                    logger.warning(f"跳过无效文件: {file_path}")
                    continue
                metadata = yaml.safe_load(yaml_match.group(1))
            if not isinstance(metadata, dict):
                logger.warning(f"跳过损坏的知识文件: {file_path}")
                continue
            kid = metadata.get("id")
            if not kid:
                logger.warning(f"跳过缺少 id 的知识文件: {file_path}")
                continue
            # Extract scenario and content text.
            scenario = metadata.get("scenario", "").strip()
            content_text = metadata.get("content", "").strip()
            # Tag filtering: keep the entry only when tags.type intersects
            # the requested filter.
            tags = metadata.get("tags", {})
            if tags_filter:
                tag_types = tags.get("type", [])
                if isinstance(tag_types, str):
                    tag_types = [tag_types]  # normalize scalar to list
                if not any(tag in tag_types for tag in tags_filter):
                    continue  # skip entries whose tags do not match
            meta_item = {
                "id": kid,
                "tags": tags,
                "scenario": scenario,
                # Missing sections fall back to neutral defaults.
                "score": metadata.get("eval", {}).get("score", 3),
                "helpful": metadata.get("metrics", {}).get("helpful", 0),
                "harmful": metadata.get("metrics", {}).get("harmful", 0),
            }
            metadata_list.append(meta_item)
            content_map[kid] = {
                "scenario": scenario,
                "content": content_text,
                "score": meta_item["score"],
                "helpful": meta_item["helpful"],
                "harmful": meta_item["harmful"],
            }
        except Exception as e:
            # Per-file failures are logged and skipped; one bad file must
            # not abort the whole retrieval.
            logger.error(f"解析知识文件失败 {file_path}: {e}")
            continue
    if not metadata_list:
        print(f"[Knowledge System] 警告: 没有有效的知识条目")
        return []
    # --- Stage 2: semantic routing (shortlist ~2*k candidates) ---
    candidate_ids = await _route_knowledge_by_llm(query_text, metadata_list, k=top_k)
    # --- Stage 3: quality re-ranking (pick the final k) ---
    print(f"[Step 2: 知识质量精排] 正在根据评分和反馈进行打分...")
    scored_items = []
    for kid in candidate_ids:
        if kid in content_map:
            item = content_map[kid]
            score = item["score"]
            helpful = item["helpful"]
            harmful = item["harmful"]
            # Composite quality: base score + helpful, harmful penalized 2x.
            quality_score = score + helpful - (harmful * 2.0)
            # Drop atoms below the score threshold or with negative quality.
            if score < min_score or quality_score < 0:
                print(f" - 剔除低质量知识: {kid} (Score: {score}, Helpful: {helpful}, Harmful: {harmful})")
                continue
            scored_items.append({
                "id": kid,
                "scenario": item["scenario"],
                "content": item["content"],
                "score": score,
                "quality_score": quality_score
            })
    # Order by composite quality, best first.
    final_sorted = sorted(scored_items, key=lambda x: x["quality_score"], reverse=True)
    # Truncate to the requested top_k.
    result = final_sorted[:top_k]
    print(f"[Step 2: 知识质量精排] 最终选定知识: {[it['id'] for it in result]}")
    print(f"[Knowledge System] 检索结束。\n")
    return result
  483. @tool()
  484. async def search_knowledge(
  485. query: str,
  486. top_k: int = 5,
  487. min_score: int = 3,
  488. tags_type: Optional[List[str]] = None,
  489. context: Optional[ToolContext] = None,
  490. ) -> ToolResult:
  491. """
  492. 语义检索原子知识库
  493. Args:
  494. query: 搜索查询(任务描述)
  495. top_k: 返回数量(默认 5)
  496. min_score: 最低评分过滤(默认 3)
  497. tags_type: 按类型过滤(tool/usercase/definition/plan)
  498. context: 工具上下文
  499. Returns:
  500. 相关知识列表
  501. """
  502. try:
  503. relevant_items = await _get_structured_knowledge(
  504. query_text=query,
  505. top_k=top_k,
  506. min_score=min_score
  507. )
  508. if not relevant_items:
  509. return ToolResult(
  510. title="🔍 未找到相关知识",
  511. output=f"查询: {query}\n\n知识库中暂无相关的高质量知识。建议进行调研。",
  512. long_term_memory=f"知识检索: 未找到相关知识 - {query[:50]}"
  513. )
  514. # 格式化输出
  515. output_lines = [f"查询: {query}\n", f"找到 {len(relevant_items)} 条相关知识:\n"]
  516. for idx, item in enumerate(relevant_items, 1):
  517. output_lines.append(f"\n### {idx}. [{item['id']}] (⭐ {item['score']})")
  518. output_lines.append(f"**场景**: {item['scenario'][:150]}...")
  519. output_lines.append(f"**内容**: {item['content'][:200]}...")
  520. return ToolResult(
  521. title="✅ 知识检索成功",
  522. output="\n".join(output_lines),
  523. long_term_memory=f"知识检索: 找到 {len(relevant_items)} 条相关知识 - {query[:50]}",
  524. metadata={
  525. "count": len(relevant_items),
  526. "knowledge_ids": [item["id"] for item in relevant_items],
  527. "items": relevant_items
  528. }
  529. )
  530. except Exception as e:
  531. logger.error(f"知识检索失败: {e}")
  532. return ToolResult(
  533. title="❌ 检索失败",
  534. output=f"错误: {str(e)}",
  535. error=str(e)
  536. )
  537. # ===== 批量更新功能(类似经验机制)=====
  538. async def _batch_update_knowledge(
  539. update_map: Dict[str, Dict[str, Any]],
  540. context: Optional[Any] = None
  541. ) -> int:
  542. """
  543. 内部函数:批量更新知识(兼容 experience 接口)
  544. Args:
  545. update_map: 更新映射 {knowledge_id: {"action": "helpful/harmful/evolve", "feedback": "..."}}
  546. context: 上下文(兼容 experience 接口)
  547. Returns:
  548. 成功更新的数量
  549. """
  550. if not update_map:
  551. return 0
  552. knowledge_dir = Path(".cache/knowledge_atoms")
  553. if not knowledge_dir.exists():
  554. return 0
  555. success_count = 0
  556. evolution_tasks = []
  557. evolution_registry = {} # task_idx -> (file_path, data)
  558. for knowledge_id, instr in update_map.items():
  559. try:
  560. # 查找文件
  561. json_path = knowledge_dir / f"{knowledge_id}.json"
  562. md_path = knowledge_dir / f"{knowledge_id}.md"
  563. file_path = None
  564. is_json = False
  565. if json_path.exists():
  566. file_path = json_path
  567. is_json = True
  568. elif md_path.exists():
  569. file_path = md_path
  570. is_json = False
  571. else:
  572. continue
  573. # 读取并解析
  574. with open(file_path, "r", encoding="utf-8") as f:
  575. content = f.read()
  576. if is_json:
  577. data = json.loads(content)
  578. else:
  579. yaml_match = re.search(r'^---\n(.*?)\n---', content, re.DOTALL)
  580. if not yaml_match:
  581. continue
  582. data = yaml.safe_load(yaml_match.group(1))
  583. # 更新 metrics
  584. action = instr.get("action")
  585. feedback = instr.get("feedback", "")
  586. # 处理 mixed 中间态
  587. if action == "mixed":
  588. data["metrics"]["helpful"] = data.get("metrics", {}).get("helpful", 0) + 1
  589. action = "evolve"
  590. if action == "helpful":
  591. data["metrics"]["helpful"] = data.get("metrics", {}).get("helpful", 0) + 1
  592. elif action == "harmful":
  593. data["metrics"]["harmful"] = data.get("metrics", {}).get("harmful", 0) + 1
  594. elif action == "evolve" and feedback:
  595. # 注册进化任务
  596. old_content = data.get("content", "")
  597. task = _evolve_knowledge_with_llm(old_content, feedback)
  598. evolution_tasks.append(task)
  599. evolution_registry[len(evolution_tasks) - 1] = (file_path, data, is_json)
  600. data["metrics"]["helpful"] = data.get("metrics", {}).get("helpful", 0) + 1
  601. data["updated_at"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
  602. # 如果不需要进化,直接保存
  603. if action != "evolve" or not feedback:
  604. if is_json:
  605. with open(file_path, "w", encoding="utf-8") as f:
  606. json.dump(data, f, ensure_ascii=False, indent=2)
  607. else:
  608. meta_str = yaml.dump(data, allow_unicode=True).strip()
  609. with open(file_path, "w", encoding="utf-8") as f:
  610. f.write(f"---\n{meta_str}\n---\n")
  611. success_count += 1
  612. except Exception as e:
  613. logger.error(f"更新知识失败 {knowledge_id}: {e}")
  614. continue
  615. # 并发进化
  616. if evolution_tasks:
  617. import asyncio
  618. print(f"🧬 并发处理 {len(evolution_tasks)} 条知识进化...")
  619. evolved_results = await asyncio.gather(*evolution_tasks)
  620. # 回填进化结果
  621. for task_idx, (file_path, data, is_json) in evolution_registry.items():
  622. data["content"] = evolved_results[task_idx].strip()
  623. if is_json:
  624. with open(file_path, "w", encoding="utf-8") as f:
  625. json.dump(data, f, ensure_ascii=False, indent=2)
  626. else:
  627. meta_str = yaml.dump(data, allow_unicode=True).strip()
  628. with open(file_path, "w", encoding="utf-8") as f:
  629. f.write(f"---\n{meta_str}\n---\n")
  630. success_count += 1
  631. return success_count
  632. @tool()
  633. async def batch_update_knowledge(
  634. feedback_list: List[Dict[str, Any]],
  635. context: Optional[ToolContext] = None,
  636. ) -> ToolResult:
  637. """
  638. 批量反馈知识的有效性(类似经验机制)
  639. Args:
  640. feedback_list: 评价列表,每个元素包含:
  641. - knowledge_id: (str) 知识 ID
  642. - is_effective: (bool) 是否有效
  643. - feedback: (str, optional) 改进建议,若有效且有建议则触发知识进化
  644. Returns:
  645. 批量更新结果
  646. """
  647. try:
  648. if not feedback_list:
  649. return ToolResult(
  650. title="⚠️ 反馈列表为空",
  651. output="未提供任何反馈",
  652. long_term_memory="批量更新知识: 反馈列表为空"
  653. )
  654. knowledge_dir = Path(".cache/knowledge_atoms")
  655. if not knowledge_dir.exists():
  656. return ToolResult(
  657. title="❌ 知识库不存在",
  658. output="知识库目录不存在",
  659. error="知识库不存在"
  660. )
  661. success_count = 0
  662. failed_items = []
  663. for item in feedback_list:
  664. knowledge_id = item.get("knowledge_id")
  665. is_effective = item.get("is_effective")
  666. feedback = item.get("feedback", "")
  667. if not knowledge_id:
  668. failed_items.append({"id": "unknown", "reason": "缺少 knowledge_id"})
  669. continue
  670. try:
  671. # 查找文件
  672. json_path = knowledge_dir / f"{knowledge_id}.json"
  673. md_path = knowledge_dir / f"{knowledge_id}.md"
  674. file_path = None
  675. is_json = False
  676. if json_path.exists():
  677. file_path = json_path
  678. is_json = True
  679. elif md_path.exists():
  680. file_path = md_path
  681. is_json = False
  682. else:
  683. failed_items.append({"id": knowledge_id, "reason": "文件不存在"})
  684. continue
  685. # 读取并解析
  686. with open(file_path, "r", encoding="utf-8") as f:
  687. content = f.read()
  688. if is_json:
  689. data = json.loads(content)
  690. else:
  691. yaml_match = re.search(r'^---\n(.*?)\n---', content, re.DOTALL)
  692. if not yaml_match:
  693. failed_items.append({"id": knowledge_id, "reason": "格式错误"})
  694. continue
  695. data = yaml.safe_load(yaml_match.group(1))
  696. # 更新 metrics
  697. if is_effective:
  698. data["metrics"]["helpful"] = data.get("metrics", {}).get("helpful", 0) + 1
  699. # 如果有反馈建议,触发进化
  700. if feedback:
  701. old_content = data.get("content", "")
  702. evolved_content = await _evolve_knowledge_with_llm(old_content, feedback)
  703. data["content"] = evolved_content
  704. else:
  705. data["metrics"]["harmful"] = data.get("metrics", {}).get("harmful", 0) + 1
  706. data["updated_at"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
  707. # 保存
  708. if is_json:
  709. with open(file_path, "w", encoding="utf-8") as f:
  710. json.dump(data, f, ensure_ascii=False, indent=2)
  711. else:
  712. meta_str = yaml.dump(data, allow_unicode=True).strip()
  713. with open(file_path, "w", encoding="utf-8") as f:
  714. f.write(f"---\n{meta_str}\n---\n")
  715. success_count += 1
  716. except Exception as e:
  717. failed_items.append({"id": knowledge_id, "reason": str(e)})
  718. continue
  719. output_lines = [f"成功更新 {success_count} 条知识"]
  720. if failed_items:
  721. output_lines.append(f"\n失败 {len(failed_items)} 条:")
  722. for item in failed_items:
  723. output_lines.append(f" - {item['id']}: {item['reason']}")
  724. return ToolResult(
  725. title="✅ 批量更新完成",
  726. output="\n".join(output_lines),
  727. long_term_memory=f"批量更新知识: 成功 {success_count} 条,失败 {len(failed_items)} 条"
  728. )
  729. except Exception as e:
  730. logger.error(f"批量更新知识失败: {e}")
  731. return ToolResult(
  732. title="❌ 批量更新失败",
  733. output=f"错误: {str(e)}",
  734. error=str(e)
  735. )
  736. # ===== 知识库瘦身功能(类似经验机制)=====
@tool()
async def slim_knowledge(
    model: str = "anthropic/claude-sonnet-4.5",
    context: Optional[ToolContext] = None,
) -> ToolResult:
    """
    Slim down the knowledge base: ask a top-tier LLM to merge semantically
    similar knowledge entries into fewer, more general ones.

    The LLM receives a compact dump of every entry and returns the merged
    set in a line-oriented format, which is parsed below. Old files are
    then deleted and the merged entries rewritten as JSON.

    Args:
        model: Model to use (defaults to claude-sonnet-4.5).
        context: Tool context.

    Returns:
        ToolResult with a slimming report, or an error ToolResult.
    """
    try:
        knowledge_dir = Path(".cache/knowledge_atoms")
        if not knowledge_dir.exists():
            return ToolResult(
                title="📂 知识库不存在",
                output="知识库目录不存在,无需瘦身",
                long_term_memory="知识库瘦身: 目录不存在"
            )
        # Collect every knowledge file (JSON and legacy MD).
        json_files = list(knowledge_dir.glob("*.json"))
        md_files = list(knowledge_dir.glob("*.md"))
        files = json_files + md_files
        if len(files) < 2:
            # Nothing to merge with fewer than two entries.
            return ToolResult(
                title="📂 知识库过小",
                output=f"知识库仅有 {len(files)} 条,无需瘦身",
                long_term_memory=f"知识库瘦身: 仅有 {len(files)} 条"
            )
        # Parse every entry; remember its path/format for later deletion.
        parsed = []
        for file_path in files:
            try:
                with open(file_path, "r", encoding="utf-8") as f:
                    content = f.read()
                if file_path.suffix == ".json":
                    data = json.loads(content)
                else:
                    # Legacy format: YAML front matter between --- lines.
                    yaml_match = re.search(r'^---\n(.*?)\n---', content, re.DOTALL)
                    if not yaml_match:
                        continue
                    data = yaml.safe_load(yaml_match.group(1))
                parsed.append({
                    "file_path": file_path,
                    "data": data,
                    "is_json": file_path.suffix == ".json"
                })
            except Exception as e:
                logger.error(f"解析文件失败 {file_path}: {e}")
                continue
        if len(parsed) < 2:
            return ToolResult(
                title="📂 有效知识过少",
                output=f"有效知识仅有 {len(parsed)} 条,无需瘦身",
                long_term_memory=f"知识库瘦身: 有效知识 {len(parsed)} 条"
            )
        # Build the compact dump of all entries sent to the LLM
        # (content truncated to 200 chars per entry).
        entries_text = ""
        for p in parsed:
            data = p["data"]
            entries_text += f"[ID: {data.get('id')}] [Tags: {data.get('tags', {})}] "
            entries_text += f"[Metrics: {data.get('metrics', {})}] [Score: {data.get('eval', {}).get('score', 3)}]\n"
            entries_text += f"Scenario: {data.get('scenario', 'N/A')}\n"
            entries_text += f"Content: {data.get('content', '')[:200]}...\n\n"
        prompt = f"""你是一个 AI Agent 知识库管理员。以下是当前知识库的全部条目,请执行瘦身操作:
【任务】:
1. 识别语义高度相似或重复的知识,将它们合并为一条更精炼、更通用的知识。
2. 合并时保留 helpful 最高的那条的 ID 和 metrics(metrics 中 helpful/harmful 取各条之和)。
3. 对于独立的、无重复的知识,保持原样不动。
4. 保持原有的知识结构和格式。
【当前知识库】:
{entries_text}
【输出格式要求】:
严格按以下格式输出每条知识,条目之间用 === 分隔:
ID: <保留的id>
TAGS: <yaml格式的tags>
METRICS: <yaml格式的metrics>
SCORE: <评分>
SCENARIO: <场景描述>
CONTENT: <合并后的知识内容>
===
最后一行输出合并报告,格式:
REPORT: 原有 X 条,合并后 Y 条,精简了 Z 条。
禁止输出任何开场白或解释。"""
        print(f"\n[知识瘦身] 正在调用 {model} 分析 {len(parsed)} 条知识...")
        response = await openrouter_llm_call(
            messages=[{"role": "user", "content": prompt}],
            model=model
        )
        content = response.get("content", "").strip()
        if not content:
            return ToolResult(
                title="❌ 大模型返回为空",
                output="大模型返回为空,瘦身失败",
                error="大模型返回为空"
            )
        # Parse the LLM output: blocks separated by ===, one entry each,
        # plus an optional trailing REPORT: block.
        report_line = ""
        new_entries = []
        blocks = [b.strip() for b in content.split("===") if b.strip()]
        for block in blocks:
            if block.startswith("REPORT:"):
                report_line = block
                continue
            lines = block.split("\n")
            # Defaults for each field; current_field tracks which multi-line
            # field (scenario/content) continuation lines belong to.
            kid, tags, metrics, score, scenario, content_lines = None, {}, {}, 3, "", []
            current_field = None
            for line in lines:
                if line.startswith("ID:"):
                    kid = line[3:].strip()
                    current_field = None
                elif line.startswith("TAGS:"):
                    try:
                        tags = yaml.safe_load(line[5:].strip()) or {}
                    except Exception:
                        tags = {}
                    current_field = None
                elif line.startswith("METRICS:"):
                    try:
                        metrics = yaml.safe_load(line[8:].strip()) or {}
                    except Exception:
                        metrics = {"helpful": 0, "harmful": 0}
                    current_field = None
                elif line.startswith("SCORE:"):
                    try:
                        score = int(line[6:].strip())
                    except Exception:
                        score = 3
                    current_field = None
                elif line.startswith("SCENARIO:"):
                    scenario = line[9:].strip()
                    current_field = "scenario"
                elif line.startswith("CONTENT:"):
                    content_lines.append(line[8:].strip())
                    current_field = "content"
                elif current_field == "scenario":
                    # Continuation line of a multi-line scenario.
                    scenario += "\n" + line
                elif current_field == "content":
                    # Continuation line of a multi-line content body.
                    content_lines.append(line)
            # Only keep blocks that produced at least an ID and some content.
            if kid and content_lines:
                new_data = {
                    "id": kid,
                    "tags": tags,
                    "scenario": scenario,
                    "content": "\n".join(content_lines).strip(),
                    "metrics": metrics,
                    # eval history is reset for merged entries.
                    "eval": {
                        "score": score,
                        "helpful": 0,
                        "harmful": 0,
                        "helpful_history": [],
                        "harmful_history": []
                    },
                    "updated_at": datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                }
                new_entries.append(new_data)
        if not new_entries:
            # Parsing produced nothing usable — leave the knowledge base
            # untouched rather than deleting files.
            return ToolResult(
                title="❌ 解析失败",
                output="解析大模型输出失败,知识库未修改",
                error="解析失败"
            )
        # Delete the old files (best-effort; failures only logged).
        for p in parsed:
            try:
                p["file_path"].unlink()
            except Exception as e:
                logger.error(f"删除旧文件失败 {p['file_path']}: {e}")
        # Write the merged entries back, normalized to JSON format.
        for data in new_entries:
            file_path = knowledge_dir / f"{data['id']}.json"
            with open(file_path, "w", encoding="utf-8") as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
        result = f"瘦身完成:{len(parsed)} → {len(new_entries)} 条知识"
        if report_line:
            result += f"\n{report_line}"
        print(f"[知识瘦身] {result}")
        return ToolResult(
            title="✅ 知识库瘦身完成",
            output=result,
            long_term_memory=f"知识库瘦身: {len(parsed)} → {len(new_entries)} 条"
        )
    except Exception as e:
        logger.error(f"知识库瘦身失败: {e}")
        return ToolResult(
            title="❌ 瘦身失败",
            output=f"错误: {str(e)}",
            error=str(e)
        )