knowledge.py.backup 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183
  1. """
  2. 原子知识保存工具
  3. 提供便捷的 API 让 Agent 快速保存结构化的原子知识
  4. """
  5. import os
  6. import re
  7. import json
  8. import yaml
  9. import logging
  10. from datetime import datetime
  11. from pathlib import Path
  12. from typing import List, Dict, Optional, Any
  13. from agent.tools import tool, ToolResult, ToolContext
  14. from ...llm.openrouter import openrouter_llm_call
  15. logger = logging.getLogger(__name__)
  16. def _generate_knowledge_id() -> str:
  17. """生成知识原子 ID(带微秒和随机后缀避免冲突)"""
  18. import uuid
  19. timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
  20. random_suffix = uuid.uuid4().hex[:4]
  21. return f"knowledge-{timestamp}-{random_suffix}"
  22. def _format_yaml_list(items: List[str], indent: int = 2) -> str:
  23. """格式化 YAML 列表"""
  24. if not items:
  25. return "[]"
  26. indent_str = " " * indent
  27. return "\n" + "\n".join(f"{indent_str}- {item}" for item in items)
  28. @tool()
  29. async def save_knowledge(
  30. scenario: str,
  31. content: str,
  32. tags_type: List[str],
  33. urls: List[str] = None,
  34. agent_id: str = "research_agent",
  35. score: int = 3,
  36. trace_id: str = "",
  37. ) -> ToolResult:
  38. """
  39. 保存原子知识到本地文件(JSON 格式)
  40. Args:
  41. scenario: 任务描述(在什么情景下 + 要完成什么目标 + 得到能达成一个什么结果)
  42. content: 核心内容
  43. tags_type: 知识类型标签,可选:tool, usercase, definition, plan, strategy
  44. urls: 参考来源链接列表(论文/GitHub/博客等)
  45. agent_id: 执行此调研的 agent ID
  46. score: 初始评分 1-5(默认 3)
  47. trace_id: 当前 trace ID(可选)
  48. Returns:
  49. 保存结果
  50. """
  51. try:
  52. # 生成 ID
  53. knowledge_id = _generate_knowledge_id()
  54. # 准备目录
  55. knowledge_dir = Path(".cache/knowledge_atoms")
  56. knowledge_dir.mkdir(parents=True, exist_ok=True)
  57. # 构建文件路径(使用 .json 扩展名)
  58. file_path = knowledge_dir / f"{knowledge_id}.json"
  59. # 构建 JSON 数据结构
  60. knowledge_data = {
  61. "id": knowledge_id,
  62. "trace_id": trace_id or "N/A",
  63. "tags": {
  64. "type": tags_type
  65. },
  66. "scenario": scenario,
  67. "content": content,
  68. "trace": {
  69. "urls": urls or [],
  70. "agent_id": agent_id,
  71. "timestamp": datetime.now().isoformat()
  72. },
  73. "eval": {
  74. "score": score,
  75. "helpful": 0,
  76. "harmful": 0,
  77. "helpful_history": [],
  78. "harmful_history": []
  79. },
  80. "metrics": {
  81. "helpful": 1,
  82. "harmful": 0
  83. },
  84. "created_at": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
  85. }
  86. # 保存为 JSON 文件
  87. with open(file_path, "w", encoding="utf-8") as f:
  88. json.dump(knowledge_data, f, ensure_ascii=False, indent=2)
  89. return ToolResult(
  90. title="✅ 原子知识已保存",
  91. output=f"知识 ID: {knowledge_id}\n文件路径: {file_path}\n\n场景:\n{scenario[:100]}...",
  92. long_term_memory=f"保存原子知识: {knowledge_id} - {scenario[:50]}",
  93. metadata={"knowledge_id": knowledge_id, "file_path": str(file_path)}
  94. )
  95. except Exception as e:
  96. return ToolResult(
  97. title="❌ 保存失败",
  98. output=f"错误: {str(e)}",
  99. error=str(e)
  100. )
@tool()
async def update_knowledge(
    knowledge_id: str,
    add_helpful_case: Optional[Dict[str, str]] = None,
    add_harmful_case: Optional[Dict[str, str]] = None,
    update_score: Optional[int] = None,
    evolve_feedback: Optional[str] = None,
) -> ToolResult:
    """
    Update the evaluation feedback of an existing knowledge atom.

    Args:
        knowledge_id: Knowledge ID (e.g. research-20260302-001).
        add_helpful_case: Positive case to record {"case_id": ..., "scenario": ..., "result": ..., "timestamp": ...}.
        add_harmful_case: Negative case to record (same shape as above).
        update_score: New score (1-5).
        evolve_feedback: Evolution feedback; when given, the content is rewritten via LLM.

    Returns:
        ToolResult describing the update outcome.
    """
    try:
        # Locate the file — both JSON and legacy Markdown formats are supported.
        knowledge_dir = Path(".cache/knowledge_atoms")
        json_path = knowledge_dir / f"{knowledge_id}.json"
        md_path = knowledge_dir / f"{knowledge_id}.md"
        file_path = None
        if json_path.exists():
            file_path = json_path
            is_json = True
        elif md_path.exists():
            file_path = md_path
            is_json = False
        else:
            return ToolResult(
                title="❌ 文件不存在",
                output=f"未找到知识文件: {knowledge_id}",
                error="文件不存在"
            )
        # Read the current file content.
        with open(file_path, "r", encoding="utf-8") as f:
            content = f.read()
        # Parse: JSON body, or YAML frontmatter for legacy .md files.
        if is_json:
            data = json.loads(content)
        else:
            yaml_match = re.search(r'^---\n(.*?)\n---', content, re.DOTALL)
            if not yaml_match:
                return ToolResult(
                    title="❌ 格式错误",
                    output=f"无法解析知识文件格式: {file_path}",
                    error="格式错误"
                )
            data = yaml.safe_load(yaml_match.group(1))
        # Apply the requested updates, collecting a human-readable summary.
        # NOTE(review): the increments below assume "eval"/"metrics" keys exist
        # (true for files written by save_knowledge); older files may differ.
        updated = False
        summary = []
        if add_helpful_case:
            data["eval"]["helpful"] += 1
            data["eval"]["helpful_history"].append(add_helpful_case)
            data["metrics"]["helpful"] += 1
            summary.append(f"添加 helpful 案例: {add_helpful_case.get('case_id')}")
            updated = True
        if add_harmful_case:
            data["eval"]["harmful"] += 1
            data["eval"]["harmful_history"].append(add_harmful_case)
            data["metrics"]["harmful"] += 1
            summary.append(f"添加 harmful 案例: {add_harmful_case.get('case_id')}")
            updated = True
        if update_score is not None:
            data["eval"]["score"] = update_score
            summary.append(f"更新评分: {update_score}")
            updated = True
        # Experience evolution: rewrite the content via LLM based on feedback.
        if evolve_feedback:
            old_content = data.get("content", "")
            evolved_content = await _evolve_knowledge_with_llm(old_content, evolve_feedback)
            data["content"] = evolved_content
            data["metrics"]["helpful"] += 1
            summary.append(f"知识进化: 基于反馈重写内容")
            updated = True
        if not updated:
            return ToolResult(
                title="⚠️ 无更新",
                output="未指定任何更新内容",
                long_term_memory="尝试更新原子知识但未指定更新内容"
            )
        # Refresh the modification timestamp.
        data["updated_at"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        # Persist the update.
        if is_json:
            with open(file_path, "w", encoding="utf-8") as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
        else:
            # Regenerate the YAML frontmatter.
            # NOTE(review): only the frontmatter is written back — any Markdown
            # body after the closing "---" is discarded; confirm that is intended.
            meta_str = yaml.dump(data, allow_unicode=True).strip()
            with open(file_path, "w", encoding="utf-8") as f:
                f.write(f"---\n{meta_str}\n---\n")
        return ToolResult(
            title="✅ 原子知识已更新",
            output=f"知识 ID: {knowledge_id}\n文件路径: {file_path}\n\n更新内容:\n" + "\n".join(f"- {s}" for s in summary),
            long_term_memory=f"更新原子知识: {knowledge_id}"
        )
    except Exception as e:
        return ToolResult(
            title="❌ 更新失败",
            output=f"错误: {str(e)}",
            error=str(e)
        )
  209. @tool()
  210. async def list_knowledge(
  211. limit: int = 10,
  212. tags_type: Optional[List[str]] = None,
  213. ) -> ToolResult:
  214. """
  215. 列出已保存的原子知识
  216. Args:
  217. limit: 返回数量限制(默认 10)
  218. tags_type: 按类型过滤(可选)
  219. Returns:
  220. 知识列表
  221. """
  222. try:
  223. knowledge_dir = Path(".cache/knowledge_atoms")
  224. if not knowledge_dir.exists():
  225. return ToolResult(
  226. title="📂 知识库为空",
  227. output="还没有保存任何原子知识",
  228. long_term_memory="知识库为空"
  229. )
  230. # 获取所有文件
  231. files = sorted(knowledge_dir.glob("*.md"), key=lambda x: x.stat().st_mtime, reverse=True)
  232. if not files:
  233. return ToolResult(
  234. title="📂 知识库为空",
  235. output="还没有保存任何原子知识",
  236. long_term_memory="知识库为空"
  237. )
  238. # 读取并过滤
  239. results = []
  240. for file_path in files[:limit]:
  241. with open(file_path, "r", encoding="utf-8") as f:
  242. content = f.read()
  243. # 提取关键信息
  244. import re
  245. id_match = re.search(r"id: (.+)", content)
  246. scenario_match = re.search(r"scenario: \|\n (.+)", content)
  247. score_match = re.search(r"score: (\d+)", content)
  248. knowledge_id = id_match.group(1) if id_match else "unknown"
  249. scenario = scenario_match.group(1) if scenario_match else "N/A"
  250. score = score_match.group(1) if score_match else "N/A"
  251. results.append(f"- [{knowledge_id}] (⭐{score}) {scenario[:60]}...")
  252. output = f"共找到 {len(files)} 条原子知识,显示最近 {len(results)} 条:\n\n" + "\n".join(results)
  253. return ToolResult(
  254. title="📚 原子知识列表",
  255. output=output,
  256. long_term_memory=f"列出 {len(results)} 条原子知识"
  257. )
  258. except Exception as e:
  259. return ToolResult(
  260. title="❌ 列表失败",
  261. output=f"错误: {str(e)}",
  262. error=str(e)
  263. )
# ===== Semantic retrieval =====
async def _route_knowledge_by_llm(query_text: str, metadata_list: List[Dict], k: int = 5) -> List[str]:
    """
    Stage 1: semantic routing.

    Ask the LLM to shortlist up to 2*k semantically relevant knowledge IDs.

    NOTE(review): this function is defined twice in this module; the later
    definition shadows this one at import time. Consider removing one copy.
    """
    if not metadata_list:
        return []
    # Widen the candidate pool to 2*k; quality re-ranking trims it later.
    routing_k = k * 2
    routing_data = [
        {
            "id": m["id"],
            "tags": m["tags"],
            "scenario": m["scenario"][:100]  # only the first 100 chars
        } for m in metadata_list
    ]
    prompt = f"""
你是一个知识检索专家。根据用户的当前任务需求,从下列原子知识元数据中挑选出最相关的最多 {routing_k} 个知识 ID。
任务需求:"{query_text}"
可选知识列表:
{json.dumps(routing_data, ensure_ascii=False, indent=1)}
请直接输出 ID 列表,用逗号分隔(例如: knowledge-20260302-001, research-20260302-002)。若无相关项请输出 "None"。
"""
    try:
        print(f"\n[Step 1: 知识语义路由] 任务: '{query_text}' | 候选总数: {len(metadata_list)} | 目标提取数: {routing_k}")
        response = await openrouter_llm_call(
            messages=[{"role": "user", "content": prompt}],
            model="google/gemini-2.0-flash-001"
        )
        content = response.get("content", "").strip()
        # Keep only tokens that look like knowledge IDs; anything else
        # (including the literal "None") is dropped.
        selected_ids = [idx.strip() for idx in re.split(r'[,\s]+', content) if idx.strip().startswith(("knowledge-", "research-"))]
        print(f"[Step 1: 知识语义路由] LLM 初选 ID ({len(selected_ids)}个): {selected_ids}")
        return selected_ids
    except Exception as e:
        logger.error(f"LLM 知识路由失败: {e}")
        return []
  301. async def _evolve_knowledge_with_llm(old_content: str, feedback: str) -> str:
  302. """
  303. 使用 LLM 进行知识进化重写(类似经验进化机制)
  304. """
  305. prompt = f"""你是一个 AI Agent 知识库管理员。请根据反馈建议,对现有的知识内容进行重写进化。
  306. 【原知识内容】:
  307. {old_content}
  308. 【实战反馈建议】:
  309. {feedback}
  310. 【重写要求】:
  311. 1. 融合知识:将反馈中的避坑指南、新参数或修正后的选择逻辑融入原知识,使其更具通用性和准确性。
  312. 2. 保持结构:如果原内容有特定格式(如 Markdown、代码示例等),请保持该格式。
  313. 3. 语言:简洁直接,使用中文。
  314. 4. 禁止:严禁输出任何开场白、解释语或额外的 Markdown 标题,直接返回重写后的正文。
  315. """
  316. try:
  317. response = await openrouter_llm_call(
  318. messages=[{"role": "user", "content": prompt}],
  319. model="google/gemini-2.0-flash-001"
  320. )
  321. evolved_content = response.get("content", "").strip()
  322. # 简单安全校验:如果 LLM 返回太短或为空,回退到原内容+追加
  323. if len(evolved_content) < 5:
  324. raise ValueError("LLM output too short")
  325. return evolved_content
  326. except Exception as e:
  327. logger.warning(f"知识进化失败,采用追加模式回退: {e}")
  328. timestamp = datetime.now().strftime('%Y-%m-%d')
  329. return f"{old_content}\n\n---\n[Update {timestamp}]: {feedback}"
async def _route_knowledge_by_llm(query_text: str, metadata_list: List[Dict], k: int = 5) -> List[str]:
    """
    Stage 1: semantic routing.

    Ask the LLM to shortlist up to 2*k semantically relevant knowledge IDs.

    NOTE(review): duplicate definition — an identical copy appears earlier in
    this module; this one wins at import time. Consider removing one copy.
    """
    if not metadata_list:
        return []
    # Widen the candidate pool to 2*k; quality re-ranking trims it later.
    routing_k = k * 2
    routing_data = [
        {
            "id": m["id"],
            "tags": m["tags"],
            "scenario": m["scenario"][:100]  # only the first 100 chars
        } for m in metadata_list
    ]
    prompt = f"""
你是一个知识检索专家。根据用户的当前任务需求,从下列原子知识元数据中挑选出最相关的最多 {routing_k} 个知识 ID。
任务需求:"{query_text}"
可选知识列表:
{json.dumps(routing_data, ensure_ascii=False, indent=1)}
请直接输出 ID 列表,用逗号分隔(例如: knowledge-20260302-001, research-20260302-002)。若无相关项请输出 "None"。
"""
    try:
        print(f"\n[Step 1: 知识语义路由] 任务: '{query_text}' | 候选总数: {len(metadata_list)} | 目标提取数: {routing_k}")
        response = await openrouter_llm_call(
            messages=[{"role": "user", "content": prompt}],
            model="google/gemini-2.0-flash-001"
        )
        content = response.get("content", "").strip()
        # Keep only tokens that look like knowledge IDs; anything else
        # (including the literal "None") is dropped.
        selected_ids = [idx.strip() for idx in re.split(r'[,\s]+', content) if idx.strip().startswith(("knowledge-", "research-"))]
        print(f"[Step 1: 知识语义路由] LLM 初选 ID ({len(selected_ids)}个): {selected_ids}")
        return selected_ids
    except Exception as e:
        logger.error(f"LLM 知识路由失败: {e}")
        return []
async def _get_structured_knowledge(
    query_text: str,
    top_k: int = 5,
    min_score: int = 3,
    context: Optional[Any] = None,
    tags_filter: Optional[List[str]] = None
) -> List[Dict]:
    """
    Semantic retrieval over knowledge atoms (experiences included).

    Pipeline:
      1. Parse the knowledge store (both JSON and YAML-frontmatter files).
      2. Semantic routing: LLM shortlists 2*k candidate IDs.
      3. Quality re-ranking: score candidates and keep the final k.

    Args:
        query_text: Query text.
        top_k: Number of results to return.
        min_score: Minimum score filter.
        context: Context (kept for compatibility with the experience API; unused here).
        tags_filter: Tag filter (e.g. ["strategy"] returns only experiences).
    """
    knowledge_dir = Path(".cache/knowledge_atoms")
    if not knowledge_dir.exists():
        print(f"[Knowledge System] 警告: 知识库目录不存在 ({knowledge_dir})")
        return []
    # Both .json and legacy .md files are supported.
    json_files = list(knowledge_dir.glob("*.json"))
    md_files = list(knowledge_dir.glob("*.md"))
    files = json_files + md_files
    if not files:
        print(f"[Knowledge System] 警告: 知识库为空")
        return []
    # --- Stage 1: parse every knowledge file ---
    content_map = {}
    metadata_list = []
    for file_path in files:
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                content = f.read()
            # Choose the parser based on the file extension.
            if file_path.suffix == ".json":
                # JSON format.
                metadata = json.loads(content)
            else:
                # YAML frontmatter (legacy format).
                yaml_match = re.search(r'^---\n(.*?)\n---', content, re.DOTALL)
                if not yaml_match:
                    logger.warning(f"跳过无效文件: {file_path}")
                    continue
                metadata = yaml.safe_load(yaml_match.group(1))
            if not isinstance(metadata, dict):
                logger.warning(f"跳过损坏的知识文件: {file_path}")
                continue
            kid = metadata.get("id")
            if not kid:
                logger.warning(f"跳过缺少 id 的知识文件: {file_path}")
                continue
            # Extract scenario and content text.
            scenario = metadata.get("scenario", "").strip()
            content_text = metadata.get("content", "").strip()
            # Tag filtering.
            tags = metadata.get("tags", {})
            if tags_filter:
                # Keep the entry only if tags.type intersects the filter.
                tag_types = tags.get("type", [])
                if isinstance(tag_types, str):
                    tag_types = [tag_types]
                if not any(tag in tag_types for tag in tags_filter):
                    continue  # skip non-matching entries
            meta_item = {
                "id": kid,
                "tags": tags,
                "scenario": scenario,
                "score": metadata.get("eval", {}).get("score", 3),
                "helpful": metadata.get("metrics", {}).get("helpful", 0),
                "harmful": metadata.get("metrics", {}).get("harmful", 0),
            }
            metadata_list.append(meta_item)
            content_map[kid] = {
                "scenario": scenario,
                "content": content_text,
                "tags": tags,
                "score": meta_item["score"],
                "helpful": meta_item["helpful"],
                "harmful": meta_item["harmful"],
            }
        except Exception as e:
            logger.error(f"解析知识文件失败 {file_path}: {e}")
            continue
    if not metadata_list:
        print(f"[Knowledge System] 警告: 没有有效的知识条目")
        return []
    # --- Stage 2: semantic routing (shortlist 2*k candidates) ---
    candidate_ids = await _route_knowledge_by_llm(query_text, metadata_list, k=top_k)
    # --- Stage 3: quality re-ranking (pick the final k by score/feedback) ---
    print(f"[Step 2: 知识质量精排] 正在根据评分和反馈进行打分...")
    scored_items = []
    for kid in candidate_ids:
        if kid in content_map:
            item = content_map[kid]
            score = item["score"]
            helpful = item["helpful"]
            harmful = item["harmful"]
            # Composite quality: base score + helpful - harmful*2.
            quality_score = score + helpful - (harmful * 2.0)
            # Threshold: drop entries below min_score or with negative quality.
            if score < min_score or quality_score < 0:
                print(f" - 剔除低质量知识: {kid} (Score: {score}, Helpful: {helpful}, Harmful: {harmful})")
                continue
            scored_items.append({
                "id": kid,
                "scenario": item["scenario"],
                "content": item["content"],
                "tags": item["tags"],
                "score": score,
                "quality_score": quality_score,
                "metrics": {
                    "helpful": helpful,
                    "harmful": harmful
                }
            })
    # Sort by composite quality, best first.
    final_sorted = sorted(scored_items, key=lambda x: x["quality_score"], reverse=True)
    # Truncate to the final top_k.
    result = final_sorted[:top_k]
    print(f"[Step 2: 知识质量精排] 最终选定知识: {[it['id'] for it in result]}")
    print(f"[Knowledge System] 检索结束。\n")
    return result
  492. @tool()
  493. async def search_knowledge(
  494. query: str,
  495. top_k: int = 5,
  496. min_score: int = 3,
  497. tags_type: Optional[List[str]] = None,
  498. context: Optional[ToolContext] = None,
  499. ) -> ToolResult:
  500. """
  501. 语义检索原子知识库
  502. Args:
  503. query: 搜索查询(任务描述)
  504. top_k: 返回数量(默认 5)
  505. min_score: 最低评分过滤(默认 3)
  506. tags_type: 按类型过滤(tool/usercase/definition/plan)
  507. context: 工具上下文
  508. Returns:
  509. 相关知识列表
  510. """
  511. try:
  512. relevant_items = await _get_structured_knowledge(
  513. query_text=query,
  514. top_k=top_k,
  515. min_score=min_score
  516. )
  517. if not relevant_items:
  518. return ToolResult(
  519. title="🔍 未找到相关知识",
  520. output=f"查询: {query}\n\n知识库中暂无相关的高质量知识。建议进行调研。",
  521. long_term_memory=f"知识检索: 未找到相关知识 - {query[:50]}"
  522. )
  523. # 格式化输出
  524. output_lines = [f"查询: {query}\n", f"找到 {len(relevant_items)} 条相关知识:\n"]
  525. for idx, item in enumerate(relevant_items, 1):
  526. output_lines.append(f"\n### {idx}. [{item['id']}] (⭐ {item['score']})")
  527. output_lines.append(f"**场景**: {item['scenario'][:150]}...")
  528. output_lines.append(f"**内容**: {item['content'][:200]}...")
  529. return ToolResult(
  530. title="✅ 知识检索成功",
  531. output="\n".join(output_lines),
  532. long_term_memory=f"知识检索: 找到 {len(relevant_items)} 条相关知识 - {query[:50]}",
  533. metadata={
  534. "count": len(relevant_items),
  535. "knowledge_ids": [item["id"] for item in relevant_items],
  536. "items": relevant_items
  537. }
  538. )
  539. except Exception as e:
  540. logger.error(f"知识检索失败: {e}")
  541. return ToolResult(
  542. title="❌ 检索失败",
  543. output=f"错误: {str(e)}",
  544. error=str(e)
  545. )
  546. @tool(description="通过两阶段检索获取最相关的历史经验(strategy 标签的知识)")
  547. async def get_experience(
  548. query: str,
  549. k: int = 3,
  550. context: Optional[ToolContext] = None,
  551. ) -> ToolResult:
  552. """
  553. 检索历史经验(兼容旧接口,实际调用 search_knowledge 并过滤 strategy 标签)
  554. Args:
  555. query: 搜索查询(任务描述)
  556. k: 返回数量(默认 3)
  557. context: 工具上下文
  558. Returns:
  559. 相关经验列表
  560. """
  561. try:
  562. relevant_items = await _get_structured_knowledge(
  563. query_text=query,
  564. top_k=k,
  565. min_score=1, # 经验的评分门槛较低
  566. context=context,
  567. tags_filter=["strategy"] # 只返回经验
  568. )
  569. if not relevant_items:
  570. return ToolResult(
  571. title="🔍 未找到相关经验",
  572. output=f"查询: {query}\n\n经验库中暂无相关的经验。",
  573. long_term_memory=f"经验检索: 未找到相关经验 - {query[:50]}",
  574. metadata={"items": [], "count": 0}
  575. )
  576. # 格式化输出(兼容旧格式)
  577. output_lines = [f"查询: {query}\n", f"找到 {len(relevant_items)} 条相关经验:\n"]
  578. for idx, item in enumerate(relevant_items, 1):
  579. output_lines.append(f"\n### {idx}. [{item['id']}]")
  580. output_lines.append(f"{item['content'][:300]}...")
  581. return ToolResult(
  582. title="✅ 经验检索成功",
  583. output="\n".join(output_lines),
  584. long_term_memory=f"经验检索: 找到 {len(relevant_items)} 条相关经验 - {query[:50]}",
  585. metadata={
  586. "items": relevant_items,
  587. "count": len(relevant_items)
  588. }
  589. )
  590. except Exception as e:
  591. logger.error(f"经验检索失败: {e}")
  592. return ToolResult(
  593. title="❌ 检索失败",
  594. output=f"错误: {str(e)}",
  595. error=str(e)
  596. )
# ===== Batch update (mirrors the experience mechanism) =====
  598. async def _batch_update_knowledge(
  599. update_map: Dict[str, Dict[str, Any]],
  600. context: Optional[Any] = None
  601. ) -> int:
  602. """
  603. 内部函数:批量更新知识(兼容 experience 接口)
  604. Args:
  605. update_map: 更新映射 {knowledge_id: {"action": "helpful/harmful/evolve", "feedback": "..."}}
  606. context: 上下文(兼容 experience 接口)
  607. Returns:
  608. 成功更新的数量
  609. """
  610. if not update_map:
  611. return 0
  612. knowledge_dir = Path(".cache/knowledge_atoms")
  613. if not knowledge_dir.exists():
  614. return 0
  615. success_count = 0
  616. evolution_tasks = []
  617. evolution_registry = {} # task_idx -> (file_path, data)
  618. for knowledge_id, instr in update_map.items():
  619. try:
  620. # 查找文件
  621. json_path = knowledge_dir / f"{knowledge_id}.json"
  622. md_path = knowledge_dir / f"{knowledge_id}.md"
  623. file_path = None
  624. is_json = False
  625. if json_path.exists():
  626. file_path = json_path
  627. is_json = True
  628. elif md_path.exists():
  629. file_path = md_path
  630. is_json = False
  631. else:
  632. continue
  633. # 读取并解析
  634. with open(file_path, "r", encoding="utf-8") as f:
  635. content = f.read()
  636. if is_json:
  637. data = json.loads(content)
  638. else:
  639. yaml_match = re.search(r'^---\n(.*?)\n---', content, re.DOTALL)
  640. if not yaml_match:
  641. continue
  642. data = yaml.safe_load(yaml_match.group(1))
  643. # 更新 metrics
  644. action = instr.get("action")
  645. feedback = instr.get("feedback", "")
  646. # 处理 mixed 中间态
  647. if action == "mixed":
  648. data["metrics"]["helpful"] = data.get("metrics", {}).get("helpful", 0) + 1
  649. action = "evolve"
  650. if action == "helpful":
  651. data["metrics"]["helpful"] = data.get("metrics", {}).get("helpful", 0) + 1
  652. elif action == "harmful":
  653. data["metrics"]["harmful"] = data.get("metrics", {}).get("harmful", 0) + 1
  654. elif action == "evolve" and feedback:
  655. # 注册进化任务
  656. old_content = data.get("content", "")
  657. task = _evolve_knowledge_with_llm(old_content, feedback)
  658. evolution_tasks.append(task)
  659. evolution_registry[len(evolution_tasks) - 1] = (file_path, data, is_json)
  660. data["metrics"]["helpful"] = data.get("metrics", {}).get("helpful", 0) + 1
  661. data["updated_at"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
  662. # 如果不需要进化,直接保存
  663. if action != "evolve" or not feedback:
  664. if is_json:
  665. with open(file_path, "w", encoding="utf-8") as f:
  666. json.dump(data, f, ensure_ascii=False, indent=2)
  667. else:
  668. meta_str = yaml.dump(data, allow_unicode=True).strip()
  669. with open(file_path, "w", encoding="utf-8") as f:
  670. f.write(f"---\n{meta_str}\n---\n")
  671. success_count += 1
  672. except Exception as e:
  673. logger.error(f"更新知识失败 {knowledge_id}: {e}")
  674. continue
  675. # 并发进化
  676. if evolution_tasks:
  677. import asyncio
  678. print(f"🧬 并发处理 {len(evolution_tasks)} 条知识进化...")
  679. evolved_results = await asyncio.gather(*evolution_tasks)
  680. # 回填进化结果
  681. for task_idx, (file_path, data, is_json) in evolution_registry.items():
  682. data["content"] = evolved_results[task_idx].strip()
  683. if is_json:
  684. with open(file_path, "w", encoding="utf-8") as f:
  685. json.dump(data, f, ensure_ascii=False, indent=2)
  686. else:
  687. meta_str = yaml.dump(data, allow_unicode=True).strip()
  688. with open(file_path, "w", encoding="utf-8") as f:
  689. f.write(f"---\n{meta_str}\n---\n")
  690. success_count += 1
  691. return success_count
  692. @tool()
  693. async def batch_update_knowledge(
  694. feedback_list: List[Dict[str, Any]],
  695. context: Optional[ToolContext] = None,
  696. ) -> ToolResult:
  697. """
  698. 批量反馈知识的有效性(类似经验机制)
  699. Args:
  700. feedback_list: 评价列表,每个元素包含:
  701. - knowledge_id: (str) 知识 ID
  702. - is_effective: (bool) 是否有效
  703. - feedback: (str, optional) 改进建议,若有效且有建议则触发知识进化
  704. Returns:
  705. 批量更新结果
  706. """
  707. try:
  708. if not feedback_list:
  709. return ToolResult(
  710. title="⚠️ 反馈列表为空",
  711. output="未提供任何反馈",
  712. long_term_memory="批量更新知识: 反馈列表为空"
  713. )
  714. knowledge_dir = Path(".cache/knowledge_atoms")
  715. if not knowledge_dir.exists():
  716. return ToolResult(
  717. title="❌ 知识库不存在",
  718. output="知识库目录不存在",
  719. error="知识库不存在"
  720. )
  721. success_count = 0
  722. failed_items = []
  723. for item in feedback_list:
  724. knowledge_id = item.get("knowledge_id")
  725. is_effective = item.get("is_effective")
  726. feedback = item.get("feedback", "")
  727. if not knowledge_id:
  728. failed_items.append({"id": "unknown", "reason": "缺少 knowledge_id"})
  729. continue
  730. try:
  731. # 查找文件
  732. json_path = knowledge_dir / f"{knowledge_id}.json"
  733. md_path = knowledge_dir / f"{knowledge_id}.md"
  734. file_path = None
  735. is_json = False
  736. if json_path.exists():
  737. file_path = json_path
  738. is_json = True
  739. elif md_path.exists():
  740. file_path = md_path
  741. is_json = False
  742. else:
  743. failed_items.append({"id": knowledge_id, "reason": "文件不存在"})
  744. continue
  745. # 读取并解析
  746. with open(file_path, "r", encoding="utf-8") as f:
  747. content = f.read()
  748. if is_json:
  749. data = json.loads(content)
  750. else:
  751. yaml_match = re.search(r'^---\n(.*?)\n---', content, re.DOTALL)
  752. if not yaml_match:
  753. failed_items.append({"id": knowledge_id, "reason": "格式错误"})
  754. continue
  755. data = yaml.safe_load(yaml_match.group(1))
  756. # 更新 metrics
  757. if is_effective:
  758. data["metrics"]["helpful"] = data.get("metrics", {}).get("helpful", 0) + 1
  759. # 如果有反馈建议,触发进化
  760. if feedback:
  761. old_content = data.get("content", "")
  762. evolved_content = await _evolve_knowledge_with_llm(old_content, feedback)
  763. data["content"] = evolved_content
  764. else:
  765. data["metrics"]["harmful"] = data.get("metrics", {}).get("harmful", 0) + 1
  766. data["updated_at"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
  767. # 保存
  768. if is_json:
  769. with open(file_path, "w", encoding="utf-8") as f:
  770. json.dump(data, f, ensure_ascii=False, indent=2)
  771. else:
  772. meta_str = yaml.dump(data, allow_unicode=True).strip()
  773. with open(file_path, "w", encoding="utf-8") as f:
  774. f.write(f"---\n{meta_str}\n---\n")
  775. success_count += 1
  776. except Exception as e:
  777. failed_items.append({"id": knowledge_id, "reason": str(e)})
  778. continue
  779. output_lines = [f"成功更新 {success_count} 条知识"]
  780. if failed_items:
  781. output_lines.append(f"\n失败 {len(failed_items)} 条:")
  782. for item in failed_items:
  783. output_lines.append(f" - {item['id']}: {item['reason']}")
  784. return ToolResult(
  785. title="✅ 批量更新完成",
  786. output="\n".join(output_lines),
  787. long_term_memory=f"批量更新知识: 成功 {success_count} 条,失败 {len(failed_items)} 条"
  788. )
  789. except Exception as e:
  790. logger.error(f"批量更新知识失败: {e}")
  791. return ToolResult(
  792. title="❌ 批量更新失败",
  793. output=f"错误: {str(e)}",
  794. error=str(e)
  795. )
# ===== Knowledge-base slimming (mirrors the experience mechanism) =====
  797. @tool()
  798. async def slim_knowledge(
  799. model: str = "anthropic/claude-sonnet-4.5",
  800. context: Optional[ToolContext] = None,
  801. ) -> ToolResult:
  802. """
  803. 知识库瘦身:调用顶级大模型,将知识库中语义相似的知识合并精简
  804. Args:
  805. model: 使用的模型(默认 claude-sonnet-4.5)
  806. context: 工具上下文
  807. Returns:
  808. 瘦身结果报告
  809. """
  810. try:
  811. knowledge_dir = Path(".cache/knowledge_atoms")
  812. if not knowledge_dir.exists():
  813. return ToolResult(
  814. title="📂 知识库不存在",
  815. output="知识库目录不存在,无需瘦身",
  816. long_term_memory="知识库瘦身: 目录不存在"
  817. )
  818. # 获取所有文件
  819. json_files = list(knowledge_dir.glob("*.json"))
  820. md_files = list(knowledge_dir.glob("*.md"))
  821. files = json_files + md_files
  822. if len(files) < 2:
  823. return ToolResult(
  824. title="📂 知识库过小",
  825. output=f"知识库仅有 {len(files)} 条,无需瘦身",
  826. long_term_memory=f"知识库瘦身: 仅有 {len(files)} 条"
  827. )
  828. # 解析所有知识
  829. parsed = []
  830. for file_path in files:
  831. try:
  832. with open(file_path, "r", encoding="utf-8") as f:
  833. content = f.read()
  834. if file_path.suffix == ".json":
  835. data = json.loads(content)
  836. else:
  837. yaml_match = re.search(r'^---\n(.*?)\n---', content, re.DOTALL)
  838. if not yaml_match:
  839. continue
  840. data = yaml.safe_load(yaml_match.group(1))
  841. parsed.append({
  842. "file_path": file_path,
  843. "data": data,
  844. "is_json": file_path.suffix == ".json"
  845. })
  846. except Exception as e:
  847. logger.error(f"解析文件失败 {file_path}: {e}")
  848. continue
  849. if len(parsed) < 2:
  850. return ToolResult(
  851. title="📂 有效知识过少",
  852. output=f"有效知识仅有 {len(parsed)} 条,无需瘦身",
  853. long_term_memory=f"知识库瘦身: 有效知识 {len(parsed)} 条"
  854. )
  855. # 构造发给大模型的内容
  856. entries_text = ""
  857. for p in parsed:
  858. data = p["data"]
  859. entries_text += f"[ID: {data.get('id')}] [Tags: {data.get('tags', {})}] "
  860. entries_text += f"[Metrics: {data.get('metrics', {})}] [Score: {data.get('eval', {}).get('score', 3)}]\n"
  861. entries_text += f"Scenario: {data.get('scenario', 'N/A')}\n"
  862. entries_text += f"Content: {data.get('content', '')[:200]}...\n\n"
  863. prompt = f"""你是一个 AI Agent 知识库管理员。以下是当前知识库的全部条目,请执行瘦身操作:
  864. 【任务】:
  865. 1. 识别语义高度相似或重复的知识,将它们合并为一条更精炼、更通用的知识。
  866. 2. 合并时保留 helpful 最高的那条的 ID 和 metrics(metrics 中 helpful/harmful 取各条之和)。
  867. 3. 对于独立的、无重复的知识,保持原样不动。
  868. 4. 保持原有的知识结构和格式。
  869. 【当前知识库】:
  870. {entries_text}
  871. 【输出格式要求】:
  872. 严格按以下格式输出每条知识,条目之间用 === 分隔:
  873. ID: <保留的id>
  874. TAGS: <yaml格式的tags>
  875. METRICS: <yaml格式的metrics>
  876. SCORE: <评分>
  877. SCENARIO: <场景描述>
  878. CONTENT: <合并后的知识内容>
  879. ===
  880. 最后一行输出合并报告,格式:
  881. REPORT: 原有 X 条,合并后 Y 条,精简了 Z 条。
  882. 禁止输出任何开场白或解释。"""
  883. print(f"\n[知识瘦身] 正在调用 {model} 分析 {len(parsed)} 条知识...")
  884. response = await openrouter_llm_call(
  885. messages=[{"role": "user", "content": prompt}],
  886. model=model
  887. )
  888. content = response.get("content", "").strip()
  889. if not content:
  890. return ToolResult(
  891. title="❌ 大模型返回为空",
  892. output="大模型返回为空,瘦身失败",
  893. error="大模型返回为空"
  894. )
  895. # 解析大模型输出
  896. report_line = ""
  897. new_entries = []
  898. blocks = [b.strip() for b in content.split("===") if b.strip()]
  899. for block in blocks:
  900. if block.startswith("REPORT:"):
  901. report_line = block
  902. continue
  903. lines = block.split("\n")
  904. kid, tags, metrics, score, scenario, content_lines = None, {}, {}, 3, "", []
  905. current_field = None
  906. for line in lines:
  907. if line.startswith("ID:"):
  908. kid = line[3:].strip()
  909. current_field = None
  910. elif line.startswith("TAGS:"):
  911. try:
  912. tags = yaml.safe_load(line[5:].strip()) or {}
  913. except Exception:
  914. tags = {}
  915. current_field = None
  916. elif line.startswith("METRICS:"):
  917. try:
  918. metrics = yaml.safe_load(line[8:].strip()) or {}
  919. except Exception:
  920. metrics = {"helpful": 0, "harmful": 0}
  921. current_field = None
  922. elif line.startswith("SCORE:"):
  923. try:
  924. score = int(line[6:].strip())
  925. except Exception:
  926. score = 3
  927. current_field = None
  928. elif line.startswith("SCENARIO:"):
  929. scenario = line[9:].strip()
  930. current_field = "scenario"
  931. elif line.startswith("CONTENT:"):
  932. content_lines.append(line[8:].strip())
  933. current_field = "content"
  934. elif current_field == "scenario":
  935. scenario += "\n" + line
  936. elif current_field == "content":
  937. content_lines.append(line)
  938. if kid and content_lines:
  939. new_data = {
  940. "id": kid,
  941. "tags": tags,
  942. "scenario": scenario,
  943. "content": "\n".join(content_lines).strip(),
  944. "metrics": metrics,
  945. "eval": {
  946. "score": score,
  947. "helpful": 0,
  948. "harmful": 0,
  949. "helpful_history": [],
  950. "harmful_history": []
  951. },
  952. "updated_at": datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
  953. }
  954. new_entries.append(new_data)
  955. if not new_entries:
  956. return ToolResult(
  957. title="❌ 解析失败",
  958. output="解析大模型输出失败,知识库未修改",
  959. error="解析失败"
  960. )
  961. # 删除旧文件
  962. for p in parsed:
  963. try:
  964. p["file_path"].unlink()
  965. except Exception as e:
  966. logger.error(f"删除旧文件失败 {p['file_path']}: {e}")
  967. # 写入新文件(统一使用 JSON 格式)
  968. for data in new_entries:
  969. file_path = knowledge_dir / f"{data['id']}.json"
  970. with open(file_path, "w", encoding="utf-8") as f:
  971. json.dump(data, f, ensure_ascii=False, indent=2)
  972. result = f"瘦身完成:{len(parsed)} → {len(new_entries)} 条知识"
  973. if report_line:
  974. result += f"\n{report_line}"
  975. print(f"[知识瘦身] {result}")
  976. return ToolResult(
  977. title="✅ 知识库瘦身完成",
  978. output=result,
  979. long_term_memory=f"知识库瘦身: {len(parsed)} → {len(new_entries)} 条"
  980. )
  981. except Exception as e:
  982. logger.error(f"知识库瘦身失败: {e}")
  983. return ToolResult(
  984. title="❌ 瘦身失败",
  985. output=f"错误: {str(e)}",
  986. error=str(e)
  987. )