  1. """
  2. KnowHub Server
  3. Agent 工具使用经验的共享平台。
  4. FastAPI + SQLite,单文件部署。
  5. """
  6. import os
  7. import re
  8. import json
  9. import sqlite3
  10. import asyncio
  11. from contextlib import asynccontextmanager
  12. from datetime import datetime, timezone
  13. from typing import Optional
  14. from pathlib import Path
  15. from fastapi import FastAPI, HTTPException, Query
  16. from pydantic import BaseModel, Field
  17. # 导入 LLM 调用(需要 agent 模块在 Python path 中)
  18. import sys
  19. sys.path.insert(0, str(Path(__file__).parent.parent))
  20. from agent.llm.openrouter import openrouter_llm_call
# --- Branding / storage configuration (all overridable via environment) ---
BRAND_NAME = os.getenv("BRAND_NAME", "KnowHub")            # service display name (FastAPI title)
BRAND_API_ENV = os.getenv("BRAND_API_ENV", "KNOWHUB_API")  # name of the env var holding the API key
BRAND_DB = os.getenv("BRAND_DB", "knowhub.db")             # SQLite database filename
# The database lives next to this file, keeping deployment to one directory.
DB_PATH = Path(__file__).parent / BRAND_DB
  25. # --- 数据库 ---
  26. def get_db() -> sqlite3.Connection:
  27. conn = sqlite3.connect(str(DB_PATH))
  28. conn.row_factory = sqlite3.Row
  29. conn.execute("PRAGMA journal_mode=WAL")
  30. return conn
  31. def init_db():
  32. conn = get_db()
  33. conn.execute("""
  34. CREATE TABLE IF NOT EXISTS experiences (
  35. id INTEGER PRIMARY KEY AUTOINCREMENT,
  36. name TEXT NOT NULL,
  37. url TEXT DEFAULT '',
  38. category TEXT DEFAULT '',
  39. task TEXT NOT NULL,
  40. score INTEGER CHECK(score BETWEEN 1 AND 5),
  41. outcome TEXT DEFAULT '',
  42. tips TEXT DEFAULT '',
  43. content_id TEXT DEFAULT '',
  44. submitted_by TEXT DEFAULT '',
  45. created_at TEXT NOT NULL
  46. )
  47. """)
  48. conn.execute("CREATE INDEX IF NOT EXISTS idx_name ON experiences(name)")
  49. conn.execute("""
  50. CREATE TABLE IF NOT EXISTS contents (
  51. id TEXT PRIMARY KEY,
  52. title TEXT DEFAULT '',
  53. body TEXT NOT NULL,
  54. sort_order INTEGER DEFAULT 0,
  55. submitted_by TEXT DEFAULT '',
  56. created_at TEXT NOT NULL
  57. )
  58. """)
  59. conn.execute("""
  60. CREATE TABLE IF NOT EXISTS knowledge (
  61. id TEXT PRIMARY KEY,
  62. message_id TEXT DEFAULT '',
  63. tags_type TEXT NOT NULL,
  64. scenario TEXT NOT NULL,
  65. content TEXT NOT NULL,
  66. source_urls TEXT DEFAULT '',
  67. source_agent_id TEXT DEFAULT '',
  68. source_timestamp TEXT NOT NULL,
  69. eval_score INTEGER DEFAULT 3 CHECK(eval_score BETWEEN 1 AND 5),
  70. eval_helpful INTEGER DEFAULT 0,
  71. eval_harmful INTEGER DEFAULT 0,
  72. eval_helpful_history TEXT DEFAULT '[]',
  73. eval_harmful_history TEXT DEFAULT '[]',
  74. metrics_helpful INTEGER DEFAULT 1,
  75. metrics_harmful INTEGER DEFAULT 0,
  76. created_at TEXT NOT NULL,
  77. updated_at TEXT DEFAULT ''
  78. )
  79. """)
  80. conn.execute("CREATE INDEX IF NOT EXISTS idx_knowledge_tags ON knowledge(tags_type)")
  81. conn.execute("CREATE INDEX IF NOT EXISTS idx_knowledge_scenario ON knowledge(scenario)")
  82. conn.commit()
  83. conn.close()
  84. # --- Models ---
class ExperienceIn(BaseModel):
    """Payload for submitting one tool-usage experience."""
    name: str                       # resource/tool name (grouping key)
    url: str = ""
    category: str = ""
    task: str                       # what the tool was used for
    score: int = Field(ge=1, le=5)  # 1-5 rating
    outcome: str = ""
    tips: str = ""
    content_id: str = ""            # optional link into the contents table
    submitted_by: str = ""
class ExperienceOut(BaseModel):
    """One experience entry as returned by the search/detail endpoints."""
    task: str
    score: int
    outcome: str
    tips: str
    content_id: str
    submitted_by: str
    created_at: str  # ISO-8601 UTC timestamp
class ResourceResult(BaseModel):
    """Search hit: one resource with its matching experiences aggregated."""
    name: str
    url: str
    relevant_experiences: list[ExperienceOut]
    avg_score: float        # mean score, rounded to one decimal
    experience_count: int
class SearchResponse(BaseModel):
    """Response envelope for /api/search."""
    results: list[ResourceResult]
class ResourceDetailResponse(BaseModel):
    """Response for /api/resource/{name}: all experiences for one resource."""
    name: str
    url: str
    category: str
    avg_score: float        # mean score, rounded to one decimal
    experience_count: int
    experiences: list[ExperienceOut]
class ContentIn(BaseModel):
    """Payload for creating/replacing a content page (upsert keyed on id)."""
    id: str                 # hierarchical id; "/" separates levels (see get_content)
    title: str = ""
    body: str
    sort_order: int = 0     # ordering among siblings
    submitted_by: str = ""
  124. # Knowledge Models
  125. class KnowledgeIn(BaseModel):
  126. scenario: str
  127. content: str
  128. tags_type: list[str]
  129. urls: list[str] = []
  130. agent_id: str = "research_agent"
  131. score: int = Field(default=3, ge=1, le=5)
  132. message_id: str = ""
class KnowledgeOut(BaseModel):
    """Full knowledge entry shape (mirrors the /api/knowledge responses)."""
    id: str
    message_id: str
    tags: dict       # {"type": [...]}
    scenario: str
    content: str
    source: dict     # {"urls": [...], "agent_id": ..., "timestamp": ...}
    eval: dict       # score / helpful / harmful (+ histories)
    metrics: dict    # helpful / harmful counters
    created_at: str
    updated_at: str
class KnowledgeUpdateIn(BaseModel):
    """Partial update: any combination of the fields may be supplied."""
    add_helpful_case: Optional[dict] = None
    add_harmful_case: Optional[dict] = None
    update_score: Optional[int] = Field(default=None, ge=1, le=5)
    evolve_feedback: Optional[str] = None  # triggers LLM-based content rewrite
class KnowledgeBatchUpdateIn(BaseModel):
    """Batch effectiveness feedback; items carry knowledge_id / is_effective / feedback."""
    feedback_list: list[dict]
class KnowledgeSearchResponse(BaseModel):
    """Response envelope for knowledge search (results plus their count)."""
    results: list[dict]
    count: int
class ContentNode(BaseModel):
    """Lightweight (id, title) reference used for navigation links."""
    id: str
    title: str
class ContentOut(BaseModel):
    """A content page plus its navigation context."""
    id: str
    title: str
    body: str
    toc: Optional[ContentNode] = None    # root node of the hierarchy (non-root pages only)
    children: list[ContentNode]          # descendant pages
    prev: Optional[ContentNode] = None   # previous sibling by sort_order
    next: Optional[ContentNode] = None   # next sibling by sort_order
# --- App ---
@asynccontextmanager
async def lifespan(app: FastAPI):
    # Make sure the schema exists before the server accepts any request.
    init_db()
    yield
app = FastAPI(title=BRAND_NAME, lifespan=lifespan)
  171. def _search_rows(conn: sqlite3.Connection, q: str, category: Optional[str]) -> list[sqlite3.Row]:
  172. """LIKE 搜索,拆词后 AND 连接,匹配 task + tips + outcome + name"""
  173. terms = q.split()
  174. if not terms:
  175. return []
  176. conditions = []
  177. params: list[str] = []
  178. for term in terms:
  179. like = f"%{term}%"
  180. conditions.append(
  181. "(task LIKE ? OR tips LIKE ? OR outcome LIKE ? OR name LIKE ?)"
  182. )
  183. params.extend([like, like, like, like])
  184. if category:
  185. conditions.append("category = ?")
  186. params.append(category)
  187. sql = (
  188. "SELECT name, url, category, task, score, outcome, tips, content_id, "
  189. "submitted_by, created_at FROM experiences WHERE "
  190. + " AND ".join(conditions)
  191. + " ORDER BY created_at DESC"
  192. )
  193. return conn.execute(sql, params).fetchall()
  194. def _group_by_resource(rows: list[sqlite3.Row], limit: int) -> list[ResourceResult]:
  195. """按 name 分组并聚合"""
  196. groups: dict[str, list[sqlite3.Row]] = {}
  197. for row in rows:
  198. name = row["name"]
  199. if name not in groups:
  200. groups[name] = []
  201. groups[name].append(row)
  202. results = []
  203. for resource_name, resource_rows in groups.items():
  204. scores = [r["score"] for r in resource_rows]
  205. avg = sum(scores) / len(scores)
  206. results.append(ResourceResult(
  207. name=resource_name,
  208. url=resource_rows[0]["url"],
  209. relevant_experiences=[
  210. ExperienceOut(
  211. task=r["task"],
  212. score=r["score"],
  213. outcome=r["outcome"],
  214. tips=r["tips"],
  215. content_id=r["content_id"],
  216. submitted_by=r["submitted_by"],
  217. created_at=r["created_at"],
  218. )
  219. for r in resource_rows
  220. ],
  221. avg_score=round(avg, 1),
  222. experience_count=len(resource_rows),
  223. ))
  224. results.sort(key=lambda r: r.avg_score * r.experience_count, reverse=True)
  225. return results[:limit]
  226. @app.get("/api/search", response_model=SearchResponse)
  227. def search_experiences(
  228. q: str = Query(..., min_length=1),
  229. category: Optional[str] = None,
  230. limit: int = Query(default=10, ge=1, le=50),
  231. ):
  232. conn = get_db()
  233. try:
  234. rows = _search_rows(conn, q, category)
  235. return SearchResponse(results=_group_by_resource(rows, limit))
  236. finally:
  237. conn.close()
  238. @app.post("/api/experience", status_code=201)
  239. def submit_experience(exp: ExperienceIn):
  240. conn = get_db()
  241. try:
  242. now = datetime.now(timezone.utc).isoformat()
  243. conn.execute(
  244. "INSERT INTO experiences"
  245. "(name, url, category, task, score, outcome, tips, content_id, submitted_by, created_at)"
  246. " VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
  247. (exp.name, exp.url, exp.category, exp.task,
  248. exp.score, exp.outcome, exp.tips, exp.content_id, exp.submitted_by, now),
  249. )
  250. conn.commit()
  251. return {"status": "ok"}
  252. finally:
  253. conn.close()
  254. @app.get("/api/resource/{name}", response_model=ResourceDetailResponse)
  255. def get_resource_experiences(name: str):
  256. conn = get_db()
  257. try:
  258. rows = conn.execute(
  259. "SELECT name, url, category, task, score, outcome, tips, content_id, "
  260. "submitted_by, created_at FROM experiences "
  261. "WHERE name = ? ORDER BY created_at DESC",
  262. (name,),
  263. ).fetchall()
  264. if not rows:
  265. raise HTTPException(status_code=404, detail=f"No experiences found for resource: {name}")
  266. scores = [r["score"] for r in rows]
  267. avg = sum(scores) / len(scores)
  268. return ResourceDetailResponse(
  269. name=name,
  270. url=rows[0]["url"],
  271. category=rows[0]["category"],
  272. avg_score=round(avg, 1),
  273. experience_count=len(rows),
  274. experiences=[
  275. ExperienceOut(
  276. task=r["task"],
  277. score=r["score"],
  278. outcome=r["outcome"],
  279. tips=r["tips"],
  280. content_id=r["content_id"],
  281. submitted_by=r["submitted_by"],
  282. created_at=r["created_at"],
  283. )
  284. for r in rows
  285. ],
  286. )
  287. finally:
  288. conn.close()
  289. @app.post("/api/content", status_code=201)
  290. def submit_content(content: ContentIn):
  291. conn = get_db()
  292. try:
  293. now = datetime.now(timezone.utc).isoformat()
  294. conn.execute(
  295. "INSERT OR REPLACE INTO contents"
  296. "(id, title, body, sort_order, submitted_by, created_at)"
  297. " VALUES (?, ?, ?, ?, ?, ?)",
  298. (content.id, content.title, content.body, content.sort_order, content.submitted_by, now),
  299. )
  300. conn.commit()
  301. return {"status": "ok"}
  302. finally:
  303. conn.close()
  304. @app.get("/api/content/{content_id:path}", response_model=ContentOut)
  305. def get_content(content_id: str):
  306. conn = get_db()
  307. try:
  308. row = conn.execute(
  309. "SELECT id, title, body, sort_order FROM contents WHERE id = ?",
  310. (content_id,),
  311. ).fetchone()
  312. if not row:
  313. raise HTTPException(status_code=404, detail=f"Content not found: {content_id}")
  314. # 计算导航上下文
  315. root_id = content_id.split("/")[0] if "/" in content_id else content_id
  316. # TOC (根节点)
  317. toc = None
  318. if "/" in content_id:
  319. toc_row = conn.execute(
  320. "SELECT id, title FROM contents WHERE id = ?",
  321. (root_id,),
  322. ).fetchone()
  323. if toc_row:
  324. toc = ContentNode(id=toc_row["id"], title=toc_row["title"])
  325. # Children (子节点)
  326. children = []
  327. children_rows = conn.execute(
  328. "SELECT id, title FROM contents WHERE id LIKE ? AND id != ? ORDER BY sort_order",
  329. (f"{content_id}/%", content_id),
  330. ).fetchall()
  331. children = [ContentNode(id=r["id"], title=r["title"]) for r in children_rows]
  332. # Prev/Next (同级节点)
  333. prev_node = None
  334. next_node = None
  335. if "/" in content_id:
  336. siblings = conn.execute(
  337. "SELECT id, title, sort_order FROM contents WHERE id LIKE ? AND id NOT LIKE ? ORDER BY sort_order",
  338. (f"{root_id}/%", f"{root_id}/%/%"),
  339. ).fetchall()
  340. for i, sib in enumerate(siblings):
  341. if sib["id"] == content_id:
  342. if i > 0:
  343. prev_node = ContentNode(id=siblings[i-1]["id"], title=siblings[i-1]["title"])
  344. if i < len(siblings) - 1:
  345. next_node = ContentNode(id=siblings[i+1]["id"], title=siblings[i+1]["title"])
  346. break
  347. return ContentOut(
  348. id=row["id"],
  349. title=row["title"],
  350. body=row["body"],
  351. toc=toc,
  352. children=children,
  353. prev=prev_node,
  354. next=next_node,
  355. )
  356. finally:
  357. conn.close()
# ===== Knowledge API =====
# Two-stage retrieval logic
async def _route_knowledge_by_llm(query_text: str, metadata_list: list[dict], k: int = 5) -> list[str]:
    """
    Stage 1: semantic routing.
    Ask the LLM to pick up to 2*k semantically relevant knowledge IDs.

    Returns an empty list when there are no candidates or the LLM call fails
    (routing is best-effort, never raises).
    """
    if not metadata_list:
        return []
    routing_k = k * 2
    # Compact metadata view sent to the LLM; scenario truncated to 100 chars
    # to keep the prompt small.
    routing_data = [
        {
            "id": m["id"],
            "tags": m["tags"],
            "scenario": m["scenario"][:100]
        } for m in metadata_list
    ]
    prompt = f"""
你是一个知识检索专家。根据用户的当前任务需求,从下列原子知识元数据中挑选出最相关的最多 {routing_k} 个知识 ID。
任务需求:"{query_text}"
可选知识列表:
{json.dumps(routing_data, ensure_ascii=False, indent=1)}
请直接输出 ID 列表,用逗号分隔(例如: knowledge-20260302-001, research-20260302-002)。若无相关项请输出 "None"。
"""
    try:
        print(f"\n[Step 1: 知识语义路由] 任务: '{query_text}' | 候选总数: {len(metadata_list)} | 目标提取数: {routing_k}")
        response = await openrouter_llm_call(
            messages=[{"role": "user", "content": prompt}],
            model="google/gemini-2.0-flash-001"
        )
        content = response.get("content", "").strip()
        # Keep only tokens that look like our knowledge ids; tolerates both
        # comma- and whitespace-separated output as well as the "None" reply.
        selected_ids = [idx.strip() for idx in re.split(r'[,\s]+', content) if idx.strip().startswith(("knowledge-", "research-"))]
        print(f"[Step 1: 知识语义路由] LLM 初选 ID ({len(selected_ids)}个): {selected_ids}")
        return selected_ids
    except Exception as e:
        # Fail open: a routing failure degrades to "no candidates", not a 500.
        print(f"LLM 知识路由失败: {e}")
        return []
  395. async def _search_knowledge_two_stage(
  396. query_text: str,
  397. top_k: int = 5,
  398. min_score: int = 3,
  399. tags_filter: Optional[list[str]] = None,
  400. conn: sqlite3.Connection = None
  401. ) -> list[dict]:
  402. """
  403. 两阶段检索:语义路由 + 质量精排
  404. """
  405. if conn is None:
  406. conn = get_db()
  407. should_close = True
  408. else:
  409. should_close = False
  410. try:
  411. # 阶段 1: 解析所有知识
  412. query = "SELECT * FROM knowledge"
  413. rows = conn.execute(query).fetchall()
  414. if not rows:
  415. return []
  416. content_map = {}
  417. metadata_list = []
  418. for row in rows:
  419. kid = row["id"]
  420. tags_type = row["tags_type"].split(",") if row["tags_type"] else []
  421. # 标签过滤
  422. if tags_filter:
  423. if not any(tag in tags_type for tag in tags_filter):
  424. continue
  425. scenario = row["scenario"]
  426. content_text = row["content"]
  427. meta_item = {
  428. "id": kid,
  429. "tags": {"type": tags_type},
  430. "scenario": scenario,
  431. "score": row["eval_score"],
  432. "helpful": row["metrics_helpful"],
  433. "harmful": row["metrics_harmful"],
  434. }
  435. metadata_list.append(meta_item)
  436. content_map[kid] = {
  437. "scenario": scenario,
  438. "content": content_text,
  439. "tags": {"type": tags_type},
  440. "score": meta_item["score"],
  441. "helpful": meta_item["helpful"],
  442. "harmful": meta_item["harmful"],
  443. "message_id": row["message_id"],
  444. "source": {
  445. "urls": row["source_urls"].split(",") if row["source_urls"] else [],
  446. "agent_id": row["source_agent_id"],
  447. "timestamp": row["source_timestamp"]
  448. },
  449. "created_at": row["created_at"],
  450. "updated_at": row["updated_at"]
  451. }
  452. if not metadata_list:
  453. return []
  454. # 阶段 2: 语义路由 (取 2*k)
  455. candidate_ids = await _route_knowledge_by_llm(query_text, metadata_list, k=top_k)
  456. # 阶段 3: 质量精排
  457. print(f"[Step 2: 知识质量精排] 正在根据评分和反馈进行打分...")
  458. scored_items = []
  459. for kid in candidate_ids:
  460. if kid in content_map:
  461. item = content_map[kid]
  462. score = item["score"]
  463. helpful = item["helpful"]
  464. harmful = item["harmful"]
  465. # 计算综合分:基础分 + helpful - harmful*2
  466. quality_score = score + helpful - (harmful * 2.0)
  467. # 过滤门槛
  468. if score < min_score or quality_score < 0:
  469. print(f" - 剔除低质量知识: {kid} (Score: {score}, Helpful: {helpful}, Harmful: {harmful})")
  470. continue
  471. scored_items.append({
  472. "id": kid,
  473. "message_id": item["message_id"],
  474. "scenario": item["scenario"],
  475. "content": item["content"],
  476. "tags": item["tags"],
  477. "score": score,
  478. "quality_score": quality_score,
  479. "metrics": {
  480. "helpful": helpful,
  481. "harmful": harmful
  482. },
  483. "source": item["source"],
  484. "created_at": item["created_at"],
  485. "updated_at": item["updated_at"]
  486. })
  487. # 按照质量分排序
  488. final_sorted = sorted(scored_items, key=lambda x: x["quality_score"], reverse=True)
  489. # 截取最终的 top_k
  490. result = final_sorted[:top_k]
  491. print(f"[Step 2: 知识质量精排] 最终选定知识: {[it['id'] for it in result]}")
  492. print(f"[Knowledge System] 检索结束。\n")
  493. return result
  494. finally:
  495. if should_close:
  496. conn.close()
  497. @app.get("/api/knowledge/search")
  498. async def search_knowledge_api(
  499. q: str = Query(..., description="查询文本"),
  500. top_k: int = Query(default=5, ge=1, le=20),
  501. min_score: int = Query(default=3, ge=1, le=5),
  502. tags_type: Optional[str] = None
  503. ):
  504. """检索知识(两阶段:语义路由 + 质量精排)"""
  505. conn = get_db()
  506. try:
  507. tags_filter = tags_type.split(",") if tags_type else None
  508. results = await _search_knowledge_two_stage(
  509. query_text=q,
  510. top_k=top_k,
  511. min_score=min_score,
  512. tags_filter=tags_filter,
  513. conn=conn
  514. )
  515. return {"results": results, "count": len(results)}
  516. finally:
  517. conn.close()
  518. @app.post("/api/knowledge", status_code=201)
  519. def save_knowledge(knowledge: KnowledgeIn):
  520. """保存新知识"""
  521. import uuid
  522. conn = get_db()
  523. try:
  524. # 生成 ID
  525. timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
  526. random_suffix = uuid.uuid4().hex[:4]
  527. knowledge_id = f"knowledge-{timestamp}-{random_suffix}"
  528. now = datetime.now(timezone.utc).isoformat()
  529. conn.execute(
  530. """INSERT INTO knowledge
  531. (id, message_id, tags_type, scenario, content,
  532. source_urls, source_agent_id, source_timestamp,
  533. eval_score, eval_helpful, eval_harmful,
  534. eval_helpful_history, eval_harmful_history,
  535. metrics_helpful, metrics_harmful, created_at, updated_at)
  536. VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
  537. (
  538. knowledge_id,
  539. knowledge.message_id,
  540. ",".join(knowledge.tags_type),
  541. knowledge.scenario,
  542. knowledge.content,
  543. ",".join(knowledge.urls),
  544. knowledge.agent_id,
  545. now,
  546. knowledge.score,
  547. 0, # eval_helpful
  548. 0, # eval_harmful
  549. "[]", # eval_helpful_history
  550. "[]", # eval_harmful_history
  551. 1, # metrics_helpful
  552. 0, # metrics_harmful
  553. now,
  554. now,
  555. ),
  556. )
  557. conn.commit()
  558. return {"status": "ok", "knowledge_id": knowledge_id}
  559. finally:
  560. conn.close()
  561. @app.get("/api/knowledge")
  562. def list_knowledge(
  563. limit: int = Query(default=10, ge=1, le=100),
  564. tags_type: Optional[str] = None
  565. ):
  566. """列出知识"""
  567. conn = get_db()
  568. try:
  569. query = "SELECT * FROM knowledge"
  570. params = []
  571. if tags_type:
  572. query += " WHERE tags_type LIKE ?"
  573. params.append(f"%{tags_type}%")
  574. query += " ORDER BY created_at DESC LIMIT ?"
  575. params.append(limit)
  576. rows = conn.execute(query, params).fetchall()
  577. results = []
  578. for row in rows:
  579. results.append({
  580. "id": row["id"],
  581. "message_id": row["message_id"],
  582. "tags": {"type": row["tags_type"].split(",") if row["tags_type"] else []},
  583. "scenario": row["scenario"],
  584. "content": row["content"],
  585. "source": {
  586. "urls": row["source_urls"].split(",") if row["source_urls"] else [],
  587. "agent_id": row["source_agent_id"],
  588. "timestamp": row["source_timestamp"]
  589. },
  590. "eval": {
  591. "score": row["eval_score"],
  592. "helpful": row["eval_helpful"],
  593. "harmful": row["eval_harmful"]
  594. },
  595. "metrics": {
  596. "helpful": row["metrics_helpful"],
  597. "harmful": row["metrics_harmful"]
  598. },
  599. "created_at": row["created_at"],
  600. "updated_at": row["updated_at"]
  601. })
  602. return {"results": results, "count": len(results)}
  603. finally:
  604. conn.close()
  605. @app.get("/api/knowledge/{knowledge_id}")
  606. def get_knowledge(knowledge_id: str):
  607. """获取单条知识"""
  608. conn = get_db()
  609. try:
  610. row = conn.execute(
  611. "SELECT * FROM knowledge WHERE id = ?",
  612. (knowledge_id,)
  613. ).fetchone()
  614. if not row:
  615. raise HTTPException(status_code=404, detail=f"Knowledge not found: {knowledge_id}")
  616. return {
  617. "id": row["id"],
  618. "message_id": row["message_id"],
  619. "tags": {"type": row["tags_type"].split(",") if row["tags_type"] else []},
  620. "scenario": row["scenario"],
  621. "content": row["content"],
  622. "source": {
  623. "urls": row["source_urls"].split(",") if row["source_urls"] else [],
  624. "agent_id": row["source_agent_id"],
  625. "timestamp": row["source_timestamp"]
  626. },
  627. "eval": {
  628. "score": row["eval_score"],
  629. "helpful": row["eval_helpful"],
  630. "harmful": row["eval_harmful"],
  631. "helpful_history": [],
  632. "harmful_history": []
  633. },
  634. "metrics": {
  635. "helpful": row["metrics_helpful"],
  636. "harmful": row["metrics_harmful"]
  637. },
  638. "created_at": row["created_at"],
  639. "updated_at": row["updated_at"]
  640. }
  641. finally:
  642. conn.close()
async def _evolve_knowledge_with_llm(old_content: str, feedback: str) -> str:
    """Rewrite ("evolve") knowledge content with an LLM based on field feedback.

    Falls back to appending the feedback as a dated update note when the LLM
    call fails or returns an implausibly short result — never raises.
    """
    prompt = f"""你是一个 AI Agent 知识库管理员。请根据反馈建议,对现有的知识内容进行重写进化。
【原知识内容】:
{old_content}
【实战反馈建议】:
{feedback}
【重写要求】:
1. 融合知识:将反馈中的避坑指南、新参数或修正后的选择逻辑融入原知识,使其更具通用性和准确性。
2. 保持结构:如果原内容有特定格式(如 Markdown、代码示例等),请保持该格式。
3. 语言:简洁直接,使用中文。
4. 禁止:严禁输出任何开场白、解释语或额外的 Markdown 标题,直接返回重写后的正文。
"""
    try:
        response = await openrouter_llm_call(
            messages=[{"role": "user", "content": prompt}],
            model="google/gemini-2.0-flash-001"
        )
        evolved = response.get("content", "").strip()
        # Guard against degenerate LLM replies (empty / truncated output).
        if len(evolved) < 5:
            raise ValueError("LLM output too short")
        return evolved
    except Exception as e:
        # Append-mode fallback: keep the old content and tack the feedback on
        # as a dated update marker.
        print(f"知识进化失败,采用追加模式回退: {e}")
        return f"{old_content}\n\n---\n[Update {datetime.now().strftime('%Y-%m-%d')}]: {feedback}"
  668. @app.put("/api/knowledge/{knowledge_id}")
  669. async def update_knowledge(knowledge_id: str, update: KnowledgeUpdateIn):
  670. """更新知识评估,支持知识进化"""
  671. conn = get_db()
  672. try:
  673. row = conn.execute("SELECT * FROM knowledge WHERE id = ?", (knowledge_id,)).fetchone()
  674. if not row:
  675. raise HTTPException(status_code=404, detail=f"Knowledge not found: {knowledge_id}")
  676. now = datetime.now(timezone.utc).isoformat()
  677. updates = {"updated_at": now}
  678. if update.update_score is not None:
  679. updates["eval_score"] = update.update_score
  680. if update.add_helpful_case:
  681. helpful_history = json.loads(row["eval_helpful_history"] or "[]")
  682. helpful_history.append(update.add_helpful_case)
  683. updates["eval_helpful"] = row["eval_helpful"] + 1
  684. updates["eval_helpful_history"] = json.dumps(helpful_history, ensure_ascii=False)
  685. updates["metrics_helpful"] = row["metrics_helpful"] + 1
  686. if update.add_harmful_case:
  687. harmful_history = json.loads(row["eval_harmful_history"] or "[]")
  688. harmful_history.append(update.add_harmful_case)
  689. updates["eval_harmful"] = row["eval_harmful"] + 1
  690. updates["eval_harmful_history"] = json.dumps(harmful_history, ensure_ascii=False)
  691. updates["metrics_harmful"] = row["metrics_harmful"] + 1
  692. if update.evolve_feedback:
  693. evolved_content = await _evolve_knowledge_with_llm(row["content"], update.evolve_feedback)
  694. updates["content"] = evolved_content
  695. updates["metrics_helpful"] = updates.get("metrics_helpful", row["metrics_helpful"]) + 1
  696. set_clause = ", ".join(f"{k} = ?" for k in updates)
  697. values = list(updates.values()) + [knowledge_id]
  698. conn.execute(f"UPDATE knowledge SET {set_clause} WHERE id = ?", values)
  699. conn.commit()
  700. return {"status": "ok", "knowledge_id": knowledge_id}
  701. finally:
  702. conn.close()
  703. @app.post("/api/knowledge/batch_update")
  704. async def batch_update_knowledge(batch: KnowledgeBatchUpdateIn):
  705. """批量反馈知识有效性"""
  706. if not batch.feedback_list:
  707. return {"status": "ok", "updated": 0}
  708. conn = get_db()
  709. try:
  710. # 先处理无需进化的,收集需要进化的
  711. evolution_tasks = [] # [(knowledge_id, old_content, feedback)]
  712. simple_updates = [] # [(knowledge_id, is_effective)]
  713. for item in batch.feedback_list:
  714. knowledge_id = item.get("knowledge_id")
  715. is_effective = item.get("is_effective")
  716. feedback = item.get("feedback", "")
  717. if not knowledge_id:
  718. continue
  719. row = conn.execute("SELECT * FROM knowledge WHERE id = ?", (knowledge_id,)).fetchone()
  720. if not row:
  721. continue
  722. if is_effective and feedback:
  723. evolution_tasks.append((knowledge_id, row["content"], feedback, row["metrics_helpful"]))
  724. else:
  725. simple_updates.append((knowledge_id, is_effective, row["metrics_helpful"], row["metrics_harmful"]))
  726. # 执行简单更新
  727. now = datetime.now(timezone.utc).isoformat()
  728. for knowledge_id, is_effective, cur_helpful, cur_harmful in simple_updates:
  729. if is_effective:
  730. conn.execute(
  731. "UPDATE knowledge SET metrics_helpful = ?, updated_at = ? WHERE id = ?",
  732. (cur_helpful + 1, now, knowledge_id)
  733. )
  734. else:
  735. conn.execute(
  736. "UPDATE knowledge SET metrics_harmful = ?, updated_at = ? WHERE id = ?",
  737. (cur_harmful + 1, now, knowledge_id)
  738. )
  739. # 并发执行知识进化
  740. if evolution_tasks:
  741. print(f"🧬 并发处理 {len(evolution_tasks)} 条知识进化...")
  742. evolved_results = await asyncio.gather(
  743. *[_evolve_knowledge_with_llm(old, fb) for _, old, fb, _ in evolution_tasks]
  744. )
  745. for (knowledge_id, _, _, cur_helpful), evolved_content in zip(evolution_tasks, evolved_results):
  746. conn.execute(
  747. "UPDATE knowledge SET content = ?, metrics_helpful = ?, updated_at = ? WHERE id = ?",
  748. (evolved_content, cur_helpful + 1, now, knowledge_id)
  749. )
  750. conn.commit()
  751. return {"status": "ok", "updated": len(simple_updates) + len(evolution_tasks)}
  752. finally:
  753. conn.close()
  754. @app.post("/api/knowledge/slim")
  755. async def slim_knowledge(model: str = "anthropic/claude-sonnet-4-5"):
  756. """知识库瘦身:合并语义相似知识"""
  757. conn = get_db()
  758. try:
  759. rows = conn.execute("SELECT * FROM knowledge ORDER BY metrics_helpful DESC").fetchall()
  760. if len(rows) < 2:
  761. return {"status": "ok", "message": f"知识库仅有 {len(rows)} 条,无需瘦身"}
  762. # 构造发给大模型的内容
  763. entries_text = ""
  764. for row in rows:
  765. entries_text += f"[ID: {row['id']}] [Tags: {row['tags_type']}] "
  766. entries_text += f"[Helpful: {row['metrics_helpful']}, Harmful: {row['metrics_harmful']}] [Score: {row['eval_score']}]\n"
  767. entries_text += f"Scenario: {row['scenario']}\n"
  768. entries_text += f"Content: {row['content'][:200]}...\n\n"
  769. prompt = f"""你是一个 AI Agent 知识库管理员。以下是当前知识库的全部条目,请执行瘦身操作:
  770. 【任务】:
  771. 1. 识别语义高度相似或重复的知识,将它们合并为一条更精炼、更通用的知识。
  772. 2. 合并时保留 helpful 最高的那条的 ID(metrics_helpful 取各条之和)。
  773. 3. 对于独立的、无重复的知识,保持原样不动。
  774. 【当前知识库】:
  775. {entries_text}
  776. 【输出格式要求】:
  777. 严格按以下格式输出每条知识,条目之间用 === 分隔:
  778. ID: <保留的id>
  779. TAGS: <逗号分隔的type列表>
  780. HELPFUL: <合并后的helpful计数>
  781. HARMFUL: <合并后的harmful计数>
  782. SCORE: <评分>
  783. SCENARIO: <场景描述>
  784. CONTENT: <合并后的知识内容>
  785. ===
  786. 最后输出合并报告:
  787. REPORT: 原有 X 条,合并后 Y 条,精简了 Z 条。
  788. 禁止输出任何开场白或解释。"""
  789. print(f"\n[知识瘦身] 正在调用 {model} 分析 {len(rows)} 条知识...")
  790. response = await openrouter_llm_call(
  791. messages=[{"role": "user", "content": prompt}],
  792. model=model
  793. )
  794. content = response.get("content", "").strip()
  795. if not content:
  796. raise HTTPException(status_code=500, detail="LLM 返回为空")
  797. # 解析大模型输出
  798. report_line = ""
  799. new_entries = []
  800. blocks = [b.strip() for b in content.split("===") if b.strip()]
  801. for block in blocks:
  802. if block.startswith("REPORT:"):
  803. report_line = block
  804. continue
  805. lines = block.split("\n")
  806. kid, tags, helpful, harmful, score, scenario, content_lines = None, "", 0, 0, 3, "", []
  807. current_field = None
  808. for line in lines:
  809. if line.startswith("ID:"):
  810. kid = line[3:].strip()
  811. current_field = None
  812. elif line.startswith("TAGS:"):
  813. tags = line[5:].strip()
  814. current_field = None
  815. elif line.startswith("HELPFUL:"):
  816. try:
  817. helpful = int(line[8:].strip())
  818. except Exception:
  819. helpful = 0
  820. current_field = None
  821. elif line.startswith("HARMFUL:"):
  822. try:
  823. harmful = int(line[8:].strip())
  824. except Exception:
  825. harmful = 0
  826. current_field = None
  827. elif line.startswith("SCORE:"):
  828. try:
  829. score = int(line[6:].strip())
  830. except Exception:
  831. score = 3
  832. current_field = None
  833. elif line.startswith("SCENARIO:"):
  834. scenario = line[9:].strip()
  835. current_field = "scenario"
  836. elif line.startswith("CONTENT:"):
  837. content_lines.append(line[8:].strip())
  838. current_field = "content"
  839. elif current_field == "scenario":
  840. scenario += "\n" + line
  841. elif current_field == "content":
  842. content_lines.append(line)
  843. if kid and content_lines:
  844. new_entries.append({
  845. "id": kid,
  846. "tags": tags,
  847. "helpful": helpful,
  848. "harmful": harmful,
  849. "score": score,
  850. "scenario": scenario.strip(),
  851. "content": "\n".join(content_lines).strip()
  852. })
  853. if not new_entries:
  854. raise HTTPException(status_code=500, detail="解析大模型输出失败")
  855. # 原子化写回
  856. now = datetime.now(timezone.utc).isoformat()
  857. conn.execute("DELETE FROM knowledge")
  858. for e in new_entries:
  859. conn.execute(
  860. """INSERT INTO knowledge
  861. (id, message_id, tags_type, scenario, content,
  862. source_urls, source_agent_id, source_timestamp,
  863. eval_score, eval_helpful, eval_harmful,
  864. eval_helpful_history, eval_harmful_history,
  865. metrics_helpful, metrics_harmful, created_at, updated_at)
  866. VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
  867. (e["id"], "", e["tags"], e["scenario"], e["content"],
  868. "", "slim", now,
  869. e["score"], 0, 0, "[]", "[]",
  870. e["helpful"], e["harmful"], now, now)
  871. )
  872. conn.commit()
  873. result_msg = f"瘦身完成:{len(rows)} → {len(new_entries)} 条知识"
  874. if report_line:
  875. result_msg += f"\n{report_line}"
  876. print(f"[知识瘦身] {result_msg}")
  877. return {"status": "ok", "before": len(rows), "after": len(new_entries), "report": report_line}
  878. finally:
  879. conn.close()
if __name__ == "__main__":
    # Local/standalone run; in production the app can be served by any
    # ASGI server pointing at server:app.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)