# server.py
"""
KnowHub Server
A sharing platform for agent tool-usage experience.
FastAPI + SQLite, single-file deployment.
"""
import os
import re
import json
import sqlite3
import asyncio
from contextlib import asynccontextmanager
from datetime import datetime, timezone
from typing import Optional
from pathlib import Path
from fastapi import FastAPI, HTTPException, Query
from pydantic import BaseModel, Field
# Make the sibling `agent` package importable (repo root goes on sys.path).
import sys
sys.path.insert(0, str(Path(__file__).parent.parent))
# Load environment variables from the repo-root .env file.
from dotenv import load_dotenv
load_dotenv(Path(__file__).parent.parent / ".env")
from agent.llm.openrouter import openrouter_llm_call

# Branding / storage knobs, overridable via environment variables.
BRAND_NAME = os.getenv("BRAND_NAME", "KnowHub")
BRAND_API_ENV = os.getenv("BRAND_API_ENV", "KNOWHUB_API")
BRAND_DB = os.getenv("BRAND_DB", "knowhub.db")
# Database file lives next to this script.
DB_PATH = Path(__file__).parent / BRAND_DB
  28. # --- 数据库 ---
  29. def get_db() -> sqlite3.Connection:
  30. conn = sqlite3.connect(str(DB_PATH))
  31. conn.row_factory = sqlite3.Row
  32. conn.execute("PRAGMA journal_mode=WAL")
  33. return conn
  34. def init_db():
  35. conn = get_db()
  36. conn.execute("""
  37. CREATE TABLE IF NOT EXISTS experiences (
  38. id INTEGER PRIMARY KEY AUTOINCREMENT,
  39. name TEXT NOT NULL,
  40. url TEXT DEFAULT '',
  41. category TEXT DEFAULT '',
  42. task TEXT NOT NULL,
  43. score INTEGER CHECK(score BETWEEN 1 AND 5),
  44. outcome TEXT DEFAULT '',
  45. tips TEXT DEFAULT '',
  46. content_id TEXT DEFAULT '',
  47. submitted_by TEXT DEFAULT '',
  48. created_at TEXT NOT NULL
  49. )
  50. """)
  51. conn.execute("CREATE INDEX IF NOT EXISTS idx_name ON experiences(name)")
  52. conn.execute("""
  53. CREATE TABLE IF NOT EXISTS contents (
  54. id TEXT PRIMARY KEY,
  55. title TEXT DEFAULT '',
  56. body TEXT NOT NULL,
  57. sort_order INTEGER DEFAULT 0,
  58. submitted_by TEXT DEFAULT '',
  59. created_at TEXT NOT NULL
  60. )
  61. """)
  62. conn.execute("""
  63. CREATE TABLE IF NOT EXISTS knowledge (
  64. id TEXT PRIMARY KEY,
  65. message_id TEXT DEFAULT '',
  66. tags_type TEXT NOT NULL,
  67. scenario TEXT NOT NULL,
  68. content TEXT NOT NULL,
  69. source_urls TEXT DEFAULT '',
  70. source_agent_id TEXT DEFAULT '',
  71. source_timestamp TEXT NOT NULL,
  72. eval_score INTEGER DEFAULT 3 CHECK(eval_score BETWEEN 1 AND 5),
  73. eval_helpful INTEGER DEFAULT 0,
  74. eval_harmful INTEGER DEFAULT 0,
  75. eval_helpful_history TEXT DEFAULT '[]',
  76. eval_harmful_history TEXT DEFAULT '[]',
  77. metrics_helpful INTEGER DEFAULT 1,
  78. metrics_harmful INTEGER DEFAULT 0,
  79. created_at TEXT NOT NULL,
  80. updated_at TEXT DEFAULT ''
  81. )
  82. """)
  83. conn.execute("CREATE INDEX IF NOT EXISTS idx_knowledge_tags ON knowledge(tags_type)")
  84. conn.execute("CREATE INDEX IF NOT EXISTS idx_knowledge_scenario ON knowledge(scenario)")
  85. conn.commit()
  86. conn.close()
# --- Models ---
class ExperienceIn(BaseModel):
    """Request payload for submitting one tool-usage experience."""
    name: str
    url: str = ""
    category: str = ""
    task: str
    score: int = Field(ge=1, le=5)  # 1-5 rating, validated by pydantic
    outcome: str = ""
    tips: str = ""
    content_id: str = ""
    submitted_by: str = ""
class ExperienceOut(BaseModel):
    """A stored experience as returned by the API (resource fields live on the parent)."""
    task: str
    score: int
    outcome: str
    tips: str
    content_id: str
    submitted_by: str
    created_at: str
class ResourceResult(BaseModel):
    """One resource (search results grouped by name) with its experiences and aggregates."""
    name: str
    url: str
    relevant_experiences: list[ExperienceOut]
    avg_score: float
    experience_count: int
class SearchResponse(BaseModel):
    """Response body of GET /api/search."""
    results: list[ResourceResult]
class ResourceDetailResponse(BaseModel):
    """Response body of GET /api/resource/{name}: all experiences for one resource."""
    name: str
    url: str
    category: str
    avg_score: float
    experience_count: int
    experiences: list[ExperienceOut]
class ContentIn(BaseModel):
    """Payload for creating/replacing a content document; id doubles as a slash-separated path."""
    id: str
    title: str = ""
    body: str
    sort_order: int = 0  # ordering among siblings in navigation
    submitted_by: str = ""
# Knowledge Models
class KnowledgeIn(BaseModel):
    """Payload for saving a new knowledge entry."""
    scenario: str
    content: str
    tags_type: list[str]
    urls: list[str] = []  # mutable default is safe here: pydantic copies defaults per instance
    agent_id: str = "research_agent"
    score: int = Field(default=3, ge=1, le=5)
    message_id: str = ""
class KnowledgeOut(BaseModel):
    """Full knowledge record shape.

    NOTE(review): not referenced by the visible endpoints (they return plain
    dicts); kept as documentation of the wire format.
    """
    id: str
    message_id: str
    tags: dict
    scenario: str
    content: str
    source: dict
    eval: dict
    metrics: dict
    created_at: str
    updated_at: str
class KnowledgeUpdateIn(BaseModel):
    """Partial update for a knowledge entry; any subset of fields may be supplied."""
    add_helpful_case: Optional[dict] = None
    add_harmful_case: Optional[dict] = None
    update_score: Optional[int] = Field(default=None, ge=1, le=5)
    evolve_feedback: Optional[str] = None  # non-empty value triggers an LLM rewrite of the content
class KnowledgeBatchUpdateIn(BaseModel):
    """Batch feedback; each item is a dict with knowledge_id, is_effective and optional feedback."""
    feedback_list: list[dict]
class KnowledgeSearchResponse(BaseModel):
    """Knowledge search response shape.

    NOTE(review): the search endpoint currently returns a plain dict rather
    than declaring this as its response_model.
    """
    results: list[dict]
    count: int
class ContentNode(BaseModel):
    """Lightweight id/title reference used for content navigation links."""
    id: str
    title: str
class ContentOut(BaseModel):
    """A content document plus navigation context (TOC root, children, prev/next siblings)."""
    id: str
    title: str
    body: str
    toc: Optional[ContentNode] = None   # root document; populated for nested ids only
    children: list[ContentNode]
    prev: Optional[ContentNode] = None
    next: Optional[ContentNode] = None
# --- App ---
@asynccontextmanager
async def lifespan(app: FastAPI):
    # Ensure the schema exists before the first request is served.
    init_db()
    yield

app = FastAPI(title=BRAND_NAME, lifespan=lifespan)
  174. def _search_rows(conn: sqlite3.Connection, q: str, category: Optional[str]) -> list[sqlite3.Row]:
  175. """LIKE 搜索,拆词后 AND 连接,匹配 task + tips + outcome + name"""
  176. terms = q.split()
  177. if not terms:
  178. return []
  179. conditions = []
  180. params: list[str] = []
  181. for term in terms:
  182. like = f"%{term}%"
  183. conditions.append(
  184. "(task LIKE ? OR tips LIKE ? OR outcome LIKE ? OR name LIKE ?)"
  185. )
  186. params.extend([like, like, like, like])
  187. if category:
  188. conditions.append("category = ?")
  189. params.append(category)
  190. sql = (
  191. "SELECT name, url, category, task, score, outcome, tips, content_id, "
  192. "submitted_by, created_at FROM experiences WHERE "
  193. + " AND ".join(conditions)
  194. + " ORDER BY created_at DESC"
  195. )
  196. return conn.execute(sql, params).fetchall()
  197. def _group_by_resource(rows: list[sqlite3.Row], limit: int) -> list[ResourceResult]:
  198. """按 name 分组并聚合"""
  199. groups: dict[str, list[sqlite3.Row]] = {}
  200. for row in rows:
  201. name = row["name"]
  202. if name not in groups:
  203. groups[name] = []
  204. groups[name].append(row)
  205. results = []
  206. for resource_name, resource_rows in groups.items():
  207. scores = [r["score"] for r in resource_rows]
  208. avg = sum(scores) / len(scores)
  209. results.append(ResourceResult(
  210. name=resource_name,
  211. url=resource_rows[0]["url"],
  212. relevant_experiences=[
  213. ExperienceOut(
  214. task=r["task"],
  215. score=r["score"],
  216. outcome=r["outcome"],
  217. tips=r["tips"],
  218. content_id=r["content_id"],
  219. submitted_by=r["submitted_by"],
  220. created_at=r["created_at"],
  221. )
  222. for r in resource_rows
  223. ],
  224. avg_score=round(avg, 1),
  225. experience_count=len(resource_rows),
  226. ))
  227. results.sort(key=lambda r: r.avg_score * r.experience_count, reverse=True)
  228. return results[:limit]
  229. @app.get("/api/search", response_model=SearchResponse)
  230. def search_experiences(
  231. q: str = Query(..., min_length=1),
  232. category: Optional[str] = None,
  233. limit: int = Query(default=10, ge=1, le=50),
  234. ):
  235. conn = get_db()
  236. try:
  237. rows = _search_rows(conn, q, category)
  238. return SearchResponse(results=_group_by_resource(rows, limit))
  239. finally:
  240. conn.close()
  241. @app.post("/api/experience", status_code=201)
  242. def submit_experience(exp: ExperienceIn):
  243. conn = get_db()
  244. try:
  245. now = datetime.now(timezone.utc).isoformat()
  246. conn.execute(
  247. "INSERT INTO experiences"
  248. "(name, url, category, task, score, outcome, tips, content_id, submitted_by, created_at)"
  249. " VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
  250. (exp.name, exp.url, exp.category, exp.task,
  251. exp.score, exp.outcome, exp.tips, exp.content_id, exp.submitted_by, now),
  252. )
  253. conn.commit()
  254. return {"status": "ok"}
  255. finally:
  256. conn.close()
  257. @app.get("/api/resource/{name}", response_model=ResourceDetailResponse)
  258. def get_resource_experiences(name: str):
  259. conn = get_db()
  260. try:
  261. rows = conn.execute(
  262. "SELECT name, url, category, task, score, outcome, tips, content_id, "
  263. "submitted_by, created_at FROM experiences "
  264. "WHERE name = ? ORDER BY created_at DESC",
  265. (name,),
  266. ).fetchall()
  267. if not rows:
  268. raise HTTPException(status_code=404, detail=f"No experiences found for resource: {name}")
  269. scores = [r["score"] for r in rows]
  270. avg = sum(scores) / len(scores)
  271. return ResourceDetailResponse(
  272. name=name,
  273. url=rows[0]["url"],
  274. category=rows[0]["category"],
  275. avg_score=round(avg, 1),
  276. experience_count=len(rows),
  277. experiences=[
  278. ExperienceOut(
  279. task=r["task"],
  280. score=r["score"],
  281. outcome=r["outcome"],
  282. tips=r["tips"],
  283. content_id=r["content_id"],
  284. submitted_by=r["submitted_by"],
  285. created_at=r["created_at"],
  286. )
  287. for r in rows
  288. ],
  289. )
  290. finally:
  291. conn.close()
  292. @app.post("/api/content", status_code=201)
  293. def submit_content(content: ContentIn):
  294. conn = get_db()
  295. try:
  296. now = datetime.now(timezone.utc).isoformat()
  297. conn.execute(
  298. "INSERT OR REPLACE INTO contents"
  299. "(id, title, body, sort_order, submitted_by, created_at)"
  300. " VALUES (?, ?, ?, ?, ?, ?)",
  301. (content.id, content.title, content.body, content.sort_order, content.submitted_by, now),
  302. )
  303. conn.commit()
  304. return {"status": "ok"}
  305. finally:
  306. conn.close()
@app.get("/api/content/{content_id:path}", response_model=ContentOut)
def get_content(content_id: str):
    """Fetch a content document plus navigation context (TOC root, children, prev/next).

    Ids are slash-separated paths; the segment before the first "/" is the root
    document. Raises 404 when the id is unknown.
    """
    conn = get_db()
    try:
        row = conn.execute(
            "SELECT id, title, body, sort_order FROM contents WHERE id = ?",
            (content_id,),
        ).fetchone()
        if not row:
            raise HTTPException(status_code=404, detail=f"Content not found: {content_id}")
        # Compute navigation context.
        root_id = content_id.split("/")[0] if "/" in content_id else content_id
        # TOC (root node) — only populated for nested ids.
        toc = None
        if "/" in content_id:
            toc_row = conn.execute(
                "SELECT id, title FROM contents WHERE id = ?",
                (root_id,),
            ).fetchone()
            if toc_row:
                toc = ContentNode(id=toc_row["id"], title=toc_row["title"])
        # Children: every document whose id is prefixed by this one
        # (includes grandchildren, since the LIKE pattern matches any depth).
        children = []
        children_rows = conn.execute(
            "SELECT id, title FROM contents WHERE id LIKE ? AND id != ? ORDER BY sort_order",
            (f"{content_id}/%", content_id),
        ).fetchall()
        children = [ContentNode(id=r["id"], title=r["title"]) for r in children_rows]
        # Prev/Next among siblings: direct children of the root only —
        # the NOT LIKE clause excludes deeper descendants.
        prev_node = None
        next_node = None
        if "/" in content_id:
            siblings = conn.execute(
                "SELECT id, title, sort_order FROM contents WHERE id LIKE ? AND id NOT LIKE ? ORDER BY sort_order",
                (f"{root_id}/%", f"{root_id}/%/%"),
            ).fetchall()
            for i, sib in enumerate(siblings):
                if sib["id"] == content_id:
                    if i > 0:
                        prev_node = ContentNode(id=siblings[i-1]["id"], title=siblings[i-1]["title"])
                    if i < len(siblings) - 1:
                        next_node = ContentNode(id=siblings[i+1]["id"], title=siblings[i+1]["title"])
                    break
        return ContentOut(
            id=row["id"],
            title=row["title"],
            body=row["body"],
            toc=toc,
            children=children,
            prev=prev_node,
            next=next_node,
        )
    finally:
        conn.close()
# ===== Knowledge API =====
# Two-stage retrieval logic
async def _route_knowledge_by_llm(query_text: str, metadata_list: list[dict], k: int = 5) -> list[str]:
    """
    Stage 1: semantic routing.
    Ask the LLM to pre-select up to 2*k semantically relevant knowledge IDs.
    Returns [] when there are no candidates or the LLM call fails.
    """
    if not metadata_list:
        return []
    routing_k = k * 2
    # Compact metadata view: id + tags + truncated scenario keeps the prompt small.
    routing_data = [
        {
            "id": m["id"],
            "tags": m["tags"],
            "scenario": m["scenario"][:100]
        } for m in metadata_list
    ]
    prompt = f"""
你是一个知识检索专家。根据用户的当前任务需求,从下列原子知识元数据中挑选出最相关的最多 {routing_k} 个知识 ID。
任务需求:"{query_text}"
可选知识列表:
{json.dumps(routing_data, ensure_ascii=False, indent=1)}
请直接输出 ID 列表,用逗号分隔(例如: knowledge-20260302-001, research-20260302-002)。若无相关项请输出 "None"。
"""
    try:
        print(f"\n[Step 1: 知识语义路由] 任务: '{query_text}' | 候选总数: {len(metadata_list)} | 目标提取数: {routing_k}")
        response = await openrouter_llm_call(
            messages=[{"role": "user", "content": prompt}],
            model="google/gemini-2.0-flash-001"
        )
        content = response.get("content", "").strip()
        # Keep only tokens that look like our knowledge ids; this also drops "None".
        selected_ids = [idx.strip() for idx in re.split(r'[,\s]+', content) if idx.strip().startswith(("knowledge-", "research-"))]
        print(f"[Step 1: 知识语义路由] LLM 初选 ID ({len(selected_ids)}个): {selected_ids}")
        return selected_ids
    except Exception as e:
        # Best-effort: routing failure degrades to "no candidates".
        print(f"LLM 知识路由失败: {e}")
        return []
async def _search_knowledge_two_stage(
    query_text: str,
    top_k: int = 5,
    min_score: int = 3,
    tags_filter: Optional[list[str]] = None,
    conn: Optional[sqlite3.Connection] = None
) -> list[dict]:
    """
    Two-stage retrieval: LLM semantic routing followed by quality re-ranking.

    When ``conn`` is None a connection is opened and closed locally; a
    caller-supplied connection is left open for the caller to manage.
    """
    if conn is None:
        conn = get_db()
        should_close = True
    else:
        should_close = False
    try:
        # Load the whole knowledge table into memory.
        # NOTE(review): full-table scan — fine for a small table, revisit at scale.
        query = "SELECT * FROM knowledge"
        rows = conn.execute(query).fetchall()
        if not rows:
            return []
        content_map = {}      # id -> full record used to build the final payload
        metadata_list = []    # compact metadata handed to the LLM router
        for row in rows:
            kid = row["id"]
            tags_type = row["tags_type"].split(",") if row["tags_type"] else []
            # Tag filter: keep rows that share at least one requested tag.
            if tags_filter:
                if not any(tag in tags_type for tag in tags_filter):
                    continue
            scenario = row["scenario"]
            content_text = row["content"]
            meta_item = {
                "id": kid,
                "tags": {"type": tags_type},
                "scenario": scenario,
                "score": row["eval_score"],
                "helpful": row["metrics_helpful"],
                "harmful": row["metrics_harmful"],
            }
            metadata_list.append(meta_item)
            content_map[kid] = {
                "scenario": scenario,
                "content": content_text,
                "tags": {"type": tags_type},
                "score": meta_item["score"],
                "helpful": meta_item["helpful"],
                "harmful": meta_item["harmful"],
                "message_id": row["message_id"],
                "source": {
                    "urls": row["source_urls"].split(",") if row["source_urls"] else [],
                    "agent_id": row["source_agent_id"],
                    "timestamp": row["source_timestamp"]
                },
                "created_at": row["created_at"],
                "updated_at": row["updated_at"]
            }
        if not metadata_list:
            return []
        # Semantic routing: the LLM pre-selects up to 2*top_k candidate ids.
        candidate_ids = await _route_knowledge_by_llm(query_text, metadata_list, k=top_k)
        # Quality re-ranking of the routed candidates.
        print(f"[Step 2: 知识质量精排] 正在根据评分和反馈进行打分...")
        scored_items = []
        for kid in candidate_ids:
            if kid in content_map:
                item = content_map[kid]
                score = item["score"]
                helpful = item["helpful"]
                harmful = item["harmful"]
                # Composite quality: base score + helpful - harmful*2.
                quality_score = score + helpful - (harmful * 2.0)
                # Drop entries below the score floor or with a negative composite.
                if score < min_score or quality_score < 0:
                    print(f" - 剔除低质量知识: {kid} (Score: {score}, Helpful: {helpful}, Harmful: {harmful})")
                    continue
                scored_items.append({
                    "id": kid,
                    "message_id": item["message_id"],
                    "scenario": item["scenario"],
                    "content": item["content"],
                    "tags": item["tags"],
                    "score": score,
                    "quality_score": quality_score,
                    "metrics": {
                        "helpful": helpful,
                        "harmful": harmful
                    },
                    "source": item["source"],
                    "created_at": item["created_at"],
                    "updated_at": item["updated_at"]
                })
        # Order by composite quality, best first.
        final_sorted = sorted(scored_items, key=lambda x: x["quality_score"], reverse=True)
        # Keep only the requested top_k.
        result = final_sorted[:top_k]
        print(f"[Step 2: 知识质量精排] 最终选定知识: {[it['id'] for it in result]}")
        print(f"[Knowledge System] 检索结束。\n")
        return result
    finally:
        if should_close:
            conn.close()
  500. @app.get("/api/knowledge/search")
  501. async def search_knowledge_api(
  502. q: str = Query(..., description="查询文本"),
  503. top_k: int = Query(default=5, ge=1, le=20),
  504. min_score: int = Query(default=3, ge=1, le=5),
  505. tags_type: Optional[str] = None
  506. ):
  507. """检索知识(两阶段:语义路由 + 质量精排)"""
  508. conn = get_db()
  509. try:
  510. tags_filter = tags_type.split(",") if tags_type else None
  511. results = await _search_knowledge_two_stage(
  512. query_text=q,
  513. top_k=top_k,
  514. min_score=min_score,
  515. tags_filter=tags_filter,
  516. conn=conn
  517. )
  518. return {"results": results, "count": len(results)}
  519. finally:
  520. conn.close()
  521. @app.post("/api/knowledge", status_code=201)
  522. def save_knowledge(knowledge: KnowledgeIn):
  523. """保存新知识"""
  524. import uuid
  525. conn = get_db()
  526. try:
  527. # 生成 ID
  528. timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
  529. random_suffix = uuid.uuid4().hex[:4]
  530. knowledge_id = f"knowledge-{timestamp}-{random_suffix}"
  531. now = datetime.now(timezone.utc).isoformat()
  532. conn.execute(
  533. """INSERT INTO knowledge
  534. (id, message_id, tags_type, scenario, content,
  535. source_urls, source_agent_id, source_timestamp,
  536. eval_score, eval_helpful, eval_harmful,
  537. eval_helpful_history, eval_harmful_history,
  538. metrics_helpful, metrics_harmful, created_at, updated_at)
  539. VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
  540. (
  541. knowledge_id,
  542. knowledge.message_id,
  543. ",".join(knowledge.tags_type),
  544. knowledge.scenario,
  545. knowledge.content,
  546. ",".join(knowledge.urls),
  547. knowledge.agent_id,
  548. now,
  549. knowledge.score,
  550. 0, # eval_helpful
  551. 0, # eval_harmful
  552. "[]", # eval_helpful_history
  553. "[]", # eval_harmful_history
  554. 1, # metrics_helpful
  555. 0, # metrics_harmful
  556. now,
  557. now,
  558. ),
  559. )
  560. conn.commit()
  561. return {"status": "ok", "knowledge_id": knowledge_id}
  562. finally:
  563. conn.close()
  564. @app.get("/api/knowledge")
  565. def list_knowledge(
  566. limit: int = Query(default=10, ge=1, le=100),
  567. tags_type: Optional[str] = None
  568. ):
  569. """列出知识"""
  570. conn = get_db()
  571. try:
  572. query = "SELECT * FROM knowledge"
  573. params = []
  574. if tags_type:
  575. query += " WHERE tags_type LIKE ?"
  576. params.append(f"%{tags_type}%")
  577. query += " ORDER BY created_at DESC LIMIT ?"
  578. params.append(limit)
  579. rows = conn.execute(query, params).fetchall()
  580. results = []
  581. for row in rows:
  582. results.append({
  583. "id": row["id"],
  584. "message_id": row["message_id"],
  585. "tags": {"type": row["tags_type"].split(",") if row["tags_type"] else []},
  586. "scenario": row["scenario"],
  587. "content": row["content"],
  588. "source": {
  589. "urls": row["source_urls"].split(",") if row["source_urls"] else [],
  590. "agent_id": row["source_agent_id"],
  591. "timestamp": row["source_timestamp"]
  592. },
  593. "eval": {
  594. "score": row["eval_score"],
  595. "helpful": row["eval_helpful"],
  596. "harmful": row["eval_harmful"]
  597. },
  598. "metrics": {
  599. "helpful": row["metrics_helpful"],
  600. "harmful": row["metrics_harmful"]
  601. },
  602. "created_at": row["created_at"],
  603. "updated_at": row["updated_at"]
  604. })
  605. return {"results": results, "count": len(results)}
  606. finally:
  607. conn.close()
  608. @app.get("/api/knowledge/{knowledge_id}")
  609. def get_knowledge(knowledge_id: str):
  610. """获取单条知识"""
  611. conn = get_db()
  612. try:
  613. row = conn.execute(
  614. "SELECT * FROM knowledge WHERE id = ?",
  615. (knowledge_id,)
  616. ).fetchone()
  617. if not row:
  618. raise HTTPException(status_code=404, detail=f"Knowledge not found: {knowledge_id}")
  619. return {
  620. "id": row["id"],
  621. "message_id": row["message_id"],
  622. "tags": {"type": row["tags_type"].split(",") if row["tags_type"] else []},
  623. "scenario": row["scenario"],
  624. "content": row["content"],
  625. "source": {
  626. "urls": row["source_urls"].split(",") if row["source_urls"] else [],
  627. "agent_id": row["source_agent_id"],
  628. "timestamp": row["source_timestamp"]
  629. },
  630. "eval": {
  631. "score": row["eval_score"],
  632. "helpful": row["eval_helpful"],
  633. "harmful": row["eval_harmful"],
  634. "helpful_history": [],
  635. "harmful_history": []
  636. },
  637. "metrics": {
  638. "helpful": row["metrics_helpful"],
  639. "harmful": row["metrics_harmful"]
  640. },
  641. "created_at": row["created_at"],
  642. "updated_at": row["updated_at"]
  643. }
  644. finally:
  645. conn.close()
async def _evolve_knowledge_with_llm(old_content: str, feedback: str) -> str:
    """Rewrite ("evolve") a knowledge entry by merging field feedback via the LLM.

    Falls back to appending the feedback verbatim when the LLM call fails or
    returns a degenerate (too-short) result.
    """
    prompt = f"""你是一个 AI Agent 知识库管理员。请根据反馈建议,对现有的知识内容进行重写进化。
【原知识内容】:
{old_content}
【实战反馈建议】:
{feedback}
【重写要求】:
1. 融合知识:将反馈中的避坑指南、新参数或修正后的选择逻辑融入原知识,使其更具通用性和准确性。
2. 保持结构:如果原内容有特定格式(如 Markdown、代码示例等),请保持该格式。
3. 语言:简洁直接,使用中文。
4. 禁止:严禁输出任何开场白、解释语或额外的 Markdown 标题,直接返回重写后的正文。
"""
    try:
        response = await openrouter_llm_call(
            messages=[{"role": "user", "content": prompt}],
            model="google/gemini-2.0-flash-001"
        )
        evolved = response.get("content", "").strip()
        # Guard against empty / degenerate LLM output.
        if len(evolved) < 5:
            raise ValueError("LLM output too short")
        return evolved
    except Exception as e:
        # Best-effort fallback: append the feedback instead of losing it.
        print(f"知识进化失败,采用追加模式回退: {e}")
        return f"{old_content}\n\n---\n[Update {datetime.now().strftime('%Y-%m-%d')}]: {feedback}"
@app.put("/api/knowledge/{knowledge_id}")
async def update_knowledge(knowledge_id: str, update: KnowledgeUpdateIn):
    """Update a knowledge entry's evaluation; optionally evolve its content via LLM."""
    conn = get_db()
    try:
        row = conn.execute("SELECT * FROM knowledge WHERE id = ?", (knowledge_id,)).fetchone()
        if not row:
            raise HTTPException(status_code=404, detail=f"Knowledge not found: {knowledge_id}")
        now = datetime.now(timezone.utc).isoformat()
        # Column -> new value; keys are fixed internal names, so the f-string
        # SET clause below is not injectable (values still go through params).
        updates = {"updated_at": now}
        if update.update_score is not None:
            updates["eval_score"] = update.update_score
        if update.add_helpful_case:
            helpful_history = json.loads(row["eval_helpful_history"] or "[]")
            helpful_history.append(update.add_helpful_case)
            updates["eval_helpful"] = row["eval_helpful"] + 1
            updates["eval_helpful_history"] = json.dumps(helpful_history, ensure_ascii=False)
            updates["metrics_helpful"] = row["metrics_helpful"] + 1
        if update.add_harmful_case:
            harmful_history = json.loads(row["eval_harmful_history"] or "[]")
            harmful_history.append(update.add_harmful_case)
            updates["eval_harmful"] = row["eval_harmful"] + 1
            updates["eval_harmful_history"] = json.dumps(harmful_history, ensure_ascii=False)
            updates["metrics_harmful"] = row["metrics_harmful"] + 1
        if update.evolve_feedback:
            # LLM rewrite of the content; also counts as one helpful signal.
            evolved_content = await _evolve_knowledge_with_llm(row["content"], update.evolve_feedback)
            updates["content"] = evolved_content
            updates["metrics_helpful"] = updates.get("metrics_helpful", row["metrics_helpful"]) + 1
        set_clause = ", ".join(f"{k} = ?" for k in updates)
        values = list(updates.values()) + [knowledge_id]
        conn.execute(f"UPDATE knowledge SET {set_clause} WHERE id = ?", values)
        conn.commit()
        return {"status": "ok", "knowledge_id": knowledge_id}
    finally:
        conn.close()
@app.post("/api/knowledge/batch_update")
async def batch_update_knowledge(batch: KnowledgeBatchUpdateIn):
    """Apply effectiveness feedback to many knowledge entries in one call.

    Items with is_effective=True AND non-empty feedback trigger an LLM content
    evolution; all other items just bump a helpful/harmful counter.
    """
    if not batch.feedback_list:
        return {"status": "ok", "updated": 0}
    conn = get_db()
    try:
        # Split the work: plain counter bumps vs. LLM-backed content evolution.
        evolution_tasks = []  # [(knowledge_id, old_content, feedback, cur_helpful)]
        simple_updates = []  # [(knowledge_id, is_effective, cur_helpful, cur_harmful)]
        for item in batch.feedback_list:
            knowledge_id = item.get("knowledge_id")
            is_effective = item.get("is_effective")
            feedback = item.get("feedback", "")
            if not knowledge_id:
                continue
            row = conn.execute("SELECT * FROM knowledge WHERE id = ?", (knowledge_id,)).fetchone()
            if not row:
                continue  # unknown ids are silently skipped
            if is_effective and feedback:
                evolution_tasks.append((knowledge_id, row["content"], feedback, row["metrics_helpful"]))
            else:
                simple_updates.append((knowledge_id, is_effective, row["metrics_helpful"], row["metrics_harmful"]))
        # Apply the plain counter updates.
        now = datetime.now(timezone.utc).isoformat()
        for knowledge_id, is_effective, cur_helpful, cur_harmful in simple_updates:
            if is_effective:
                conn.execute(
                    "UPDATE knowledge SET metrics_helpful = ?, updated_at = ? WHERE id = ?",
                    (cur_helpful + 1, now, knowledge_id)
                )
            else:
                conn.execute(
                    "UPDATE knowledge SET metrics_harmful = ?, updated_at = ? WHERE id = ?",
                    (cur_harmful + 1, now, knowledge_id)
                )
        # Run all LLM evolutions concurrently, then write the results back.
        if evolution_tasks:
            print(f"🧬 并发处理 {len(evolution_tasks)} 条知识进化...")
            evolved_results = await asyncio.gather(
                *[_evolve_knowledge_with_llm(old, fb) for _, old, fb, _ in evolution_tasks]
            )
            for (knowledge_id, _, _, cur_helpful), evolved_content in zip(evolution_tasks, evolved_results):
                conn.execute(
                    "UPDATE knowledge SET content = ?, metrics_helpful = ?, updated_at = ? WHERE id = ?",
                    (evolved_content, cur_helpful + 1, now, knowledge_id)
                )
        conn.commit()
        return {"status": "ok", "updated": len(simple_updates) + len(evolution_tasks)}
    finally:
        conn.close()
@app.post("/api/knowledge/slim")
async def slim_knowledge(model: str = "anthropic/claude-sonnet-4-5"):
    """Slim the knowledge base: ask an LLM to merge semantically similar entries.

    Destructive: on success the whole table is deleted and rewritten from the
    parsed LLM output (single commit at the end). Raises 500 when the LLM
    returns nothing parseable.
    """
    conn = get_db()
    try:
        rows = conn.execute("SELECT * FROM knowledge ORDER BY metrics_helpful DESC").fetchall()
        if len(rows) < 2:
            return {"status": "ok", "message": f"知识库仅有 {len(rows)} 条,无需瘦身"}
        # Build a text dump of every entry for the LLM prompt.
        entries_text = ""
        for row in rows:
            entries_text += f"[ID: {row['id']}] [Tags: {row['tags_type']}] "
            entries_text += f"[Helpful: {row['metrics_helpful']}, Harmful: {row['metrics_harmful']}] [Score: {row['eval_score']}]\n"
            entries_text += f"Scenario: {row['scenario']}\n"
            entries_text += f"Content: {row['content'][:200]}...\n\n"
        prompt = f"""你是一个 AI Agent 知识库管理员。以下是当前知识库的全部条目,请执行瘦身操作:
【任务】:
1. 识别语义高度相似或重复的知识,将它们合并为一条更精炼、更通用的知识。
2. 合并时保留 helpful 最高的那条的 ID(metrics_helpful 取各条之和)。
3. 对于独立的、无重复的知识,保持原样不动。
【当前知识库】:
{entries_text}
【输出格式要求】:
严格按以下格式输出每条知识,条目之间用 === 分隔:
ID: <保留的id>
TAGS: <逗号分隔的type列表>
HELPFUL: <合并后的helpful计数>
HARMFUL: <合并后的harmful计数>
SCORE: <评分>
SCENARIO: <场景描述>
CONTENT: <合并后的知识内容>
===
最后输出合并报告:
REPORT: 原有 X 条,合并后 Y 条,精简了 Z 条。
禁止输出任何开场白或解释。"""
        print(f"\n[知识瘦身] 正在调用 {model} 分析 {len(rows)} 条知识...")
        response = await openrouter_llm_call(
            messages=[{"role": "user", "content": prompt}],
            model=model
        )
        content = response.get("content", "").strip()
        if not content:
            raise HTTPException(status_code=500, detail="LLM 返回为空")
        # Parse the LLM output: "===" separated blocks of "KEY: value" lines.
        report_line = ""
        new_entries = []
        blocks = [b.strip() for b in content.split("===") if b.strip()]
        for block in blocks:
            if block.startswith("REPORT:"):
                report_line = block
                continue
            lines = block.split("\n")
            # Defaults used when a field is missing or unparseable.
            kid, tags, helpful, harmful, score, scenario, content_lines = None, "", 0, 0, 3, "", []
            current_field = None  # which multi-line field continuation lines belong to
            for line in lines:
                if line.startswith("ID:"):
                    kid = line[3:].strip()
                    current_field = None
                elif line.startswith("TAGS:"):
                    tags = line[5:].strip()
                    current_field = None
                elif line.startswith("HELPFUL:"):
                    try:
                        helpful = int(line[8:].strip())
                    except Exception:
                        helpful = 0
                    current_field = None
                elif line.startswith("HARMFUL:"):
                    try:
                        harmful = int(line[8:].strip())
                    except Exception:
                        harmful = 0
                    current_field = None
                elif line.startswith("SCORE:"):
                    try:
                        score = int(line[6:].strip())
                    except Exception:
                        score = 3
                    current_field = None
                elif line.startswith("SCENARIO:"):
                    scenario = line[9:].strip()
                    current_field = "scenario"
                elif line.startswith("CONTENT:"):
                    content_lines.append(line[8:].strip())
                    current_field = "content"
                elif current_field == "scenario":
                    scenario += "\n" + line
                elif current_field == "content":
                    content_lines.append(line)
            # A block needs at least an id and some content to be kept.
            if kid and content_lines:
                new_entries.append({
                    "id": kid,
                    "tags": tags,
                    "helpful": helpful,
                    "harmful": harmful,
                    "score": score,
                    "scenario": scenario.strip(),
                    "content": "\n".join(content_lines).strip()
                })
        if not new_entries:
            raise HTTPException(status_code=500, detail="解析大模型输出失败")
        # Rewrite the table; DELETE plus all INSERTs commit together below.
        now = datetime.now(timezone.utc).isoformat()
        conn.execute("DELETE FROM knowledge")
        for e in new_entries:
            conn.execute(
                """INSERT INTO knowledge
                (id, message_id, tags_type, scenario, content,
                source_urls, source_agent_id, source_timestamp,
                eval_score, eval_helpful, eval_harmful,
                eval_helpful_history, eval_harmful_history,
                metrics_helpful, metrics_harmful, created_at, updated_at)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
                (e["id"], "", e["tags"], e["scenario"], e["content"],
                 "", "slim", now,
                 e["score"], 0, 0, "[]", "[]",
                 e["helpful"], e["harmful"], now, now)
            )
        conn.commit()
        result_msg = f"瘦身完成:{len(rows)} → {len(new_entries)} 条知识"
        if report_line:
            result_msg += f"\n{report_line}"
        print(f"[知识瘦身] {result_msg}")
        return {"status": "ok", "before": len(rows), "after": len(new_entries), "report": report_line}
    finally:
        conn.close()
if __name__ == "__main__":
    # Dev entry point: run the API directly with `python server.py`.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)