server.py 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091
"""
KnowHub Server
A sharing platform for agents' tool-usage experience.
FastAPI + SQLite, single-file deployment.
"""
import os
import re
import json
import sqlite3
import asyncio
from contextlib import asynccontextmanager
from datetime import datetime, timezone
from typing import Optional
from pathlib import Path
from fastapi import FastAPI, HTTPException, Query
from pydantic import BaseModel, Field
# Make the repository root importable so the `agent` package (LLM client) resolves.
import sys
sys.path.insert(0, str(Path(__file__).parent.parent))
# Load environment variables before the config reads below.
from dotenv import load_dotenv
load_dotenv(Path(__file__).parent.parent / ".env")
from agent.llm.openrouter import openrouter_llm_call
# Branding / deployment knobs, all overridable via environment variables.
BRAND_NAME = os.getenv("BRAND_NAME", "KnowHub")
BRAND_API_ENV = os.getenv("BRAND_API_ENV", "KNOWHUB_API")
BRAND_DB = os.getenv("BRAND_DB", "knowhub.db")
# Database file lives next to this script.
DB_PATH = Path(__file__).parent / BRAND_DB
# --- Database ---
  29. def get_db() -> sqlite3.Connection:
  30. conn = sqlite3.connect(str(DB_PATH))
  31. conn.row_factory = sqlite3.Row
  32. conn.execute("PRAGMA journal_mode=WAL")
  33. return conn
  34. def init_db():
  35. conn = get_db()
  36. conn.execute("""
  37. CREATE TABLE IF NOT EXISTS experiences (
  38. id INTEGER PRIMARY KEY AUTOINCREMENT,
  39. name TEXT NOT NULL,
  40. url TEXT DEFAULT '',
  41. category TEXT DEFAULT '',
  42. task TEXT NOT NULL,
  43. score INTEGER CHECK(score BETWEEN 1 AND 5),
  44. outcome TEXT DEFAULT '',
  45. tips TEXT DEFAULT '',
  46. content_id TEXT DEFAULT '',
  47. submitted_by TEXT DEFAULT '',
  48. created_at TEXT NOT NULL
  49. )
  50. """)
  51. conn.execute("CREATE INDEX IF NOT EXISTS idx_name ON experiences(name)")
  52. conn.execute("""
  53. CREATE TABLE IF NOT EXISTS contents (
  54. id TEXT PRIMARY KEY,
  55. title TEXT DEFAULT '',
  56. body TEXT NOT NULL,
  57. sort_order INTEGER DEFAULT 0,
  58. submitted_by TEXT DEFAULT '',
  59. created_at TEXT NOT NULL
  60. )
  61. """)
  62. conn.execute("""
  63. CREATE TABLE IF NOT EXISTS knowledge (
  64. id TEXT PRIMARY KEY,
  65. message_id TEXT DEFAULT '',
  66. types TEXT NOT NULL, -- JSON array: ["strategy", "tool"]
  67. task TEXT NOT NULL,
  68. tags TEXT DEFAULT '{}', -- JSON object: {"category": "...", "domain": "..."}
  69. scopes TEXT DEFAULT '["org:cybertogether"]', -- JSON array
  70. owner TEXT DEFAULT '',
  71. content TEXT NOT NULL,
  72. source TEXT DEFAULT '{}', -- JSON object: {name, category, urls, agent_id, submitted_by, timestamp}
  73. eval TEXT DEFAULT '{}', -- JSON object: {score, helpful, harmful, confidence, histories}
  74. created_at TEXT NOT NULL,
  75. updated_at TEXT DEFAULT ''
  76. )
  77. """)
  78. conn.execute("CREATE INDEX IF NOT EXISTS idx_knowledge_types ON knowledge(types)")
  79. conn.execute("CREATE INDEX IF NOT EXISTS idx_knowledge_task ON knowledge(task)")
  80. conn.execute("CREATE INDEX IF NOT EXISTS idx_knowledge_owner ON knowledge(owner)")
  81. conn.execute("CREATE INDEX IF NOT EXISTS idx_knowledge_scopes ON knowledge(scopes)")
  82. conn.commit()
  83. conn.close()
# --- Models ---
class ExperienceIn(BaseModel):
    """Payload for submitting one tool-usage experience."""
    name: str  # resource/tool name (grouping key for search results)
    url: str = ""
    category: str = ""
    task: str  # what the tool was used for
    score: int = Field(ge=1, le=5)  # required 1-5 rating (Field has no default)
    outcome: str = ""
    tips: str = ""
    content_id: str = ""  # optional reference into the contents table
    submitted_by: str = ""

class ExperienceOut(BaseModel):
    """One stored experience as returned by the search/detail endpoints."""
    task: str
    score: int
    outcome: str
    tips: str
    content_id: str
    submitted_by: str
    created_at: str

class ResourceResult(BaseModel):
    """A search hit: one resource with its matching experiences aggregated."""
    name: str
    url: str
    relevant_experiences: list[ExperienceOut]
    avg_score: float  # mean score, rounded to one decimal
    experience_count: int

class SearchResponse(BaseModel):
    """Envelope for /api/search results."""
    results: list[ResourceResult]

class ResourceDetailResponse(BaseModel):
    """Full detail for a single resource (/api/resource/{name})."""
    name: str
    url: str
    category: str
    avg_score: float
    experience_count: int
    experiences: list[ExperienceOut]

class ContentIn(BaseModel):
    """Payload for creating/replacing a content document (upsert by id)."""
    id: str  # hierarchical id — get_content splits it on "/" for navigation
    title: str = ""
    body: str
    sort_order: int = 0  # ordering among siblings in navigation
    submitted_by: str = ""
# Knowledge Models
class KnowledgeIn(BaseModel):
    """Payload for saving a new knowledge entry."""
    task: str
    content: str
    types: list[str] = ["strategy"]
    tags: dict = {}
    scopes: list[str] = ["org:cybertogether"]
    owner: str = ""  # defaults to "agent:<agent_id>" when left empty
    message_id: str = ""
    source: dict = {}  # {name, category, urls, agent_id, submitted_by, timestamp}
    eval: dict = {}  # {score, helpful, harmful, confidence}

class KnowledgeOut(BaseModel):
    """Full knowledge record as stored in the knowledge table."""
    id: str
    message_id: str
    types: list[str]
    task: str
    tags: dict
    scopes: list[str]
    owner: str
    content: str
    source: dict
    eval: dict
    created_at: str
    updated_at: str

class KnowledgeUpdateIn(BaseModel):
    """Partial update for a knowledge entry; any combination of fields."""
    add_helpful_case: Optional[dict] = None  # appended to helpful_history, bumps helpful
    add_harmful_case: Optional[dict] = None  # appended to harmful_history, bumps harmful
    update_score: Optional[int] = Field(default=None, ge=1, le=5)
    evolve_feedback: Optional[str] = None  # triggers an LLM content rewrite

class KnowledgeBatchUpdateIn(BaseModel):
    """Batch feedback: list of {knowledge_id, is_effective, feedback?} dicts."""
    feedback_list: list[dict]

class KnowledgeSearchResponse(BaseModel):
    """Envelope for knowledge search results."""
    results: list[dict]
    count: int

class ContentNode(BaseModel):
    """Lightweight navigation pointer to a content document."""
    id: str
    title: str

class ContentOut(BaseModel):
    """A content document plus its navigation context."""
    id: str
    title: str
    body: str
    toc: Optional[ContentNode] = None  # root node of the document tree
    children: list[ContentNode]
    prev: Optional[ContentNode] = None  # previous sibling by sort_order
    next: Optional[ContentNode] = None  # next sibling by sort_order
# --- App ---
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: ensure the database schema exists at startup."""
    init_db()
    yield

app = FastAPI(title=BRAND_NAME, lifespan=lifespan)
  175. def _search_rows(conn: sqlite3.Connection, q: str, category: Optional[str]) -> list[sqlite3.Row]:
  176. """LIKE 搜索,拆词后 AND 连接,匹配 task + tips + outcome + name"""
  177. terms = q.split()
  178. if not terms:
  179. return []
  180. conditions = []
  181. params: list[str] = []
  182. for term in terms:
  183. like = f"%{term}%"
  184. conditions.append(
  185. "(task LIKE ? OR tips LIKE ? OR outcome LIKE ? OR name LIKE ?)"
  186. )
  187. params.extend([like, like, like, like])
  188. if category:
  189. conditions.append("category = ?")
  190. params.append(category)
  191. sql = (
  192. "SELECT name, url, category, task, score, outcome, tips, content_id, "
  193. "submitted_by, created_at FROM experiences WHERE "
  194. + " AND ".join(conditions)
  195. + " ORDER BY created_at DESC"
  196. )
  197. return conn.execute(sql, params).fetchall()
  198. def _group_by_resource(rows: list[sqlite3.Row], limit: int) -> list[ResourceResult]:
  199. """按 name 分组并聚合"""
  200. groups: dict[str, list[sqlite3.Row]] = {}
  201. for row in rows:
  202. name = row["name"]
  203. if name not in groups:
  204. groups[name] = []
  205. groups[name].append(row)
  206. results = []
  207. for resource_name, resource_rows in groups.items():
  208. scores = [r["score"] for r in resource_rows]
  209. avg = sum(scores) / len(scores)
  210. results.append(ResourceResult(
  211. name=resource_name,
  212. url=resource_rows[0]["url"],
  213. relevant_experiences=[
  214. ExperienceOut(
  215. task=r["task"],
  216. score=r["score"],
  217. outcome=r["outcome"],
  218. tips=r["tips"],
  219. content_id=r["content_id"],
  220. submitted_by=r["submitted_by"],
  221. created_at=r["created_at"],
  222. )
  223. for r in resource_rows
  224. ],
  225. avg_score=round(avg, 1),
  226. experience_count=len(resource_rows),
  227. ))
  228. results.sort(key=lambda r: r.avg_score * r.experience_count, reverse=True)
  229. return results[:limit]
  230. @app.get("/api/search", response_model=SearchResponse)
  231. def search_experiences(
  232. q: str = Query(..., min_length=1),
  233. category: Optional[str] = None,
  234. limit: int = Query(default=10, ge=1, le=50),
  235. ):
  236. conn = get_db()
  237. try:
  238. rows = _search_rows(conn, q, category)
  239. return SearchResponse(results=_group_by_resource(rows, limit))
  240. finally:
  241. conn.close()
  242. @app.post("/api/experience", status_code=201)
  243. def submit_experience(exp: ExperienceIn):
  244. conn = get_db()
  245. try:
  246. now = datetime.now(timezone.utc).isoformat()
  247. conn.execute(
  248. "INSERT INTO experiences"
  249. "(name, url, category, task, score, outcome, tips, content_id, submitted_by, created_at)"
  250. " VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
  251. (exp.name, exp.url, exp.category, exp.task,
  252. exp.score, exp.outcome, exp.tips, exp.content_id, exp.submitted_by, now),
  253. )
  254. conn.commit()
  255. return {"status": "ok"}
  256. finally:
  257. conn.close()
  258. @app.get("/api/resource/{name}", response_model=ResourceDetailResponse)
  259. def get_resource_experiences(name: str):
  260. conn = get_db()
  261. try:
  262. rows = conn.execute(
  263. "SELECT name, url, category, task, score, outcome, tips, content_id, "
  264. "submitted_by, created_at FROM experiences "
  265. "WHERE name = ? ORDER BY created_at DESC",
  266. (name,),
  267. ).fetchall()
  268. if not rows:
  269. raise HTTPException(status_code=404, detail=f"No experiences found for resource: {name}")
  270. scores = [r["score"] for r in rows]
  271. avg = sum(scores) / len(scores)
  272. return ResourceDetailResponse(
  273. name=name,
  274. url=rows[0]["url"],
  275. category=rows[0]["category"],
  276. avg_score=round(avg, 1),
  277. experience_count=len(rows),
  278. experiences=[
  279. ExperienceOut(
  280. task=r["task"],
  281. score=r["score"],
  282. outcome=r["outcome"],
  283. tips=r["tips"],
  284. content_id=r["content_id"],
  285. submitted_by=r["submitted_by"],
  286. created_at=r["created_at"],
  287. )
  288. for r in rows
  289. ],
  290. )
  291. finally:
  292. conn.close()
  293. @app.post("/api/content", status_code=201)
  294. def submit_content(content: ContentIn):
  295. conn = get_db()
  296. try:
  297. now = datetime.now(timezone.utc).isoformat()
  298. conn.execute(
  299. "INSERT OR REPLACE INTO contents"
  300. "(id, title, body, sort_order, submitted_by, created_at)"
  301. " VALUES (?, ?, ?, ?, ?, ?)",
  302. (content.id, content.title, content.body, content.sort_order, content.submitted_by, now),
  303. )
  304. conn.commit()
  305. return {"status": "ok"}
  306. finally:
  307. conn.close()
@app.get("/api/content/{content_id:path}", response_model=ContentOut)
def get_content(content_id: str):
    """Fetch a content document plus navigation context (TOC root, children, prev/next)."""
    conn = get_db()
    try:
        row = conn.execute(
            "SELECT id, title, body, sort_order FROM contents WHERE id = ?",
            (content_id,),
        ).fetchone()
        if not row:
            raise HTTPException(status_code=404, detail=f"Content not found: {content_id}")
        # Navigation context is derived from the hierarchical id ("root/child/...").
        root_id = content_id.split("/")[0] if "/" in content_id else content_id
        # TOC: the root node of this tree (only populated for non-root pages).
        toc = None
        if "/" in content_id:
            toc_row = conn.execute(
                "SELECT id, title FROM contents WHERE id = ?",
                (root_id,),
            ).fetchone()
            if toc_row:
                toc = ContentNode(id=toc_row["id"], title=toc_row["title"])
        # Children: every id under this prefix.
        # NOTE(review): this LIKE pattern matches ALL descendants, not just
        # direct children — confirm whether deeper nodes should be excluded.
        children = []
        children_rows = conn.execute(
            "SELECT id, title FROM contents WHERE id LIKE ? AND id != ? ORDER BY sort_order",
            (f"{content_id}/%", content_id),
        ).fetchall()
        children = [ContentNode(id=r["id"], title=r["title"]) for r in children_rows]
        # Prev/Next among siblings: direct children of the root (the NOT LIKE
        # pattern excludes anything nested deeper than one level), by sort_order.
        prev_node = None
        next_node = None
        if "/" in content_id:
            siblings = conn.execute(
                "SELECT id, title, sort_order FROM contents WHERE id LIKE ? AND id NOT LIKE ? ORDER BY sort_order",
                (f"{root_id}/%", f"{root_id}/%/%"),
            ).fetchall()
            for i, sib in enumerate(siblings):
                if sib["id"] == content_id:
                    if i > 0:
                        prev_node = ContentNode(id=siblings[i-1]["id"], title=siblings[i-1]["title"])
                    if i < len(siblings) - 1:
                        next_node = ContentNode(id=siblings[i+1]["id"], title=siblings[i+1]["title"])
                    break
        return ContentOut(
            id=row["id"],
            title=row["title"],
            body=row["body"],
            toc=toc,
            children=children,
            prev=prev_node,
            next=next_node,
        )
    finally:
        conn.close()
# ===== Knowledge API =====
# Two-stage retrieval logic.
async def _route_knowledge_by_llm(query_text: str, metadata_list: list[dict], k: int = 5) -> list[str]:
    """
    Stage 1: semantic routing.
    Ask the LLM to pick up to 2*k semantically relevant knowledge IDs from
    slim metadata (id, types, truncated task). Returns [] on empty input or
    any LLM failure (best-effort, never raises).
    """
    if not metadata_list:
        return []
    routing_k = k * 2
    # Only id/types/first-100-chars-of-task go to the LLM, keeping the prompt small.
    routing_data = [
        {
            "id": m["id"],
            "types": m["types"],
            "task": m["task"][:100]
        } for m in metadata_list
    ]
    prompt = f"""
你是一个知识检索专家。根据用户的当前任务需求,从下列原子知识元数据中挑选出最相关的最多 {routing_k} 个知识 ID。
任务需求:"{query_text}"
可选知识列表:
{json.dumps(routing_data, ensure_ascii=False, indent=1)}
请直接输出 ID 列表,用逗号分隔(例如: knowledge-20260302-001, research-20260302-002)。若无相关项请输出 "None"。
"""
    try:
        print(f"\n[Step 1: 知识语义路由] 任务: '{query_text}' | 候选总数: {len(metadata_list)} | 目标提取数: {routing_k}")
        response = await openrouter_llm_call(
            messages=[{"role": "user", "content": prompt}],
            model="google/gemini-2.0-flash-001"
        )
        content = response.get("content", "").strip()
        # Keep only tokens that look like knowledge IDs; tolerates any mix of
        # commas/whitespace and discards chatter (including a literal "None").
        selected_ids = [idx.strip() for idx in re.split(r'[,\s]+', content) if idx.strip().startswith(("knowledge-", "research-"))]
        print(f"[Step 1: 知识语义路由] LLM 初选 ID ({len(selected_ids)}个): {selected_ids}")
        return selected_ids
    except Exception as e:
        # Routing failure degrades gracefully to "no candidates".
        print(f"LLM 知识路由失败: {e}")
        return []
async def _search_knowledge_two_stage(
    query_text: str,
    top_k: int = 5,
    min_score: int = 3,
    types_filter: Optional[list[str]] = None,
    conn: sqlite3.Connection = None
) -> list[dict]:
    """
    Two-stage retrieval: semantic routing + quality re-ranking.

    Loads every knowledge row (optionally filtered by type), asks the LLM
    for ~2*top_k candidate IDs, then scores each candidate as
    score + helpful - 2*harmful, drops entries below min_score or with a
    negative composite score, and returns the best top_k as plain dicts.

    If `conn` is None a connection is opened and closed here; a caller-owned
    connection is left open for the caller to close.
    """
    if conn is None:
        conn = get_db()
        should_close = True
    else:
        should_close = False
    try:
        # Stage 1: load and parse all knowledge rows.
        query = "SELECT * FROM knowledge"
        rows = conn.execute(query).fetchall()
        if not rows:
            return []
        content_map = {}      # id -> full record used to build the response
        metadata_list = []    # slim per-row metadata handed to the LLM router
        for row in rows:
            kid = row["id"]
            types = json.loads(row["types"])
            # Type filter: keep the row if it carries ANY of the requested types.
            if types_filter:
                if not any(t in types for t in types_filter):
                    continue
            task = row["task"]
            content_text = row["content"]
            eval_data = json.loads(row["eval"])
            source = json.loads(row["source"])
            meta_item = {
                "id": kid,
                "types": types,
                "task": task,
                "score": eval_data.get("score", 3),
                "helpful": eval_data.get("helpful", 0),
                "harmful": eval_data.get("harmful", 0),
            }
            metadata_list.append(meta_item)
            content_map[kid] = {
                "task": task,
                "content": content_text,
                "types": types,
                "tags": json.loads(row["tags"]),
                "scopes": json.loads(row["scopes"]),
                "owner": row["owner"],
                "score": meta_item["score"],
                "helpful": meta_item["helpful"],
                "harmful": meta_item["harmful"],
                "message_id": row["message_id"],
                "source": source,
                "eval": eval_data,
                "created_at": row["created_at"],
                "updated_at": row["updated_at"]
            }
        if not metadata_list:
            return []
        # Stage 2: semantic routing (requests up to 2*top_k candidate IDs).
        candidate_ids = await _route_knowledge_by_llm(query_text, metadata_list, k=top_k)
        # Stage 3: quality re-ranking of the LLM-selected candidates.
        print(f"[Step 2: 知识质量精排] 正在根据评分和反馈进行打分...")
        scored_items = []
        for kid in candidate_ids:
            if kid in content_map:  # silently ignores hallucinated/unknown IDs
                item = content_map[kid]
                score = item["score"]
                helpful = item["helpful"]
                harmful = item["harmful"]
                # Composite quality: base score + helpful - harmful*2.
                quality_score = score + helpful - (harmful * 2.0)
                # Quality gate: drop low-rated or net-harmful entries.
                if score < min_score or quality_score < 0:
                    print(f" - 剔除低质量知识: {kid} (Score: {score}, Helpful: {helpful}, Harmful: {harmful})")
                    continue
                scored_items.append({
                    "id": kid,
                    "message_id": item["message_id"],
                    "types": item["types"],
                    "task": item["task"],
                    "tags": item["tags"],
                    "scopes": item["scopes"],
                    "owner": item["owner"],
                    "content": item["content"],
                    "source": item["source"],
                    "eval": item["eval"],
                    "quality_score": quality_score,
                    "created_at": item["created_at"],
                    "updated_at": item["updated_at"]
                })
        # Order by composite quality, best first.
        final_sorted = sorted(scored_items, key=lambda x: x["quality_score"], reverse=True)
        # Keep only the final top_k.
        result = final_sorted[:top_k]
        print(f"[Step 2: 知识质量精排] 最终选定知识: {[it['id'] for it in result]}")
        print(f"[Knowledge System] 检索结束。\n")
        return result
    finally:
        if should_close:
            conn.close()
  502. @app.get("/api/knowledge/search")
  503. async def search_knowledge_api(
  504. q: str = Query(..., description="查询文本"),
  505. top_k: int = Query(default=5, ge=1, le=20),
  506. min_score: int = Query(default=3, ge=1, le=5),
  507. types: Optional[str] = None
  508. ):
  509. """检索知识(两阶段:语义路由 + 质量精排)"""
  510. conn = get_db()
  511. try:
  512. types_filter = types.split(",") if types else None
  513. results = await _search_knowledge_two_stage(
  514. query_text=q,
  515. top_k=top_k,
  516. min_score=min_score,
  517. types_filter=types_filter,
  518. conn=conn
  519. )
  520. return {"results": results, "count": len(results)}
  521. finally:
  522. conn.close()
  523. @app.post("/api/knowledge", status_code=201)
  524. def save_knowledge(knowledge: KnowledgeIn):
  525. """保存新知识"""
  526. import uuid
  527. conn = get_db()
  528. try:
  529. # 生成 ID
  530. timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
  531. random_suffix = uuid.uuid4().hex[:4]
  532. knowledge_id = f"knowledge-{timestamp}-{random_suffix}"
  533. now = datetime.now(timezone.utc).isoformat()
  534. # 设置默认值
  535. owner = knowledge.owner or f"agent:{knowledge.source.get('agent_id', 'unknown')}"
  536. # 准备 source
  537. source = {
  538. "name": knowledge.source.get("name", ""),
  539. "category": knowledge.source.get("category", ""),
  540. "urls": knowledge.source.get("urls", []),
  541. "agent_id": knowledge.source.get("agent_id", "unknown"),
  542. "submitted_by": knowledge.source.get("submitted_by", ""),
  543. "timestamp": now,
  544. "message_id": knowledge.message_id
  545. }
  546. # 准备 eval
  547. eval_data = {
  548. "score": knowledge.eval.get("score", 3),
  549. "helpful": knowledge.eval.get("helpful", 1),
  550. "harmful": knowledge.eval.get("harmful", 0),
  551. "confidence": knowledge.eval.get("confidence", 0.5),
  552. "helpful_history": [],
  553. "harmful_history": []
  554. }
  555. conn.execute(
  556. """INSERT INTO knowledge
  557. (id, message_id, types, task, tags, scopes, owner, content,
  558. source, eval, created_at, updated_at)
  559. VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
  560. (
  561. knowledge_id,
  562. knowledge.message_id,
  563. json.dumps(knowledge.types),
  564. knowledge.task,
  565. json.dumps(knowledge.tags),
  566. json.dumps(knowledge.scopes),
  567. owner,
  568. knowledge.content,
  569. json.dumps(source),
  570. json.dumps(eval_data),
  571. now,
  572. now,
  573. ),
  574. )
  575. conn.commit()
  576. return {"status": "ok", "knowledge_id": knowledge_id}
  577. finally:
  578. conn.close()
  579. @app.get("/api/knowledge")
  580. def list_knowledge(
  581. limit: int = Query(default=10, ge=1, le=100),
  582. types: Optional[str] = None,
  583. scopes: Optional[str] = None
  584. ):
  585. """列出知识"""
  586. conn = get_db()
  587. try:
  588. query = "SELECT * FROM knowledge"
  589. params = []
  590. conditions = []
  591. if types:
  592. conditions.append("types LIKE ?")
  593. params.append(f"%{types}%")
  594. if scopes:
  595. conditions.append("scopes LIKE ?")
  596. params.append(f"%{scopes}%")
  597. if conditions:
  598. query += " WHERE " + " AND ".join(conditions)
  599. query += " ORDER BY created_at DESC LIMIT ?"
  600. params.append(limit)
  601. rows = conn.execute(query, params).fetchall()
  602. results = []
  603. for row in rows:
  604. results.append({
  605. "id": row["id"],
  606. "message_id": row["message_id"],
  607. "types": json.loads(row["types"]),
  608. "task": row["task"],
  609. "tags": json.loads(row["tags"]),
  610. "scopes": json.loads(row["scopes"]),
  611. "owner": row["owner"],
  612. "content": row["content"],
  613. "source": json.loads(row["source"]),
  614. "eval": json.loads(row["eval"]),
  615. "created_at": row["created_at"],
  616. "updated_at": row["updated_at"]
  617. })
  618. return {"results": results, "count": len(results)}
  619. finally:
  620. conn.close()
  621. @app.get("/api/knowledge/{knowledge_id}")
  622. def get_knowledge(knowledge_id: str):
  623. """获取单条知识"""
  624. conn = get_db()
  625. try:
  626. row = conn.execute(
  627. "SELECT * FROM knowledge WHERE id = ?",
  628. (knowledge_id,)
  629. ).fetchone()
  630. if not row:
  631. raise HTTPException(status_code=404, detail=f"Knowledge not found: {knowledge_id}")
  632. return {
  633. "id": row["id"],
  634. "message_id": row["message_id"],
  635. "types": json.loads(row["types"]),
  636. "task": row["task"],
  637. "tags": json.loads(row["tags"]),
  638. "scopes": json.loads(row["scopes"]),
  639. "owner": row["owner"],
  640. "content": row["content"],
  641. "source": json.loads(row["source"]),
  642. "eval": json.loads(row["eval"]),
  643. "created_at": row["created_at"],
  644. "updated_at": row["updated_at"]
  645. }
  646. finally:
  647. conn.close()
async def _evolve_knowledge_with_llm(old_content: str, feedback: str) -> str:
    """Rewrite knowledge content with the LLM, merging in field feedback.

    Falls back to appending the raw feedback (dated separator block) when the
    LLM call fails or returns an implausibly short result, so the feedback
    signal is never lost.
    """
    prompt = f"""你是一个 AI Agent 知识库管理员。请根据反馈建议,对现有的知识内容进行重写进化。
【原知识内容】:
{old_content}
【实战反馈建议】:
{feedback}
【重写要求】:
1. 融合知识:将反馈中的避坑指南、新参数或修正后的选择逻辑融入原知识,使其更具通用性和准确性。
2. 保持结构:如果原内容有特定格式(如 Markdown、代码示例等),请保持该格式。
3. 语言:简洁直接,使用中文。
4. 禁止:严禁输出任何开场白、解释语或额外的 Markdown 标题,直接返回重写后的正文。
"""
    try:
        response = await openrouter_llm_call(
            messages=[{"role": "user", "content": prompt}],
            model="google/gemini-2.0-flash-001"
        )
        evolved = response.get("content", "").strip()
        # Guard against degenerate LLM replies (empty / trivially short).
        if len(evolved) < 5:
            raise ValueError("LLM output too short")
        return evolved
    except Exception as e:
        # Fallback: append feedback verbatim rather than dropping it.
        print(f"知识进化失败,采用追加模式回退: {e}")
        return f"{old_content}\n\n---\n[Update {datetime.now().strftime('%Y-%m-%d')}]: {feedback}"
  673. @app.put("/api/knowledge/{knowledge_id}")
  674. async def update_knowledge(knowledge_id: str, update: KnowledgeUpdateIn):
  675. """更新知识评估,支持知识进化"""
  676. conn = get_db()
  677. try:
  678. row = conn.execute("SELECT * FROM knowledge WHERE id = ?", (knowledge_id,)).fetchone()
  679. if not row:
  680. raise HTTPException(status_code=404, detail=f"Knowledge not found: {knowledge_id}")
  681. now = datetime.now(timezone.utc).isoformat()
  682. eval_data = json.loads(row["eval"])
  683. # 更新评分
  684. if update.update_score is not None:
  685. eval_data["score"] = update.update_score
  686. # 添加有效案例
  687. if update.add_helpful_case:
  688. eval_data["helpful"] = eval_data.get("helpful", 0) + 1
  689. if "helpful_history" not in eval_data:
  690. eval_data["helpful_history"] = []
  691. eval_data["helpful_history"].append(update.add_helpful_case)
  692. # 添加有害案例
  693. if update.add_harmful_case:
  694. eval_data["harmful"] = eval_data.get("harmful", 0) + 1
  695. if "harmful_history" not in eval_data:
  696. eval_data["harmful_history"] = []
  697. eval_data["harmful_history"].append(update.add_harmful_case)
  698. # 知识进化
  699. content = row["content"]
  700. if update.evolve_feedback:
  701. content = await _evolve_knowledge_with_llm(content, update.evolve_feedback)
  702. eval_data["helpful"] = eval_data.get("helpful", 0) + 1
  703. # 更新数据库
  704. conn.execute(
  705. "UPDATE knowledge SET content = ?, eval = ?, updated_at = ? WHERE id = ?",
  706. (content, json.dumps(eval_data, ensure_ascii=False), now, knowledge_id)
  707. )
  708. conn.commit()
  709. return {"status": "ok", "knowledge_id": knowledge_id}
  710. finally:
  711. conn.close()
@app.post("/api/knowledge/batch_update")
async def batch_update_knowledge(batch: KnowledgeBatchUpdateIn):
    """Apply effectiveness feedback to many knowledge entries in one call.

    Each feedback item is {knowledge_id, is_effective, feedback?}. Effective
    items that carry textual feedback trigger a concurrent LLM "evolution"
    rewrite; everything else just bumps the helpful/harmful counter.
    """
    if not batch.feedback_list:
        return {"status": "ok", "updated": 0}
    conn = get_db()
    try:
        # Split the work: counter-only updates vs. LLM evolution rewrites.
        evolution_tasks = []  # [(knowledge_id, old_content, feedback, eval_data)]
        simple_updates = []   # [(knowledge_id, is_effective, eval_data)]
        for item in batch.feedback_list:
            knowledge_id = item.get("knowledge_id")
            is_effective = item.get("is_effective")
            feedback = item.get("feedback", "")
            if not knowledge_id:
                continue  # malformed item: skipped silently
            row = conn.execute("SELECT * FROM knowledge WHERE id = ?", (knowledge_id,)).fetchone()
            if not row:
                continue  # unknown id: skipped silently
            eval_data = json.loads(row["eval"])
            if is_effective and feedback:
                evolution_tasks.append((knowledge_id, row["content"], feedback, eval_data))
            else:
                simple_updates.append((knowledge_id, is_effective, eval_data))
        # Counter-only updates.
        # NOTE(review): a missing/None is_effective falls into the harmful
        # branch here — confirm that is intended.
        now = datetime.now(timezone.utc).isoformat()
        for knowledge_id, is_effective, eval_data in simple_updates:
            if is_effective:
                eval_data["helpful"] = eval_data.get("helpful", 0) + 1
            else:
                eval_data["harmful"] = eval_data.get("harmful", 0) + 1
            conn.execute(
                "UPDATE knowledge SET eval = ?, updated_at = ? WHERE id = ?",
                (json.dumps(eval_data, ensure_ascii=False), now, knowledge_id)
            )
        # Evolution rewrites, run concurrently; results are zipped back to
        # their originating entries in order (gather preserves input order).
        if evolution_tasks:
            print(f"🧬 并发处理 {len(evolution_tasks)} 条知识进化...")
            evolved_results = await asyncio.gather(
                *[_evolve_knowledge_with_llm(old, fb) for _, old, fb, _ in evolution_tasks]
            )
            for (knowledge_id, _, _, eval_data), evolved_content in zip(evolution_tasks, evolved_results):
                eval_data["helpful"] = eval_data.get("helpful", 0) + 1
                conn.execute(
                    "UPDATE knowledge SET content = ?, eval = ?, updated_at = ? WHERE id = ?",
                    (evolved_content, json.dumps(eval_data, ensure_ascii=False), now, knowledge_id)
                )
        # One commit covers both kinds of update.
        conn.commit()
        return {"status": "ok", "updated": len(simple_updates) + len(evolution_tasks)}
    finally:
        conn.close()
@app.post("/api/knowledge/slim")
async def slim_knowledge(model: str = "google/gemini-2.0-flash-001"):
    """Slim the knowledge base: ask an LLM to merge semantically similar entries.

    Sends every entry (content truncated to 200 chars) to the LLM, parses its
    structured reply, then replaces the whole knowledge table with the merged
    set inside one transaction. Raises 500 if the reply is empty or cannot be
    parsed into any entry.
    """
    conn = get_db()
    try:
        rows = conn.execute("SELECT * FROM knowledge").fetchall()
        if len(rows) < 2:
            return {"status": "ok", "message": f"知识库仅有 {len(rows)} 条,无需瘦身"}
        # Build the prompt payload: one compact text block per entry.
        entries_text = ""
        for row in rows:
            eval_data = json.loads(row["eval"])
            types = json.loads(row["types"])
            entries_text += f"[ID: {row['id']}] [Types: {','.join(types)}] "
            entries_text += f"[Helpful: {eval_data.get('helpful', 0)}, Harmful: {eval_data.get('harmful', 0)}] [Score: {eval_data.get('score', 3)}]\n"
            entries_text += f"Task: {row['task']}\n"
            entries_text += f"Content: {row['content'][:200]}...\n\n"
        prompt = f"""你是一个 AI Agent 知识库管理员。以下是当前知识库的全部条目,请执行瘦身操作:
【任务】:
1. 识别语义高度相似或重复的知识,将它们合并为一条更精炼、更通用的知识。
2. 合并时保留 helpful 最高的那条的 ID(helpful 取各条之和)。
3. 对于独立的、无重复的知识,保持原样不动。
【当前知识库】:
{entries_text}
【输出格式要求】:
严格按以下格式输出每条知识,条目之间用 === 分隔:
ID: <保留的id>
TYPES: <逗号分隔的type列表>
HELPFUL: <合并后的helpful计数>
HARMFUL: <合并后的harmful计数>
SCORE: <评分>
TASK: <任务描述>
CONTENT: <合并后的知识内容>
===
最后输出合并报告:
REPORT: 原有 X 条,合并后 Y 条,精简了 Z 条。
禁止输出任何开场白或解释。"""
        print(f"\n[知识瘦身] 正在调用 {model} 分析 {len(rows)} 条知识...")
        response = await openrouter_llm_call(
            messages=[{"role": "user", "content": prompt}],
            model=model
        )
        content = response.get("content", "").strip()
        if not content:
            raise HTTPException(status_code=500, detail="LLM 返回为空")
        # Parse the LLM reply: "===" separates entries; the REPORT block is
        # captured separately for the response message.
        report_line = ""
        new_entries = []
        blocks = [b.strip() for b in content.split("===") if b.strip()]
        for block in blocks:
            if block.startswith("REPORT:"):
                report_line = block
                continue
            lines = block.split("\n")
            # Per-entry accumulators; current_field tracks which multi-line
            # field (TASK/CONTENT) continuation lines belong to.
            kid, types, helpful, harmful, score, task, content_lines = None, [], 0, 0, 3, "", []
            current_field = None
            for line in lines:
                if line.startswith("ID:"):
                    kid = line[3:].strip()
                    current_field = None
                elif line.startswith("TYPES:"):
                    types_str = line[6:].strip()
                    types = [t.strip() for t in types_str.split(",") if t.strip()]
                    current_field = None
                elif line.startswith("HELPFUL:"):
                    try:
                        helpful = int(line[8:].strip())
                    except Exception:
                        helpful = 0  # unparseable count: fall back to 0
                    current_field = None
                elif line.startswith("HARMFUL:"):
                    try:
                        harmful = int(line[8:].strip())
                    except Exception:
                        harmful = 0
                    current_field = None
                elif line.startswith("SCORE:"):
                    try:
                        score = int(line[6:].strip())
                    except Exception:
                        score = 3  # unparseable score: neutral default
                    current_field = None
                elif line.startswith("TASK:"):
                    task = line[5:].strip()
                    current_field = "task"
                elif line.startswith("CONTENT:"):
                    content_lines.append(line[8:].strip())
                    current_field = "content"
                elif current_field == "task":
                    task += "\n" + line
                elif current_field == "content":
                    content_lines.append(line)
            # An entry is usable only if it has both an ID and some content.
            if kid and content_lines:
                new_entries.append({
                    "id": kid,
                    "types": types if types else ["strategy"],
                    "helpful": helpful,
                    "harmful": harmful,
                    "score": score,
                    "task": task.strip(),
                    "content": "\n".join(content_lines).strip()
                })
        if not new_entries:
            raise HTTPException(status_code=500, detail="解析大模型输出失败")
        # Atomic write-back: DELETE + INSERTs share one implicit transaction
        # committed below, so readers never see a half-rewritten table.
        now = datetime.now(timezone.utc).isoformat()
        conn.execute("DELETE FROM knowledge")
        for e in new_entries:
            eval_data = {
                "score": e["score"],
                "helpful": e["helpful"],
                "harmful": e["harmful"],
                "confidence": 0.9,
                "helpful_history": [],
                "harmful_history": []
            }
            source = {
                "name": "slim",
                "category": "exp",
                "urls": [],
                "agent_id": "slim",
                "submitted_by": "system",
                "timestamp": now
            }
            conn.execute(
                """INSERT INTO knowledge
                (id, message_id, types, task, tags, scopes, owner, content, source, eval, created_at, updated_at)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
                (
                    e["id"],
                    "",
                    json.dumps(e["types"]),
                    e["task"],
                    json.dumps({}),
                    json.dumps(["org:cybertogether"]),
                    "agent:slim",
                    e["content"],
                    json.dumps(source, ensure_ascii=False),
                    json.dumps(eval_data, ensure_ascii=False),
                    now,
                    now
                )
            )
        conn.commit()
        result_msg = f"瘦身完成:{len(rows)} → {len(new_entries)} 条知识"
        if report_line:
            result_msg += f"\n{report_line}"
        print(f"[知识瘦身] {result_msg}")
        return {"status": "ok", "before": len(rows), "after": len(new_entries), "report": report_line}
    finally:
        conn.close()
if __name__ == "__main__":
    # Standalone dev entry point: serve on all interfaces, port 9999.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=9999)