server.py 67 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869
  1. """
  2. KnowHub Server
  3. Agent 工具使用经验的共享平台。
  4. FastAPI + Milvus Lite(知识)+ SQLite(资源),单文件部署。
  5. """
  6. import os
  7. import re
  8. import json
  9. import asyncio
  10. import base64
  11. import time
  12. import uuid
  13. from contextlib import asynccontextmanager
  14. from datetime import datetime, timezone
  15. from typing import Optional, List, Dict
  16. from pathlib import Path
  17. from cryptography.hazmat.primitives.ciphers.aead import AESGCM
  18. from fastapi import FastAPI, HTTPException, Query, Header, Body, BackgroundTasks
  19. from fastapi.responses import HTMLResponse, FileResponse
  20. from fastapi.staticfiles import StaticFiles
  21. from pydantic import BaseModel, Field
  22. # 导入 LLM 调用(需要 agent 模块在 Python path 中)
  23. import sys
  24. sys.path.insert(0, str(Path(__file__).parent.parent))
  25. # 加载环境变量
  26. from dotenv import load_dotenv
  27. load_dotenv(Path(__file__).parent.parent / ".env")
  28. from agent.llm import create_openrouter_llm_call, create_qwen_llm_call
  29. from knowhub.kb_manage_prompts import (
  30. DEDUP_RELATION_PROMPT,
  31. TOOL_ANALYSIS_PROMPT,
  32. RERANK_PROMPT_TEMPLATE,
  33. KNOWLEDGE_EVOLVE_PROMPT_TEMPLATE,
  34. KNOWLEDGE_SLIM_PROMPT_TEMPLATE,
  35. MESSAGE_EXTRACT_PROMPT_TEMPLATE,
  36. )
# Module-level LLM callables: a lightweight OpenRouter model for dedup
# relation judging, and a Qwen model for tool analysis.
_dedup_llm = create_openrouter_llm_call(model="google/gemini-2.5-flash-lite")
_tool_analysis_llm = create_qwen_llm_call(model="qwen3.5-plus")
  39. # 导入向量存储和 embedding
  40. from knowhub.knowhub_db.pg_store import PostgreSQLStore
  41. from knowhub.knowhub_db.pg_resource_store import PostgreSQLResourceStore
  42. from knowhub.embeddings import get_embedding, get_embeddings_batch
  43. BRAND_NAME = os.getenv("BRAND_NAME", "KnowHub")
  44. BRAND_API_ENV = os.getenv("BRAND_API_ENV", "KNOWHUB_API")
  45. BRAND_DB = os.getenv("BRAND_DB", "knowhub.db")
  46. # 组织密钥配置(格式:org1:key1_base64,org2:key2_base64)
  47. ORG_KEYS_RAW = os.getenv("ORG_KEYS", "")
  48. ORG_KEYS = {}
  49. if ORG_KEYS_RAW:
  50. for pair in ORG_KEYS_RAW.split(","):
  51. if ":" in pair:
  52. org, key_b64 = pair.split(":", 1)
  53. ORG_KEYS[org.strip()] = key_b64.strip()
  54. DB_PATH = Path(__file__).parent / BRAND_DB
  55. # 全局 PostgreSQL 存储实例
  56. pg_store: Optional[PostgreSQLStore] = None
  57. pg_resource_store: Optional[PostgreSQLResourceStore] = None
  58. # --- 加密/解密 ---
  59. def get_org_key(resource_id: str) -> Optional[bytes]:
  60. """从content_id提取组织前缀,返回对应密钥"""
  61. if "/" in resource_id:
  62. org = resource_id.split("/")[0]
  63. if org in ORG_KEYS:
  64. return base64.b64decode(ORG_KEYS[org])
  65. return None
  66. def encrypt_content(resource_id: str, plaintext: str) -> str:
  67. """加密内容,返回格式:encrypted:AES256-GCM:{base64_data}"""
  68. if not plaintext:
  69. return ""
  70. key = get_org_key(resource_id)
  71. if not key:
  72. # 没有配置密钥,明文存储(不推荐)
  73. return plaintext
  74. aesgcm = AESGCM(key)
  75. nonce = os.urandom(12) # 96-bit nonce
  76. ciphertext = aesgcm.encrypt(nonce, plaintext.encode("utf-8"), None)
  77. # 组合 nonce + ciphertext
  78. encrypted_data = nonce + ciphertext
  79. encoded = base64.b64encode(encrypted_data).decode("ascii")
  80. return f"encrypted:AES256-GCM:{encoded}"
  81. def decrypt_content(resource_id: str, encrypted_text: str, provided_key: Optional[str] = None) -> str:
  82. """解密内容,如果没有提供密钥或密钥错误,返回[ENCRYPTED]"""
  83. if not encrypted_text:
  84. return ""
  85. if not encrypted_text.startswith("encrypted:AES256-GCM:"):
  86. # 未加密的内容,直接返回
  87. return encrypted_text
  88. # 提取加密数据
  89. encoded = encrypted_text.split(":", 2)[2]
  90. encrypted_data = base64.b64decode(encoded)
  91. nonce = encrypted_data[:12]
  92. ciphertext = encrypted_data[12:]
  93. # 获取密钥
  94. key = None
  95. if provided_key:
  96. # 使用提供的密钥
  97. try:
  98. key = base64.b64decode(provided_key)
  99. except Exception:
  100. return "[ENCRYPTED]"
  101. else:
  102. # 从配置中获取
  103. key = get_org_key(resource_id)
  104. if not key:
  105. return "[ENCRYPTED]"
  106. try:
  107. aesgcm = AESGCM(key)
  108. plaintext = aesgcm.decrypt(nonce, ciphertext, None)
  109. return plaintext.decode("utf-8")
  110. except Exception:
  111. return "[ENCRYPTED]"
  112. def serialize_milvus_result(data):
  113. """将 Milvus 返回的数据转换为可序列化的字典"""
  114. # 基本类型直接返回
  115. if data is None or isinstance(data, (str, int, float, bool)):
  116. return data
  117. # 字典类型递归处理
  118. if isinstance(data, dict):
  119. return {k: serialize_milvus_result(v) for k, v in data.items()}
  120. # 列表/元组类型递归处理
  121. if isinstance(data, (list, tuple)):
  122. return [serialize_milvus_result(item) for item in data]
  123. # 尝试转换为字典(对于有 to_dict 方法的对象)
  124. if hasattr(data, 'to_dict') and callable(getattr(data, 'to_dict')):
  125. try:
  126. return serialize_milvus_result(data.to_dict())
  127. except:
  128. pass
  129. # 尝试转换为列表(对于可迭代对象,如 RepeatedScalarContainer)
  130. if hasattr(data, '__iter__') and not isinstance(data, (str, bytes, dict)):
  131. try:
  132. # 强制转换为列表并递归处理
  133. result = []
  134. for item in data:
  135. result.append(serialize_milvus_result(item))
  136. return result
  137. except:
  138. pass
  139. # 尝试获取对象的属性字典
  140. if hasattr(data, '__dict__'):
  141. try:
  142. return serialize_milvus_result(vars(data))
  143. except:
  144. pass
  145. # 最后的 fallback:对于无法处理的类型,返回 None 而不是字符串表示
  146. # 这样可以避免产生无法序列化的字符串
  147. return None
# --- Models ---
class ResourceIn(BaseModel):
    """Request body for POST /api/resource."""
    id: str  # hierarchical id, e.g. "org/doc/section"; the org prefix selects the AES key
    title: str = ""
    body: str
    secure_body: str = ""  # sensitive part; encrypted at rest when an org key is configured
    content_type: str = "text"  # text|code|credential|cookie
    metadata: dict = {}
    sort_order: int = 0
    submitted_by: str = ""
class ResourcePatchIn(BaseModel):
    """Request body for PATCH /api/resource/{id}; None means "leave unchanged"."""
    title: Optional[str] = None
    body: Optional[str] = None
    secure_body: Optional[str] = None  # re-encrypted on update when an org key exists
    content_type: Optional[str] = None
    metadata: Optional[dict] = None
# Knowledge Models
class KnowledgeIn(BaseModel):
    """Request body for submitting a knowledge entry."""
    task: str  # task description; used for embedding-based dedup recall
    content: str
    types: list[str] = ["strategy"]
    tags: dict = {}
    scopes: list[str] = ["org:cybertogether"]
    owner: str = ""
    message_id: str = ""
    resource_ids: list[str] = []
    source: dict = {}  # {name, category, urls, agent_id, submitted_by, timestamp}
    eval: dict = {}  # {score, helpful, harmful, confidence}
class KnowledgeOut(BaseModel):
    """Response shape for a stored knowledge record."""
    id: str
    message_id: str
    types: list[str]
    task: str
    tags: dict
    scopes: list[str]
    owner: str
    content: str
    resource_ids: list[str]
    source: dict
    eval: dict
    created_at: str
    updated_at: str
class KnowledgeUpdateIn(BaseModel):
    """Feedback-style update for a knowledge entry; all fields optional."""
    add_helpful_case: Optional[dict] = None
    add_harmful_case: Optional[dict] = None
    update_score: Optional[int] = Field(default=None, ge=1, le=5)  # 1–5 rating
    evolve_feedback: Optional[str] = None  # free-text feedback for LLM-driven evolution
class KnowledgePatchIn(BaseModel):
    """Request body for PATCH /api/knowledge/{id} (direct field edits);
    None means "leave unchanged"."""
    task: Optional[str] = None
    content: Optional[str] = None
    types: Optional[list[str]] = None
    tags: Optional[dict] = None
    scopes: Optional[list[str]] = None
    owner: Optional[str] = None
class MessageExtractIn(BaseModel):
    """Request body for POST /api/extract (knowledge extraction from message history)."""
    messages: list[dict]  # [{role: str, content: str}, ...]
    agent_id: str = "unknown"
    submitted_by: str  # required; used as the owner of extracted knowledge
    session_key: str = ""
class KnowledgeBatchUpdateIn(BaseModel):
    """Batch feedback payload: one feedback dict per knowledge entry."""
    feedback_list: list[dict]
class KnowledgeVerifyIn(BaseModel):
    """Single-entry moderation verdict."""
    action: str  # "approve" | "reject"
    verified_by: str = "user"
class KnowledgeBatchVerifyIn(BaseModel):
    """Batch moderation verdict applied to multiple knowledge ids."""
    knowledge_ids: List[str]
    action: str  # "approve"
    verified_by: str
class KnowledgeSearchResponse(BaseModel):
    """Search response: matched records plus their count."""
    results: list[dict]
    count: int
class ResourceNode(BaseModel):
    """Lightweight reference to a resource, used for navigation links."""
    id: str
    title: str
class ResourceOut(BaseModel):
    """Response for GET /api/resource/{id}: the resource plus navigation context."""
    id: str
    title: str
    body: str
    secure_body: str = ""  # decrypted on read; "[ENCRYPTED]" without a valid key
    content_type: str = "text"
    metadata: dict = {}
    toc: Optional[ResourceNode] = None  # root node of the hierarchy (ids with a "/")
    children: list[ResourceNode]  # direct children (one path segment deeper)
    prev: Optional[ResourceNode] = None  # previous sibling, if any
    next: Optional[ResourceNode] = None  # next sibling, if any
# --- Dedup: Globals & Prompt ---
# Singleton KnowledgeProcessor; created in lifespan() at startup.
knowledge_processor: Optional["KnowledgeProcessor"] = None
  238. # --- Dedup: RelationCache ---
  239. class RelationCache:
  240. """关系缓存,存储在内存中"""
  241. def __init__(self):
  242. self._cache: Dict[str, List[str]] = {}
  243. def load(self) -> dict:
  244. return self._cache
  245. def save(self, cache: dict):
  246. self._cache = cache
  247. def add_relation(self, relation_type: str, knowledge_id: str):
  248. if relation_type not in self._cache:
  249. self._cache[relation_type] = []
  250. if knowledge_id not in self._cache[relation_type]:
  251. self._cache[relation_type].append(knowledge_id)
# --- Dedup: KnowledgeProcessor ---
class KnowledgeProcessor:
    """Drives knowledge through the moderation state machine.

    Status flow seen in this class:
        pending -> processing -> (rejected | dedup_passed)
        dedup_passed -> analyzing -> (rejected | approved)
    Stage one deduplicates new knowledge against already-accepted entries via
    vector recall plus an LLM relation judgement; stage two links knowledge
    to tool resources in the PostgreSQL tool_table.
    """

    def __init__(self):
        # Lock serializes process_pending() drains; relation cache is in-memory only.
        self._lock = asyncio.Lock()
        self._relation_cache = RelationCache()

    async def process_pending(self):
        """Drain pending and dedup_passed knowledge until both queues are empty.

        The asyncio lock guards against concurrent drains: if one is already
        running, this call returns immediately.
        """
        if self._lock.locked():
            return
        async with self._lock:
            # Stage 1: dedup every "pending" item.
            while True:
                try:
                    pending = pg_store.query('status == "pending"', limit=50)
                except Exception as e:
                    print(f"[KnowledgeProcessor] 查询 pending 失败: {e}")
                    break
                if not pending:
                    break
                for knowledge in pending:
                    await self._process_one(knowledge)
            # Stage 2: tool association for every "dedup_passed" item.
            while True:
                try:
                    dedup_passed = pg_store.query('status == "dedup_passed"', limit=50)
                except Exception as e:
                    print(f"[KnowledgeProcessor] 查询 dedup_passed 失败: {e}")
                    break
                if not dedup_passed:
                    break
                for knowledge in dedup_passed:
                    await self._analyze_tool_relation(knowledge)

    async def _process_one(self, knowledge: dict):
        """Dedup a single pending item; on failure roll it back to pending."""
        kid = knowledge["id"]
        now = int(time.time())
        # Optimistic lock: pending -> processing (timestamps stored in seconds).
        try:
            pg_store.update(kid, {"status": "processing", "updated_at": now})
        except Exception as e:
            print(f"[KnowledgeProcessor] 锁定 {kid} 失败: {e}")
            return
        try:
            # Vector recall of top-10 accepted knowledge (approved/checked only).
            embedding = knowledge.get("task_embedding") or knowledge.get("embedding")
            if not embedding:
                embedding = await get_embedding(knowledge["task"])
            candidates = pg_store.search(
                query_embedding=embedding,
                filters='(status == "approved" or status == "checked")',
                limit=10
            )
            candidates = [c for c in candidates if c["id"] != kid]
            # Keep only candidates with similarity >= 0.75; below that the
            # tasks are semantically too different, treated as no relation.
            candidates = [c for c in candidates if c.get("score", 0) >= 0.75]
            if not candidates:
                pg_store.update(kid, {"status": "dedup_passed", "updated_at": now})
                return
            llm_result = await self._llm_judge_relations(knowledge, candidates)
            await self._apply_decision(knowledge, llm_result)
        except Exception as e:
            print(f"[KnowledgeProcessor] 处理 {kid} 失败: {e},回退到 pending")
            try:
                pg_store.update(kid, {"status": "pending", "updated_at": int(time.time())})
            except Exception:
                pass

    async def _llm_judge_relations(self, new_knowledge: dict, candidates: list) -> dict:
        """Ask the LLM to classify the relation between new knowledge and candidates.

        Retries up to 3 times; falls back to approved-with-no-relations so a
        flaky LLM never blocks the pipeline.
        """
        existing_list = "\n".join([
            f"[{i+1}] ID: {c['id']} | Task: {c['task']} | Content: {c['content'][:300]}"
            for i, c in enumerate(candidates)
        ])
        prompt = DEDUP_RELATION_PROMPT.format(
            new_task=new_knowledge["task"],
            new_content=new_knowledge["content"],
            existing_list=existing_list
        )
        for attempt in range(3):
            try:
                response = await _dedup_llm(
                    messages=[{"role": "user", "content": prompt}],
                )
                content = response.get("content", "").strip()
                # Strip markdown code fences: try each fenced part until one
                # parses as JSON containing "final_decision".
                if "```" in content:
                    parts = content.split("```")
                    for part in parts:
                        part = part.strip()
                        if part.startswith("json"):
                            part = part[4:].strip()
                        try:
                            result = json.loads(part)
                            if "final_decision" in result:
                                content = part
                                break
                        except Exception:
                            continue
                result = json.loads(content)
                # Malformed verdicts trigger a retry via the except below.
                assert result.get("final_decision") in ("approved", "rejected")
                return result
            except Exception as e:
                print(f"[LLM Judge] 第{attempt+1}次失败: {e}")
                if attempt < 2:
                    await asyncio.sleep(1)
        return {"final_decision": "approved", "relations": []}

    async def _apply_decision(self, new_knowledge: dict, llm_result: dict):
        """Persist the LLM dedup verdict and the knowledge relations it found."""
        kid = new_knowledge["id"]
        final_decision = llm_result.get("final_decision", "approved")
        relations = llm_result.get("relations", [])
        now = int(time.time())
        # Hard rule: any duplicate/subset relation forces a rejection.
        if any(rel.get("type") in ("duplicate", "subset") for rel in relations):
            final_decision = "rejected"
        if final_decision == "rejected":
            # Record the rejected knowledge's relations (for tracing why it was rejected).
            rejected_relationships = []
            for rel in relations:
                old_id = rel.get("old_id")
                rel_type = rel.get("type", "none")
                if old_id and rel_type != "none":
                    rejected_relationships.append({"type": rel_type, "target": old_id})
                if rel_type in ("duplicate", "subset") and old_id:
                    # Duplicate submissions count as a "helpful" vote for the
                    # surviving entry; keep an audit trail in helpful_history.
                    try:
                        old = pg_store.get_by_id(old_id)
                        if not old:
                            continue
                        eval_data = old.get("eval") or {}
                        eval_data["helpful"] = eval_data.get("helpful", 0) + 1
                        helpful_history = eval_data.get("helpful_history") or []
                        helpful_history.append({
                            "source": "dedup",
                            "related_id": kid,
                            "relation_type": rel_type,
                            "timestamp": now
                        })
                        eval_data["helpful_history"] = helpful_history
                        pg_store.update(old_id, {"eval": eval_data, "updated_at": now})
                    except Exception as e:
                        print(f"[Apply Decision] 更新旧知识 {old_id} helpful 失败: {e}")
            pg_store.update(kid, {"status": "rejected", "relationships": json.dumps(rejected_relationships), "updated_at": now})
        else:
            # Accepted: store forward relations on the new entry and mirror
            # reverse relations onto the old entries.
            new_relationships = []
            for rel in relations:
                rel_type = rel.get("type", "none")
                reverse_type = rel.get("reverse_type", "none")
                old_id = rel.get("old_id")
                if not old_id or rel_type == "none":
                    continue
                new_relationships.append({"type": rel_type, "target": old_id})
                self._relation_cache.add_relation(rel_type, kid)
                self._relation_cache.add_relation(rel_type, old_id)
                if reverse_type and reverse_type != "none":
                    try:
                        old = pg_store.get_by_id(old_id)
                        if old:
                            old_rels = old.get("relationships") or []
                            old_rels.append({"type": reverse_type, "target": kid})
                            pg_store.update(old_id, {"relationships": json.dumps(old_rels), "updated_at": now})
                            self._relation_cache.add_relation(reverse_type, old_id)
                            self._relation_cache.add_relation(reverse_type, kid)
                    except Exception as e:
                        print(f"[Apply Decision] 更新旧知识关系 {old_id} 失败: {e}")
            pg_store.update(kid, {
                "status": "dedup_passed",
                "relationships": json.dumps(new_relationships),
                "updated_at": now
            })

    async def _llm_analyze_tools(self, knowledge: dict) -> dict:
        """Use the LLM to identify tools involved in the knowledge (mirrors the migration-script logic).

        Raises on LLM/parse failure; callers decide how to roll back.
        """
        # Truncate inputs to keep the prompt bounded.
        task = (knowledge.get("task") or "")[:600]
        content = (knowledge.get("content") or "")[:1200]
        prompt = TOOL_ANALYSIS_PROMPT.format(task=task, content=content)
        try:
            response = await _tool_analysis_llm(
                messages=[{"role": "user", "content": prompt}],
                max_tokens=2048,
                temperature=0.1,
            )
            raw = (response.get("content") or "").strip()
            # Strip a surrounding markdown code fence, keeping only fenced lines.
            if raw.startswith("```"):
                lines = raw.split("\n")
                inner = []
                in_block = False
                for line in lines:
                    if line.startswith("```"):
                        in_block = not in_block
                        continue
                    if in_block:
                        inner.append(line)
                raw = "\n".join(inner).strip()
            return json.loads(raw)
        except Exception as e:
            print(f"[Tool Analysis LLM] 调用失败: {e}")
            raise

    async def _create_or_get_tool_resource(self, tool_info: dict) -> Optional[str]:
        """Create or fetch a tool row in the PostgreSQL tool_table.

        Returns the tool id ("tools/{category}/{slug}"), or None when the
        analysis produced no slug.
        """
        category = tool_info.get("category", "other")
        slug = tool_info.get("slug", "")
        if not slug:
            return None
        tool_id = f"tools/{category}/{slug}"
        now_ts = int(time.time())
        cursor = pg_store._get_cursor()
        try:
            # Idempotent: reuse the existing row if the tool is already known.
            cursor.execute("SELECT id FROM tool_table WHERE id = %s", (tool_id,))
            if cursor.fetchone():
                return tool_id
            cursor.execute("""
                INSERT INTO tool_table (id, name, version, introduction, tutorial, input, output,
                updated_time, status, tool_knowledge, case_knowledge, process_knowledge)
                VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
            """, (
                tool_id,
                tool_info.get("name", slug),
                tool_info.get("version") or None,
                tool_info.get("description", ""),
                tool_info.get("usage", ""),
                json.dumps(tool_info.get("input", "")),
                json.dumps(tool_info.get("output", "")),
                now_ts,
                tool_info.get("status", "未接入"),
                json.dumps([]),
                json.dumps([]),
                json.dumps([]),
            ))
            pg_store.conn.commit()
            print(f"[Tool Resource] 创建新工具: {tool_id}")
            return tool_id
        finally:
            cursor.close()

    async def _update_tool_knowledge_index(self, tool_id: str, knowledge_id: str):
        """Append knowledge_id to the tool's tool_knowledge index (tool_table)."""
        now_ts = int(time.time())
        cursor = pg_store._get_cursor()
        try:
            cursor.execute("SELECT tool_knowledge FROM tool_table WHERE id = %s", (tool_id,))
            row = cursor.fetchone()
            if not row:
                return
            # Column may come back as a decoded list or as a JSON string.
            knowledge_ids = row["tool_knowledge"] if isinstance(row["tool_knowledge"], list) else json.loads(row["tool_knowledge"] or "[]")
            if knowledge_id not in knowledge_ids:
                knowledge_ids.append(knowledge_id)
                cursor.execute(
                    "UPDATE tool_table SET tool_knowledge = %s, updated_time = %s WHERE id = %s",
                    (json.dumps(knowledge_ids), now_ts, tool_id)
                )
                pg_store.conn.commit()
        finally:
            cursor.close()

    async def _analyze_tool_relation(self, knowledge: dict):
        """Analyze the knowledge's tool associations and settle its final status."""
        kid = knowledge["id"]
        now = int(time.time())
        # Optimistic lock: dedup_passed -> analyzing
        try:
            pg_store.update(kid, {"status": "analyzing", "updated_at": now})
        except Exception as e:
            print(f"[Tool Analysis] 锁定 {kid} 失败: {e}")
            return
        try:
            tool_analysis = await self._llm_analyze_tools(knowledge)
            has_tools = bool(tool_analysis and tool_analysis.get("has_tools"))
            existing_tags = knowledge.get("tags") or {}
            has_tool_tag = existing_tags.get("tool") is True
            # Case 1: LLM says no tools but the entry carries a tool tag -> re-analyze once.
            if not has_tools and has_tool_tag:
                print(f"[Tool Analysis] {kid} LLM 判定无工具但有 tool tag,重新分析")
                tool_analysis = await self._llm_analyze_tools(knowledge)
                has_tools = bool(tool_analysis and tool_analysis.get("has_tools"))
                # Still inconsistent after the retry -> ambiguous knowledge, reject.
                if not has_tools:
                    pg_store.update(kid, {"status": "rejected", "updated_at": now})
                    print(f"[Tool Analysis] {kid} 两次判定不一致,知识模糊,rejected")
                    return
            # Case 2: no tools and no tool tag -> approve directly.
            if not has_tools:
                pg_store.update(kid, {"status": "approved", "updated_at": now})
                return
            # Cases 3/4: tools present -> create resources and link them.
            tool_ids = []
            for tool_info in (tool_analysis.get("tools") or []):
                tool_id = await self._create_or_get_tool_resource(tool_info)
                if tool_id:
                    tool_ids.append(tool_id)
            existing_resource_ids = knowledge.get("resource_ids") or []
            updated_resource_ids = list(set(existing_resource_ids + tool_ids))
            updates: dict = {
                "status": "approved",
                "resource_ids": updated_resource_ids,
                "updated_at": now
            }
            # Tools found but no tool tag -> add the tag.
            if not has_tool_tag:
                updated_tags = dict(existing_tags)
                updated_tags["tool"] = True
                updates["tags"] = updated_tags
                print(f"[Tool Analysis] {kid} 添加 tool tag")
            pg_store.update(kid, updates)
            for tool_id in tool_ids:
                await self._update_tool_knowledge_index(tool_id, kid)
            print(f"[Tool Analysis] {kid} 关联了 {len(tool_ids)} 个工具")
        except Exception as e:
            print(f"[Tool Analysis] {kid} 分析失败: {e},回退到 dedup_passed")
            try:
                pg_store.update(kid, {"status": "dedup_passed", "updated_at": int(time.time())})
            except Exception:
                pass
  557. async def _periodic_processor():
  558. """每60秒检测超时条目并回滚:processing(>5min)→pending,analyzing(>10min)→dedup_passed"""
  559. while True:
  560. await asyncio.sleep(60)
  561. try:
  562. now = int(time.time())
  563. # 回滚超时的 processing(5分钟 → pending)
  564. timeout_5min = now - 300
  565. processing = pg_store.query('status == "processing"', limit=200)
  566. for item in processing:
  567. updated_at = item.get("updated_at", 0) or 0
  568. updated_at_sec = updated_at // 1000 if updated_at > 1_000_000_000_000 else updated_at
  569. if updated_at_sec < timeout_5min:
  570. print(f"[Periodic] 回滚超时 processing → pending: {item['id']}")
  571. pg_store.update(item["id"], {"status": "pending", "updated_at": int(time.time())})
  572. # 回滚超时的 analyzing(10分钟 → dedup_passed)
  573. timeout_10min = now - 600
  574. analyzing = pg_store.query('status == "analyzing"', limit=200)
  575. for item in analyzing:
  576. updated_at = item.get("updated_at", 0) or 0
  577. updated_at_sec = updated_at // 1000 if updated_at > 1_000_000_000_000 else updated_at
  578. if updated_at_sec < timeout_10min:
  579. print(f"[Periodic] 回滚超时 analyzing → dedup_passed: {item['id']}")
  580. pg_store.update(item["id"], {"status": "dedup_passed", "updated_at": int(time.time())})
  581. except Exception as e:
  582. print(f"[Periodic] 定时任务错误: {e}")
# --- App ---
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan: open the PostgreSQL stores and start the watchdog
    task at startup; cancel the task and close connections at shutdown."""
    global pg_store, pg_resource_store, knowledge_processor
    # Initialize PostgreSQL stores (knowledge + resources).
    pg_store = PostgreSQLStore()
    pg_resource_store = PostgreSQLResourceStore()
    # Dedup processor + periodic rollback watchdog.
    knowledge_processor = KnowledgeProcessor()
    periodic_task = asyncio.create_task(_periodic_processor())
    yield
    # Shutdown: stop the watchdog (await so cancellation completes) and close stores.
    periodic_task.cancel()
    try:
        await periodic_task
    except asyncio.CancelledError:
        pass
    pg_store.close()
    pg_resource_store.close()
app = FastAPI(title=BRAND_NAME, lifespan=lifespan)
# Serve bundled static assets when the directory exists (optional per deployment).
STATIC_DIR = Path(__file__).parent / "static"
if STATIC_DIR.exists():
    app.mount("/static", StaticFiles(directory=str(STATIC_DIR)), name="static")
  607. # --- Knowledge API ---
  608. @app.post("/api/resource", status_code=201)
  609. def submit_resource(resource: ResourceIn):
  610. """提交资源(存入 PostgreSQL resources 表)"""
  611. try:
  612. # 加密敏感内容
  613. encrypted_secure_body = encrypt_content(resource.id, resource.secure_body)
  614. pg_resource_store.insert_or_update({
  615. 'id': resource.id,
  616. 'title': resource.title,
  617. 'body': resource.body,
  618. 'secure_body': encrypted_secure_body,
  619. 'content_type': resource.content_type,
  620. 'metadata': resource.metadata,
  621. 'sort_order': resource.sort_order,
  622. 'submitted_by': resource.submitted_by
  623. })
  624. return {"status": "ok", "id": resource.id}
  625. except Exception as e:
  626. raise HTTPException(status_code=500, detail=str(e))
  627. @app.get("/api/resource/{resource_id:path}", response_model=ResourceOut)
  628. def get_resource(resource_id: str, x_org_key: Optional[str] = Header(None)):
  629. """获取资源详情(从 PostgreSQL)"""
  630. try:
  631. row = pg_resource_store.get_by_id(resource_id)
  632. if not row:
  633. raise HTTPException(status_code=404, detail=f"Resource not found: {resource_id}")
  634. # 解密敏感内容
  635. secure_body = decrypt_content(resource_id, row.get("secure_body", ""), x_org_key)
  636. # 计算导航上下文
  637. root_id = resource_id.split("/")[0] if "/" in resource_id else resource_id
  638. # TOC (根节点)
  639. toc = None
  640. if "/" in resource_id:
  641. toc_row = pg_resource_store.get_by_id(root_id)
  642. if toc_row:
  643. toc = ResourceNode(id=toc_row["id"], title=toc_row["title"])
  644. # Children (子节点)
  645. children_rows = pg_resource_store.list_resources(prefix=f"{resource_id}/", limit=1000)
  646. children = [ResourceNode(id=r["id"], title=r["title"]) for r in children_rows
  647. if r["id"].count("/") == resource_id.count("/") + 1]
  648. # Prev/Next (同级节点)
  649. prev_node, next_node = pg_resource_store.get_siblings(resource_id)
  650. prev = ResourceNode(id=prev_node["id"], title=prev_node["title"]) if prev_node else None
  651. next = ResourceNode(id=next_node["id"], title=next_node["title"]) if next_node else None
  652. return ResourceOut(
  653. id=row["id"],
  654. title=row["title"],
  655. body=row["body"],
  656. secure_body=secure_body,
  657. content_type=row["content_type"],
  658. metadata=row.get("metadata", {}),
  659. toc=toc,
  660. children=children,
  661. prev=prev,
  662. next=next,
  663. )
  664. except HTTPException:
  665. raise
  666. except Exception as e:
  667. raise HTTPException(status_code=500, detail=str(e))
  668. @app.patch("/api/resource/{resource_id:path}")
  669. def patch_resource(resource_id: str, patch: ResourcePatchIn):
  670. """更新resource字段(PostgreSQL)"""
  671. try:
  672. # 检查是否存在
  673. if not pg_resource_store.get_by_id(resource_id):
  674. raise HTTPException(status_code=404, detail=f"Resource not found: {resource_id}")
  675. # 构建更新字典
  676. updates = {}
  677. if patch.title is not None:
  678. updates['title'] = patch.title
  679. if patch.body is not None:
  680. updates['body'] = patch.body
  681. if patch.secure_body is not None:
  682. updates['secure_body'] = encrypt_content(resource_id, patch.secure_body)
  683. if patch.content_type is not None:
  684. updates['content_type'] = patch.content_type
  685. if patch.metadata is not None:
  686. updates['metadata'] = patch.metadata
  687. if not updates:
  688. return {"status": "ok", "message": "No fields to update"}
  689. pg_resource_store.update(resource_id, updates)
  690. return {"status": "ok", "id": resource_id}
  691. except HTTPException:
  692. raise
  693. except Exception as e:
  694. raise HTTPException(status_code=500, detail=str(e))
  695. @app.get("/api/resource")
  696. def list_resources(
  697. content_type: Optional[str] = Query(None),
  698. limit: int = Query(100, ge=1, le=1000)
  699. ):
  700. """列出所有resource(PostgreSQL)"""
  701. try:
  702. results = pg_resource_store.list_resources(
  703. content_type=content_type,
  704. limit=limit
  705. )
  706. return {"results": results, "count": len(results)}
  707. except Exception as e:
  708. raise HTTPException(status_code=500, detail=str(e))
  709. @app.delete("/api/resource/{resource_id:path}")
  710. def delete_resource(resource_id: str):
  711. """删除单个resource(PostgreSQL)"""
  712. try:
  713. if not pg_resource_store.get_by_id(resource_id):
  714. raise HTTPException(status_code=404, detail=f"Resource not found: {resource_id}")
  715. pg_resource_store.delete(resource_id)
  716. return {"status": "ok", "id": resource_id}
  717. except HTTPException:
  718. raise
  719. except Exception as e:
  720. raise HTTPException(status_code=500, detail=str(e))
# ===== Knowledge API =====
  723. async def _llm_rerank(query: str, candidates: list[dict], top_k: int) -> list[str]:
  724. """
  725. 使用 LLM 对候选知识进行精排
  726. Args:
  727. query: 查询文本
  728. candidates: 候选知识列表
  729. top_k: 返回数量
  730. Returns:
  731. 排序后的知识 ID 列表
  732. """
  733. if not candidates:
  734. return []
  735. # 构造 prompt
  736. candidates_text = "\n".join([
  737. f"[{i+1}] ID: {c['id']}\nTask: {c['task']}\nContent: {c['content'][:200]}..."
  738. for i, c in enumerate(candidates)
  739. ])
  740. prompt = RERANK_PROMPT_TEMPLATE.format(
  741. top_k=top_k,
  742. query=query,
  743. candidates_text=candidates_text
  744. )
  745. try:
  746. response = await _dedup_llm(
  747. messages=[{"role": "user", "content": prompt}],
  748. )
  749. content = response.get("content", "").strip()
  750. # 解析 ID 列表
  751. selected_ids = [
  752. idx.strip()
  753. for idx in re.split(r'[,\s]+', content)
  754. if idx.strip().startswith(("knowledge-", "research-"))
  755. ]
  756. return selected_ids[:top_k]
  757. except Exception as e:
  758. print(f"[LLM Rerank] 失败: {e}")
  759. return []
  760. @app.get("/api/knowledge/search")
  761. async def search_knowledge_api(
  762. q: str = Query(..., description="查询文本"),
  763. top_k: int = Query(default=5, ge=1, le=20),
  764. min_score: int = Query(default=3, ge=1, le=5),
  765. types: Optional[str] = None,
  766. owner: Optional[str] = None
  767. ):
  768. """检索知识(向量召回 + LLM 精排)"""
  769. try:
  770. # 1. 生成查询向量
  771. query_embedding = await get_embedding(q)
  772. # 2. 构建过滤表达式
  773. filters = []
  774. if types:
  775. type_list = [t.strip() for t in types.split(',') if t.strip()]
  776. for t in type_list:
  777. filters.append(f'array_contains(types, "{t}")')
  778. if owner:
  779. owner_list = [o.strip() for o in owner.split(',') if o.strip()]
  780. if len(owner_list) == 1:
  781. filters.append(f'owner == "{owner_list[0]}"')
  782. else:
  783. # 多个owner用OR连接
  784. owner_filters = [f'owner == "{o}"' for o in owner_list]
  785. filters.append(f'({" or ".join(owner_filters)})')
  786. # 添加 min_score 过滤
  787. filters.append(f'eval["score"] >= {min_score}')
  788. # 只搜索 approved 和 checked 的知识
  789. filters.append('(status == "approved" or status == "checked")')
  790. filter_expr = ' and '.join(filters) if filters else None
  791. # 3. 向量召回(3*k 个候选)
  792. recall_limit = top_k * 3
  793. candidates = pg_store.search(
  794. query_embedding=query_embedding,
  795. filters=filter_expr,
  796. limit=recall_limit
  797. )
  798. if not candidates:
  799. return {"results": [], "count": 0, "reranked": False}
  800. # 转换为可序列化的格式
  801. serialized_candidates = [serialize_milvus_result(c) for c in candidates]
  802. # 4. LLM 精排
  803. reranked_ids = await _llm_rerank(q, serialized_candidates, top_k)
  804. if reranked_ids:
  805. # 按 LLM 排序返回
  806. id_to_candidate = {c["id"]: c for c in serialized_candidates}
  807. results = [id_to_candidate[id] for id in reranked_ids if id in id_to_candidate]
  808. return {"results": results, "count": len(results), "reranked": True}
  809. else:
  810. # Fallback:直接返回向量召回的 top k
  811. print(f"[Knowledge Search] LLM 精排失败,fallback 到向量 top-{top_k}")
  812. return {"results": serialized_candidates[:top_k], "count": len(serialized_candidates[:top_k]), "reranked": False}
  813. except Exception as e:
  814. print(f"[Knowledge Search] 错误: {e}")
  815. raise HTTPException(status_code=500, detail=str(e))
  816. @app.post("/api/knowledge", status_code=201)
  817. async def save_knowledge(knowledge: KnowledgeIn, background_tasks: BackgroundTasks):
  818. """保存新知识"""
  819. try:
  820. # 生成 ID
  821. timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
  822. random_suffix = uuid.uuid4().hex[:4]
  823. knowledge_id = f"knowledge-{timestamp}-{random_suffix}"
  824. now = int(time.time())
  825. # 设置默认值
  826. owner = knowledge.owner or f"agent:{knowledge.source.get('agent_id', 'unknown')}"
  827. # 准备 source
  828. source = {
  829. "name": knowledge.source.get("name", ""),
  830. "category": knowledge.source.get("category", ""),
  831. "urls": knowledge.source.get("urls", []),
  832. "agent_id": knowledge.source.get("agent_id", "unknown"),
  833. "submitted_by": knowledge.source.get("submitted_by", ""),
  834. "timestamp": datetime.now(timezone.utc).isoformat(),
  835. "message_id": knowledge.message_id
  836. }
  837. # 准备 eval
  838. eval_data = {
  839. "score": knowledge.eval.get("score", 3),
  840. "helpful": knowledge.eval.get("helpful", 1),
  841. "harmful": knowledge.eval.get("harmful", 0),
  842. "confidence": knowledge.eval.get("confidence", 0.5),
  843. "helpful_history": [],
  844. "harmful_history": []
  845. }
  846. # 生成向量(只基于 task,因为搜索时用户描述的是任务场景)
  847. embedding = await get_embedding(knowledge.task)
  848. # 提取 tag keys(用于高效筛选)
  849. tag_keys = list(knowledge.tags.keys()) if isinstance(knowledge.tags, dict) else []
  850. # 准备插入数据
  851. insert_data = {
  852. "id": knowledge_id,
  853. "task_embedding": embedding,
  854. "message_id": knowledge.message_id,
  855. "task": knowledge.task,
  856. "content": knowledge.content,
  857. "types": knowledge.types,
  858. "tags": knowledge.tags,
  859. "tag_keys": tag_keys,
  860. "scopes": knowledge.scopes,
  861. "owner": owner,
  862. "resource_ids": knowledge.resource_ids,
  863. "source": source,
  864. "eval": eval_data,
  865. "created_at": now,
  866. "updated_at": now,
  867. "status": "pending",
  868. "relationships": json.dumps([]),
  869. }
  870. print(f"[Save Knowledge] 插入数据: {json.dumps({k: v for k, v in insert_data.items() if k != 'embedding'}, ensure_ascii=False)}")
  871. # 插入 Milvus
  872. pg_store.insert(insert_data)
  873. # 触发后台去重处理
  874. background_tasks.add_task(knowledge_processor.process_pending)
  875. return {"status": "pending", "knowledge_id": knowledge_id, "message": "知识已入队,正在处理去重..."}
  876. except Exception as e:
  877. print(f"[Save Knowledge] 错误: {e}")
  878. raise HTTPException(status_code=500, detail=str(e))
  879. @app.get("/api/knowledge")
  880. def list_knowledge(
  881. page: int = Query(default=1, ge=1),
  882. page_size: int = Query(default=200, ge=1, le=500),
  883. types: Optional[str] = None,
  884. scopes: Optional[str] = None,
  885. owner: Optional[str] = None,
  886. tags: Optional[str] = None,
  887. status: Optional[str] = None
  888. ):
  889. """列出知识(支持后端筛选和分页)"""
  890. try:
  891. # 构建过滤表达式
  892. filters = []
  893. # types 支持多个,用 AND 连接(交集:必须同时包含所有选中的type)
  894. if types:
  895. type_list = [t.strip() for t in types.split(',') if t.strip()]
  896. for t in type_list:
  897. filters.append(f'array_contains(types, "{t}")')
  898. if scopes:
  899. filters.append(f'array_contains(scopes, "{scopes}")')
  900. if owner:
  901. owner_list = [o.strip() for o in owner.split(',') if o.strip()]
  902. if len(owner_list) == 1:
  903. filters.append(f'owner == "{owner_list[0]}"')
  904. else:
  905. # 多个owner用OR连接
  906. owner_filters = [f'owner == "{o}"' for o in owner_list]
  907. filters.append(f'({" or ".join(owner_filters)})')
  908. # tags 支持多个,用 AND 连接(使用 tag_keys 数组进行高效筛选)
  909. if tags:
  910. tag_list = [t.strip() for t in tags.split(',') if t.strip()]
  911. for t in tag_list:
  912. filters.append(f'array_contains(tag_keys, "{t}")')
  913. # 只返回指定 status 的知识(默认 approved 和 checked)
  914. status_list = [s.strip() for s in (status or "approved,checked").split(',') if s.strip()]
  915. status_conditions = ' or '.join([f'status == "{s}"' for s in status_list])
  916. filters.append(f'({status_conditions})')
  917. # 如果没有过滤条件,查询所有
  918. filter_expr = ' and '.join(filters) if filters else 'id != ""'
  919. # 查询 Milvus(先获取所有符合条件的数据)
  920. # Milvus 的 limit 是总数限制,我们需要获取足够多的数据来支持分页
  921. max_limit = 10000 # 设置一个合理的上限
  922. results = pg_store.query(filter_expr, limit=max_limit)
  923. # 转换为可序列化的格式
  924. serialized_results = [serialize_milvus_result(r) for r in results]
  925. # 按 created_at 降序排序(最新的在前)
  926. serialized_results.sort(key=lambda x: x.get('created_at', 0), reverse=True)
  927. # 计算分页
  928. total = len(serialized_results)
  929. total_pages = (total + page_size - 1) // page_size # 向上取整
  930. start_idx = (page - 1) * page_size
  931. end_idx = start_idx + page_size
  932. page_results = serialized_results[start_idx:end_idx]
  933. return {
  934. "results": page_results,
  935. "pagination": {
  936. "page": page,
  937. "page_size": page_size,
  938. "total": total,
  939. "total_pages": total_pages
  940. }
  941. }
  942. except Exception as e:
  943. print(f"[List Knowledge] 错误: {e}")
  944. raise HTTPException(status_code=500, detail=str(e))
  945. @app.get("/api/knowledge/meta/tags")
  946. def get_all_tags():
  947. """获取所有已有的 tags"""
  948. try:
  949. # 查询所有知识
  950. results = pg_store.query('id != ""', limit=10000)
  951. all_tags = set()
  952. for item in results:
  953. # 转换为标准字典
  954. serialized_item = serialize_milvus_result(item)
  955. tags_dict = serialized_item.get("tags", {})
  956. if isinstance(tags_dict, dict):
  957. for key in tags_dict.keys():
  958. all_tags.add(key)
  959. return {"tags": sorted(list(all_tags))}
  960. except Exception as e:
  961. print(f"[Get Tags] 错误: {e}")
  962. raise HTTPException(status_code=500, detail=str(e))
  963. @app.get("/api/knowledge/pending")
  964. def get_pending_knowledge(limit: int = Query(default=50, ge=1, le=200)):
  965. """查询待处理队列(pending + processing + dedup_passed + analyzing)"""
  966. try:
  967. pending = pg_store.query(
  968. 'status == "pending" or status == "processing" or status == "dedup_passed" or status == "analyzing"',
  969. limit=limit
  970. )
  971. serialized = [serialize_milvus_result(r) for r in pending]
  972. return {"results": serialized, "count": len(serialized)}
  973. except Exception as e:
  974. print(f"[Pending] 错误: {e}")
  975. raise HTTPException(status_code=500, detail=str(e))
  976. @app.post("/api/knowledge/process")
  977. async def trigger_process(force: bool = Query(default=False)):
  978. """手动触发去重处理。force=true 时先回滚所有 processing → pending,analyzing → dedup_passed"""
  979. try:
  980. if force:
  981. processing = pg_store.query('status == "processing"', limit=200)
  982. for item in processing:
  983. pg_store.update(item["id"], {"status": "pending", "updated_at": int(time.time())})
  984. print(f"[Manual Process] 回滚 {len(processing)} 条 processing → pending")
  985. analyzing = pg_store.query('status == "analyzing"', limit=200)
  986. for item in analyzing:
  987. pg_store.update(item["id"], {"status": "dedup_passed", "updated_at": int(time.time())})
  988. print(f"[Manual Process] 回滚 {len(analyzing)} 条 analyzing → dedup_passed")
  989. asyncio.create_task(knowledge_processor.process_pending())
  990. return {"status": "ok", "message": "处理任务已触发"}
  991. except Exception as e:
  992. print(f"[Manual Process] 错误: {e}")
  993. raise HTTPException(status_code=500, detail=str(e))
  994. @app.post("/api/knowledge/migrate")
  995. async def migrate_knowledge_schema():
  996. """手动触发 schema 迁移(PostgreSQL不需要此功能)"""
  997. return {"status": "ok", "message": "PostgreSQL不需要schema迁移"}
  998. @app.get("/api/knowledge/status/{knowledge_id}")
  999. def get_knowledge_status(knowledge_id: str):
  1000. """查询单条知识的处理状态和关系"""
  1001. try:
  1002. result = pg_store.get_by_id(knowledge_id)
  1003. if not result:
  1004. raise HTTPException(status_code=404, detail=f"Knowledge not found: {knowledge_id}")
  1005. serialized = serialize_milvus_result(result)
  1006. return {
  1007. "id": knowledge_id,
  1008. "status": serialized.get("status", "approved"),
  1009. "relationships": serialized.get("relationships", []),
  1010. "created_at": serialized.get("created_at"),
  1011. "updated_at": serialized.get("updated_at"),
  1012. }
  1013. except HTTPException:
  1014. raise
  1015. except Exception as e:
  1016. print(f"[Knowledge Status] 错误: {e}")
  1017. raise HTTPException(status_code=500, detail=str(e))
  1018. @app.get("/api/knowledge/{knowledge_id}")
  1019. def get_knowledge(knowledge_id: str):
  1020. """获取单条知识"""
  1021. try:
  1022. result = pg_store.get_by_id(knowledge_id)
  1023. if not result:
  1024. raise HTTPException(status_code=404, detail=f"Knowledge not found: {knowledge_id}")
  1025. return serialize_milvus_result(result)
  1026. except HTTPException:
  1027. raise
  1028. except Exception as e:
  1029. print(f"[Get Knowledge] 错误: {e}")
  1030. raise HTTPException(status_code=500, detail=str(e))
  1031. async def _evolve_knowledge_with_llm(old_content: str, feedback: str) -> str:
  1032. """使用 LLM 进行知识进化重写"""
  1033. prompt = KNOWLEDGE_EVOLVE_PROMPT_TEMPLATE.format(
  1034. old_content=old_content,
  1035. feedback=feedback
  1036. )
  1037. try:
  1038. response = await _dedup_llm(
  1039. messages=[{"role": "user", "content": prompt}],
  1040. )
  1041. evolved = response.get("content", "").strip()
  1042. if len(evolved) < 5:
  1043. raise ValueError("LLM output too short")
  1044. return evolved
  1045. except Exception as e:
  1046. print(f"知识进化失败,采用追加模式回退: {e}")
  1047. return f"{old_content}\n\n---\n[Update {datetime.now().strftime('%Y-%m-%d')}]: {feedback}"
  1048. @app.put("/api/knowledge/{knowledge_id}")
  1049. async def update_knowledge(knowledge_id: str, update: KnowledgeUpdateIn):
  1050. """更新知识评估,支持知识进化"""
  1051. try:
  1052. # 获取现有知识
  1053. existing = pg_store.get_by_id(knowledge_id)
  1054. if not existing:
  1055. raise HTTPException(status_code=404, detail=f"Knowledge not found: {knowledge_id}")
  1056. eval_data = existing.get("eval", {})
  1057. # 更新评分
  1058. if update.update_score is not None:
  1059. eval_data["score"] = update.update_score
  1060. # 添加有效案例
  1061. if update.add_helpful_case:
  1062. eval_data["helpful"] = eval_data.get("helpful", 0) + 1
  1063. if "helpful_history" not in eval_data:
  1064. eval_data["helpful_history"] = []
  1065. eval_data["helpful_history"].append(update.add_helpful_case)
  1066. # 添加有害案例
  1067. if update.add_harmful_case:
  1068. eval_data["harmful"] = eval_data.get("harmful", 0) + 1
  1069. if "harmful_history" not in eval_data:
  1070. eval_data["harmful_history"] = []
  1071. eval_data["harmful_history"].append(update.add_harmful_case)
  1072. # 知识进化
  1073. content = existing["content"]
  1074. need_reembed = False
  1075. if update.evolve_feedback:
  1076. content = await _evolve_knowledge_with_llm(content, update.evolve_feedback)
  1077. eval_data["helpful"] = eval_data.get("helpful", 0) + 1
  1078. need_reembed = True
  1079. # 准备更新数据
  1080. updates = {
  1081. "content": content,
  1082. "eval": eval_data,
  1083. }
  1084. # 如果内容变化,重新生成向量
  1085. if need_reembed:
  1086. embedding = await get_embedding(existing['task'])
  1087. updates["task_embedding"] = embedding
  1088. # 更新 Milvus
  1089. pg_store.update(knowledge_id, updates)
  1090. return {"status": "ok", "knowledge_id": knowledge_id}
  1091. except HTTPException:
  1092. raise
  1093. except Exception as e:
  1094. print(f"[Update Knowledge] 错误: {e}")
  1095. raise HTTPException(status_code=500, detail=str(e))
  1096. @app.patch("/api/knowledge/{knowledge_id}")
  1097. async def patch_knowledge(knowledge_id: str, patch: KnowledgePatchIn):
  1098. """直接编辑知识字段"""
  1099. try:
  1100. # 获取现有知识
  1101. existing = pg_store.get_by_id(knowledge_id)
  1102. if not existing:
  1103. raise HTTPException(status_code=404, detail=f"Knowledge not found: {knowledge_id}")
  1104. updates = {}
  1105. need_reembed = False
  1106. if patch.task is not None:
  1107. updates["task"] = patch.task
  1108. need_reembed = True
  1109. if patch.content is not None:
  1110. updates["content"] = patch.content
  1111. # content 变化不需要重新生成 embedding(只基于 task)
  1112. if patch.types is not None:
  1113. updates["types"] = patch.types
  1114. if patch.tags is not None:
  1115. updates["tags"] = patch.tags
  1116. # 同时更新 tag_keys
  1117. updates["tag_keys"] = list(patch.tags.keys()) if isinstance(patch.tags, dict) else []
  1118. if patch.scopes is not None:
  1119. updates["scopes"] = patch.scopes
  1120. if patch.owner is not None:
  1121. updates["owner"] = patch.owner
  1122. if not updates:
  1123. return {"status": "ok", "knowledge_id": knowledge_id}
  1124. # 如果 task 变化,重新生成向量
  1125. if need_reembed:
  1126. task = updates.get("task", existing["task"])
  1127. embedding = await get_embedding(task)
  1128. updates["task_embedding"] = embedding
  1129. # 更新 Milvus
  1130. pg_store.update(knowledge_id, updates)
  1131. return {"status": "ok", "knowledge_id": knowledge_id}
  1132. except HTTPException:
  1133. raise
  1134. except Exception as e:
  1135. print(f"[Patch Knowledge] 错误: {e}")
  1136. raise HTTPException(status_code=500, detail=str(e))
  1137. @app.delete("/api/knowledge/{knowledge_id}")
  1138. def delete_knowledge(knowledge_id: str):
  1139. """删除单条知识"""
  1140. try:
  1141. # 检查知识是否存在
  1142. existing = pg_store.get_by_id(knowledge_id)
  1143. if not existing:
  1144. raise HTTPException(status_code=404, detail=f"Knowledge not found: {knowledge_id}")
  1145. # 从 PostgreSQL 删除
  1146. pg_store.delete(knowledge_id)
  1147. print(f"[Delete Knowledge] 已删除知识: {knowledge_id}")
  1148. return {"status": "ok", "knowledge_id": knowledge_id}
  1149. except HTTPException:
  1150. raise
  1151. except Exception as e:
  1152. print(f"[Delete Knowledge] 错误: {e}")
  1153. raise HTTPException(status_code=500, detail=str(e))
  1154. @app.post("/api/knowledge/batch_delete")
  1155. def batch_delete_knowledge(knowledge_ids: List[str] = Body(...)):
  1156. """批量删除知识"""
  1157. try:
  1158. if not knowledge_ids:
  1159. raise HTTPException(status_code=400, detail="knowledge_ids cannot be empty")
  1160. # 批量删除
  1161. cursor = pg_store._get_cursor()
  1162. try:
  1163. cursor.execute(
  1164. "DELETE FROM knowledge WHERE id = ANY(%s)",
  1165. (knowledge_ids,)
  1166. )
  1167. pg_store.conn.commit()
  1168. deleted_count = cursor.rowcount
  1169. print(f"[Batch Delete] 已删除 {deleted_count} 条知识")
  1170. return {"status": "ok", "deleted_count": deleted_count}
  1171. finally:
  1172. cursor.close()
  1173. except HTTPException:
  1174. raise
  1175. except Exception as e:
  1176. print(f"[Batch Delete] 错误: {e}")
  1177. raise HTTPException(status_code=500, detail=str(e))
  1178. @app.post("/api/knowledge/batch_verify")
  1179. async def batch_verify_knowledge(batch: KnowledgeBatchVerifyIn):
  1180. """批量验证通过(approved → checked)"""
  1181. if not batch.knowledge_ids:
  1182. return {"status": "ok", "updated": 0}
  1183. try:
  1184. now_iso = datetime.now(timezone.utc).isoformat()
  1185. updated_count = 0
  1186. for kid in batch.knowledge_ids:
  1187. existing = pg_store.get_by_id(kid)
  1188. if not existing:
  1189. continue
  1190. eval_data = existing.get("eval") or {}
  1191. eval_data["verification"] = {
  1192. "status": "checked",
  1193. "verified_by": batch.verified_by,
  1194. "verified_at": now_iso,
  1195. "note": None,
  1196. "issue_type": None,
  1197. "issue_action": None,
  1198. }
  1199. pg_store.update(kid, {"eval": eval_data, "status": "checked", "updated_at": int(time.time())})
  1200. updated_count += 1
  1201. return {"status": "ok", "updated": updated_count}
  1202. except Exception as e:
  1203. print(f"[Batch Verify] 错误: {e}")
  1204. raise HTTPException(status_code=500, detail=str(e))
  1205. @app.post("/api/knowledge/{knowledge_id}/verify")
  1206. async def verify_knowledge(knowledge_id: str, verify: KnowledgeVerifyIn):
  1207. """知识验证:approve 切换 approved↔checked,reject 设为 rejected"""
  1208. try:
  1209. existing = pg_store.get_by_id(knowledge_id)
  1210. if not existing:
  1211. raise HTTPException(status_code=404, detail=f"Knowledge not found: {knowledge_id}")
  1212. current_status = existing.get("status", "approved")
  1213. if verify.action == "approve":
  1214. # checked → approved(取消验证),其他 → checked
  1215. new_status = "approved" if current_status == "checked" else "checked"
  1216. pg_store.update(knowledge_id, {
  1217. "status": new_status,
  1218. "updated_at": int(time.time())
  1219. })
  1220. return {"status": "ok", "new_status": new_status,
  1221. "message": "已取消验证" if new_status == "approved" else "验证通过"}
  1222. elif verify.action == "reject":
  1223. pg_store.update(knowledge_id, {
  1224. "status": "rejected",
  1225. "updated_at": int(time.time())
  1226. })
  1227. return {"status": "ok", "new_status": "rejected", "message": "已拒绝"}
  1228. else:
  1229. raise HTTPException(status_code=400, detail=f"Unknown action: {verify.action}")
  1230. except HTTPException:
  1231. raise
  1232. except Exception as e:
  1233. print(f"[Verify Knowledge] 错误: {e}")
  1234. raise HTTPException(status_code=500, detail=str(e))
@app.post("/api/knowledge/batch_update")
async def batch_update_knowledge(batch: KnowledgeBatchUpdateIn):
    """Batch-record effectiveness feedback for knowledge entries.

    Items with both is_effective and a feedback text trigger LLM-driven
    "evolution" (content rewrite + re-embed); all others just bump the
    helpful/harmful counters. Evolutions run concurrently via gather.
    """
    if not batch.feedback_list:
        return {"status": "ok", "updated": 0}
    try:
        # Split the work: simple counter updates first, collect the items
        # that need LLM evolution for a single concurrent batch afterwards.
        evolution_tasks = []  # [(knowledge_id, old_content, feedback, eval_data, task)]
        simple_updates = []  # [(knowledge_id, is_effective, eval_data)]
        for item in batch.feedback_list:
            knowledge_id = item.get("knowledge_id")
            is_effective = item.get("is_effective")
            feedback = item.get("feedback", "")
            if not knowledge_id:
                continue
            existing = pg_store.get_by_id(knowledge_id)
            if not existing:
                # Unknown ids are skipped silently; the count reflects real work
                continue
            eval_data = existing.get("eval", {})
            if is_effective and feedback:
                evolution_tasks.append((knowledge_id, existing["content"], feedback, eval_data, existing["task"]))
            else:
                simple_updates.append((knowledge_id, is_effective, eval_data))
        # Apply the simple counter updates
        for knowledge_id, is_effective, eval_data in simple_updates:
            if is_effective:
                eval_data["helpful"] = eval_data.get("helpful", 0) + 1
            else:
                eval_data["harmful"] = eval_data.get("harmful", 0) + 1
            pg_store.update(knowledge_id, {"eval": eval_data})
        # Run all LLM evolutions concurrently, then persist each result
        if evolution_tasks:
            print(f"🧬 并发处理 {len(evolution_tasks)} 条知识进化...")
            evolved_results = await asyncio.gather(
                *[_evolve_knowledge_with_llm(old, fb) for _, old, fb, _, _ in evolution_tasks]
            )
            for (knowledge_id, _, _, eval_data, task), evolved_content in zip(evolution_tasks, evolved_results):
                eval_data["helpful"] = eval_data.get("helpful", 0) + 1
                # Re-embed from the task text only (content is not embedded)
                embedding = await get_embedding(task)
                pg_store.update(knowledge_id, {
                    "content": evolved_content,
                    "eval": eval_data,
                    "task_embedding": embedding
                })
        return {"status": "ok", "updated": len(simple_updates) + len(evolution_tasks)}
    except Exception as e:
        print(f"[Batch Update] 错误: {e}")
        raise HTTPException(status_code=500, detail=str(e))
  1284. @app.post("/api/knowledge/slim")
  1285. async def slim_knowledge(model: str = "google/gemini-2.5-flash-lite"):
  1286. """知识库瘦身:合并语义相似知识"""
  1287. try:
  1288. # 获取所有知识
  1289. all_knowledge = pg_store.query('id != ""', limit=10000)
  1290. # 转换为可序列化的格式
  1291. all_knowledge = [serialize_milvus_result(item) for item in all_knowledge]
  1292. if len(all_knowledge) < 2:
  1293. return {"status": "ok", "message": f"知识库仅有 {len(all_knowledge)} 条,无需瘦身"}
  1294. # 构造发给大模型的内容
  1295. entries_text = ""
  1296. for item in all_knowledge:
  1297. eval_data = item.get("eval", {})
  1298. types = item.get("types", [])
  1299. entries_text += f"[ID: {item['id']}] [Types: {','.join(types)}] "
  1300. entries_text += f"[Helpful: {eval_data.get('helpful', 0)}, Harmful: {eval_data.get('harmful', 0)}] [Score: {eval_data.get('score', 3)}]\n"
  1301. entries_text += f"Task: {item['task']}\n"
  1302. entries_text += f"Content: {item['content'][:200]}...\n\n"
  1303. prompt = KNOWLEDGE_SLIM_PROMPT_TEMPLATE.format(entries_text=entries_text)
  1304. print(f"\n[知识瘦身] 正在调用 {model} 分析 {len(all_knowledge)} 条知识...")
  1305. slim_llm = create_openrouter_llm_call(model=model)
  1306. response = await slim_llm(
  1307. messages=[{"role": "user", "content": prompt}],
  1308. )
  1309. content = response.get("content", "").strip()
  1310. if not content:
  1311. raise HTTPException(status_code=500, detail="LLM 返回为空")
  1312. # 解析大模型输出
  1313. report_line = ""
  1314. new_entries = []
  1315. blocks = [b.strip() for b in content.split("===") if b.strip()]
  1316. for block in blocks:
  1317. if block.startswith("REPORT:"):
  1318. report_line = block
  1319. continue
  1320. lines = block.split("\n")
  1321. kid, types, helpful, harmful, score, task, content_lines = None, [], 0, 0, 3, "", []
  1322. current_field = None
  1323. for line in lines:
  1324. if line.startswith("ID:"):
  1325. kid = line[3:].strip()
  1326. current_field = None
  1327. elif line.startswith("TYPES:"):
  1328. types_str = line[6:].strip()
  1329. types = [t.strip() for t in types_str.split(",") if t.strip()]
  1330. current_field = None
  1331. elif line.startswith("HELPFUL:"):
  1332. try:
  1333. helpful = int(line[8:].strip())
  1334. except Exception:
  1335. helpful = 0
  1336. current_field = None
  1337. elif line.startswith("HARMFUL:"):
  1338. try:
  1339. harmful = int(line[8:].strip())
  1340. except Exception:
  1341. harmful = 0
  1342. current_field = None
  1343. elif line.startswith("SCORE:"):
  1344. try:
  1345. score = int(line[6:].strip())
  1346. except Exception:
  1347. score = 3
  1348. current_field = None
  1349. elif line.startswith("TASK:"):
  1350. task = line[5:].strip()
  1351. current_field = "task"
  1352. elif line.startswith("CONTENT:"):
  1353. content_lines.append(line[8:].strip())
  1354. current_field = "content"
  1355. elif current_field == "task":
  1356. task += "\n" + line
  1357. elif current_field == "content":
  1358. content_lines.append(line)
  1359. if kid and content_lines:
  1360. new_entries.append({
  1361. "id": kid,
  1362. "types": types if types else ["strategy"],
  1363. "helpful": helpful,
  1364. "harmful": harmful,
  1365. "score": score,
  1366. "task": task.strip(),
  1367. "content": "\n".join(content_lines).strip()
  1368. })
  1369. if not new_entries:
  1370. raise HTTPException(status_code=500, detail="解析大模型输出失败")
  1371. # 生成向量并重建知识库
  1372. print(f"[知识瘦身] 正在为 {len(new_entries)} 条知识生成向量...")
  1373. # 批量生成向量(只基于 task)
  1374. texts = [e['task'] for e in new_entries]
  1375. embeddings = await get_embeddings_batch(texts)
  1376. # 清空并重建(PostgreSQL使用TRUNCATE)
  1377. cursor = pg_store._get_cursor()
  1378. try:
  1379. cursor.execute("TRUNCATE TABLE knowledge")
  1380. pg_store.conn.commit()
  1381. finally:
  1382. cursor.close()
  1383. knowledge_list = []
  1384. for e, embedding in zip(new_entries, embeddings):
  1385. eval_data = {
  1386. "score": e["score"],
  1387. "helpful": e["helpful"],
  1388. "harmful": e["harmful"],
  1389. "confidence": 0.9,
  1390. "helpful_history": [],
  1391. "harmful_history": []
  1392. }
  1393. source = {
  1394. "name": "slim",
  1395. "category": "exp",
  1396. "urls": [],
  1397. "agent_id": "slim",
  1398. "submitted_by": "system",
  1399. "timestamp": datetime.now(timezone.utc).isoformat()
  1400. }
  1401. knowledge_list.append({
  1402. "id": e["id"],
  1403. "task_embedding": embedding,
  1404. "message_id": "",
  1405. "task": e["task"],
  1406. "content": e["content"],
  1407. "types": e["types"],
  1408. "tags": {},
  1409. "tag_keys": [],
  1410. "scopes": ["org:cybertogether"],
  1411. "owner": "agent:slim",
  1412. "resource_ids": [],
  1413. "source": source,
  1414. "eval": eval_data,
  1415. "created_at": now,
  1416. "updated_at": now,
  1417. "status": "approved",
  1418. "relationships": json.dumps([])
  1419. })
  1420. pg_store.insert_batch(knowledge_list)
  1421. result_msg = f"瘦身完成:{len(all_knowledge)} → {len(new_entries)} 条知识"
  1422. if report_line:
  1423. result_msg += f"\n{report_line}"
  1424. print(f"[知识瘦身] {result_msg}")
  1425. return {"status": "ok", "before": len(all_knowledge), "after": len(new_entries), "report": report_line}
  1426. except HTTPException:
  1427. raise
  1428. except Exception as e:
  1429. print(f"[Slim Knowledge] 错误: {e}")
  1430. raise HTTPException(status_code=500, detail=str(e))
  1431. @app.post("/api/extract")
  1432. async def extract_knowledge_from_messages(extract_req: MessageExtractIn, background_tasks: BackgroundTasks):
  1433. """从消息历史中提取知识(LLM 分析)"""
  1434. if not extract_req.submitted_by:
  1435. raise HTTPException(status_code=400, detail="submitted_by is required")
  1436. messages = extract_req.messages
  1437. if not messages or len(messages) == 0:
  1438. return {"status": "ok", "extracted_count": 0, "knowledge_ids": []}
  1439. # 构造消息历史文本
  1440. messages_text = ""
  1441. for msg in messages:
  1442. role = msg.get("role", "unknown")
  1443. content = msg.get("content", "")
  1444. messages_text += f"[{role}]: {content}\n\n"
  1445. # LLM 提取知识
  1446. prompt = MESSAGE_EXTRACT_PROMPT_TEMPLATE.format(messages_text=messages_text)
  1447. try:
  1448. print(f"\n[Extract] 正在从 {len(messages)} 条消息中提取知识...")
  1449. response = await _dedup_llm(
  1450. messages=[{"role": "user", "content": prompt}],
  1451. )
  1452. content = response.get("content", "").strip()
  1453. # 尝试解析 JSON
  1454. # 移除可能的 markdown 代码块标记
  1455. if content.startswith("```json"):
  1456. content = content[7:]
  1457. if content.startswith("```"):
  1458. content = content[3:]
  1459. if content.endswith("```"):
  1460. content = content[:-3]
  1461. content = content.strip()
  1462. extracted_knowledge = json.loads(content)
  1463. if not isinstance(extracted_knowledge, list):
  1464. raise ValueError("LLM output is not a list")
  1465. if not extracted_knowledge:
  1466. return {"status": "ok", "extracted_count": 0, "knowledge_ids": []}
  1467. # 批量生成向量(只基于 task)
  1468. texts = [item.get('task', '') for item in extracted_knowledge]
  1469. embeddings = await get_embeddings_batch(texts)
  1470. # 保存提取的知识
  1471. knowledge_ids = []
  1472. now = int(time.time())
  1473. knowledge_list = []
  1474. for item, embedding in zip(extracted_knowledge, embeddings):
  1475. task = item.get("task", "")
  1476. knowledge_content = item.get("content", "")
  1477. types = item.get("types", ["strategy"])
  1478. score = item.get("score", 3)
  1479. if not task or not knowledge_content:
  1480. continue
  1481. # 生成 ID
  1482. timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
  1483. random_suffix = uuid.uuid4().hex[:4]
  1484. knowledge_id = f"knowledge-{timestamp}-{random_suffix}"
  1485. # 准备数据
  1486. source = {
  1487. "name": "message_extraction",
  1488. "category": "exp",
  1489. "urls": [],
  1490. "agent_id": extract_req.agent_id,
  1491. "submitted_by": extract_req.submitted_by,
  1492. "timestamp": datetime.now(timezone.utc).isoformat(),
  1493. "session_key": extract_req.session_key
  1494. }
  1495. eval_data = {
  1496. "score": score,
  1497. "helpful": 1,
  1498. "harmful": 0,
  1499. "confidence": 0.7,
  1500. "helpful_history": [],
  1501. "harmful_history": []
  1502. }
  1503. knowledge_list.append({
  1504. "id": knowledge_id,
  1505. "task_embedding": embedding,
  1506. "message_id": "",
  1507. "task": task,
  1508. "content": knowledge_content,
  1509. "types": types,
  1510. "tags": {},
  1511. "tag_keys": [],
  1512. "scopes": ["org:cybertogether"],
  1513. "owner": extract_req.submitted_by,
  1514. "resource_ids": [],
  1515. "source": source,
  1516. "eval": eval_data,
  1517. "created_at": now,
  1518. "updated_at": now,
  1519. "status": "pending",
  1520. "relationships": json.dumps([]),
  1521. })
  1522. knowledge_ids.append(knowledge_id)
  1523. # 批量插入
  1524. if knowledge_list:
  1525. pg_store.insert_batch(knowledge_list)
  1526. background_tasks.add_task(knowledge_processor.process_pending)
  1527. print(f"[Extract] 成功提取并保存 {len(knowledge_ids)} 条知识")
  1528. return {
  1529. "status": "ok",
  1530. "extracted_count": len(knowledge_ids),
  1531. "knowledge_ids": knowledge_ids
  1532. }
  1533. except json.JSONDecodeError as e:
  1534. print(f"[Extract] JSON 解析失败: {e}")
  1535. print(f"[Extract] LLM 输出: {content[:500]}")
  1536. return {"status": "error", "error": "Failed to parse LLM output", "extracted_count": 0}
  1537. except Exception as e:
  1538. print(f"[Extract] 提取失败: {e}")
  1539. return {"status": "error", "error": str(e), "extracted_count": 0}
  1540. @app.get("/", response_class=FileResponse)
  1541. def frontend():
  1542. """KnowHub 管理前端"""
  1543. index_file = STATIC_DIR / "index.html"
  1544. if not index_file.exists():
  1545. return HTMLResponse("<h1>KnowHub Frontend Not Found</h1><p>Please ensure knowhub/static/index.html exists.</p>", status_code=404)
  1546. return FileResponse(str(index_file))
# Local entry point: run the API directly with uvicorn on all interfaces.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=9999)