server.py 73 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040
  1. """
  2. KnowHub Server
  3. Agent 工具使用经验的共享平台。
  4. FastAPI + Milvus Lite(知识)+ SQLite(资源),单文件部署。
  5. """
  6. import os
  7. import re
  8. import json
  9. import asyncio
  10. import base64
  11. import time
  12. import uuid
  13. from contextlib import asynccontextmanager
  14. from datetime import datetime, timezone
  15. from typing import Optional, List, Dict
  16. from pathlib import Path
  17. from cryptography.hazmat.primitives.ciphers.aead import AESGCM
  18. from fastapi import FastAPI, HTTPException, Query, Header, Body, BackgroundTasks
  19. from fastapi.responses import HTMLResponse, FileResponse
  20. from fastapi.staticfiles import StaticFiles
  21. from pydantic import BaseModel, Field
  22. # 导入 LLM 调用(需要 agent 模块在 Python path 中)
  23. import sys
  24. sys.path.insert(0, str(Path(__file__).parent.parent))
  25. # 加载环境变量
  26. from dotenv import load_dotenv
  27. load_dotenv(Path(__file__).parent.parent / ".env")
  28. from agent.llm import create_openrouter_llm_call, create_qwen_llm_call
  29. _dedup_llm = create_openrouter_llm_call(model="google/gemini-2.5-flash-lite")
  30. _tool_analysis_llm = create_qwen_llm_call(model="qwen3.5-plus")
  31. # 导入向量存储和 embedding
  32. from knowhub.knowhub_db.pg_store import PostgreSQLStore
  33. from knowhub.knowhub_db.pg_resource_store import PostgreSQLResourceStore
  34. from knowhub.embeddings import get_embedding, get_embeddings_batch
  35. BRAND_NAME = os.getenv("BRAND_NAME", "KnowHub")
  36. BRAND_API_ENV = os.getenv("BRAND_API_ENV", "KNOWHUB_API")
  37. BRAND_DB = os.getenv("BRAND_DB", "knowhub.db")
  38. # 组织密钥配置(格式:org1:key1_base64,org2:key2_base64)
  39. ORG_KEYS_RAW = os.getenv("ORG_KEYS", "")
  40. ORG_KEYS = {}
  41. if ORG_KEYS_RAW:
  42. for pair in ORG_KEYS_RAW.split(","):
  43. if ":" in pair:
  44. org, key_b64 = pair.split(":", 1)
  45. ORG_KEYS[org.strip()] = key_b64.strip()
  46. DB_PATH = Path(__file__).parent / BRAND_DB
  47. # 全局 PostgreSQL 存储实例
  48. pg_store: Optional[PostgreSQLStore] = None
  49. pg_resource_store: Optional[PostgreSQLResourceStore] = None
  50. # --- 加密/解密 ---
  51. def get_org_key(resource_id: str) -> Optional[bytes]:
  52. """从content_id提取组织前缀,返回对应密钥"""
  53. if "/" in resource_id:
  54. org = resource_id.split("/")[0]
  55. if org in ORG_KEYS:
  56. return base64.b64decode(ORG_KEYS[org])
  57. return None
  58. def encrypt_content(resource_id: str, plaintext: str) -> str:
  59. """加密内容,返回格式:encrypted:AES256-GCM:{base64_data}"""
  60. if not plaintext:
  61. return ""
  62. key = get_org_key(resource_id)
  63. if not key:
  64. # 没有配置密钥,明文存储(不推荐)
  65. return plaintext
  66. aesgcm = AESGCM(key)
  67. nonce = os.urandom(12) # 96-bit nonce
  68. ciphertext = aesgcm.encrypt(nonce, plaintext.encode("utf-8"), None)
  69. # 组合 nonce + ciphertext
  70. encrypted_data = nonce + ciphertext
  71. encoded = base64.b64encode(encrypted_data).decode("ascii")
  72. return f"encrypted:AES256-GCM:{encoded}"
  73. def decrypt_content(resource_id: str, encrypted_text: str, provided_key: Optional[str] = None) -> str:
  74. """解密内容,如果没有提供密钥或密钥错误,返回[ENCRYPTED]"""
  75. if not encrypted_text:
  76. return ""
  77. if not encrypted_text.startswith("encrypted:AES256-GCM:"):
  78. # 未加密的内容,直接返回
  79. return encrypted_text
  80. # 提取加密数据
  81. encoded = encrypted_text.split(":", 2)[2]
  82. encrypted_data = base64.b64decode(encoded)
  83. nonce = encrypted_data[:12]
  84. ciphertext = encrypted_data[12:]
  85. # 获取密钥
  86. key = None
  87. if provided_key:
  88. # 使用提供的密钥
  89. try:
  90. key = base64.b64decode(provided_key)
  91. except Exception:
  92. return "[ENCRYPTED]"
  93. else:
  94. # 从配置中获取
  95. key = get_org_key(resource_id)
  96. if not key:
  97. return "[ENCRYPTED]"
  98. try:
  99. aesgcm = AESGCM(key)
  100. plaintext = aesgcm.decrypt(nonce, ciphertext, None)
  101. return plaintext.decode("utf-8")
  102. except Exception:
  103. return "[ENCRYPTED]"
  104. def serialize_milvus_result(data):
  105. """将 Milvus 返回的数据转换为可序列化的字典"""
  106. # 基本类型直接返回
  107. if data is None or isinstance(data, (str, int, float, bool)):
  108. return data
  109. # 字典类型递归处理
  110. if isinstance(data, dict):
  111. return {k: serialize_milvus_result(v) for k, v in data.items()}
  112. # 列表/元组类型递归处理
  113. if isinstance(data, (list, tuple)):
  114. return [serialize_milvus_result(item) for item in data]
  115. # 尝试转换为字典(对于有 to_dict 方法的对象)
  116. if hasattr(data, 'to_dict') and callable(getattr(data, 'to_dict')):
  117. try:
  118. return serialize_milvus_result(data.to_dict())
  119. except:
  120. pass
  121. # 尝试转换为列表(对于可迭代对象,如 RepeatedScalarContainer)
  122. if hasattr(data, '__iter__') and not isinstance(data, (str, bytes, dict)):
  123. try:
  124. # 强制转换为列表并递归处理
  125. result = []
  126. for item in data:
  127. result.append(serialize_milvus_result(item))
  128. return result
  129. except:
  130. pass
  131. # 尝试获取对象的属性字典
  132. if hasattr(data, '__dict__'):
  133. try:
  134. return serialize_milvus_result(vars(data))
  135. except:
  136. pass
  137. # 最后的 fallback:对于无法处理的类型,返回 None 而不是字符串表示
  138. # 这样可以避免产生无法序列化的字符串
  139. return None
# --- Models ---
class ResourceIn(BaseModel):
    """POST /api/resource request body."""
    id: str
    title: str = ""
    body: str
    # Sensitive part; encrypted at rest when the org has a configured key.
    secure_body: str = ""
    content_type: str = "text"  # text|code|credential|cookie
    metadata: dict = {}
    sort_order: int = 0
    submitted_by: str = ""
class ResourcePatchIn(BaseModel):
    """PATCH /api/resource/{id} request body; None means "leave unchanged"."""
    title: Optional[str] = None
    body: Optional[str] = None
    secure_body: Optional[str] = None
    content_type: Optional[str] = None
    metadata: Optional[dict] = None
# Knowledge Models
class KnowledgeIn(BaseModel):
    """Knowledge submission body."""
    task: str
    content: str
    types: list[str] = ["strategy"]
    tags: dict = {}
    scopes: list[str] = ["org:cybertogether"]
    owner: str = ""
    message_id: str = ""
    resource_ids: list[str] = []
    source: dict = {}  # {name, category, urls, agent_id, submitted_by, timestamp}
    eval: dict = {}  # {score, helpful, harmful, confidence}
class KnowledgeOut(BaseModel):
    """Knowledge entry as returned by the API."""
    id: str
    message_id: str
    types: list[str]
    task: str
    tags: dict
    scopes: list[str]
    owner: str
    content: str
    resource_ids: list[str]
    source: dict
    eval: dict
    created_at: str
    updated_at: str
class KnowledgeUpdateIn(BaseModel):
    """Feedback-style knowledge update; each field is an optional operation."""
    add_helpful_case: Optional[dict] = None
    add_harmful_case: Optional[dict] = None
    update_score: Optional[int] = Field(default=None, ge=1, le=5)
    evolve_feedback: Optional[str] = None
class KnowledgePatchIn(BaseModel):
    """PATCH /api/knowledge/{id} request body (direct field edits)."""
    task: Optional[str] = None
    content: Optional[str] = None
    types: Optional[list[str]] = None
    tags: Optional[dict] = None
    scopes: Optional[list[str]] = None
    owner: Optional[str] = None
class MessageExtractIn(BaseModel):
    """POST /api/extract request body (knowledge extraction from chat history)."""
    messages: list[dict]  # [{role: str, content: str}, ...]
    agent_id: str = "unknown"
    submitted_by: str  # required; used as the knowledge owner
    session_key: str = ""
class KnowledgeBatchUpdateIn(BaseModel):
    """Batch feedback payload: one feedback dict per knowledge entry."""
    feedback_list: list[dict]
class KnowledgeVerifyIn(BaseModel):
    """Single-entry verification request."""
    action: str  # "approve" | "reject"
    verified_by: str = "user"
class KnowledgeBatchVerifyIn(BaseModel):
    """Batch verification request."""
    knowledge_ids: List[str]
    action: str  # "approve"
    verified_by: str
class KnowledgeSearchResponse(BaseModel):
    """Search endpoint response: result rows plus their count."""
    results: list[dict]
    count: int
class ResourceNode(BaseModel):
    """Lightweight resource reference used for navigation links."""
    id: str
    title: str
class ResourceOut(BaseModel):
    """GET /api/resource/{id} response, including navigation context."""
    id: str
    title: str
    body: str
    secure_body: str = ""
    content_type: str = "text"
    metadata: dict = {}
    toc: Optional[ResourceNode] = None  # table-of-contents / parent node
    children: list[ResourceNode]
    prev: Optional[ResourceNode] = None
    next: Optional[ResourceNode] = None
# --- Dedup: Globals & Prompt ---
# Singleton background processor; created in lifespan().
knowledge_processor: Optional["KnowledgeProcessor"] = None
# Prompt for the dedup LLM: classify the relation between a new knowledge
# entry and each of its top-10 vector-recalled neighbours, returning a strict
# JSON verdict. (Runtime string — kept verbatim in Chinese.)
DEDUP_RELATION_PROMPT = """你是知识库管理专家。请判断【新知识】与【相似知识列表】中每条知识的关系。
【新知识】
Task: {new_task}
Content: {new_content}
【相似知识列表】(向量召回 top-10,按相似度排序)
{existing_list}
格式: [序号] ID: xxx | Task: xxx | Content: xxx
【关系类型定义】
- duplicate: task 和 content 语义完全相同,无新增信息 → 新知识应 rejected
- subset: task语义一致,新知识的content信息完全被某条已有知识覆盖 → 新知识应 rejected
- superset: task语义一致,新知识包含某条已有知识的全部信息,且有额外内容 → 新知识应 approved
- conflict: 同一 task 下给出相互矛盾的结论 → 新知识应 approved
- complement: 描述同一 task 的不同方面,互补 → 新知识应 approved
- none: task 语义不同,或无实质关系 → 新知识应 approved,不写入 relations
【判断步骤】
第一步:逐条比较新知识的 task 与列表中每条知识的 task 语义是否一致。
- task 语义一致 = 两者描述的是同一个问题或目标(即使措辞不同)
- task 语义不同 = 描述的是不同的问题、不同的工具、不同的场景
- 如果 task 语义不同,该条关系直接判定为 none,**不再看 content**
- 只有 task 语义一致时,才进入第二步比较 content
第二步:对 task 语义一致的知识,比较 content,判断具体关系类型(duplicate/subset/superset/conflict/complement)。
**规则**:
1. 如果以上类型无法准确描述,可自定义关系类型(英文小写下划线),并自行决定 approved/rejected
2. final_decision 为 rejected 时,relations 中必须至少有一条关系说明拒绝原因(type 不能为 none)
【输出格式】(严格 JSON,不要其他内容)
示例1 - 无关知识(task 不同):
{{
"final_decision": "approved",
"relations": []
}}
示例2 - 重复知识:
{{
"final_decision": "rejected",
"relations": [
{{
"old_id": "knowledge-xxx",
"type": "duplicate",
"reverse_type": "duplicate"
}}
]
}}
示例3 - 互补知识:
{{
"final_decision": "approved",
"relations": [
{{
"old_id": "knowledge-xxx",
"type": "complement",
"reverse_type": "complement"
}}
]
}}
"""
# Prompt for the tool-analysis LLM: detect image-creation tools mentioned in
# a knowledge entry and emit structured JSON. (Runtime string — kept verbatim
# in Chinese.)
TOOL_ANALYSIS_PROMPT = """\
分析以下知识条目,判断是否涉及"图像创作或解构任务中使用的工具"。
工具范畴(包括但不限于):
- AI 生图平台/模型:Midjourney、Stable Diffusion、DALL-E、Flux、ComfyUI
- SD 插件/节点:ControlNet、IP-Adapter、InstantID、DWPose、DSINE
- 图像处理库:rembg、PIL/Pillow、OpenCV、scikit-image
- LoRA/checkpoint 模型、ComfyUI 自定义节点、AI 绘图辅助工具
知识条目:
task: {task}
content: {content}
要求:
- 如果涉及上述工具,提取每个工具的信息并以 JSON 格式返回。
- 如果不涉及任何工具,返回 {{"has_tools": false}}。
- 只输出 JSON,不要输出其他内容。
输出格式:
{{
"has_tools": true,
"tools": [
{{
"name": "工具名称(原名)",
"slug": "小写英文短名,空格换下划线,如 controlnet、ip_adapter",
"category": "image_gen | image_process | model | plugin | workflow | other",
"version": "版本号或 null",
"description": "一句话功能介绍",
"usage": "核心用法",
"scenarios": ["应用场景1", "应用场景2"],
"input": "输入类型描述或 null",
"output": "输出类型描述或 null",
"source": "来源/文档链接或 null",
"status": "未接入"
}}
]
}}
"""
  317. # --- Dedup: RelationCache ---
  318. class RelationCache:
  319. """关系缓存,存储在内存中"""
  320. def __init__(self):
  321. self._cache: Dict[str, List[str]] = {}
  322. def load(self) -> dict:
  323. return self._cache
  324. def save(self, cache: dict):
  325. self._cache = cache
  326. def add_relation(self, relation_type: str, knowledge_id: str):
  327. if relation_type not in self._cache:
  328. self._cache[relation_type] = []
  329. if knowledge_id not in self._cache[relation_type]:
  330. self._cache[relation_type].append(knowledge_id)
# --- Dedup: KnowledgeProcessor ---
class KnowledgeProcessor:
    """Background pipeline over knowledge entries.

    Two phases, driven by the `status` field in `pg_store`:
      1. dedup:        pending -> processing -> (rejected | dedup_passed)
      2. tool linking: dedup_passed -> analyzing -> (rejected | approved)

    Relies on the module-level `pg_store` and the LLM callables `_dedup_llm`
    and `_tool_analysis_llm`. A single asyncio.Lock serializes whole-pipeline
    runs; a periodic task elsewhere rolls back entries stuck in the transient
    `processing`/`analyzing` states.
    """
    def __init__(self):
        self._lock = asyncio.Lock()
        self._relation_cache = RelationCache()

    async def process_pending(self):
        """Drain pending and dedup_passed knowledge until both queues are empty.

        The lock prevents concurrent runs; a second caller returns immediately
        instead of waiting.
        """
        if self._lock.locked():
            return
        async with self._lock:
            # Phase 1: dedup all `pending` entries, 50 at a time.
            while True:
                try:
                    pending = pg_store.query('status == "pending"', limit=50)
                except Exception as e:
                    print(f"[KnowledgeProcessor] 查询 pending 失败: {e}")
                    break
                if not pending:
                    break
                for knowledge in pending:
                    await self._process_one(knowledge)
            # Phase 2: tool analysis for all `dedup_passed` entries.
            while True:
                try:
                    dedup_passed = pg_store.query('status == "dedup_passed"', limit=50)
                except Exception as e:
                    print(f"[KnowledgeProcessor] 查询 dedup_passed 失败: {e}")
                    break
                if not dedup_passed:
                    break
                for knowledge in dedup_passed:
                    await self._analyze_tool_relation(knowledge)

    async def _process_one(self, knowledge: dict):
        """Dedup a single entry: recall neighbours, ask the LLM, apply verdict."""
        kid = knowledge["id"]
        now = int(time.time())
        # Optimistic lock: pending -> processing (timestamp in seconds).
        try:
            pg_store.update(kid, {"status": "processing", "updated_at": now})
        except Exception as e:
            print(f"[KnowledgeProcessor] 锁定 {kid} 失败: {e}")
            return
        try:
            # Vector recall of top-10 neighbours (approved/checked only).
            embedding = knowledge.get("embedding")
            if not embedding:
                embedding = await get_embedding(knowledge["task"])
            candidates = pg_store.search(
                query_embedding=embedding,
                filters='(status == "approved" or status == "checked")',
                limit=10
            )
            candidates = [c for c in candidates if c["id"] != kid]
            # Keep only similarity >= 0.75; below that the tasks differ too
            # much semantically and the relation is treated as `none`.
            candidates = [c for c in candidates if c.get("score", 0) >= 0.75]
            if not candidates:
                pg_store.update(kid, {"status": "dedup_passed", "updated_at": now})
                return
            llm_result = await self._llm_judge_relations(knowledge, candidates)
            await self._apply_decision(knowledge, llm_result)
        except Exception as e:
            print(f"[KnowledgeProcessor] 处理 {kid} 失败: {e},回退到 pending")
            try:
                pg_store.update(kid, {"status": "pending", "updated_at": int(time.time())})
            except Exception:
                pass

    async def _llm_judge_relations(self, new_knowledge: dict, candidates: list) -> dict:
        """Ask the dedup LLM to classify relations; up to 3 attempts.

        Returns the parsed verdict dict; on persistent failure falls back to
        {"final_decision": "approved", "relations": []} (fail-open).
        """
        existing_list = "\n".join([
            f"[{i+1}] ID: {c['id']} | Task: {c['task']} | Content: {c['content'][:300]}"
            for i, c in enumerate(candidates)
        ])
        prompt = DEDUP_RELATION_PROMPT.format(
            new_task=new_knowledge["task"],
            new_content=new_knowledge["content"],
            existing_list=existing_list
        )
        for attempt in range(3):
            try:
                response = await _dedup_llm(
                    messages=[{"role": "user", "content": prompt}],
                )
                content = response.get("content", "").strip()
                # Strip markdown code fences: try each fenced segment until one
                # parses as JSON containing "final_decision".
                if "```" in content:
                    parts = content.split("```")
                    for part in parts:
                        part = part.strip()
                        if part.startswith("json"):
                            part = part[4:].strip()
                        try:
                            result = json.loads(part)
                            if "final_decision" in result:
                                content = part
                                break
                        except Exception:
                            continue
                result = json.loads(content)
                # Malformed verdicts trigger a retry via the except clause.
                assert result.get("final_decision") in ("approved", "rejected")
                return result
            except Exception as e:
                print(f"[LLM Judge] 第{attempt+1}次失败: {e}")
                if attempt < 2:
                    await asyncio.sleep(1)
        return {"final_decision": "approved", "relations": []}

    async def _apply_decision(self, new_knowledge: dict, llm_result: dict):
        """Persist the LLM's dedup verdict and maintain cross-entry relations."""
        kid = new_knowledge["id"]
        final_decision = llm_result.get("final_decision", "approved")
        relations = llm_result.get("relations", [])
        now = int(time.time())
        # Hard rule: any duplicate/subset relation forces rejection.
        if any(rel.get("type") in ("duplicate", "subset") for rel in relations):
            final_decision = "rejected"
        if final_decision == "rejected":
            # Record the relations on the rejected entry for traceability.
            rejected_relationships = []
            for rel in relations:
                old_id = rel.get("old_id")
                rel_type = rel.get("type", "none")
                if old_id and rel_type != "none":
                    rejected_relationships.append({"type": rel_type, "target": old_id})
                if rel_type in ("duplicate", "subset") and old_id:
                    # A duplicate/subset hit counts as a "helpful" vote for the
                    # surviving entry; append to its helpful_history audit trail.
                    try:
                        old = pg_store.get_by_id(old_id)
                        if not old:
                            continue
                        eval_data = old.get("eval") or {}
                        eval_data["helpful"] = eval_data.get("helpful", 0) + 1
                        helpful_history = eval_data.get("helpful_history") or []
                        helpful_history.append({
                            "source": "dedup",
                            "related_id": kid,
                            "relation_type": rel_type,
                            "timestamp": now
                        })
                        eval_data["helpful_history"] = helpful_history
                        pg_store.update(old_id, {"eval": eval_data, "updated_at": now})
                    except Exception as e:
                        print(f"[Apply Decision] 更新旧知识 {old_id} helpful 失败: {e}")
            pg_store.update(kid, {"status": "rejected", "relationships": json.dumps(rejected_relationships), "updated_at": now})
        else:
            # Approved: store forward relations on the new entry, and mirror
            # the reverse relation onto each referenced old entry.
            new_relationships = []
            for rel in relations:
                rel_type = rel.get("type", "none")
                reverse_type = rel.get("reverse_type", "none")
                old_id = rel.get("old_id")
                if not old_id or rel_type == "none":
                    continue
                new_relationships.append({"type": rel_type, "target": old_id})
                self._relation_cache.add_relation(rel_type, kid)
                self._relation_cache.add_relation(rel_type, old_id)
                if reverse_type and reverse_type != "none":
                    try:
                        old = pg_store.get_by_id(old_id)
                        if old:
                            old_rels = old.get("relationships") or []
                            old_rels.append({"type": reverse_type, "target": kid})
                            pg_store.update(old_id, {"relationships": json.dumps(old_rels), "updated_at": now})
                            self._relation_cache.add_relation(reverse_type, old_id)
                            self._relation_cache.add_relation(reverse_type, kid)
                    except Exception as e:
                        print(f"[Apply Decision] 更新旧知识关系 {old_id} 失败: {e}")
            pg_store.update(kid, {
                "status": "dedup_passed",
                "relationships": json.dumps(new_relationships),
                "updated_at": now
            })

    async def _llm_analyze_tools(self, knowledge: dict) -> dict:
        """Ask the LLM which tools the knowledge mentions (mirrors the migration script).

        Raises on LLM/parse failure so the caller can roll the entry back.
        """
        # Truncate inputs to keep the prompt bounded.
        task = (knowledge.get("task") or "")[:600]
        content = (knowledge.get("content") or "")[:1200]
        prompt = TOOL_ANALYSIS_PROMPT.format(task=task, content=content)
        try:
            response = await _tool_analysis_llm(
                messages=[{"role": "user", "content": prompt}],
                max_tokens=2048,
                temperature=0.1,
            )
            raw = (response.get("content") or "").strip()
            # Strip a surrounding markdown code fence, keeping inner lines only.
            if raw.startswith("```"):
                lines = raw.split("\n")
                inner = []
                in_block = False
                for line in lines:
                    if line.startswith("```"):
                        in_block = not in_block
                        continue
                    if in_block:
                        inner.append(line)
                raw = "\n".join(inner).strip()
            return json.loads(raw)
        except Exception as e:
            print(f"[Tool Analysis LLM] 调用失败: {e}")
            raise

    async def _create_or_get_tool_resource(self, tool_info: dict) -> Optional[str]:
        """Create (or return existing) tool row in PostgreSQL tool_table.

        Returns the tool id "tools/{category}/{slug}", or None when the LLM
        provided no slug.
        """
        category = tool_info.get("category", "other")
        slug = tool_info.get("slug", "")
        if not slug:
            return None
        tool_id = f"tools/{category}/{slug}"
        now_ts = int(time.time())
        cursor = pg_store._get_cursor()
        try:
            cursor.execute("SELECT id FROM tool_table WHERE id = %s", (tool_id,))
            if cursor.fetchone():
                return tool_id
            cursor.execute("""
                INSERT INTO tool_table (id, name, version, introduction, tutorial, input, output,
                updated_time, status, knowledge, case_knowledge, process_knowledge)
                VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
            """, (
                tool_id,
                tool_info.get("name", slug),
                tool_info.get("version") or None,
                tool_info.get("description", ""),
                tool_info.get("usage", ""),
                json.dumps(tool_info.get("input", "")),
                json.dumps(tool_info.get("output", "")),
                now_ts,
                tool_info.get("status", "未接入"),
                json.dumps([]),
                json.dumps([]),
                json.dumps([]),
            ))
            pg_store.conn.commit()
            print(f"[Tool Resource] 创建新工具: {tool_id}")
            return tool_id
        finally:
            cursor.close()

    async def _update_tool_knowledge_index(self, tool_id: str, knowledge_id: str):
        """Append *knowledge_id* to the tool's `knowledge` list (tool_table)."""
        now_ts = int(time.time())
        cursor = pg_store._get_cursor()
        try:
            cursor.execute("SELECT knowledge FROM tool_table WHERE id = %s", (tool_id,))
            row = cursor.fetchone()
            if not row:
                return
            # Column may come back as a decoded list or a JSON string.
            knowledge_ids = row["knowledge"] if isinstance(row["knowledge"], list) else json.loads(row["knowledge"] or "[]")
            if knowledge_id not in knowledge_ids:
                knowledge_ids.append(knowledge_id)
                cursor.execute(
                    "UPDATE tool_table SET knowledge = %s, updated_time = %s WHERE id = %s",
                    (json.dumps(knowledge_ids), now_ts, tool_id)
                )
                pg_store.conn.commit()
        finally:
            cursor.close()

    async def _analyze_tool_relation(self, knowledge: dict):
        """Analyze which tools an entry relates to, then approve/reject it."""
        kid = knowledge["id"]
        now = int(time.time())
        # Optimistic lock: dedup_passed -> analyzing.
        try:
            pg_store.update(kid, {"status": "analyzing", "updated_at": now})
        except Exception as e:
            print(f"[Tool Analysis] 锁定 {kid} 失败: {e}")
            return
        try:
            tool_analysis = await self._llm_analyze_tools(knowledge)
            has_tools = bool(tool_analysis and tool_analysis.get("has_tools"))
            existing_tags = knowledge.get("tags") or {}
            has_tool_tag = existing_tags.get("tool") is True
            # Case 1: LLM says no tools but the entry is tagged `tool` — retry once.
            if not has_tools and has_tool_tag:
                print(f"[Tool Analysis] {kid} LLM 判定无工具但有 tool tag,重新分析")
                tool_analysis = await self._llm_analyze_tools(knowledge)
                has_tools = bool(tool_analysis and tool_analysis.get("has_tools"))
                # Still inconsistent after the retry: ambiguous entry, reject.
                if not has_tools:
                    pg_store.update(kid, {"status": "rejected", "updated_at": now})
                    print(f"[Tool Analysis] {kid} 两次判定不一致,知识模糊,rejected")
                    return
            # Case 2: no tools and no tool tag — approve directly.
            if not has_tools:
                pg_store.update(kid, {"status": "approved", "updated_at": now})
                return
            # Cases 3/4: tools found — create/link tool resources.
            tool_ids = []
            for tool_info in (tool_analysis.get("tools") or []):
                tool_id = await self._create_or_get_tool_resource(tool_info)
                if tool_id:
                    tool_ids.append(tool_id)
            existing_resource_ids = knowledge.get("resource_ids") or []
            # NOTE: set() de-duplicates but does not preserve order.
            updated_resource_ids = list(set(existing_resource_ids + tool_ids))
            updates: dict = {
                "status": "approved",
                "resource_ids": updated_resource_ids,
                "updated_at": now
            }
            # Tools found but tag missing — add the `tool` tag.
            if not has_tool_tag:
                updated_tags = dict(existing_tags)
                updated_tags["tool"] = True
                updates["tags"] = updated_tags
                print(f"[Tool Analysis] {kid} 添加 tool tag")
            pg_store.update(kid, updates)
            for tool_id in tool_ids:
                await self._update_tool_knowledge_index(tool_id, kid)
            print(f"[Tool Analysis] {kid} 关联了 {len(tool_ids)} 个工具")
        except Exception as e:
            print(f"[Tool Analysis] {kid} 分析失败: {e},回退到 dedup_passed")
            try:
                pg_store.update(kid, {"status": "dedup_passed", "updated_at": int(time.time())})
            except Exception:
                pass
  636. async def _periodic_processor():
  637. """每60秒检测超时条目并回滚:processing(>5min)→pending,analyzing(>10min)→dedup_passed"""
  638. while True:
  639. await asyncio.sleep(60)
  640. try:
  641. now = int(time.time())
  642. # 回滚超时的 processing(5分钟 → pending)
  643. timeout_5min = now - 300
  644. processing = pg_store.query('status == "processing"', limit=200)
  645. for item in processing:
  646. updated_at = item.get("updated_at", 0) or 0
  647. updated_at_sec = updated_at // 1000 if updated_at > 1_000_000_000_000 else updated_at
  648. if updated_at_sec < timeout_5min:
  649. print(f"[Periodic] 回滚超时 processing → pending: {item['id']}")
  650. pg_store.update(item["id"], {"status": "pending", "updated_at": int(time.time())})
  651. # 回滚超时的 analyzing(10分钟 → dedup_passed)
  652. timeout_10min = now - 600
  653. analyzing = pg_store.query('status == "analyzing"', limit=200)
  654. for item in analyzing:
  655. updated_at = item.get("updated_at", 0) or 0
  656. updated_at_sec = updated_at // 1000 if updated_at > 1_000_000_000_000 else updated_at
  657. if updated_at_sec < timeout_10min:
  658. print(f"[Periodic] 回滚超时 analyzing → dedup_passed: {item['id']}")
  659. pg_store.update(item["id"], {"status": "dedup_passed", "updated_at": int(time.time())})
  660. except Exception as e:
  661. print(f"[Periodic] 定时任务错误: {e}")
# --- App ---
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan: open stores and start the periodic rollback task;
    cancel the task and close the stores on shutdown."""
    global pg_store, pg_resource_store, knowledge_processor
    # Initialize PostgreSQL stores (knowledge + resources).
    pg_store = PostgreSQLStore()
    pg_resource_store = PostgreSQLResourceStore()
    # Dedup processor + periodic safety-net task.
    knowledge_processor = KnowledgeProcessor()
    periodic_task = asyncio.create_task(_periodic_processor())
    yield
    # Shutdown cleanup: cancel the background task, then close connections.
    periodic_task.cancel()
    try:
        await periodic_task
    except asyncio.CancelledError:
        pass
    pg_store.close()
    pg_resource_store.close()
# FastAPI application; `lifespan` wires up the stores and the watchdog task.
app = FastAPI(title=BRAND_NAME, lifespan=lifespan)
# Serve static assets if a ./static directory exists next to this file.
STATIC_DIR = Path(__file__).parent / "static"
if STATIC_DIR.exists():
    app.mount("/static", StaticFiles(directory=str(STATIC_DIR)), name="static")
# --- Resource API ---
  687. @app.post("/api/resource", status_code=201)
  688. def submit_resource(resource: ResourceIn):
  689. """提交资源(存入 PostgreSQL resources 表)"""
  690. try:
  691. # 加密敏感内容
  692. encrypted_secure_body = encrypt_content(resource.id, resource.secure_body)
  693. pg_resource_store.insert_or_update({
  694. 'id': resource.id,
  695. 'title': resource.title,
  696. 'body': resource.body,
  697. 'secure_body': encrypted_secure_body,
  698. 'content_type': resource.content_type,
  699. 'metadata': resource.metadata,
  700. 'sort_order': resource.sort_order,
  701. 'submitted_by': resource.submitted_by
  702. })
  703. return {"status": "ok", "id": resource.id}
  704. except Exception as e:
  705. raise HTTPException(status_code=500, detail=str(e))
  706. @app.get("/api/resource/{resource_id:path}", response_model=ResourceOut)
  707. def get_resource(resource_id: str, x_org_key: Optional[str] = Header(None)):
  708. """获取资源详情(从 PostgreSQL)"""
  709. try:
  710. row = pg_resource_store.get_by_id(resource_id)
  711. if not row:
  712. raise HTTPException(status_code=404, detail=f"Resource not found: {resource_id}")
  713. # 解密敏感内容
  714. secure_body = decrypt_content(resource_id, row.get("secure_body", ""), x_org_key)
  715. # 计算导航上下文
  716. root_id = resource_id.split("/")[0] if "/" in resource_id else resource_id
  717. # TOC (根节点)
  718. toc = None
  719. if "/" in resource_id:
  720. toc_row = pg_resource_store.get_by_id(root_id)
  721. if toc_row:
  722. toc = ResourceNode(id=toc_row["id"], title=toc_row["title"])
  723. # Children (子节点)
  724. children_rows = pg_resource_store.list_resources(prefix=f"{resource_id}/", limit=1000)
  725. children = [ResourceNode(id=r["id"], title=r["title"]) for r in children_rows
  726. if r["id"].count("/") == resource_id.count("/") + 1]
  727. # Prev/Next (同级节点)
  728. prev_node, next_node = pg_resource_store.get_siblings(resource_id)
  729. prev = ResourceNode(id=prev_node["id"], title=prev_node["title"]) if prev_node else None
  730. next = ResourceNode(id=next_node["id"], title=next_node["title"]) if next_node else None
  731. return ResourceOut(
  732. id=row["id"],
  733. title=row["title"],
  734. body=row["body"],
  735. secure_body=secure_body,
  736. content_type=row["content_type"],
  737. metadata=row.get("metadata", {}),
  738. toc=toc,
  739. children=children,
  740. prev=prev,
  741. next=next,
  742. )
  743. except HTTPException:
  744. raise
  745. except Exception as e:
  746. raise HTTPException(status_code=500, detail=str(e))
  747. @app.patch("/api/resource/{resource_id:path}")
  748. def patch_resource(resource_id: str, patch: ResourcePatchIn):
  749. """更新resource字段(PostgreSQL)"""
  750. try:
  751. # 检查是否存在
  752. if not pg_resource_store.get_by_id(resource_id):
  753. raise HTTPException(status_code=404, detail=f"Resource not found: {resource_id}")
  754. # 构建更新字典
  755. updates = {}
  756. if patch.title is not None:
  757. updates['title'] = patch.title
  758. if patch.body is not None:
  759. updates['body'] = patch.body
  760. if patch.secure_body is not None:
  761. updates['secure_body'] = encrypt_content(resource_id, patch.secure_body)
  762. if patch.content_type is not None:
  763. updates['content_type'] = patch.content_type
  764. if patch.metadata is not None:
  765. updates['metadata'] = patch.metadata
  766. if not updates:
  767. return {"status": "ok", "message": "No fields to update"}
  768. pg_resource_store.update(resource_id, updates)
  769. return {"status": "ok", "id": resource_id}
  770. except HTTPException:
  771. raise
  772. except Exception as e:
  773. raise HTTPException(status_code=500, detail=str(e))
  774. @app.get("/api/resource")
  775. def list_resources(
  776. content_type: Optional[str] = Query(None),
  777. limit: int = Query(100, ge=1, le=1000)
  778. ):
  779. """列出所有resource(PostgreSQL)"""
  780. try:
  781. results = pg_resource_store.list_resources(
  782. content_type=content_type,
  783. limit=limit
  784. )
  785. return {"results": results, "count": len(results)}
  786. except Exception as e:
  787. raise HTTPException(status_code=500, detail=str(e))
  788. @app.delete("/api/resource/{resource_id:path}")
  789. def delete_resource(resource_id: str):
  790. """删除单个resource(PostgreSQL)"""
  791. try:
  792. if not pg_resource_store.get_by_id(resource_id):
  793. raise HTTPException(status_code=404, detail=f"Resource not found: {resource_id}")
  794. pg_resource_store.delete(resource_id)
  795. return {"status": "ok", "id": resource_id}
  796. except HTTPException:
  797. raise
  798. except Exception as e:
  799. raise HTTPException(status_code=500, detail=str(e))
# ===== Knowledge API =====
  802. async def _llm_rerank(query: str, candidates: list[dict], top_k: int) -> list[str]:
  803. """
  804. 使用 LLM 对候选知识进行精排
  805. Args:
  806. query: 查询文本
  807. candidates: 候选知识列表
  808. top_k: 返回数量
  809. Returns:
  810. 排序后的知识 ID 列表
  811. """
  812. if not candidates:
  813. return []
  814. # 构造 prompt
  815. candidates_text = "\n".join([
  816. f"[{i+1}] ID: {c['id']}\nTask: {c['task']}\nContent: {c['content'][:200]}..."
  817. for i, c in enumerate(candidates)
  818. ])
  819. prompt = f"""你是知识检索专家。根据用户查询,从候选知识中选出最相关的 {top_k} 条。
  820. 用户查询:"{query}"
  821. 候选知识:
  822. {candidates_text}
  823. 请输出最相关的 {top_k} 个知识 ID,按相关性从高到低排序,用逗号分隔。
  824. 只输出 ID,不要其他内容。"""
  825. try:
  826. response = await _dedup_llm(
  827. messages=[{"role": "user", "content": prompt}],
  828. )
  829. content = response.get("content", "").strip()
  830. # 解析 ID 列表
  831. selected_ids = [
  832. idx.strip()
  833. for idx in re.split(r'[,\s]+', content)
  834. if idx.strip().startswith(("knowledge-", "research-"))
  835. ]
  836. return selected_ids[:top_k]
  837. except Exception as e:
  838. print(f"[LLM Rerank] 失败: {e}")
  839. return []
  840. @app.get("/api/knowledge/search")
  841. async def search_knowledge_api(
  842. q: str = Query(..., description="查询文本"),
  843. top_k: int = Query(default=5, ge=1, le=20),
  844. min_score: int = Query(default=3, ge=1, le=5),
  845. types: Optional[str] = None,
  846. owner: Optional[str] = None
  847. ):
  848. """检索知识(向量召回 + LLM 精排)"""
  849. try:
  850. # 1. 生成查询向量
  851. query_embedding = await get_embedding(q)
  852. # 2. 构建过滤表达式
  853. filters = []
  854. if types:
  855. type_list = [t.strip() for t in types.split(',') if t.strip()]
  856. for t in type_list:
  857. filters.append(f'array_contains(types, "{t}")')
  858. if owner:
  859. owner_list = [o.strip() for o in owner.split(',') if o.strip()]
  860. if len(owner_list) == 1:
  861. filters.append(f'owner == "{owner_list[0]}"')
  862. else:
  863. # 多个owner用OR连接
  864. owner_filters = [f'owner == "{o}"' for o in owner_list]
  865. filters.append(f'({" or ".join(owner_filters)})')
  866. # 添加 min_score 过滤
  867. filters.append(f'eval["score"] >= {min_score}')
  868. # 只搜索 approved 和 checked 的知识
  869. filters.append('(status == "approved" or status == "checked")')
  870. filter_expr = ' and '.join(filters) if filters else None
  871. # 3. 向量召回(3*k 个候选)
  872. recall_limit = top_k * 3
  873. candidates = pg_store.search(
  874. query_embedding=query_embedding,
  875. filters=filter_expr,
  876. limit=recall_limit
  877. )
  878. if not candidates:
  879. return {"results": [], "count": 0, "reranked": False}
  880. # 转换为可序列化的格式
  881. serialized_candidates = [serialize_milvus_result(c) for c in candidates]
  882. # 4. LLM 精排
  883. reranked_ids = await _llm_rerank(q, serialized_candidates, top_k)
  884. if reranked_ids:
  885. # 按 LLM 排序返回
  886. id_to_candidate = {c["id"]: c for c in serialized_candidates}
  887. results = [id_to_candidate[id] for id in reranked_ids if id in id_to_candidate]
  888. return {"results": results, "count": len(results), "reranked": True}
  889. else:
  890. # Fallback:直接返回向量召回的 top k
  891. print(f"[Knowledge Search] LLM 精排失败,fallback 到向量 top-{top_k}")
  892. return {"results": serialized_candidates[:top_k], "count": len(serialized_candidates[:top_k]), "reranked": False}
  893. except Exception as e:
  894. print(f"[Knowledge Search] 错误: {e}")
  895. raise HTTPException(status_code=500, detail=str(e))
  896. @app.post("/api/knowledge", status_code=201)
  897. async def save_knowledge(knowledge: KnowledgeIn, background_tasks: BackgroundTasks):
  898. """保存新知识"""
  899. try:
  900. # 生成 ID
  901. timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
  902. random_suffix = uuid.uuid4().hex[:4]
  903. knowledge_id = f"knowledge-{timestamp}-{random_suffix}"
  904. now = int(time.time())
  905. # 设置默认值
  906. owner = knowledge.owner or f"agent:{knowledge.source.get('agent_id', 'unknown')}"
  907. # 准备 source
  908. source = {
  909. "name": knowledge.source.get("name", ""),
  910. "category": knowledge.source.get("category", ""),
  911. "urls": knowledge.source.get("urls", []),
  912. "agent_id": knowledge.source.get("agent_id", "unknown"),
  913. "submitted_by": knowledge.source.get("submitted_by", ""),
  914. "timestamp": datetime.now(timezone.utc).isoformat(),
  915. "message_id": knowledge.message_id
  916. }
  917. # 准备 eval
  918. eval_data = {
  919. "score": knowledge.eval.get("score", 3),
  920. "helpful": knowledge.eval.get("helpful", 1),
  921. "harmful": knowledge.eval.get("harmful", 0),
  922. "confidence": knowledge.eval.get("confidence", 0.5),
  923. "helpful_history": [],
  924. "harmful_history": []
  925. }
  926. # 生成向量(只基于 task,因为搜索时用户描述的是任务场景)
  927. embedding = await get_embedding(knowledge.task)
  928. # 提取 tag keys(用于高效筛选)
  929. tag_keys = list(knowledge.tags.keys()) if isinstance(knowledge.tags, dict) else []
  930. # 准备插入数据
  931. insert_data = {
  932. "id": knowledge_id,
  933. "embedding": embedding,
  934. "message_id": knowledge.message_id,
  935. "task": knowledge.task,
  936. "content": knowledge.content,
  937. "types": knowledge.types,
  938. "tags": knowledge.tags,
  939. "tag_keys": tag_keys,
  940. "scopes": knowledge.scopes,
  941. "owner": owner,
  942. "resource_ids": knowledge.resource_ids,
  943. "source": source,
  944. "eval": eval_data,
  945. "created_at": now,
  946. "updated_at": now,
  947. "status": "pending",
  948. "relationships": json.dumps([]),
  949. }
  950. print(f"[Save Knowledge] 插入数据: {json.dumps({k: v for k, v in insert_data.items() if k != 'embedding'}, ensure_ascii=False)}")
  951. # 插入 Milvus
  952. pg_store.insert(insert_data)
  953. # 触发后台去重处理
  954. background_tasks.add_task(knowledge_processor.process_pending)
  955. return {"status": "pending", "knowledge_id": knowledge_id, "message": "知识已入队,正在处理去重..."}
  956. except Exception as e:
  957. print(f"[Save Knowledge] 错误: {e}")
  958. raise HTTPException(status_code=500, detail=str(e))
  959. @app.get("/api/knowledge")
  960. def list_knowledge(
  961. page: int = Query(default=1, ge=1),
  962. page_size: int = Query(default=200, ge=1, le=500),
  963. types: Optional[str] = None,
  964. scopes: Optional[str] = None,
  965. owner: Optional[str] = None,
  966. tags: Optional[str] = None,
  967. status: Optional[str] = None
  968. ):
  969. """列出知识(支持后端筛选和分页)"""
  970. try:
  971. # 构建过滤表达式
  972. filters = []
  973. # types 支持多个,用 AND 连接(交集:必须同时包含所有选中的type)
  974. if types:
  975. type_list = [t.strip() for t in types.split(',') if t.strip()]
  976. for t in type_list:
  977. filters.append(f'array_contains(types, "{t}")')
  978. if scopes:
  979. filters.append(f'array_contains(scopes, "{scopes}")')
  980. if owner:
  981. owner_list = [o.strip() for o in owner.split(',') if o.strip()]
  982. if len(owner_list) == 1:
  983. filters.append(f'owner == "{owner_list[0]}"')
  984. else:
  985. # 多个owner用OR连接
  986. owner_filters = [f'owner == "{o}"' for o in owner_list]
  987. filters.append(f'({" or ".join(owner_filters)})')
  988. # tags 支持多个,用 AND 连接(使用 tag_keys 数组进行高效筛选)
  989. if tags:
  990. tag_list = [t.strip() for t in tags.split(',') if t.strip()]
  991. for t in tag_list:
  992. filters.append(f'array_contains(tag_keys, "{t}")')
  993. # 只返回指定 status 的知识(默认 approved 和 checked)
  994. status_list = [s.strip() for s in (status or "approved,checked").split(',') if s.strip()]
  995. status_conditions = ' or '.join([f'status == "{s}"' for s in status_list])
  996. filters.append(f'({status_conditions})')
  997. # 如果没有过滤条件,查询所有
  998. filter_expr = ' and '.join(filters) if filters else 'id != ""'
  999. # 查询 Milvus(先获取所有符合条件的数据)
  1000. # Milvus 的 limit 是总数限制,我们需要获取足够多的数据来支持分页
  1001. max_limit = 10000 # 设置一个合理的上限
  1002. results = pg_store.query(filter_expr, limit=max_limit)
  1003. # 转换为可序列化的格式
  1004. serialized_results = [serialize_milvus_result(r) for r in results]
  1005. # 按 created_at 降序排序(最新的在前)
  1006. serialized_results.sort(key=lambda x: x.get('created_at', 0), reverse=True)
  1007. # 计算分页
  1008. total = len(serialized_results)
  1009. total_pages = (total + page_size - 1) // page_size # 向上取整
  1010. start_idx = (page - 1) * page_size
  1011. end_idx = start_idx + page_size
  1012. page_results = serialized_results[start_idx:end_idx]
  1013. return {
  1014. "results": page_results,
  1015. "pagination": {
  1016. "page": page,
  1017. "page_size": page_size,
  1018. "total": total,
  1019. "total_pages": total_pages
  1020. }
  1021. }
  1022. except Exception as e:
  1023. print(f"[List Knowledge] 错误: {e}")
  1024. raise HTTPException(status_code=500, detail=str(e))
  1025. @app.get("/api/knowledge/meta/tags")
  1026. def get_all_tags():
  1027. """获取所有已有的 tags"""
  1028. try:
  1029. # 查询所有知识
  1030. results = pg_store.query('id != ""', limit=10000)
  1031. all_tags = set()
  1032. for item in results:
  1033. # 转换为标准字典
  1034. serialized_item = serialize_milvus_result(item)
  1035. tags_dict = serialized_item.get("tags", {})
  1036. if isinstance(tags_dict, dict):
  1037. for key in tags_dict.keys():
  1038. all_tags.add(key)
  1039. return {"tags": sorted(list(all_tags))}
  1040. except Exception as e:
  1041. print(f"[Get Tags] 错误: {e}")
  1042. raise HTTPException(status_code=500, detail=str(e))
  1043. @app.get("/api/knowledge/pending")
  1044. def get_pending_knowledge(limit: int = Query(default=50, ge=1, le=200)):
  1045. """查询待处理队列(pending + processing + dedup_passed + analyzing)"""
  1046. try:
  1047. pending = pg_store.query(
  1048. 'status == "pending" or status == "processing" or status == "dedup_passed" or status == "analyzing"',
  1049. limit=limit
  1050. )
  1051. serialized = [serialize_milvus_result(r) for r in pending]
  1052. return {"results": serialized, "count": len(serialized)}
  1053. except Exception as e:
  1054. print(f"[Pending] 错误: {e}")
  1055. raise HTTPException(status_code=500, detail=str(e))
  1056. @app.post("/api/knowledge/process")
  1057. async def trigger_process(force: bool = Query(default=False)):
  1058. """手动触发去重处理。force=true 时先回滚所有 processing → pending,analyzing → dedup_passed"""
  1059. try:
  1060. if force:
  1061. processing = pg_store.query('status == "processing"', limit=200)
  1062. for item in processing:
  1063. pg_store.update(item["id"], {"status": "pending", "updated_at": int(time.time())})
  1064. print(f"[Manual Process] 回滚 {len(processing)} 条 processing → pending")
  1065. analyzing = pg_store.query('status == "analyzing"', limit=200)
  1066. for item in analyzing:
  1067. pg_store.update(item["id"], {"status": "dedup_passed", "updated_at": int(time.time())})
  1068. print(f"[Manual Process] 回滚 {len(analyzing)} 条 analyzing → dedup_passed")
  1069. asyncio.create_task(knowledge_processor.process_pending())
  1070. return {"status": "ok", "message": "处理任务已触发"}
  1071. except Exception as e:
  1072. print(f"[Manual Process] 错误: {e}")
  1073. raise HTTPException(status_code=500, detail=str(e))
  1074. @app.post("/api/knowledge/migrate")
  1075. async def migrate_knowledge_schema():
  1076. """手动触发 schema 迁移(PostgreSQL不需要此功能)"""
  1077. return {"status": "ok", "message": "PostgreSQL不需要schema迁移"}
  1078. @app.get("/api/knowledge/status/{knowledge_id}")
  1079. def get_knowledge_status(knowledge_id: str):
  1080. """查询单条知识的处理状态和关系"""
  1081. try:
  1082. result = pg_store.get_by_id(knowledge_id)
  1083. if not result:
  1084. raise HTTPException(status_code=404, detail=f"Knowledge not found: {knowledge_id}")
  1085. serialized = serialize_milvus_result(result)
  1086. return {
  1087. "id": knowledge_id,
  1088. "status": serialized.get("status", "approved"),
  1089. "relationships": serialized.get("relationships", []),
  1090. "created_at": serialized.get("created_at"),
  1091. "updated_at": serialized.get("updated_at"),
  1092. }
  1093. except HTTPException:
  1094. raise
  1095. except Exception as e:
  1096. print(f"[Knowledge Status] 错误: {e}")
  1097. raise HTTPException(status_code=500, detail=str(e))
  1098. @app.get("/api/knowledge/{knowledge_id}")
  1099. def get_knowledge(knowledge_id: str):
  1100. """获取单条知识"""
  1101. try:
  1102. result = pg_store.get_by_id(knowledge_id)
  1103. if not result:
  1104. raise HTTPException(status_code=404, detail=f"Knowledge not found: {knowledge_id}")
  1105. return serialize_milvus_result(result)
  1106. except HTTPException:
  1107. raise
  1108. except Exception as e:
  1109. print(f"[Get Knowledge] 错误: {e}")
  1110. raise HTTPException(status_code=500, detail=str(e))
  1111. async def _evolve_knowledge_with_llm(old_content: str, feedback: str) -> str:
  1112. """使用 LLM 进行知识进化重写"""
  1113. prompt = f"""你是一个 AI Agent 知识库管理员。请根据反馈建议,对现有的知识内容进行重写进化。
  1114. 【原知识内容】:
  1115. {old_content}
  1116. 【实战反馈建议】:
  1117. {feedback}
  1118. 【重写要求】:
  1119. 1. 融合知识:将反馈中的避坑指南、新参数或修正后的选择逻辑融入原知识,使其更具通用性和准确性。
  1120. 2. 保持结构:如果原内容有特定格式(如 Markdown、代码示例等),请保持该格式。
  1121. 3. 语言:简洁直接,使用中文。
  1122. 4. 禁止:严禁输出任何开场白、解释语或额外的 Markdown 标题,直接返回重写后的正文。
  1123. """
  1124. try:
  1125. response = await _dedup_llm(
  1126. messages=[{"role": "user", "content": prompt}],
  1127. )
  1128. evolved = response.get("content", "").strip()
  1129. if len(evolved) < 5:
  1130. raise ValueError("LLM output too short")
  1131. return evolved
  1132. except Exception as e:
  1133. print(f"知识进化失败,采用追加模式回退: {e}")
  1134. return f"{old_content}\n\n---\n[Update {datetime.now().strftime('%Y-%m-%d')}]: {feedback}"
  1135. @app.put("/api/knowledge/{knowledge_id}")
  1136. async def update_knowledge(knowledge_id: str, update: KnowledgeUpdateIn):
  1137. """更新知识评估,支持知识进化"""
  1138. try:
  1139. # 获取现有知识
  1140. existing = pg_store.get_by_id(knowledge_id)
  1141. if not existing:
  1142. raise HTTPException(status_code=404, detail=f"Knowledge not found: {knowledge_id}")
  1143. eval_data = existing.get("eval", {})
  1144. # 更新评分
  1145. if update.update_score is not None:
  1146. eval_data["score"] = update.update_score
  1147. # 添加有效案例
  1148. if update.add_helpful_case:
  1149. eval_data["helpful"] = eval_data.get("helpful", 0) + 1
  1150. if "helpful_history" not in eval_data:
  1151. eval_data["helpful_history"] = []
  1152. eval_data["helpful_history"].append(update.add_helpful_case)
  1153. # 添加有害案例
  1154. if update.add_harmful_case:
  1155. eval_data["harmful"] = eval_data.get("harmful", 0) + 1
  1156. if "harmful_history" not in eval_data:
  1157. eval_data["harmful_history"] = []
  1158. eval_data["harmful_history"].append(update.add_harmful_case)
  1159. # 知识进化
  1160. content = existing["content"]
  1161. need_reembed = False
  1162. if update.evolve_feedback:
  1163. content = await _evolve_knowledge_with_llm(content, update.evolve_feedback)
  1164. eval_data["helpful"] = eval_data.get("helpful", 0) + 1
  1165. need_reembed = True
  1166. # 准备更新数据
  1167. updates = {
  1168. "content": content,
  1169. "eval": eval_data,
  1170. }
  1171. # 如果内容变化,重新生成向量
  1172. if need_reembed:
  1173. embedding = await get_embedding(existing['task'])
  1174. updates["embedding"] = embedding
  1175. # 更新 Milvus
  1176. pg_store.update(knowledge_id, updates)
  1177. return {"status": "ok", "knowledge_id": knowledge_id}
  1178. except HTTPException:
  1179. raise
  1180. except Exception as e:
  1181. print(f"[Update Knowledge] 错误: {e}")
  1182. raise HTTPException(status_code=500, detail=str(e))
  1183. @app.patch("/api/knowledge/{knowledge_id}")
  1184. async def patch_knowledge(knowledge_id: str, patch: KnowledgePatchIn):
  1185. """直接编辑知识字段"""
  1186. try:
  1187. # 获取现有知识
  1188. existing = pg_store.get_by_id(knowledge_id)
  1189. if not existing:
  1190. raise HTTPException(status_code=404, detail=f"Knowledge not found: {knowledge_id}")
  1191. updates = {}
  1192. need_reembed = False
  1193. if patch.task is not None:
  1194. updates["task"] = patch.task
  1195. need_reembed = True
  1196. if patch.content is not None:
  1197. updates["content"] = patch.content
  1198. # content 变化不需要重新生成 embedding(只基于 task)
  1199. if patch.types is not None:
  1200. updates["types"] = patch.types
  1201. if patch.tags is not None:
  1202. updates["tags"] = patch.tags
  1203. # 同时更新 tag_keys
  1204. updates["tag_keys"] = list(patch.tags.keys()) if isinstance(patch.tags, dict) else []
  1205. if patch.scopes is not None:
  1206. updates["scopes"] = patch.scopes
  1207. if patch.owner is not None:
  1208. updates["owner"] = patch.owner
  1209. if not updates:
  1210. return {"status": "ok", "knowledge_id": knowledge_id}
  1211. # 如果 task 变化,重新生成向量
  1212. if need_reembed:
  1213. task = updates.get("task", existing["task"])
  1214. embedding = await get_embedding(task)
  1215. updates["embedding"] = embedding
  1216. # 更新 Milvus
  1217. pg_store.update(knowledge_id, updates)
  1218. return {"status": "ok", "knowledge_id": knowledge_id}
  1219. except HTTPException:
  1220. raise
  1221. except Exception as e:
  1222. print(f"[Patch Knowledge] 错误: {e}")
  1223. raise HTTPException(status_code=500, detail=str(e))
  1224. @app.delete("/api/knowledge/{knowledge_id}")
  1225. def delete_knowledge(knowledge_id: str):
  1226. """删除单条知识"""
  1227. try:
  1228. # 检查知识是否存在
  1229. existing = pg_store.get_by_id(knowledge_id)
  1230. if not existing:
  1231. raise HTTPException(status_code=404, detail=f"Knowledge not found: {knowledge_id}")
  1232. # 从 PostgreSQL 删除
  1233. pg_store.delete(knowledge_id)
  1234. print(f"[Delete Knowledge] 已删除知识: {knowledge_id}")
  1235. return {"status": "ok", "knowledge_id": knowledge_id}
  1236. except HTTPException:
  1237. raise
  1238. except Exception as e:
  1239. print(f"[Delete Knowledge] 错误: {e}")
  1240. raise HTTPException(status_code=500, detail=str(e))
  1241. @app.post("/api/knowledge/batch_delete")
  1242. def batch_delete_knowledge(knowledge_ids: List[str] = Body(...)):
  1243. """批量删除知识"""
  1244. try:
  1245. if not knowledge_ids:
  1246. raise HTTPException(status_code=400, detail="knowledge_ids cannot be empty")
  1247. # 批量删除
  1248. cursor = pg_store._get_cursor()
  1249. try:
  1250. cursor.execute(
  1251. "DELETE FROM knowledge WHERE id = ANY(%s)",
  1252. (knowledge_ids,)
  1253. )
  1254. pg_store.conn.commit()
  1255. deleted_count = cursor.rowcount
  1256. print(f"[Batch Delete] 已删除 {deleted_count} 条知识")
  1257. return {"status": "ok", "deleted_count": deleted_count}
  1258. finally:
  1259. cursor.close()
  1260. except HTTPException:
  1261. raise
  1262. except Exception as e:
  1263. print(f"[Batch Delete] 错误: {e}")
  1264. raise HTTPException(status_code=500, detail=str(e))
  1265. @app.post("/api/knowledge/batch_verify")
  1266. async def batch_verify_knowledge(batch: KnowledgeBatchVerifyIn):
  1267. """批量验证通过(approved → checked)"""
  1268. if not batch.knowledge_ids:
  1269. return {"status": "ok", "updated": 0}
  1270. try:
  1271. now_iso = datetime.now(timezone.utc).isoformat()
  1272. updated_count = 0
  1273. for kid in batch.knowledge_ids:
  1274. existing = pg_store.get_by_id(kid)
  1275. if not existing:
  1276. continue
  1277. eval_data = existing.get("eval") or {}
  1278. eval_data["verification"] = {
  1279. "status": "checked",
  1280. "verified_by": batch.verified_by,
  1281. "verified_at": now_iso,
  1282. "note": None,
  1283. "issue_type": None,
  1284. "issue_action": None,
  1285. }
  1286. pg_store.update(kid, {"eval": eval_data, "status": "checked", "updated_at": int(time.time())})
  1287. updated_count += 1
  1288. return {"status": "ok", "updated": updated_count}
  1289. except Exception as e:
  1290. print(f"[Batch Verify] 错误: {e}")
  1291. raise HTTPException(status_code=500, detail=str(e))
  1292. @app.post("/api/knowledge/{knowledge_id}/verify")
  1293. async def verify_knowledge(knowledge_id: str, verify: KnowledgeVerifyIn):
  1294. """知识验证:approve 切换 approved↔checked,reject 设为 rejected"""
  1295. try:
  1296. existing = pg_store.get_by_id(knowledge_id)
  1297. if not existing:
  1298. raise HTTPException(status_code=404, detail=f"Knowledge not found: {knowledge_id}")
  1299. current_status = existing.get("status", "approved")
  1300. if verify.action == "approve":
  1301. # checked → approved(取消验证),其他 → checked
  1302. new_status = "approved" if current_status == "checked" else "checked"
  1303. pg_store.update(knowledge_id, {
  1304. "status": new_status,
  1305. "updated_at": int(time.time())
  1306. })
  1307. return {"status": "ok", "new_status": new_status,
  1308. "message": "已取消验证" if new_status == "approved" else "验证通过"}
  1309. elif verify.action == "reject":
  1310. pg_store.update(knowledge_id, {
  1311. "status": "rejected",
  1312. "updated_at": int(time.time())
  1313. })
  1314. return {"status": "ok", "new_status": "rejected", "message": "已拒绝"}
  1315. else:
  1316. raise HTTPException(status_code=400, detail=f"Unknown action: {verify.action}")
  1317. except HTTPException:
  1318. raise
  1319. except Exception as e:
  1320. print(f"[Verify Knowledge] 错误: {e}")
  1321. raise HTTPException(status_code=500, detail=str(e))
  1322. @app.post("/api/knowledge/batch_update")
  1323. async def batch_update_knowledge(batch: KnowledgeBatchUpdateIn):
  1324. """批量反馈知识有效性"""
  1325. if not batch.feedback_list:
  1326. return {"status": "ok", "updated": 0}
  1327. try:
  1328. # 先处理无需进化的,收集需要进化的
  1329. evolution_tasks = [] # [(knowledge_id, old_content, feedback, eval_data)]
  1330. simple_updates = [] # [(knowledge_id, is_effective, eval_data)]
  1331. for item in batch.feedback_list:
  1332. knowledge_id = item.get("knowledge_id")
  1333. is_effective = item.get("is_effective")
  1334. feedback = item.get("feedback", "")
  1335. if not knowledge_id:
  1336. continue
  1337. existing = pg_store.get_by_id(knowledge_id)
  1338. if not existing:
  1339. continue
  1340. eval_data = existing.get("eval", {})
  1341. if is_effective and feedback:
  1342. evolution_tasks.append((knowledge_id, existing["content"], feedback, eval_data, existing["task"]))
  1343. else:
  1344. simple_updates.append((knowledge_id, is_effective, eval_data))
  1345. # 执行简单更新
  1346. for knowledge_id, is_effective, eval_data in simple_updates:
  1347. if is_effective:
  1348. eval_data["helpful"] = eval_data.get("helpful", 0) + 1
  1349. else:
  1350. eval_data["harmful"] = eval_data.get("harmful", 0) + 1
  1351. pg_store.update(knowledge_id, {"eval": eval_data})
  1352. # 并发执行知识进化
  1353. if evolution_tasks:
  1354. print(f"🧬 并发处理 {len(evolution_tasks)} 条知识进化...")
  1355. evolved_results = await asyncio.gather(
  1356. *[_evolve_knowledge_with_llm(old, fb) for _, old, fb, _, _ in evolution_tasks]
  1357. )
  1358. for (knowledge_id, _, _, eval_data, task), evolved_content in zip(evolution_tasks, evolved_results):
  1359. eval_data["helpful"] = eval_data.get("helpful", 0) + 1
  1360. # 重新生成向量(只基于 task)
  1361. embedding = await get_embedding(task)
  1362. pg_store.update(knowledge_id, {
  1363. "content": evolved_content,
  1364. "eval": eval_data,
  1365. "embedding": embedding
  1366. })
  1367. return {"status": "ok", "updated": len(simple_updates) + len(evolution_tasks)}
  1368. except Exception as e:
  1369. print(f"[Batch Update] 错误: {e}")
  1370. raise HTTPException(status_code=500, detail=str(e))
  1371. @app.post("/api/knowledge/slim")
  1372. async def slim_knowledge(model: str = "google/gemini-2.5-flash-lite"):
  1373. """知识库瘦身:合并语义相似知识"""
  1374. try:
  1375. # 获取所有知识
  1376. all_knowledge = pg_store.query('id != ""', limit=10000)
  1377. # 转换为可序列化的格式
  1378. all_knowledge = [serialize_milvus_result(item) for item in all_knowledge]
  1379. if len(all_knowledge) < 2:
  1380. return {"status": "ok", "message": f"知识库仅有 {len(all_knowledge)} 条,无需瘦身"}
  1381. # 构造发给大模型的内容
  1382. entries_text = ""
  1383. for item in all_knowledge:
  1384. eval_data = item.get("eval", {})
  1385. types = item.get("types", [])
  1386. entries_text += f"[ID: {item['id']}] [Types: {','.join(types)}] "
  1387. entries_text += f"[Helpful: {eval_data.get('helpful', 0)}, Harmful: {eval_data.get('harmful', 0)}] [Score: {eval_data.get('score', 3)}]\n"
  1388. entries_text += f"Task: {item['task']}\n"
  1389. entries_text += f"Content: {item['content'][:200]}...\n\n"
  1390. prompt = f"""你是一个 AI Agent 知识库管理员。以下是当前知识库的全部条目,请执行瘦身操作:
  1391. 【任务】:
  1392. 1. 识别语义高度相似或重复的知识,将它们合并为一条更精炼、更通用的知识。
  1393. 2. 合并时保留 helpful 最高的那条的 ID(helpful 取各条之和)。
  1394. 3. 对于独立的、无重复的知识,保持原样不动。
  1395. 【当前知识库】:
  1396. {entries_text}
  1397. 【输出格式要求】:
  1398. 严格按以下格式输出每条知识,条目之间用 === 分隔:
  1399. ID: <保留的id>
  1400. TYPES: <逗号分隔的type列表>
  1401. HELPFUL: <合并后的helpful计数>
  1402. HARMFUL: <合并后的harmful计数>
  1403. SCORE: <评分>
  1404. TASK: <任务描述>
  1405. CONTENT: <合并后的知识内容>
  1406. ===
  1407. 最后输出合并报告:
  1408. REPORT: 原有 X 条,合并后 Y 条,精简了 Z 条。
  1409. 禁止输出任何开场白或解释。"""
  1410. print(f"\n[知识瘦身] 正在调用 {model} 分析 {len(all_knowledge)} 条知识...")
  1411. slim_llm = create_openrouter_llm_call(model=model)
  1412. response = await slim_llm(
  1413. messages=[{"role": "user", "content": prompt}],
  1414. )
  1415. content = response.get("content", "").strip()
  1416. if not content:
  1417. raise HTTPException(status_code=500, detail="LLM 返回为空")
  1418. # 解析大模型输出
  1419. report_line = ""
  1420. new_entries = []
  1421. blocks = [b.strip() for b in content.split("===") if b.strip()]
  1422. for block in blocks:
  1423. if block.startswith("REPORT:"):
  1424. report_line = block
  1425. continue
  1426. lines = block.split("\n")
  1427. kid, types, helpful, harmful, score, task, content_lines = None, [], 0, 0, 3, "", []
  1428. current_field = None
  1429. for line in lines:
  1430. if line.startswith("ID:"):
  1431. kid = line[3:].strip()
  1432. current_field = None
  1433. elif line.startswith("TYPES:"):
  1434. types_str = line[6:].strip()
  1435. types = [t.strip() for t in types_str.split(",") if t.strip()]
  1436. current_field = None
  1437. elif line.startswith("HELPFUL:"):
  1438. try:
  1439. helpful = int(line[8:].strip())
  1440. except Exception:
  1441. helpful = 0
  1442. current_field = None
  1443. elif line.startswith("HARMFUL:"):
  1444. try:
  1445. harmful = int(line[8:].strip())
  1446. except Exception:
  1447. harmful = 0
  1448. current_field = None
  1449. elif line.startswith("SCORE:"):
  1450. try:
  1451. score = int(line[6:].strip())
  1452. except Exception:
  1453. score = 3
  1454. current_field = None
  1455. elif line.startswith("TASK:"):
  1456. task = line[5:].strip()
  1457. current_field = "task"
  1458. elif line.startswith("CONTENT:"):
  1459. content_lines.append(line[8:].strip())
  1460. current_field = "content"
  1461. elif current_field == "task":
  1462. task += "\n" + line
  1463. elif current_field == "content":
  1464. content_lines.append(line)
  1465. if kid and content_lines:
  1466. new_entries.append({
  1467. "id": kid,
  1468. "types": types if types else ["strategy"],
  1469. "helpful": helpful,
  1470. "harmful": harmful,
  1471. "score": score,
  1472. "task": task.strip(),
  1473. "content": "\n".join(content_lines).strip()
  1474. })
  1475. if not new_entries:
  1476. raise HTTPException(status_code=500, detail="解析大模型输出失败")
  1477. # 生成向量并重建知识库
  1478. print(f"[知识瘦身] 正在为 {len(new_entries)} 条知识生成向量...")
  1479. # 批量生成向量(只基于 task)
  1480. texts = [e['task'] for e in new_entries]
  1481. embeddings = await get_embeddings_batch(texts)
  1482. # 清空并重建(PostgreSQL使用TRUNCATE)
  1483. cursor = pg_store._get_cursor()
  1484. try:
  1485. cursor.execute("TRUNCATE TABLE knowledge")
  1486. pg_store.conn.commit()
  1487. finally:
  1488. cursor.close()
  1489. knowledge_list = []
  1490. for e, embedding in zip(new_entries, embeddings):
  1491. eval_data = {
  1492. "score": e["score"],
  1493. "helpful": e["helpful"],
  1494. "harmful": e["harmful"],
  1495. "confidence": 0.9,
  1496. "helpful_history": [],
  1497. "harmful_history": []
  1498. }
  1499. source = {
  1500. "name": "slim",
  1501. "category": "exp",
  1502. "urls": [],
  1503. "agent_id": "slim",
  1504. "submitted_by": "system",
  1505. "timestamp": datetime.now(timezone.utc).isoformat()
  1506. }
  1507. knowledge_list.append({
  1508. "id": e["id"],
  1509. "embedding": embedding,
  1510. "message_id": "",
  1511. "task": e["task"],
  1512. "content": e["content"],
  1513. "types": e["types"],
  1514. "tags": {},
  1515. "tag_keys": [],
  1516. "scopes": ["org:cybertogether"],
  1517. "owner": "agent:slim",
  1518. "resource_ids": [],
  1519. "source": source,
  1520. "eval": eval_data,
  1521. "created_at": now,
  1522. "updated_at": now,
  1523. "status": "approved",
  1524. "relationships": json.dumps([])
  1525. })
  1526. pg_store.insert_batch(knowledge_list)
  1527. result_msg = f"瘦身完成:{len(all_knowledge)} → {len(new_entries)} 条知识"
  1528. if report_line:
  1529. result_msg += f"\n{report_line}"
  1530. print(f"[知识瘦身] {result_msg}")
  1531. return {"status": "ok", "before": len(all_knowledge), "after": len(new_entries), "report": report_line}
  1532. except HTTPException:
  1533. raise
  1534. except Exception as e:
  1535. print(f"[Slim Knowledge] 错误: {e}")
  1536. raise HTTPException(status_code=500, detail=str(e))
  1537. @app.post("/api/extract")
  1538. async def extract_knowledge_from_messages(extract_req: MessageExtractIn, background_tasks: BackgroundTasks):
  1539. """从消息历史中提取知识(LLM 分析)"""
  1540. if not extract_req.submitted_by:
  1541. raise HTTPException(status_code=400, detail="submitted_by is required")
  1542. messages = extract_req.messages
  1543. if not messages or len(messages) == 0:
  1544. return {"status": "ok", "extracted_count": 0, "knowledge_ids": []}
  1545. # 构造消息历史文本
  1546. messages_text = ""
  1547. for msg in messages:
  1548. role = msg.get("role", "unknown")
  1549. content = msg.get("content", "")
  1550. messages_text += f"[{role}]: {content}\n\n"
  1551. # LLM 提取知识
  1552. prompt = f"""你是一个知识提取专家。请从以下 Agent 对话历史中提取有价值的知识。
  1553. 【对话历史】:
  1554. {messages_text}
  1555. 【提取要求】:
  1556. 1. 识别对话中的关键知识点(工具使用经验、问题解决方案、最佳实践、踩坑经验等)
  1557. 2. 每条知识必须包含:
  1558. - task: 任务场景描述(在什么情况下,要完成什么目标)
  1559. - content: 核心知识内容(具体可操作的方法、注意事项)
  1560. - types: 知识类型(从 strategy/tool/user_profile/usecase/definition/plan 中选择)
  1561. - score: 评分 1-5(根据知识的价值和可操作性)
  1562. 3. 只提取有实际价值的知识,不要提取泛泛而谈的内容,一次就成功或比较简单的经验就不要记录了。
  1563. 4. 如果没有值得提取的知识,返回空列表
  1564. 【输出格式】:
  1565. 严格按以下 JSON 格式输出,每条知识之间用逗号分隔:
  1566. [
  1567. {{
  1568. "task": "任务场景描述",
  1569. "content": "核心知识内容",
  1570. "types": ["strategy"],
  1571. "score": 4
  1572. }},
  1573. {{
  1574. "task": "另一个任务场景",
  1575. "content": "另一个知识内容",
  1576. "types": ["tool"],
  1577. "score": 5
  1578. }}
  1579. ]
  1580. 如果没有知识,输出: []
  1581. **注意**:只记录经过多次尝试、或经过用户指导才成功的知识,一次就成功或比较简单的经验就不要记录了。
  1582. 禁止输出任何解释或额外文本,只输出 JSON 数组。"""
  1583. try:
  1584. print(f"\n[Extract] 正在从 {len(messages)} 条消息中提取知识...")
  1585. response = await _dedup_llm(
  1586. messages=[{"role": "user", "content": prompt}],
  1587. )
  1588. content = response.get("content", "").strip()
  1589. # 尝试解析 JSON
  1590. # 移除可能的 markdown 代码块标记
  1591. if content.startswith("```json"):
  1592. content = content[7:]
  1593. if content.startswith("```"):
  1594. content = content[3:]
  1595. if content.endswith("```"):
  1596. content = content[:-3]
  1597. content = content.strip()
  1598. extracted_knowledge = json.loads(content)
  1599. if not isinstance(extracted_knowledge, list):
  1600. raise ValueError("LLM output is not a list")
  1601. if not extracted_knowledge:
  1602. return {"status": "ok", "extracted_count": 0, "knowledge_ids": []}
  1603. # 批量生成向量(只基于 task)
  1604. texts = [item.get('task', '') for item in extracted_knowledge]
  1605. embeddings = await get_embeddings_batch(texts)
  1606. # 保存提取的知识
  1607. knowledge_ids = []
  1608. now = int(time.time())
  1609. knowledge_list = []
  1610. for item, embedding in zip(extracted_knowledge, embeddings):
  1611. task = item.get("task", "")
  1612. knowledge_content = item.get("content", "")
  1613. types = item.get("types", ["strategy"])
  1614. score = item.get("score", 3)
  1615. if not task or not knowledge_content:
  1616. continue
  1617. # 生成 ID
  1618. timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
  1619. random_suffix = uuid.uuid4().hex[:4]
  1620. knowledge_id = f"knowledge-{timestamp}-{random_suffix}"
  1621. # 准备数据
  1622. source = {
  1623. "name": "message_extraction",
  1624. "category": "exp",
  1625. "urls": [],
  1626. "agent_id": extract_req.agent_id,
  1627. "submitted_by": extract_req.submitted_by,
  1628. "timestamp": datetime.now(timezone.utc).isoformat(),
  1629. "session_key": extract_req.session_key
  1630. }
  1631. eval_data = {
  1632. "score": score,
  1633. "helpful": 1,
  1634. "harmful": 0,
  1635. "confidence": 0.7,
  1636. "helpful_history": [],
  1637. "harmful_history": []
  1638. }
  1639. knowledge_list.append({
  1640. "id": knowledge_id,
  1641. "embedding": embedding,
  1642. "message_id": "",
  1643. "task": task,
  1644. "content": knowledge_content,
  1645. "types": types,
  1646. "tags": {},
  1647. "tag_keys": [],
  1648. "scopes": ["org:cybertogether"],
  1649. "owner": extract_req.submitted_by,
  1650. "resource_ids": [],
  1651. "source": source,
  1652. "eval": eval_data,
  1653. "created_at": now,
  1654. "updated_at": now,
  1655. "status": "pending",
  1656. "relationships": json.dumps([]),
  1657. })
  1658. knowledge_ids.append(knowledge_id)
  1659. # 批量插入
  1660. if knowledge_list:
  1661. pg_store.insert_batch(knowledge_list)
  1662. background_tasks.add_task(knowledge_processor.process_pending)
  1663. print(f"[Extract] 成功提取并保存 {len(knowledge_ids)} 条知识")
  1664. return {
  1665. "status": "ok",
  1666. "extracted_count": len(knowledge_ids),
  1667. "knowledge_ids": knowledge_ids
  1668. }
  1669. except json.JSONDecodeError as e:
  1670. print(f"[Extract] JSON 解析失败: {e}")
  1671. print(f"[Extract] LLM 输出: {content[:500]}")
  1672. return {"status": "error", "error": "Failed to parse LLM output", "extracted_count": 0}
  1673. except Exception as e:
  1674. print(f"[Extract] 提取失败: {e}")
  1675. return {"status": "error", "error": str(e), "extracted_count": 0}
  1676. @app.get("/", response_class=FileResponse)
  1677. def frontend():
  1678. """KnowHub 管理前端"""
  1679. index_file = STATIC_DIR / "index.html"
  1680. if not index_file.exists():
  1681. return HTMLResponse("<h1>KnowHub Frontend Not Found</h1><p>Please ensure knowhub/static/index.html exists.</p>", status_code=404)
  1682. return FileResponse(str(index_file))
if __name__ == "__main__":
    # Direct-run entry point: serve the app on all interfaces, port 9999.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=9999)