#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Enhanced search system V2.
Full pipeline with LLM evaluation and extended-search support.
"""
import json
import logging
import time
import os
import argparse
import subprocess
from typing import Dict, List, Any, Optional
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor, as_completed

from openrouter_client import OpenRouterClient
from llm_evaluator import LLMEvaluator
from xiaohongshu_search import XiaohongshuSearch
from stage7_analyzer import Stage7DeconstructionAnalyzer

# Logging configuration
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    handlers=[
        logging.FileHandler('enhanced_search_v2.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


class EnhancedSearchV2:
    """Enhanced search system V2.

    Note: method names keep their historical stage numbers (e.g. the query
    generation step is named stage4_* but runs third); the actual order is
    defined in run_full_pipeline().
    """

    def __init__(
        self,
        how_json_path: str,
        openrouter_api_key: Optional[str] = None,
        output_dir: str = "output_v2",
        top_n: int = 10,
        max_total_searches: Optional[int] = None,
        search_max_workers: int = 3,
        max_searches_per_feature: Optional[int] = None,
        max_searches_per_base_word: Optional[int] = None,
        enable_stage5: bool = True,
        stage5_max_workers: int = 10,
        stage5_max_notes: int = 20,
        enable_stage6: bool = False,
        stage6_only: bool = False,
        stage6_max_workers: int = 5,
        stage6_max_notes: Optional[int] = None,
        stage6_skip: int = 0,
        stage6_sort_by: str = 'score',
        stage6_api_url: str = "http://192.168.245.150:7000/what/analysis/single",
        stage6_min_score: float = 0.8
    ):
        """
        Initialize the system.

        Args:
            how_json_path: Path to the "how" deconstruction file
            openrouter_api_key: OpenRouter API key
            output_dir: Output directory
            top_n: Keep the N highest-scored search words per original feature (default 10)
            max_total_searches: Global cap on the number of searches (default None, unlimited)
            search_max_workers: Search concurrency (default 3)
            max_searches_per_feature: Max searches per original feature (default None, unlimited)
            max_searches_per_base_word: Max searches per base_word (default None, unlimited)
            enable_stage5: Whether to run the Stage 5 evaluation (default True here;
                the --enable-stage5 CLI flag defaults to off)
            stage5_max_workers: Stage 5 evaluation concurrency (default 10)
            stage5_max_notes: Max notes evaluated per search result (default 20)
            enable_stage6: Whether to run the Stage 6 deep deconstruction (default False)
            stage6_only: Run Stage 6 only, starting from the Stage 5 results (default False)
            stage6_max_workers: Stage 6 concurrency (default 5)
            stage6_max_notes: Max notes Stage 6 will process (default None, unlimited)
            stage6_skip: Skip the first N notes in Stage 6 (default 0)
            stage6_sort_by: Stage 6 sort order: score/time/engagement (default score)
            stage6_api_url: Stage 6 deconstruction API endpoint
            stage6_min_score: Minimum score for Stage 6 processing (default 0.8 on a 0-1 scale)
        """
        self.how_json_path = how_json_path
        self.output_dir = output_dir
        self.top_n = top_n
        self.max_total_searches = max_total_searches
        self.search_max_workers = search_max_workers
        self.max_searches_per_feature = max_searches_per_feature
        self.max_searches_per_base_word = max_searches_per_base_word
        self.enable_stage5 = enable_stage5
        self.stage5_max_workers = stage5_max_workers
        self.stage5_max_notes = stage5_max_notes
        self.enable_stage6 = enable_stage6
        self.stage6_only = stage6_only

        # Create the output directory
        os.makedirs(output_dir, exist_ok=True)

        # Load data
        logger.info("加载数据文件...")
        self.how_data = self._load_json(how_json_path)
        logger.info(" ✓ 已加载 how.json")

        # Initialize components
        logger.info("初始化组件...")
        self.openrouter_client = OpenRouterClient(
            api_key=openrouter_api_key,
            model="google/gemini-2.5-flash",
            retry_delay=5  # longer retry delay to avoid rate limiting
        )
        self.llm_evaluator = LLMEvaluator(self.openrouter_client)
        self.search_client = XiaohongshuSearch()

        # Initialize the Stage 6 analyzer (deep deconstruction)
        self.stage6_analyzer = Stage7DeconstructionAnalyzer(
            api_url=stage6_api_url,
            max_workers=stage6_max_workers,
            max_notes=stage6_max_notes,
            min_score=stage6_min_score,
            skip_count=stage6_skip,
            sort_by=stage6_sort_by,
            output_dir=output_dir,
            enable_image_download=False,  # use the original image URLs directly, no proxying
            image_server_url="http://localhost:8765",  # image server URL (deprecated)
            image_download_dir="downloaded_images"  # image download directory (deprecated)
        )
        logger.info("系统初始化完成")

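    # Minimal programmatic use (illustrative; assumes a valid how.json and an
    # OpenRouter API key available to OpenRouterClient, e.g. via its
    # environment variable):
    #   system = EnhancedSearchV2(how_json_path="input/example_how.json")
    #   system.run_full_pipeline()
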
    def _load_json(self, file_path: str) -> Any:
        """Load a JSON file."""
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception as e:
            logger.error(f"加载文件失败 {file_path}: {e}")
            raise

    def _save_json(self, data: Any, file_path: str):
        """Save data as a JSON file."""
        try:
            with open(file_path, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
            logger.info(f"已保存: {file_path}")
        except Exception as e:
            logger.error(f"保存文件失败 {file_path}: {e}")
            raise

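    # For reference, Stages 1 and 2 read how.json in roughly the shape
    # sketched below. The field names are taken from the lookups in this
    # file; the nesting and values are illustrative, not an upstream schema
    # guarantee:
    #
    # {
    #   "解构结果": {
    #     "灵感点列表": [
    #       {
    #         "名称": "...",
    #         "匹配人设结果": [
    #           {"人设特征名称": "...", "人设特征层级": "...",
    #            "特征类型": "分类", "特征分类": ["...", "..."],
    #            "相似度": 0.72, "说明": "..."}
    #         ]
    #       }
    #     ],
    #     "关键点列表": [ ... ],
    #     "目的点列表": [ ... ]
    #   }
    # }
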
    # ========== Stage 1: keep features with 0.5 <= similarity < 0.8 ==========
    def stage1_filter_features(self) -> List[Dict[str, Any]]:
        """
        Stage 1: filter features with medium match quality.

        Keep condition: 0.5 <= highest similarity < 0.8

        Returns:
            The filtered feature list
        """
        logger.info("=" * 60)
        logger.info("阶段1:筛选中等匹配度特征 (0.5 <= 相似度 < 0.8)")
        logger.info("=" * 60)
        results = []
        how_result = self.how_data.get('解构结果', {})
        total_features = 0
        filtered_out_low = 0   # < 0.5
        filtered_out_high = 0  # >= 0.8
        selected_count = 0

        # Iterate over the three dimensions
        for level_name, level_list in how_result.items():
            if not isinstance(level_list, list):
                continue
            logger.info(f"\n处理 {level_name}...")
            for item_idx, item in enumerate(level_list):
                item_name = item.get('名称', f'未命名-{item_idx}')
                # New format: read the persona match results directly at the point level
                match_results = item.get('匹配人设结果', [])
                total_features += 1
                if not match_results:
                    logger.info(f" ✗ {item_name}: 无匹配结果")
                    continue
                # Find the highest similarity (new format: similarity is a direct field)
                max_similarity = max(
                    (m.get('相似度', 0) for m in match_results),
                    default=0
                )
                # Filter conditions
                if max_similarity < 0.5:
                    filtered_out_low += 1
                    logger.info(f" ✗ {item_name}: 最高相似度 {max_similarity:.3f} < 0.5(过滤)")
                    continue
                elif max_similarity >= 0.8:
                    filtered_out_high += 1
                    logger.info(f" ✗ {item_name}: 最高相似度 {max_similarity:.3f} >= 0.8(过滤)")
                    continue
                # 0.5 <= max_similarity < 0.8: keep.
                # Sort the matches by similarity, descending, and take the top 3.
                sorted_matches = sorted(
                    match_results,
                    key=lambda x: x.get('相似度', 0),
                    reverse=True
                )
                top3_matches = sorted_matches[:3]
                # Build the top-3 match info list
                top3_match_info = []
                for match in top3_matches:
                    feature_classification = match.get('特征分类', [])
                    classification_path = self._build_classification_path(feature_classification)
                    # Read the feature type directly from the match result
                    is_classification = (match.get('特征类型') == '分类')
                    top3_match_info.append({
                        '人设特征名称': match.get('人设特征名称'),
                        '人设特征层级': match.get('人设特征层级'),
                        '特征类型': match.get('特征类型'),
                        '特征分类': feature_classification,
                        '相似度': match.get('相似度', 0),  # direct field
                        '匹配说明': match.get('说明', ''),  # direct field
                        '是分类': is_classification,
                        '所属分类路径': classification_path
                    })
                result_item = {
                    '原始特征名称': item_name,  # use the point name as the feature name
                    '来源层级': level_name,
                    '权重': 1.0,  # the new format has no weight field; default to 1.0
                    '所属点名称': item_name,
                    '最高匹配信息': top3_match_info[0],  # keep the first for Stage 2
                    'top3匹配信息': top3_match_info  # new field
                }
                results.append(result_item)
                selected_count += 1
                # Log the top-3 match info
                top3_names = [m['人设特征名称'] for m in top3_match_info]
                logger.info(f" ✓ {item_name} → Top{len(top3_match_info)}: {', '.join(top3_names)}")

        # Summary statistics
        logger.info("\n" + "=" * 60)
        logger.info("阶段1完成")
        logger.info(f" 总特征数: {total_features}")
        logger.info(f" 过滤掉(<0.5): {filtered_out_low}")
        logger.info(f" 过滤掉(>=0.8): {filtered_out_high}")
        logger.info(f" 保留(0.5-0.8): {selected_count}")
        logger.info("=" * 60)

        # Save results
        output_path = os.path.join(self.output_dir, "stage1_filtered_features.json")
        self._save_json(results, output_path)
        return results

    def _build_classification_path(self, feature_classification: List[str]) -> str:
        """
        Build a classification path.

        Args:
            feature_classification: The feature classification array

        Returns:
            The classification path
        """
        if not feature_classification:
            return ""
        # Step 1: strip the "实质" suffix from middle elements
        cleaned = []
        for i, item in enumerate(feature_classification):
            if i == len(feature_classification) - 1:  # keep the last element
                cleaned.append(item)
            elif item.endswith("实质") and i != 0:  # strip "实质" from middle elements
                cleaned.append(item[:-2])
            else:
                cleaned.append(item)
        # Step 2: reverse the array
        reversed_list = list(reversed(cleaned))
        # Step 3: join into a path
        path = "/".join(reversed_list)
        return path

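    # A minimal worked example for _build_classification_path (hypothetical
    # input): ["美食", "烘焙实质", "生活方式"] keeps the first and last items,
    # trims the middle "烘焙实质" to "烘焙", reverses the list, and joins it
    # into "生活方式/烘焙/美食".
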
    # ========== Stage 2: extract high-similarity candidate words from the how file ==========
    def stage2_extract_candidates(self, filtered_features: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Stage 2: extract candidate words with similarity >= 0.8 from the how file.

        Processing steps:
        1. Walk the match results of every feature in how_data['解构结果']
        2. Keep persona feature names with similarity >= 0.8
        3. Deduplicate (keeping the highest similarity)
        4. Sort by similarity, descending
        5. Assign the candidate list to each base word
        6. Build the '高相似度候选_按base_word' structure

        Args:
            filtered_features: The feature list filtered in Stage 1

        Returns:
            The feature list with high-similarity candidates attached
        """
        logger.info("=" * 60)
        logger.info("阶段2:从how文件提取高相似度候选词")
        logger.info("=" * 60)

        # Step 1: extract candidate words from the whole how file
        candidates_dict = {}  # {persona feature name: candidate info}
        how_result = self.how_data.get('解构结果', {})
        # Iterate over the three dimensions
        for dimension in ['灵感点列表', '关键点列表', '目的点列表']:
            items_list = how_result.get(dimension, [])
            for item in items_list:
                item_name = item.get('名称', '')
                # New format: read the persona match results directly at the point level
                matches = item.get('匹配人设结果', [])
                for match in matches:
                    # New format: similarity is a direct field
                    similarity = match.get('相似度', 0)
                    persona_feature_name = match.get('人设特征名称', '')
                    # Keep matches with similarity >= 0.8
                    if similarity >= 0.8 and persona_feature_name:
                        # Deduplication: keep the highest similarity
                        if persona_feature_name not in candidates_dict or \
                                similarity > candidates_dict[persona_feature_name]['相似度']:
                            candidates_dict[persona_feature_name] = {
                                '人设特征名称': persona_feature_name,
                                '相似度': similarity,
                                '特征类型': match.get('特征类型', ''),
                                '特征分类': match.get('特征分类', []),
                                '人设特征层级': match.get('人设特征层级', ''),
                                '来源路径': self._build_classification_path(match.get('特征分类', [])),
                                '匹配说明': match.get('说明', ''),  # direct field
                                '来源原始特征': item_name  # use the point name
                            }

        # Step 2: convert to a list sorted by similarity, descending
        global_candidates = sorted(
            candidates_dict.values(),
            key=lambda x: x['相似度'],
            reverse=True
        )
        logger.info(f"从how文件提取到 {len(global_candidates)} 个唯一的高相似度候选词")
        # Log the top-10 candidates
        if global_candidates:
            logger.info("Top 10 候选词:")
            for i, candidate in enumerate(global_candidates[:10], 1):
                logger.info(f" {i}. {candidate['人设特征名称']} (相似度: {candidate['相似度']:.3f})")

        # Step 3: build the output structure for each feature
        results = []
        for idx, feature_data in enumerate(filtered_features, 1):
            original_feature_name = feature_data.get('原始特征名称', '')
            logger.info(f"\n[{idx}/{len(filtered_features)}] 处理: {original_feature_name}")
            top3_matches = feature_data.get('top3匹配信息', [])
            # Extract up to three base words
            base_words = [match.get('人设特征名称', '') for match in top3_matches[:3]]
            logger.info(f" 中心词: {', '.join(base_words)}")
            # All base words share the same candidate list
            high_similarity_by_base = {}
            for base_word in base_words:
                if base_word:
                    high_similarity_by_base[base_word] = global_candidates.copy()
            logger.info(f" 每个中心词分配 {len(global_candidates)} 个候选词")
            result = {
                '原始特征名称': original_feature_name,
                '来源层级': feature_data.get('来源层级', ''),  # keep metadata
                '权重': feature_data.get('权重', 0),  # keep metadata
                'top3匹配信息': top3_matches,
                '找到的关联_按base_word': {},  # the new flow needs no association analysis
                '高相似度候选_按base_word': high_similarity_by_base
            }
            results.append(result)

        # Save results
        output_path = os.path.join(self.output_dir, 'stage2_candidates.json')
        self._save_json(results, output_path)
        logger.info("\n" + "=" * 60)
        logger.info("阶段2完成")
        logger.info(f" 提取候选词: {len(global_candidates)} 个")
        logger.info(f" 处理特征: {len(results)} 个")
        logger.info("=" * 60)
        return results

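    # Shape of each Stage 2 result item as written to stage2_candidates.json
    # (values illustrative):
    # {
    #   '原始特征名称': '...',
    #   '来源层级': '灵感点列表',
    #   '权重': 1.0,
    #   'top3匹配信息': [ ... ],
    #   '找到的关联_按base_word': {},
    #   '高相似度候选_按base_word': {'<base_word>': [<candidate>, ...]}
    # }
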
    # ========== Stage 4 (run third): query generation + LLM evaluation ==========
    def stage4_generate_and_evaluate_search_words(
        self,
        features_data: List[Dict[str, Any]],
        max_workers: int = 4,
        max_candidates: int = 20,
        max_combo_length: int = 4
    ) -> List[Dict[str, Any]]:
        """
        Stage 4: generate search queries and evaluate them with the LLM.

        Using the Stage 1 base words and the Stage 2 high-similarity
        candidates, the LLM generates search queries (up to 10 per base
        word). Historically this stage enumerated all 2-to-N word
        combinations and scored them; max_combo_length is retained from that
        flow and is currently unused.

        Args:
            features_data: The Stage 2 data (with high-similarity candidates)
            max_workers: Number of original features evaluated concurrently (default 4)
            max_candidates: Max number of candidate words passed to the LLM (default 20)
            max_combo_length: Max combination length (default 4; retained for
                compatibility, currently unused)

        Returns:
            The data with LLM evaluations attached
        """
        logger.info("=" * 60)
        logger.info("阶段4:多词组合 + LLM评估")
        logger.info(f" 最大候选词数: {max_candidates}")
        logger.info(f" 最大组合长度: {max_combo_length} 词")
        logger.info(f" 并发数: {max_workers} 个原始特征")
        logger.info("=" * 60)
        total_features = len(features_data)

        # Process the original features in parallel
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            # Submit all tasks
            futures = []
            for idx, feature_result in enumerate(features_data, 1):
                future = executor.submit(
                    self._process_single_feature_combinations,
                    idx,
                    total_features,
                    feature_result,
                    max_candidates,
                    max_combo_length
                )
                futures.append((future, feature_result))
            # Wait for completion and collect results
            for future, feature_result in futures:
                try:
                    _ = future.result()  # results are written back into feature_result
                except Exception as e:
                    logger.error(f" 评估失败: {feature_result['原始特征名称']}, 错误: {e}")

        # Save results
        output_path = os.path.join(self.output_dir, "stage4_combinations_evaluated.json")
        self._save_json(features_data, output_path)
        logger.info("\n" + "=" * 60)
        logger.info("阶段4完成")
        logger.info("=" * 60)
        return features_data

    def _process_single_feature_combinations(
        self,
        idx: int,
        total: int,
        feature_result: Dict[str, Any],
        max_candidates: int,
        max_combo_length: int
    ) -> None:
        """
        Generate and evaluate queries for a single original feature.

        Improvement: each base_word uses its own candidate list (instead of
        a shared one).

        Steps:
        1. Get the top-3 base_words from Stage 1's top3匹配信息
        2. For each base_word:
           a. Get candidates from Stage 2's 高相似度候选_按base_word
           b. Generate queries with the LLM
           c. Select the top 10
        3. Save the grouped results

        Args:
            idx: Feature index
            total: Total number of features
            feature_result: The feature result data
            max_candidates: Max number of candidate words passed to the LLM
            max_combo_length: Max combination length (currently unused)
        """
        original_feature = feature_result['原始特征名称']
        logger.info(f"\n[{idx}/{total}] 处理: {original_feature}")

        # Step 1: get the top-3 base words
        top3_info = feature_result.get('top3匹配信息', [])
        if not top3_info:
            logger.info(" 无top3匹配信息,跳过")
            feature_result['组合评估结果_分组'] = []
            return
        logger.info(f" 找到 {len(top3_info)} 个base_word")

        # Step 2: get the candidate words grouped by base_word
        candidates_by_base_word = feature_result.get('高相似度候选_按base_word', {})
        if not candidates_by_base_word:
            logger.warning(" 无按base_word分组的候选词,跳过")
            feature_result['组合评估结果_分组'] = []
            return

        # Step 3: process each base_word independently
        grouped_results = []
        for base_idx, base_info in enumerate(top3_info, 1):
            base_word = base_info.get('人设特征名称', '')
            base_similarity = base_info.get('相似度', 0)
            if not base_word:
                continue
            logger.info(f" [{base_idx}/{len(top3_info)}] Base Word: {base_word} (相似度: {base_similarity:.3f})")
            # Get this base_word's candidates
            base_candidates = candidates_by_base_word.get(base_word, [])
            candidates = base_candidates[:max_candidates]
            candidate_words = [c['人设特征名称'] for c in candidates]
            if not candidate_words:
                logger.warning(" 该base_word无候选词,跳过")
                grouped_results.append({
                    'base_word': base_word,
                    'base_word_similarity': base_similarity,
                    'base_word_info': base_info,
                    'top10_searches': [],
                    'available_words': []
                })
                continue
            logger.info(f" 候选词数量: {len(candidate_words)} (限制: {max_candidates})")
            # Generate queries with the LLM (new flow: the LLM builds queries
            # directly from the candidate words)
            logger.info(f" 使用LLM生成query(中心词: {base_word})...")
            evaluated = self.llm_evaluator.generate_queries_from_candidates(
                original_feature=original_feature,
                base_word=base_word,
                candidate_words=candidate_words,
                max_queries=10
            )
            # Keep the top 10 (the LLM generation method already caps the count)
            top_10 = evaluated[:10]
            logger.info(f" 生成完成,共 {len(top_10)} 个query")
            # Save the grouped result: each base_word has its own available_words
            grouped_results.append({
                'base_word': base_word,
                'base_word_similarity': base_similarity,
                'base_word_info': base_info,
                'top10_searches': top_10,
                'available_words': candidate_words  # this base_word's own candidates
            })

        # Write the results back
        feature_result['组合评估结果_分组'] = grouped_results
        total_searches = sum(len(g['top10_searches']) for g in grouped_results)
        logger.info(f" 完成!共 {len(grouped_results)} 个base_word,{total_searches} 个搜索词")

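    # Each entry in 'top10_searches' is expected to carry at least
    # 'search_word' and 'score' (read downstream in stage4_execute_searches).
    # The search stage later writes 'search_result' and 'search_metadata'
    # into the same dict, and Stage 5 adds 'evaluation_with_filter'.
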
    # ========== Stage 4: execute the searches ==========
    def _execute_single_search(
        self,
        idx: int,
        total: int,
        search_word: str,
        feature_ref: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Execute a single search task (used for concurrent execution).

        Args:
            idx: Search index
            total: Total number of searches
            search_word: The search word
            feature_ref: The feature reference (results are written into it)

        Returns:
            Search result info
        """
        logger.info(f"[{idx}/{total}] 搜索: {search_word}")
        try:
            result = self.search_client.search(
                keyword=search_word,
                content_type='不限',
                sort_type='综合',
                max_retries=3,
                use_cache=True  # enable the search cache
            )
            note_count = len(result.get('data', {}).get('data', []))
            logger.info(f" ✓ 成功,获取 {note_count} 条帖子")
            # Write the result back
            feature_ref['search_result'] = result
            feature_ref['search_metadata'] = {
                'searched_at': datetime.now().isoformat(),
                'status': 'success',
                'note_count': note_count,
                'search_params': {
                    'keyword': search_word,
                    'content_type': '不限',  # record the parameters actually passed to search()
                    'sort_type': '综合'
                }
            }
            return {'status': 'success', 'search_word': search_word, 'note_count': note_count}
        except Exception as e:
            logger.error(f" ✗ 失败: {e}")
            feature_ref['search_result'] = None
            feature_ref['search_metadata'] = {
                'searched_at': datetime.now().isoformat(),
                'status': 'failed',
                'note_count': 0,
                'error': str(e)
            }
            return {'status': 'failed', 'search_word': search_word, 'error': str(e)}

    def stage4_execute_searches(
        self,
        features_data: List[Dict[str, Any]],
        search_delay: float = 2.0,
        top_n: int = 10
    ) -> List[Dict[str, Any]]:
        """
        Stage 4: execute the Xiaohongshu searches.

        Args:
            features_data: The Stage 4 combination data
            search_delay: Delay between searches (currently unused; searches run concurrently)
            top_n: Keep the N highest-scored search words per original feature (currently unused)

        Returns:
            The data with search results attached
        """
        logger.info("=" * 60)
        logger.info("阶段4:执行小红书搜索")
        logger.info("=" * 60)

        # Collect search words grouped by original feature (read from Stage 4's 组合评估结果_分组)
        feature_search_groups = {}
        for feature_result in features_data:
            original_feature = feature_result['原始特征名称']
            if original_feature not in feature_search_groups:
                feature_search_groups[original_feature] = []
            # Read from Stage 4's 组合评估结果_分组 (new structure)
            grouped_results = feature_result.get('组合评估结果_分组', [])
            if grouped_results:
                # Grouped structure: every base_word's top 10 is executed
                for group in grouped_results:
                    base_word = group.get('base_word', '')
                    base_similarity = group.get('base_word_similarity', 0)
                    base_word_searches = []
                    for eval_item in group.get('top10_searches', []):
                        sw = eval_item.get('search_word')
                        if not sw:
                            continue
                        score = eval_item.get('score', 0.0)
                        base_word_searches.append({
                            'search_word': sw,
                            'score': score,
                            'base_word': base_word,
                            'base_word_similarity': base_similarity,
                            'feature_ref': eval_item  # reference used to write back the search result
                        })
                    # Apply the per-base_word search limit
                    if self.max_searches_per_base_word and len(base_word_searches) > self.max_searches_per_base_word:
                        logger.info(f" 应用base_word限制: {base_word} 从 {len(base_word_searches)} 减少到 {self.max_searches_per_base_word}")
                        base_word_searches = base_word_searches[:self.max_searches_per_base_word]
                    feature_search_groups[original_feature].extend(base_word_searches)
            else:
                # Backward compatibility with the old structure (组合评估结果)
                for eval_item in feature_result.get('组合评估结果', []):
                    sw = eval_item.get('search_word')
                    if not sw:
                        continue
                    score = eval_item.get('score', 0.0)
                    feature_search_groups[original_feature].append({
                        'search_word': sw,
                        'score': score,
                        'feature_ref': eval_item
                    })
            # Apply the per-feature search limit
            if self.max_searches_per_feature and len(feature_search_groups[original_feature]) > self.max_searches_per_feature:
                logger.info(f" 应用特征限制: {original_feature} 从 {len(feature_search_groups[original_feature])} 减少到 {self.max_searches_per_feature}")
                feature_search_groups[original_feature] = feature_search_groups[original_feature][:self.max_searches_per_feature]

        # Collect all search tasks (with the grouped structure every base_word's top 10 runs; no further filtering)
        all_searches = []
        total_count = 0
        for original_feature, search_list in feature_search_groups.items():
            total_count += len(search_list)
            all_searches.extend(search_list)
            logger.info(f" {original_feature}: {len(search_list)} 个搜索词")

        # Apply the global search limit
        if self.max_total_searches and len(all_searches) > self.max_total_searches:
            logger.info(f" 应用全局限制:从 {len(all_searches)} 个减少到 {self.max_total_searches} 个")
            all_searches = all_searches[:self.max_total_searches]
        logger.info(f"\n共 {len(all_searches)} 个搜索任务")
        logger.info(f" 并发执行搜索(并发数: {self.search_max_workers})")

        # Execute the searches concurrently
        with ThreadPoolExecutor(max_workers=self.search_max_workers) as executor:
            # Submit all search tasks
            futures = []
            for idx, item in enumerate(all_searches, 1):
                future = executor.submit(
                    self._execute_single_search,
                    idx,
                    len(all_searches),
                    item['search_word'],
                    item['feature_ref']
                )
                futures.append(future)
            # Wait for all searches to finish
            for future in as_completed(futures):
                try:
                    future.result()  # results are already written into feature_ref
                except Exception as e:
                    logger.error(f" 搜索任务失败: {e}")

        # Save results
        output_path = os.path.join(self.output_dir, "stage4_with_search_results.json")
        self._save_json(features_data, output_path)
        logger.info("\n" + "=" * 60)
        logger.info("阶段4完成")
        logger.info("=" * 60)
        return features_data

    # ========== Stage 5: LLM evaluation of the search results ==========
    def stage5_evaluate_search_results(
        self,
        features_data: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """
        Stage 5: evaluate the search results with the LLM (multimodal).

        Note: legacy path. It reads '找到的关联', which the current Stage 2
        output no longer populates; run_full_pipeline() uses
        stage5_evaluate_search_results_with_filter() instead.

        Args:
            features_data: The Stage 4 data

        Returns:
            The data with result evaluations attached
        """
        logger.info("=" * 60)
        logger.info("阶段5:LLM评估搜索结果")
        logger.info("=" * 60)

        # Collect all feature nodes that need evaluation
        features_to_evaluate = []
        for feature_result in features_data:
            original_feature = feature_result['原始特征名称']
            for assoc in feature_result.get('找到的关联', []):
                for feature in assoc.get('特征列表', []):
                    if feature.get('search_result') and feature['search_metadata']['status'] == 'success':
                        features_to_evaluate.append({
                            'original_feature': original_feature,
                            'feature_node': feature
                        })
        logger.info(f"共 {len(features_to_evaluate)} 个搜索结果需要评估")

        # Evaluate in parallel (with modest concurrency)
        with ThreadPoolExecutor(max_workers=8) as executor:
            futures = []
            for item in features_to_evaluate:
                future = executor.submit(
                    self._evaluate_single_search_result,
                    item['original_feature'],
                    item['feature_node']
                )
                futures.append((future, item))
            # Collect the results
            for idx, (future, item) in enumerate(futures, 1):
                try:
                    evaluation = future.result()
                    item['feature_node']['result_evaluation'] = evaluation
                    logger.info(f" [{idx}/{len(futures)}] {item['feature_node']['search_word']}: "
                                f"relevance={evaluation['overall_relevance']:.3f}")
                except Exception as e:
                    logger.error(f" 评估失败: {item['feature_node']['search_word']}, 错误: {e}")
                    item['feature_node']['result_evaluation'] = None

        # Save results
        output_path = os.path.join(self.output_dir, "stage5_with_evaluations.json")
        self._save_json(features_data, output_path)
        logger.info("\n" + "=" * 60)
        logger.info("阶段5完成")
        logger.info("=" * 60)
        return features_data

    def _evaluate_single_search_result(
        self,
        original_feature: str,
        feature_node: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Evaluate a single search result (using parallel evaluation).

        Args:
            original_feature: The original feature
            feature_node: The feature node

        Returns:
            The evaluation result
        """
        search_word = feature_node.get('search_word', '')
        notes = feature_node['search_result'].get('data', {}).get('data', [])
        return self.llm_evaluator.evaluate_search_results_parallel(
            original_feature=original_feature,
            search_word=search_word,
            notes=notes,
            max_notes=20,
            max_workers=20  # 20 concurrent per-note evaluations
        )

    def stage5_evaluate_search_results_with_filter(
        self,
        features_data: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """
        Stage 5: evaluate the search results with the LLM (two-layer filtered evaluation).

        Walks every search result and applies a two-layer evaluation:
        1. Layer 1: filter out results irrelevant to the search query
        2. Layer 2: score the match against the target feature
           (0.8-1.0 / 0.6-0.79 / 0.5-0.59 / <=0.4)

        Args:
            features_data: The Stage 4 data

        Returns:
            The data with evaluation results attached
        """
        logger.info("=" * 60)
        logger.info("阶段5:LLM评估搜索结果(两层过滤评估)")
        logger.info(f" 并发数: {self.stage5_max_workers}")
        logger.info(f" 每个搜索最多评估: {self.stage5_max_notes} 个帖子")
        logger.info("=" * 60)

        # Collect all search items that need evaluation
        search_items_to_evaluate = []
        for feature_result in features_data:
            original_feature = feature_result['原始特征名称']
            # Read search results from 组合评估结果_分组
            grouped_results = feature_result.get('组合评估结果_分组', [])
            if grouped_results:
                for group in grouped_results:
                    for eval_item in group.get('top10_searches', []):
                        # Check whether there is a search result
                        if eval_item.get('search_result') and eval_item.get('search_metadata', {}).get('status') == 'success':
                            search_items_to_evaluate.append({
                                'original_feature': original_feature,
                                'search_item': eval_item,
                                'base_word': group.get('base_word', '')
                            })
            else:
                # Backward compatibility with the old structure
                for eval_item in feature_result.get('组合评估结果', []):
                    if eval_item.get('search_result') and eval_item.get('search_metadata', {}).get('status') == 'success':
                        search_items_to_evaluate.append({
                            'original_feature': original_feature,
                            'search_item': eval_item,
                            'base_word': ''
                        })
        logger.info(f"共 {len(search_items_to_evaluate)} 个搜索结果需要评估")

        # Evaluate all search results in parallel
        with ThreadPoolExecutor(max_workers=self.stage5_max_workers) as executor:
            futures = []
            for idx, item in enumerate(search_items_to_evaluate, 1):
                future = executor.submit(
                    self._evaluate_single_search_with_filter,
                    idx,
                    len(search_items_to_evaluate),
                    item['original_feature'],
                    item['search_item'],
                    item['base_word']
                )
                futures.append((future, item))
            # Collect the results
            success_count = 0
            failed_count = 0
            for future, item in futures:
                try:
                    evaluation = future.result()
                    item['search_item']['evaluation_with_filter'] = evaluation
                    success_count += 1
                except Exception as e:
                    logger.error(f" 评估失败: {item['search_item'].get('search_word', 'unknown')}, 错误: {e}")
                    item['search_item']['evaluation_with_filter'] = None
                    failed_count += 1
        logger.info(f"\n评估完成: 成功 {success_count}, 失败 {failed_count}")

        # Save results
        output_path = os.path.join(self.output_dir, "stage5_with_evaluations.json")
        self._save_json(features_data, output_path)
        logger.info("\n" + "=" * 60)
        logger.info("阶段5完成")
        logger.info("=" * 60)
        return features_data

    def _evaluate_single_search_with_filter(
        self,
        idx: int,
        total: int,
        original_feature: str,
        search_item: Dict[str, Any],
        base_word: str
    ) -> Dict[str, Any]:
        """
        Evaluate a single search result (with two-layer filtering).

        Args:
            idx: Index
            total: Total count
            original_feature: The original feature
            search_item: The search item (contains search_word and search_result)
            base_word: The base word

        Returns:
            The evaluation result
        """
        search_word = search_item.get('search_word', '')
        notes = search_item['search_result'].get('data', {}).get('data', [])
        logger.info(f"[{idx}/{total}] 评估: {search_word} (帖子数: {len(notes)})")
        # Call the LLM evaluator's batch evaluation
        evaluation = self.llm_evaluator.batch_evaluate_notes_with_filter(
            search_query=search_word,
            target_feature=original_feature,
            notes=notes,
            max_notes=self.stage5_max_notes,
            max_workers=self.stage5_max_workers
        )
        # Summary statistics
        filtered_count = evaluation.get('filtered_count', 0)
        evaluated_count = evaluation.get('evaluated_count', 0)
        match_dist = evaluation.get('match_distribution', {})
        logger.info(f" ✓ 完成: 过滤 {filtered_count}, 评估 {evaluated_count}, "
                    f"完全匹配 {match_dist.get('完全匹配(0.8-1.0)', 0)}, "
                    f"相似匹配 {match_dist.get('相似匹配(0.6-0.79)', 0)}")
        return evaluation

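    # The evaluation dict returned by batch_evaluate_notes_with_filter is
    # consumed above via these fields (other fields may exist; this sketch
    # only lists what this file relies on):
    # {
    #   'filtered_count': <int>,
    #   'evaluated_count': <int>,
    #   'match_distribution': {'完全匹配(0.8-1.0)': <int>,
    #                          '相似匹配(0.6-0.79)': <int>, ...}
    # }
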
    # ========== Stage 7: extended searches ==========
    def stage7_extended_searches(
        self,
        features_data: List[Dict[str, Any]],
        search_delay: float = 2.0
    ) -> List[Dict[str, Any]]:
        """
        Stage 7: run extended searches based on the evaluation results.

        Note: this method is not invoked by run_full_pipeline() and, like the
        legacy Stage 5 path, reads '找到的关联'.

        Args:
            features_data: The Stage 6 data
            search_delay: Delay between searches

        Returns:
            The data with extended searches attached
        """
        logger.info("=" * 60)
        logger.info("阶段7:扩展搜索")
        logger.info("=" * 60)

        # Collect the extended-search tasks
        extension_tasks = []
        for feature_result in features_data:
            original_feature = feature_result['原始特征名称']
            for assoc in feature_result.get('找到的关联', []):
                for feature in assoc.get('特征列表', []):
                    result_eval = feature.get('result_evaluation')
                    if not result_eval:
                        continue
                    extracted_elements = result_eval.get('extracted_elements', [])
                    if not extracted_elements:
                        continue
                    # Create an extended search for each extracted element
                    base_search_word = feature.get('search_word', '')
                    for element in extracted_elements:
                        extended_keyword = f"{base_search_word} {element}"
                        extension_tasks.append({
                            'extended_keyword': extended_keyword,
                            'original_feature': original_feature,
                            'feature_node': feature,
                            'element': element
                        })
        logger.info(f"共 {len(extension_tasks)} 个扩展搜索任务")

        # Run the extended searches
        for idx, task in enumerate(extension_tasks, 1):
            extended_kw = task['extended_keyword']
            logger.info(f"[{idx}/{len(extension_tasks)}] 扩展搜索: {extended_kw}")
            try:
                result = self.search_client.search(
                    keyword=extended_kw,
                    content_type='不限',
                    sort_type='综合',
                    max_retries=3,
                    use_cache=True  # enable the search cache
                )
                note_count = len(result.get('data', {}).get('data', []))
                logger.info(f" ✓ 成功,获取 {note_count} 条帖子")
                # Evaluate the extended search result
                logger.info(" 评估扩展搜索结果...")
                evaluation = self.llm_evaluator.evaluate_search_results(
                    original_feature=task['original_feature'],
                    search_word=extended_kw,
                    notes=result.get('data', {}).get('data', []),
                    max_notes=20,
                    max_images_per_note=2
                )
                # Store the extended search result
                feature_node = task['feature_node']
                if 'extended_searches' not in feature_node:
                    feature_node['extended_searches'] = []
                feature_node['extended_searches'].append({
                    'extended_keyword': extended_kw,
                    'based_on_element': task['element'],
                    'search_result': result,
                    'search_metadata': {
                        'searched_at': datetime.now().isoformat(),
                        'status': 'success',
                        'note_count': note_count
                    },
                    'result_evaluation': evaluation
                })
                logger.info(f" 评估完成,relevance={evaluation['overall_relevance']:.3f}")
            except Exception as e:
                logger.error(f" ✗ 失败: {e}")
            # Delay between searches
            if idx < len(extension_tasks):
                time.sleep(search_delay)

        # Save results
        output_path = os.path.join(self.output_dir, "stage7_final_results.json")
        self._save_json(features_data, output_path)
        logger.info("\n" + "=" * 60)
        logger.info("阶段7完成")
        logger.info("=" * 60)
        return features_data

    # ========== Main pipeline ==========
    def run_full_pipeline(self):
        """Run the full pipeline."""
        logger.info("\n" + "=" * 60)
        logger.info("开始执行完整流程")
        logger.info("=" * 60)
        try:
            # Stage 6 Only mode: run only the deep deconstruction analysis,
            # starting from the Stage 5 results
            if self.stage6_only:
                logger.info("运行模式: Stage 6 Only (从 Stage 5 结果开始)")
                stage5_path = os.path.join(self.output_dir, "stage5_with_evaluations.json")
                if not os.path.exists(stage5_path):
                    raise FileNotFoundError(f"Stage 5 结果不存在: {stage5_path}")
                with open(stage5_path, 'r', encoding='utf-8') as f:
                    stage5_results = json.load(f)
                stage6_results = self.stage6_analyzer.run(stage5_results)
                return stage6_results

            # Normal flow: start from Stage 1
            # Stage 1: filter features
            stage1_results = self.stage1_filter_features()
            # Stage 2: extract candidate words from the how file
            stage2_results = self.stage2_extract_candidates(stage1_results)
            # Third step (historically numbered Stage 4): query generation + LLM evaluation
            stage3_results = self.stage4_generate_and_evaluate_search_words(
                stage2_results,
                max_workers=8,  # concurrency raised from 4 to 8
                max_combo_length=3  # combination length lowered from 4 to 3
            )
            # Stage 4: execute the searches
            stage4_results = self.stage4_execute_searches(stage3_results, search_delay=2.0, top_n=self.top_n)
            # Stage 5: LLM evaluation of the search results (conditional)
            if self.enable_stage5:
                stage5_results = self.stage5_evaluate_search_results_with_filter(stage4_results)
            else:
                stage5_results = stage4_results
                logger.info("\n" + "=" * 60)
                logger.info("阶段5:跳过(未启用)")
                logger.info("=" * 60)
            # Stage 6: deep deconstruction analysis (conditional)
            if self.enable_stage6:
                stage6_results = self.stage6_analyzer.run(stage5_results)
                final_results = stage6_results
            else:
                final_results = stage5_results
            logger.info("\n" + "=" * 60)
            if self.enable_stage6:
                logger.info("✓ 完整流程执行完成(Stage1-6)")
            elif self.enable_stage5:
                logger.info("✓ 完整流程执行完成(Stage1-5)")
            else:
                logger.info("✓ 完整流程执行完成(Stage1-4)")
            logger.info("=" * 60)

            # Automatically generate the visualization
            logger.info("\n" + "=" * 60)
            logger.info("开始生成可视化...")
            logger.info("=" * 60)
            try:
                # Use the unified visualization script
                viz_script = 'visualize_stage6_results.py'
                logger.info(f" 使用可视化脚本: {viz_script}")
                result = subprocess.run(
                    ['python3', viz_script],
                    capture_output=True,
                    text=True,
                    timeout=60
                )
                if result.returncode == 0:
                    logger.info("✓ 可视化生成成功")
                    logger.info(result.stdout)
                else:
                    logger.error(f"可视化生成失败: {result.stderr}")
            except subprocess.TimeoutExpired:
                logger.error("可视化生成超时")
            except Exception as e:
                logger.error(f"可视化生成异常: {e}")
            return final_results
        except Exception as e:
            logger.error(f"流程执行失败: {e}")
            raise


def main():
    """Entry point."""
    parser = argparse.ArgumentParser(description='增强搜索系统V2')
    parser.add_argument(
        '--how-json',
        default='input/690d977d0000000007036331_how.json',
        help='How解构文件路径'
    )
    parser.add_argument(
        '--api-key',
        default=None,
        help='OpenRouter API密钥(默认从环境变量读取)'
    )
    parser.add_argument(
        '--output-dir',
        default='output_v2',
        help='输出目录'
    )
    parser.add_argument(
        '--top-n',
        type=int,
        default=10,
        help='每个原始特征取评分最高的N个搜索词(默认10)'
    )
    parser.add_argument(
        '--max-total-searches',
        type=int,
        default=None,
        help='全局最大搜索次数限制(默认None不限制)'
    )
    parser.add_argument(
        '--search-workers',
        type=int,
        default=3,
        help='搜索并发数(默认3)'
    )
    parser.add_argument(
        '--max-searches-per-feature',
        type=int,
        default=None,
        help='每个原始特征的最大搜索次数(默认None不限制)'
    )
    parser.add_argument(
        '--max-searches-per-base-word',
        type=int,
        default=None,
        help='每个base_word的最大搜索次数(默认None不限制)'
    )
    parser.add_argument(
        '--enable-stage5',
        action='store_true',
        help='启用Stage 5评估(默认False)'
    )
    parser.add_argument(
        '--stage5-max-workers',
        type=int,
        default=10,
        help='Stage 5并发评估数(默认10)'
    )
    parser.add_argument(
        '--stage5-max-notes',
        type=int,
        default=20,
        help='每个搜索结果评估的最大帖子数(默认20)'
    )
    parser.add_argument(
        '--enable-stage6',
        action='store_true',
        help='启用 Stage 6 深度解构分析'
    )
    parser.add_argument(
        '--stage6-only',
        action='store_true',
        help='只运行 Stage 6(从 Stage 5 结果开始)'
    )
    parser.add_argument(
        '--stage6-max-workers',
        type=int,
        default=5,
        help='Stage 6 并发数(默认5)'
    )
    parser.add_argument(
        '--stage6-max-notes',
        type=int,
        default=None,
        help='Stage 6 最多处理多少个完全匹配的帖子(默认None不限制)'
    )
    parser.add_argument(
        '--stage6-skip',
        type=int,
        default=0,
        help='Stage 6 跳过前 N 个完全匹配的帖子(默认0)'
    )
    parser.add_argument(
        '--stage6-sort-by',
        type=str,
        choices=['score', 'time', 'engagement'],
        default='score',
        help='Stage 6 排序方式: score(评分), time(时间), engagement(互动量)'
    )
    parser.add_argument(
        '--stage6-api-url',
        type=str,
        default='http://192.168.245.150:7000/what/analysis/single',
        help='Stage 6 解构 API 地址'
    )
    parser.add_argument(
        '--stage6-min-score',
        type=float,
        default=0.8,
        help='Stage 6 处理的最低分数阈值(默认0.8,0-1分制)'
    )
    args = parser.parse_args()

    # Create the system instance
    system = EnhancedSearchV2(
        how_json_path=args.how_json,
        openrouter_api_key=args.api_key,
        output_dir=args.output_dir,
        top_n=args.top_n,
        max_total_searches=args.max_total_searches,
        search_max_workers=args.search_workers,
        max_searches_per_feature=args.max_searches_per_feature,
        max_searches_per_base_word=args.max_searches_per_base_word,
        enable_stage5=args.enable_stage5,
        stage5_max_workers=args.stage5_max_workers,
        stage5_max_notes=args.stage5_max_notes,
        enable_stage6=args.enable_stage6,
        stage6_only=args.stage6_only,
        stage6_max_workers=args.stage6_max_workers,
        stage6_max_notes=args.stage6_max_notes,
        stage6_skip=args.stage6_skip,
        stage6_sort_by=args.stage6_sort_by,
        stage6_api_url=args.stage6_api_url,
        stage6_min_score=args.stage6_min_score
    )
    # Run the full pipeline
    system.run_full_pipeline()


if __name__ == '__main__':
    main()

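# Example invocations (illustrative; adjust paths and limits to your setup):
#   python3 enhanced_search_v2.py --how-json input/example_how.json
#   python3 enhanced_search_v2.py --enable-stage5 --stage5-max-workers 10 \
#       --enable-stage6 --stage6-min-score 0.8
#   python3 enhanced_search_v2.py --stage6-only --output-dir output_v2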