analyze_creation_origin.py 23 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687
  1. #!/usr/bin/env python3
  2. # -*- coding: utf-8 -*-
  3. """
  4. 创作起点分析
  5. 整合数据准备 + AI分析两步流程:
  6. 1. 根据帖子图谱 + 人设图谱,准备待分析数据
  7. 2. 调用AI分析起点
  8. 输入:帖子图谱 + 人设图谱
  9. 输出:起点分析结果
  10. """
  11. import asyncio
  12. import json
  13. from pathlib import Path
  14. from typing import Dict, List, Optional
  15. import sys
  16. # 添加项目根目录到路径
  17. project_root = Path(__file__).parent.parent.parent
  18. sys.path.insert(0, str(project_root))
  19. from agents import Agent, Runner, ModelSettings, trace
  20. from agents.tracing.create import custom_span
  21. from lib.client import get_model
  22. from lib.my_trace import set_trace_smith as set_trace
  23. from script.data_processing.path_config import PathConfig
  24. # ===== 配置 =====
  25. MODEL_NAME = "google/gemini-3-pro-preview"
  26. # MODEL_NAME = "anthropic/claude-sonnet-4"
  27. MATCH_SCORE_THRESHOLD = 0.8 # 匹配分数阈值
  28. GLOBAL_RATIO_THRESHOLD = 0.8 # 全局占比阈值
  29. agent = Agent(
  30. name="Creation Origin Analyzer",
  31. model=get_model(MODEL_NAME),
  32. model_settings=ModelSettings(
  33. temperature=0.0,
  34. max_tokens=8192,
  35. ),
  36. tools=[],
  37. )
  38. # ===== 数据加载 =====
  39. def load_json(file_path: Path) -> Dict:
  40. """加载JSON文件"""
  41. with open(file_path, "r", encoding="utf-8") as f:
  42. return json.load(f)
  43. def get_post_graph_files(config: PathConfig) -> List[Path]:
  44. """获取所有帖子图谱文件"""
  45. post_graph_dir = config.intermediate_dir / "post_graph"
  46. return sorted(post_graph_dir.glob("*_帖子图谱.json"))
  47. def get_result_file(config: PathConfig, post_id: str) -> Path:
  48. """获取分析结果文件路径"""
  49. return config.intermediate_dir / "origin_analysis_result" / f"{post_id}_起点分析.json"
  50. def is_already_processed(config: PathConfig, post_id: str) -> bool:
  51. """检查帖子是否已处理过"""
  52. result_file = get_result_file(config, post_id)
  53. return result_file.exists()
  54. # ===== 第一步:数据准备 =====
  55. def extract_post_detail(post_graph: Dict) -> Dict:
  56. """提取帖子详情(保留原始字段名)"""
  57. meta = post_graph.get("meta", {})
  58. post_detail = meta.get("postDetail", {})
  59. return {
  60. "postId": meta.get("postId", ""),
  61. "postTitle": meta.get("postTitle", ""),
  62. "body_text": post_detail.get("body_text", ""),
  63. "images": post_detail.get("images", []),
  64. "video": post_detail.get("video"),
  65. "publish_time": post_detail.get("publish_time", ""),
  66. "like_count": post_detail.get("like_count", 0),
  67. "collect_count": post_detail.get("collect_count", 0),
  68. }
  69. def extract_analysis_nodes(post_graph: Dict, persona_graph: Dict) -> tuple:
  70. """
  71. 提取待分析节点列表
  72. 待分析节点 = 灵感点 + 目的点 + 关键点
  73. """
  74. nodes = post_graph.get("nodes", {})
  75. edges = post_graph.get("edges", {})
  76. persona_nodes = persona_graph.get("nodes", {})
  77. persona_index = persona_graph.get("index", {})
  78. # 1. 收集关键点信息(用于支撑信息)
  79. keypoints = {}
  80. for node_id, node in nodes.items():
  81. if node.get("type") == "标签" and node.get("dimension") == "关键点":
  82. keypoints[node_id] = {
  83. "名称": node.get("name", ""),
  84. "描述": node.get("detail", {}).get("description", ""),
  85. }
  86. # 2. 分析支撑关系:关键点 → 灵感点/目的点
  87. support_map = {} # {target_node_id: [支撑的关键点信息]}
  88. for edge_id, edge in edges.items():
  89. if edge.get("type") == "支撑":
  90. source_id = edge.get("source", "")
  91. target_id = edge.get("target", "")
  92. if source_id in keypoints:
  93. if target_id not in support_map:
  94. support_map[target_id] = []
  95. support_map[target_id].append(keypoints[source_id])
  96. # 3. 分析关联关系
  97. relation_map = {} # {node_id: [关联的节点名称]}
  98. for edge_id, edge in edges.items():
  99. if edge.get("type") == "关联":
  100. source_id = edge.get("source", "")
  101. target_id = edge.get("target", "")
  102. source_name = nodes.get(source_id, {}).get("name", "")
  103. target_name = nodes.get(target_id, {}).get("name", "")
  104. # 双向记录
  105. if source_id not in relation_map:
  106. relation_map[source_id] = []
  107. relation_map[source_id].append(target_name)
  108. if target_id not in relation_map:
  109. relation_map[target_id] = []
  110. relation_map[target_id].append(source_name)
  111. # 4. 分析人设匹配
  112. match_map = {} # {node_id: 匹配信息}
  113. persona_out_edges = persona_index.get("outEdges", {})
  114. def get_node_info(node_id: str) -> Optional[Dict]:
  115. """获取人设节点的标准信息"""
  116. node = persona_nodes.get(node_id, {})
  117. if not node:
  118. return None
  119. detail = node.get("detail", {})
  120. parent_path = detail.get("parentPath", [])
  121. return {
  122. "节点ID": node_id,
  123. "节点名称": node.get("name", ""),
  124. "节点分类": "/".join(parent_path) if parent_path else "",
  125. "节点维度": node.get("dimension", ""),
  126. "节点类型": node.get("type", ""),
  127. "人设全局占比": detail.get("probGlobal", 0),
  128. "父类下占比": detail.get("probToParent", 0),
  129. }
  130. def get_parent_category_id(node_id: str) -> Optional[str]:
  131. """通过属于边获取父分类节点ID"""
  132. belong_edges = persona_out_edges.get(node_id, {}).get("属于", [])
  133. for edge in belong_edges:
  134. target_id = edge.get("target", "")
  135. target_node = persona_nodes.get(target_id, {})
  136. if target_node.get("type") == "分类":
  137. return target_id
  138. return None
  139. for edge_id, edge in edges.items():
  140. if edge.get("type") == "匹配":
  141. source_id = edge.get("source", "")
  142. target_id = edge.get("target", "")
  143. # 只处理 帖子节点 → 人设节点 的匹配
  144. if source_id.startswith("帖子:") and target_id.startswith("人设:"):
  145. match_score = edge.get("score", 0)
  146. persona_node = persona_nodes.get(target_id, {})
  147. if persona_node:
  148. node_type = persona_node.get("type", "")
  149. # 获取匹配节点信息
  150. match_node_info = get_node_info(target_id)
  151. if not match_node_info:
  152. continue
  153. # 确定所属分类节点
  154. if node_type == "标签":
  155. # 标签:找父分类
  156. category_id = get_parent_category_id(target_id)
  157. else:
  158. # 分类:就是自己
  159. category_id = target_id
  160. # 获取所属分类信息和常见搭配
  161. category_info = None
  162. if category_id:
  163. category_node = persona_nodes.get(category_id, {})
  164. if category_node:
  165. category_detail = category_node.get("detail", {})
  166. category_path = category_detail.get("parentPath", [])
  167. category_info = {
  168. "节点ID": category_id,
  169. "节点名称": category_node.get("name", ""),
  170. "节点分类": "/".join(category_path) if category_path else "",
  171. "节点维度": category_node.get("dimension", ""),
  172. "节点类型": "分类",
  173. "人设全局占比": category_detail.get("probGlobal", 0),
  174. "父类下占比": category_detail.get("probToParent", 0),
  175. "历史共现分类": [],
  176. }
  177. # 获取分类共现节点(按共现度降序排列)
  178. co_occur_edges = persona_out_edges.get(category_id, {}).get("分类共现", [])
  179. co_occur_edges_sorted = sorted(co_occur_edges, key=lambda x: x.get("score", 0), reverse=True)
  180. for co_edge in co_occur_edges_sorted[:5]: # 取前5个
  181. co_target_id = co_edge.get("target", "")
  182. co_score = co_edge.get("score", 0)
  183. co_node = persona_nodes.get(co_target_id, {})
  184. if co_node:
  185. co_detail = co_node.get("detail", {})
  186. co_path = co_detail.get("parentPath", [])
  187. category_info["历史共现分类"].append({
  188. "节点ID": co_target_id,
  189. "节点名称": co_node.get("name", ""),
  190. "节点分类": "/".join(co_path) if co_path else "",
  191. "节点维度": co_node.get("dimension", ""),
  192. "节点类型": "分类",
  193. "人设全局占比": co_detail.get("probGlobal", 0),
  194. "父类下占比": co_detail.get("probToParent", 0),
  195. "共现度": round(co_score, 4),
  196. })
  197. match_map[source_id] = {
  198. "匹配节点": match_node_info,
  199. "匹配分数": round(match_score, 4),
  200. "所属分类": category_info,
  201. }
  202. # 5. 构建待分析节点列表(灵感点、目的点、关键点)
  203. analysis_nodes = []
  204. for node_id, node in nodes.items():
  205. if node.get("type") == "标签" and node.get("domain") == "帖子":
  206. dimension = node.get("dimension", "")
  207. if dimension in ["灵感点", "目的点", "关键点"]:
  208. # 人设匹配信息
  209. match_info = match_map.get(node_id)
  210. analysis_nodes.append({
  211. "节点ID": node_id,
  212. "节点名称": node.get("name", ""),
  213. "节点分类": node.get("category", ""), # 根分类:意图/实质/形式
  214. "节点维度": dimension,
  215. "节点类型": node.get("type", ""),
  216. "节点描述": node.get("detail", {}).get("description", ""),
  217. "人设匹配": match_info,
  218. })
  219. # 6. 构建可能的关系列表
  220. relation_list = []
  221. # 支撑关系:关键点 → 灵感点/目的点
  222. for edge_id, edge in edges.items():
  223. if edge.get("type") == "支撑":
  224. source_id = edge.get("source", "")
  225. target_id = edge.get("target", "")
  226. if source_id in keypoints:
  227. relation_list.append({
  228. "来源节点": source_id,
  229. "目标节点": target_id,
  230. "关系类型": "支撑",
  231. })
  232. # 关联关系:节点之间的关联(去重,只记录一次)
  233. seen_relations = set()
  234. for edge_id, edge in edges.items():
  235. if edge.get("type") == "关联":
  236. source_id = edge.get("source", "")
  237. target_id = edge.get("target", "")
  238. # 用排序后的元组作为key去重
  239. key = tuple(sorted([source_id, target_id]))
  240. if key not in seen_relations:
  241. seen_relations.add(key)
  242. relation_list.append({
  243. "来源节点": source_id,
  244. "目标节点": target_id,
  245. "关系类型": "关联",
  246. })
  247. return analysis_nodes, relation_list
  248. def prepare_analysis_data(post_graph: Dict, persona_graph: Dict) -> Dict:
  249. """
  250. 准备完整的分析数据
  251. Returns:
  252. {
  253. "帖子详情": {...},
  254. "待分析节点列表": [...],
  255. "可能的关系列表": [...]
  256. }
  257. """
  258. analysis_nodes, relation_list = extract_analysis_nodes(post_graph, persona_graph)
  259. return {
  260. "帖子详情": extract_post_detail(post_graph),
  261. "待分析节点列表": analysis_nodes,
  262. "可能的关系列表": relation_list,
  263. }
  264. # ===== 第二步:AI分析 =====
  265. def build_context(data: Dict) -> Dict:
  266. """
  267. 构造AI分析的上下文
  268. Returns:
  269. {
  270. "all_points": [...], # 全部创意点(含详细信息)
  271. "candidates": [...], # 起点候选集(名称列表)
  272. "constants": [...], # 人设常量(名称列表)
  273. }
  274. """
  275. nodes = data.get("待分析节点列表", [])
  276. # 全部创意点(含详细信息)
  277. all_points = []
  278. for node in nodes:
  279. match_info = node.get("人设匹配")
  280. match_score = 0
  281. category_global_ratio = 0
  282. if match_info:
  283. match_score = match_info.get("匹配分数", 0)
  284. category_info = match_info.get("所属分类", {})
  285. if category_info:
  286. category_global_ratio = category_info.get("人设全局占比", 0)
  287. all_points.append({
  288. "名称": node["节点名称"],
  289. "分类": node.get("节点分类", ""),
  290. "维度": node.get("节点维度", ""),
  291. "描述": node.get("节点描述", ""),
  292. "人设匹配度": round(match_score, 2),
  293. "所属分类全局占比": round(category_global_ratio, 2),
  294. })
  295. # 起点候选集(灵感点 + 目的点)
  296. candidates = [
  297. node["节点名称"]
  298. for node in nodes
  299. if node["节点维度"] in ["灵感点", "目的点"]
  300. ]
  301. # 人设常量(匹配分数 > 0.8 且 全局占比 > 0.8)
  302. constants = []
  303. for node in nodes:
  304. match_info = node.get("人设匹配")
  305. if match_info:
  306. match_score = match_info.get("匹配分数", 0)
  307. match_node = match_info.get("匹配节点", {})
  308. global_ratio = match_node.get("人设全局占比", 0)
  309. if match_score > MATCH_SCORE_THRESHOLD and global_ratio > GLOBAL_RATIO_THRESHOLD:
  310. constants.append(node["节点名称"])
  311. return {
  312. "all_points": all_points,
  313. "candidates": candidates,
  314. "constants": constants,
  315. }
  316. def format_prompt(context: Dict) -> str:
  317. """
  318. 格式化为AI prompt
  319. """
  320. all_points = context["all_points"]
  321. candidates = context["candidates"]
  322. constants = context["constants"]
  323. # 格式化全部创意点为易读文本
  324. points_text = ""
  325. for p in all_points:
  326. points_text += f"- {p['名称']}\n"
  327. points_text += f" 维度: {p['维度']} | 分类: {p['分类']}\n"
  328. points_text += f" 描述: {p['描述']}\n"
  329. points_text += f" 人设匹配度: {p['人设匹配度']} | 所属分类全局占比: {p['所属分类全局占比']}\n"
  330. points_text += "\n"
  331. # 格式化起点候选集
  332. candidates_text = "、".join(candidates)
  333. # 格式化人设常量
  334. constants_text = "、".join(constants) if constants else "无"
  335. prompt = f"""# Role
  336. 你是小红书爆款内容的"逆向工程"专家。你的核心能力是透过内容的表象(视觉/形式),还原创作者最初的脑回路(动机/实质)。
  337. # Task
  338. 我提供一组笔记的【创意标签】和一个【起点候选集】。
  339. 请推理出哪些选项是真正的**创意起点**。
  340. # Input Data
  341. ## 全部创意点
  342. {points_text}
  343. ## 起点候选集
  344. {candidates_text}
  345. ## 来自人设的常量
  346. {constants_text}
  347. # 推理约束
  348. 1. 实质推形式,而不是形式推实质,除非形式是一切创意的起点
  349. 2. 因推果而不是果推因
  350. 3. 无法被其他项或人设推理出的点,即为起点
  351. # Output Format
  352. 请输出一个标准的 JSON 格式。
  353. - Key: 候选集中的词。
  354. - Value: 一个对象,包含:
  355. - `score`: 0.0 到 1.0 的浮点数(代表是起点的可能性)。
  356. - `analysis`: 一句话推理"""
  357. return prompt
  358. # ===== 显示函数 =====
  359. def display_context(context: Dict, post_id: str):
  360. """显示构造的上下文"""
  361. print(f"\n帖子: {post_id}")
  362. print(f"\n全部创意点 ({len(context['all_points'])} 个):")
  363. for p in context['all_points']:
  364. print(f" - {p['名称']} ({p['维度']}/{p['分类']}) 匹配度={p['人设匹配度']}, 分类占比={p['所属分类全局占比']}")
  365. print(f"\n起点候选集 ({len(context['candidates'])} 个):")
  366. print(f" {context['candidates']}")
  367. print(f"\n人设常量 ({len(context['constants'])} 个):")
  368. print(f" {context['constants']}")
  369. def display_result(result: Dict):
  370. """显示分析结果"""
  371. output = result.get("输出")
  372. if output:
  373. print("\n起点分析结果:")
  374. # 按score降序排列
  375. sorted_items = sorted(output.items(), key=lambda x: x[1].get("score", 0), reverse=True)
  376. for name, info in sorted_items:
  377. score = info.get("score", 0)
  378. analysis = info.get("analysis", "")
  379. marker = "★" if score >= 0.7 else "○"
  380. print(f" {marker} {name}: {score:.2f}")
  381. print(f" {analysis}")
  382. else:
  383. print(f" 分析失败: {result.get('错误', 'N/A')}")
  384. # ===== 处理函数 =====
  385. async def process_single_post(
  386. post_file: Path,
  387. persona_graph: Dict,
  388. config: PathConfig,
  389. current_time: str = None,
  390. log_url: str = None,
  391. force: bool = False,
  392. ) -> Dict:
  393. """
  394. 处理单个帖子(数据准备 + AI分析)
  395. """
  396. # 加载帖子图谱
  397. post_graph = load_json(post_file)
  398. post_id = post_graph.get("meta", {}).get("postId", "unknown")
  399. # 检查是否已处理
  400. if not force and is_already_processed(config, post_id):
  401. print(f"\n跳过帖子 {post_id}(已处理,使用 --force 强制重新分析)")
  402. # 返回已有结果
  403. result_file = get_result_file(config, post_id)
  404. return load_json(result_file)
  405. print(f"\n{'=' * 60}")
  406. print(f"处理帖子: {post_id}")
  407. print("-" * 60)
  408. # 第一步:准备数据
  409. data = prepare_analysis_data(post_graph, persona_graph)
  410. # 构造上下文
  411. context = build_context(data)
  412. display_context(context, post_id)
  413. # 格式化prompt
  414. prompt = format_prompt(context)
  415. # 第二步:调用AI
  416. print("\n调用AI分析中...")
  417. with custom_span(
  418. name=f"创作起点分析 - {post_id}",
  419. data={
  420. "帖子id": post_id,
  421. "候选数": len(context["candidates"]),
  422. "模型": MODEL_NAME
  423. }
  424. ):
  425. result = await Runner.run(agent, input=prompt)
  426. output_text = result.final_output
  427. # 解析JSON
  428. try:
  429. if "```json" in output_text:
  430. json_start = output_text.find("```json") + 7
  431. json_end = output_text.find("```", json_start)
  432. json_str = output_text[json_start:json_end].strip()
  433. elif "{" in output_text and "}" in output_text:
  434. json_start = output_text.find("{")
  435. json_end = output_text.rfind("}") + 1
  436. json_str = output_text[json_start:json_end]
  437. else:
  438. json_str = output_text
  439. analysis_result = json.loads(json_str)
  440. result_data = {
  441. "帖子id": post_id,
  442. "模型": MODEL_NAME,
  443. "输入": context,
  444. "输出": analysis_result
  445. }
  446. except Exception as e:
  447. result_data = {
  448. "帖子id": post_id,
  449. "模型": MODEL_NAME,
  450. "输入": context,
  451. "输出": None,
  452. "错误": str(e),
  453. "原始输出": output_text
  454. }
  455. # 显示结果
  456. display_result(result_data)
  457. # 保存结果
  458. output_dir = config.intermediate_dir / "origin_analysis_result"
  459. output_dir.mkdir(parents=True, exist_ok=True)
  460. output_with_meta = {
  461. "元数据": {
  462. "current_time": current_time,
  463. "log_url": log_url,
  464. "model": MODEL_NAME
  465. },
  466. **result_data
  467. }
  468. output_file = output_dir / f"{post_id}_起点分析.json"
  469. with open(output_file, "w", encoding="utf-8") as f:
  470. json.dump(output_with_meta, f, ensure_ascii=False, indent=2)
  471. print(f"\n已保存: {output_file.name}")
  472. return result_data
  473. # ===== 主函数 =====
  474. async def main(
  475. post_id: str = None,
  476. all_posts: bool = False,
  477. force: bool = False,
  478. ):
  479. """
  480. 主函数
  481. Args:
  482. post_id: 帖子ID,可选
  483. all_posts: 是否处理所有帖子
  484. force: 强制重新分析已处理的帖子
  485. """
  486. # 设置 trace
  487. current_time, log_url = set_trace()
  488. config = PathConfig()
  489. print(f"账号: {config.account_name}")
  490. print(f"使用模型: {MODEL_NAME}")
  491. print(f"Trace URL: {log_url}")
  492. # 加载人设图谱
  493. persona_graph_file = config.intermediate_dir / "人设图谱.json"
  494. if not persona_graph_file.exists():
  495. print(f"错误: 人设图谱文件不存在: {persona_graph_file}")
  496. return
  497. persona_graph = load_json(persona_graph_file)
  498. print(f"人设图谱节点数: {len(persona_graph.get('nodes', {}))}")
  499. # 获取帖子图谱文件
  500. post_graph_files = get_post_graph_files(config)
  501. if not post_graph_files:
  502. print("错误: 没有找到帖子图谱文件")
  503. return
  504. # 确定要处理的帖子
  505. if post_id:
  506. target_file = next(
  507. (f for f in post_graph_files if post_id in f.name),
  508. None
  509. )
  510. if not target_file:
  511. print(f"错误: 未找到帖子 {post_id}")
  512. return
  513. files_to_process = [target_file]
  514. elif all_posts:
  515. files_to_process = post_graph_files
  516. else:
  517. files_to_process = [post_graph_files[0]]
  518. print(f"待处理帖子数: {len(files_to_process)}")
  519. # 处理
  520. with trace("创作起点分析"):
  521. results = []
  522. skipped = 0
  523. for i, post_file in enumerate(files_to_process, 1):
  524. print(f"\n{'#' * 60}")
  525. print(f"# 处理帖子 {i}/{len(files_to_process)}")
  526. print(f"{'#' * 60}")
  527. result = await process_single_post(
  528. post_file=post_file,
  529. persona_graph=persona_graph,
  530. config=config,
  531. current_time=current_time,
  532. log_url=log_url,
  533. force=force,
  534. )
  535. # 检查是否是跳过的
  536. if not force and "元数据" in result:
  537. skipped += 1
  538. results.append(result)
  539. # 汇总
  540. print(f"\n{'#' * 60}")
  541. print(f"# 完成! 共处理 {len(results)} 个帖子 (跳过 {skipped} 个已处理)")
  542. print(f"{'#' * 60}")
  543. print(f"Trace: {log_url}")
  544. print("\n汇总(score >= 0.7 的起点):")
  545. for result in results:
  546. post_id = result.get("帖子id")
  547. output = result.get("输出")
  548. if output:
  549. origins = [f"{k}({v['score']:.2f})" for k, v in output.items() if v.get("score", 0) >= 0.7]
  550. print(f" {post_id}: {', '.join(origins) if origins else '无高置信起点'}")
  551. else:
  552. print(f" {post_id}: 分析失败")
  553. if __name__ == "__main__":
  554. import argparse
  555. parser = argparse.ArgumentParser(description="创作起点分析")
  556. parser.add_argument("--post-id", type=str, help="帖子ID")
  557. parser.add_argument("--all-posts", action="store_true", help="处理所有帖子")
  558. parser.add_argument("--force", action="store_true", help="强制重新分析已处理的帖子")
  559. args = parser.parse_args()
  560. asyncio.run(main(
  561. post_id=args.post_id,
  562. all_posts=args.all_posts,
  563. force=args.force,
  564. ))