# extract_feature_combinations_from_posts.py (13 KB)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Extract feature combinations and their source information from the
"past posts what-deconstruction results" directory.
Feature combination format: ['feature name 1', 'feature name 2', ...]
"""
import json
from pathlib import Path
from typing import Dict, List, Optional
import re
import sys
# Add the project root to sys.path so `script.detail` below resolves
# when this file is run directly as a script.
project_root = Path(__file__).parent.parent.parent
sys.path.insert(0, str(project_root))
from script.detail import get_xiaohongshu_detail
  16. def extract_post_id_from_filename(filename: str) -> str:
  17. """从文件名中提取帖子ID"""
  18. match = re.match(r'^([^_]+)_', filename)
  19. if match:
  20. return match.group(1)
  21. return ""
  22. def get_post_detail(post_id: str) -> Optional[Dict]:
  23. """
  24. 获取帖子详情
  25. Args:
  26. post_id: 帖子ID
  27. Returns:
  28. 帖子详情字典,如果获取失败则返回None
  29. """
  30. try:
  31. detail = get_xiaohongshu_detail(post_id)
  32. return detail
  33. except Exception as e:
  34. print(f" 警告: 获取帖子 {post_id} 详情失败: {e}")
  35. return None
  36. def extract_feature_combination_from_point(point_data: Dict, post_id: str, point_name: str, point_description: str) -> Optional[Dict]:
  37. """
  38. 从单个点(灵感点/目的点/关键点)中提取特征组合信息
  39. Args:
  40. point_data: 点的数据
  41. post_id: 帖子ID
  42. point_name: 点的名称
  43. point_description: 点的描述
  44. Returns:
  45. 特征组合字典,如果没有特征则返回None
  46. """
  47. # 检查是否有"提取的特征"字段
  48. if "提取的特征" not in point_data or not isinstance(point_data["提取的特征"], list):
  49. return None
  50. features = point_data["提取的特征"]
  51. if not features:
  52. return None
  53. # 提取所有特征名称,组成特征组合
  54. feature_names = [f["特征名称"] for f in features if "特征名称" in f]
  55. if not feature_names:
  56. return None
  57. return {
  58. "特征组合": feature_names,
  59. "点的名称": point_name,
  60. "点的描述": point_description,
  61. "帖子id": post_id
  62. }
  63. def process_single_file(file_path: Path) -> Dict[str, List[Dict]]:
  64. """
  65. 处理单个JSON文件,提取所有特征组合信息
  66. Args:
  67. file_path: JSON文件路径
  68. Returns:
  69. 包含灵感点、目的点、关键点的特征组合列表字典
  70. """
  71. result = {
  72. "灵感点": [],
  73. "目的点": [],
  74. "关键点": []
  75. }
  76. # 从文件名提取帖子ID
  77. post_id = extract_post_id_from_filename(file_path.name)
  78. try:
  79. with open(file_path, "r", encoding="utf-8") as f:
  80. data = json.load(f)
  81. # 提取三点解构数据
  82. if "三点解构" not in data:
  83. return result
  84. three_points = data["三点解构"]
  85. # 处理灵感点
  86. if "灵感点" in three_points:
  87. inspiration = three_points["灵感点"]
  88. # 处理全新内容
  89. if "全新内容" in inspiration and isinstance(inspiration["全新内容"], list):
  90. for item in inspiration["全新内容"]:
  91. point_name = item.get("灵感点", "")
  92. point_desc = item.get("描述", "")
  93. feature_combo = extract_feature_combination_from_point(item, post_id, point_name, point_desc)
  94. if feature_combo:
  95. result["灵感点"].append(feature_combo)
  96. # 处理共性差异
  97. if "共性差异" in inspiration and isinstance(inspiration["共性差异"], list):
  98. for item in inspiration["共性差异"]:
  99. point_name = item.get("灵感点", "")
  100. point_desc = item.get("描述", "")
  101. feature_combo = extract_feature_combination_from_point(item, post_id, point_name, point_desc)
  102. if feature_combo:
  103. result["灵感点"].append(feature_combo)
  104. # 处理共性内容
  105. if "共性内容" in inspiration and isinstance(inspiration["共性内容"], list):
  106. for item in inspiration["共性内容"]:
  107. point_name = item.get("灵感点", "")
  108. point_desc = item.get("描述", "")
  109. feature_combo = extract_feature_combination_from_point(item, post_id, point_name, point_desc)
  110. if feature_combo:
  111. result["灵感点"].append(feature_combo)
  112. # 处理目的点
  113. if "目的点" in three_points:
  114. purpose = three_points["目的点"]
  115. if "purposes" in purpose and isinstance(purpose["purposes"], list):
  116. for item in purpose["purposes"]:
  117. point_name = item.get("目的点", "")
  118. point_desc = item.get("描述", "")
  119. feature_combo = extract_feature_combination_from_point(item, post_id, point_name, point_desc)
  120. if feature_combo:
  121. result["目的点"].append(feature_combo)
  122. # 处理关键点
  123. if "关键点" in three_points:
  124. key_points = three_points["关键点"]
  125. if "key_points" in key_points and isinstance(key_points["key_points"], list):
  126. for item in key_points["key_points"]:
  127. point_name = item.get("关键点", "")
  128. point_desc = item.get("描述", "")
  129. feature_combo = extract_feature_combination_from_point(item, post_id, point_name, point_desc)
  130. if feature_combo:
  131. result["关键点"].append(feature_combo)
  132. except Exception as e:
  133. print(f"处理文件 {file_path.name} 时出错: {e}")
  134. return result
  135. def merge_results(all_results: List[Dict]) -> Dict:
  136. """
  137. 合并所有文件的提取结果,按特征组合分组
  138. Args:
  139. all_results: 所有文件的结果列表
  140. Returns:
  141. 合并后的结果
  142. """
  143. merged = {
  144. "灵感点": {},
  145. "目的点": {},
  146. "关键点": {}
  147. }
  148. for result in all_results:
  149. for category in ["灵感点", "目的点", "关键点"]:
  150. for combo_data in result[category]:
  151. # 将特征组合列表转换为tuple作为字典的key(list不可哈希)
  152. combo_key = tuple(sorted(combo_data["特征组合"]))
  153. if combo_key not in merged[category]:
  154. merged[category][combo_key] = []
  155. merged[category][combo_key].append({
  156. "点的名称": combo_data["点的名称"],
  157. "点的描述": combo_data["点的描述"],
  158. "帖子id": combo_data["帖子id"]
  159. })
  160. return merged
def convert_to_array_format(merged_dict: Dict, fetch_details: bool = True, time_filter: Optional[str] = None) -> Dict:
    """
    Convert the grouped dict format into an array format, optionally attaching
    each source post's detail payload.

    Args:
        merged_dict: Output of merge_results(): {category: {feature-tuple: [sources]}}.
        fetch_details: Whether to fetch post details; defaults to True.
        time_filter: Threshold "YYYY-MM-DD HH:MM:SS"; only posts published
            strictly before it are kept (only applied when fetch_details is True).

    Returns:
        {category: [{"特征组合": [...], "特征来源": [...]}]}.
    """
    result = {
        "灵感点": [],
        "目的点": [],
        "关键点": []
    }
    # Collect every post ID that needs its detail fetched.
    post_ids = set()
    if fetch_details:
        for category in ["灵感点", "目的点", "关键点"]:
            for combo_key, sources in merged_dict[category].items():
                for source in sources:
                    post_ids.add(source["帖子id"])
        # Fetch post details one by one (no batch API is used here despite
        # the original "batch" comment).
        print(f"\n正在获取 {len(post_ids)} 个帖子的详情...")
        post_details = {}
        for i, post_id in enumerate(post_ids, 1):
            print(f"[{i}/{len(post_ids)}] 获取帖子 {post_id} 的详情...")
            detail = get_post_detail(post_id)
            if detail:
                post_details[post_id] = detail
        print(f"成功获取 {len(post_details)} 个帖子详情")
        # With time filtering on, drop posts published at/after the threshold
        # to avoid "time travel" (using data newer than the current post).
        # NOTE: plain string comparison — correct only because the
        # "YYYY-MM-DD HH:MM:SS" format sorts lexicographically. A post with a
        # missing publish_time ('' default) always passes the filter.
        if time_filter:
            print(f"\n正在应用时间过滤 (< {time_filter}),避免使用晚于当前帖子的数据...")
            filtered_post_ids = set()
            filtered_count = 0
            for post_id, detail in post_details.items():
                publish_time = detail.get('publish_time', '')
                if publish_time < time_filter:
                    filtered_post_ids.add(post_id)
                else:
                    filtered_count += 1
                    print(f" ⚠️ 过滤掉帖子 {post_id} (发布时间: {publish_time},晚于阈值)")
            print(f"过滤掉 {filtered_count} 个帖子(穿越),保留 {len(filtered_post_ids)} 个帖子")
            # Keep only the details of posts that passed the time filter.
            post_details = {pid: detail for pid, detail in post_details.items() if pid in filtered_post_ids}
    # Convert to array format, attaching post details to each source.
    for category in ["灵感点", "目的点", "关键点"]:
        for combo_key, sources in merged_dict[category].items():
            enhanced_sources = []
            for source in sources:
                # With time filtering on, skip sources whose post was filtered out.
                if fetch_details and time_filter and source["帖子id"] not in post_details:
                    continue
                enhanced_source = source.copy()
                if fetch_details and source["帖子id"] in post_details:
                    enhanced_source["帖子详情"] = post_details[source["帖子id"]]
                enhanced_sources.append(enhanced_source)
            # Only keep combinations that still have at least one source.
            if enhanced_sources:
                result[category].append({
                    "特征组合": list(combo_key),  # tuple back to list
                    "特征来源": enhanced_sources
                })
    return result
  227. def get_earliest_publish_time(current_posts_dir: Path) -> Optional[str]:
  228. """
  229. 获取当前帖子目录中最早的发布时间
  230. Args:
  231. current_posts_dir: 当前帖子目录路径
  232. Returns:
  233. 最早的发布时间字符串,格式为 "YYYY-MM-DD HH:MM:SS"
  234. """
  235. if not current_posts_dir.exists():
  236. print(f"警告: 当前帖子目录不存在: {current_posts_dir}")
  237. return None
  238. json_files = list(current_posts_dir.glob("*.json"))
  239. if not json_files:
  240. print(f"警告: 当前帖子目录为空: {current_posts_dir}")
  241. return None
  242. print(f"\n正在获取当前帖子的发布时间...")
  243. print(f"找到 {len(json_files)} 个当前帖子")
  244. earliest_time = None
  245. for file_path in json_files:
  246. post_id = extract_post_id_from_filename(file_path.name)
  247. if not post_id:
  248. continue
  249. try:
  250. detail = get_post_detail(post_id)
  251. if detail and 'publish_time' in detail:
  252. publish_time = detail['publish_time']
  253. if earliest_time is None or publish_time < earliest_time:
  254. earliest_time = publish_time
  255. print(f" 更新最早时间: {publish_time} (帖子: {post_id})")
  256. except Exception as e:
  257. print(f" 警告: 获取帖子 {post_id} 发布时间失败: {e}")
  258. if earliest_time:
  259. print(f"\n当前帖子最早发布时间: {earliest_time}")
  260. else:
  261. print("\n警告: 未能获取到任何当前帖子的发布时间")
  262. return earliest_time
  263. def main():
  264. # 输入输出路径(默认使用项目根目录下的 data/data_1118 目录)
  265. script_dir = Path(__file__).parent
  266. project_root = script_dir.parent.parent
  267. data_dir = project_root / "data" / "data_1118"
  268. input_dir = data_dir / "过去帖子_what解构结果"
  269. current_posts_dir = data_dir / "当前帖子_what解构结果"
  270. output_file = data_dir / "特征组合_帖子来源.json"
  271. # 获取当前帖子的最早发布时间
  272. earliest_time = get_earliest_publish_time(current_posts_dir)
  273. print(f"\n正在扫描目录: {input_dir}")
  274. # 获取所有JSON文件
  275. json_files = list(input_dir.glob("*.json"))
  276. print(f"找到 {len(json_files)} 个JSON文件")
  277. # 处理所有文件
  278. all_results = []
  279. for i, file_path in enumerate(json_files, 1):
  280. print(f"处理文件 [{i}/{len(json_files)}]: {file_path.name}")
  281. result = process_single_file(file_path)
  282. all_results.append(result)
  283. # 合并结果
  284. print("\n正在合并结果...")
  285. merged_result = merge_results(all_results)
  286. # 转换为数组格式(带时间过滤)
  287. print("正在转换为数组格式...")
  288. final_result = convert_to_array_format(merged_result, fetch_details=True, time_filter=earliest_time)
  289. # 统计信息
  290. if earliest_time:
  291. print(f"\n提取统计 (已过滤掉发布时间 >= {earliest_time} 的帖子):")
  292. else:
  293. print(f"\n提取统计:")
  294. for category in ["灵感点", "目的点", "关键点"]:
  295. combo_count = len(final_result[category])
  296. source_count = sum(len(item["特征来源"]) for item in final_result[category])
  297. print(f" {category}: {combo_count} 个特征组合, {source_count} 个来源")
  298. # 保存结果
  299. print(f"\n正在保存结果到: {output_file}")
  300. with open(output_file, "w", encoding="utf-8") as f:
  301. json.dump(final_result, f, ensure_ascii=False, indent=4)
  302. print("完成!")
  303. if __name__ == "__main__":
  304. main()