#!/usr/bin/env python3
"""
Xiaohongshu note search tool.

Searches Xiaohongshu (小红书) notes by keyword, with support for several
filter options (content type, sort order, publish time) plus caching.
"""
import requests
import json
import os
import argparse
import sys
import time
import hashlib
import re
from datetime import datetime
from typing import Dict, Any, Optional, Tuple
from pathlib import Path


class XiaohongshuSearch:
    """Wrapper class around the Xiaohongshu note-search API."""

    BASE_URL = "http://47.84.182.56:8001"
    TOOL_NAME = "xhs_note_search"
    PLATFORM = "xiaohongshu"

    def __init__(self, results_dir: str = None, use_cache: bool = True):
        """
        Initialize the API client.

        Args:
            results_dir: Output directory for results; defaults to the
                data/search folder under the project root.
            use_cache: Whether to enable caching. Defaults to True.
        """
        self.api_url = f"{self.BASE_URL}/tools/call/{self.TOOL_NAME}"
        self.use_cache = use_cache
        # Set the results output directory
        if results_dir:
            self.results_base_dir = results_dir
        else:
            # Default to the data/search folder under the project root
            script_dir = os.path.dirname(os.path.abspath(__file__))
            project_root = os.path.dirname(os.path.dirname(script_dir))
            self.results_base_dir = os.path.join(project_root, "data", "search")

    def _sanitize_keyword(self, keyword: str) -> str:
        """
        Sanitize a keyword so it can be used as a folder name.

        Args:
            keyword: Raw keyword.

        Returns:
            Sanitized keyword.
        """
        # Replace characters that are invalid in folder names
        # Windows: < > : " / \ | ? *
        # Unix: /
        # All are replaced with underscores
        sanitized = re.sub(r'[<>:"/\\|?*]', '_', keyword)
        # Trim surrounding whitespace
        sanitized = sanitized.strip()
        # Trim leading/trailing dots (not allowed on Windows)
        sanitized = sanitized.strip('.')
        # Fall back to a default name if nothing is left
        if not sanitized:
            sanitized = "unnamed"
        # Cap the length (file systems commonly limit names to 255 chars)
        if len(sanitized) > 200:
            sanitized = sanitized[:200]
        return sanitized
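
    # Illustrative behavior of _sanitize_keyword (worked by hand from the
    # rules above, not captured from a run):
    #   "夏日穿搭"  -> "夏日穿搭"   (no invalid characters, unchanged)
    #   'a/b:c*d?'  -> 'a_b_c_d_'  (each invalid character becomes '_')
    #   ' ... '     -> 'unnamed'   (spaces and dots strip away to empty)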

    def _get_cache_key(
        self,
        keyword: str,
        content_type: str,
        sort_type: str,
        publish_time: str,
        cursor: str
    ) -> str:
        """
        Build a cache key (a hash of the search parameters).

        Args:
            Search parameters.

        Returns:
            Cache key (MD5 hex digest).
        """
        # Join all parameters into a single string
        params_str = f"{keyword}|{content_type}|{sort_type}|{publish_time}|{cursor}"
        # Hash it with MD5
        return hashlib.md5(params_str.encode('utf-8')).hexdigest()
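
    # For example, a default first-page search for "咖啡" hashes the string
    # "咖啡|不限|综合|不限|" (note the trailing "|" left by the empty cursor)
    # into a 32-character hex digest.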

    def _get_latest_cache(
        self,
        keyword: str,
        cache_key: str,
        content_type: str,
        sort_type: str,
        publish_time: str
    ) -> Optional[Tuple[str, str]]:
        """
        Find the most recent cache files matching the search parameters.

        Args:
            keyword: Search keyword.
            cache_key: Cache key (unused; kept for interface compatibility).
            content_type: Content type.
            sort_type: Sort order.
            publish_time: Publish-time filter.

        Returns:
            (raw_filepath, clean_filepath), or None if no cache exists.
        """
        # Sanitize the keyword for use as a folder name
        safe_keyword = self._sanitize_keyword(keyword)
        base_dir = os.path.join(self.results_base_dir, "xiaohongshu_search", safe_keyword)
        raw_dir = os.path.join(base_dir, "raw")
        clean_dir = os.path.join(base_dir, "clean")
        # Check that both directories exist
        if not os.path.exists(raw_dir) or not os.path.exists(clean_dir):
            return None
        # List all files and keep the ones matching the parameters
        try:
            # Build the parameter suffix used to match file names
            param_suffix = self._get_filename_suffix(content_type, sort_type, publish_time)
            raw_files = list(Path(raw_dir).glob("*.json"))
            clean_files = list(Path(clean_dir).glob("*.json"))
            if not raw_files or not clean_files:
                return None
            # Keep only files whose names contain the parameter suffix
            matching_raw_files = [
                f for f in raw_files
                if param_suffix in f.name
            ]
            matching_clean_files = [
                f for f in clean_files
                if param_suffix in f.name
            ]
            if not matching_raw_files or not matching_clean_files:
                return None
            # Sort by modification time, newest first
            matching_raw_files.sort(key=lambda x: x.stat().st_mtime, reverse=True)
            matching_clean_files.sort(key=lambda x: x.stat().st_mtime, reverse=True)
            # Return the newest matching pair of paths
            return (str(matching_raw_files[0]), str(matching_clean_files[0]))
        except Exception:
            return None
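
    # Caveat: the newest raw and clean files are selected independently by
    # mtime, so in principle they could come from different runs; in practice
    # save_result writes both sides under the same file name.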

    def _load_cached_result(self, raw_filepath: str) -> Optional[Dict[str, Any]]:
        """
        Load cached raw data.

        Args:
            raw_filepath: Path to the raw data file.

        Returns:
            Raw data dict, or None.
        """
        try:
            with open(raw_filepath, 'r', encoding='utf-8') as f:
                data = json.load(f)
            # Support both the old and the new file format
            if "api_response" in data:
                # New format: wraps search_params and api_response
                return data["api_response"]
            else:
                # Old format: the file is the API response itself
                return data
        except Exception:
            return None
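
    # New-format raw file layout, as written by save_result below:
    #   {
    #     "search_params": {"keyword": ..., "page": ..., "timestamp": ...},
    #     "api_response": { ...full API response... }
    #   }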

    def search(
        self,
        keyword: str,
        content_type: str = "不限",
        sort_type: str = "综合",
        publish_time: str = "不限",
        cursor: str = "",
        timeout: int = 30,
        max_retries: int = 5,
        retry_delay: int = 2,
        force: bool = False
    ) -> Tuple[Dict[str, Any], bool]:
        """
        Search Xiaohongshu notes, with automatic retries and caching.

        Args:
            keyword: Search keyword.
            content_type: Content type; one of 不限 (any), 视频 (video),
                图文 (image/text). Defaults to '不限'.
            sort_type: Sort order; one of 综合 (relevance), 最新 (newest),
                最多点赞 (most likes), 最多评论 (most comments). Defaults to '综合'.
            publish_time: Publish-time filter; one of 不限 (any), 一天内 (past day),
                一周内 (past week), 半年内 (past six months). Defaults to '不限'.
            cursor: Pagination cursor; empty for the first page. The cursor for
                the next page comes back with the previous page's response.
            timeout: Request timeout in seconds. Defaults to 30.
            max_retries: Maximum number of attempts. Defaults to 5.
            retry_delay: Retry delay in seconds; doubles on each retry.
                Defaults to 2.
            force: Force a fresh API request, ignoring the cache.
                Defaults to False.

        Returns:
            Tuple of (raw data, whether it came from the cache).

        Raises:
            requests.exceptions.RequestException: raised once all retries fail.
        """
        # Check the cache (if enabled and no forced refresh)
        if self.use_cache and not force:
            cache_key = self._get_cache_key(keyword, content_type, sort_type, publish_time, cursor)
            cached_files = self._get_latest_cache(keyword, cache_key, content_type, sort_type, publish_time)
            if cached_files:
                raw_filepath, clean_filepath = cached_files
                cached_result = self._load_cached_result(raw_filepath)
                if cached_result:
                    print(f"✓ Using cached data: {raw_filepath}")
                    return cached_result, True  # flag: served from cache
        payload = {
            "keyword": keyword,
            "content_type": content_type,
            "sort_type": sort_type,
            "publish_time": publish_time,
            "cursor": cursor
        }
        last_exception = None
        for attempt in range(max_retries):
            try:
                if attempt > 0:
                    # Exponential backoff: the delay doubles on each retry
                    wait_time = retry_delay * (2 ** (attempt - 1))
                    print(f"Waiting {wait_time}s before retry attempt {attempt + 1}...")
                    time.sleep(wait_time)
                print(f"Searching keyword: {keyword} (attempt {attempt + 1}/{max_retries})")
                response = requests.post(
                    self.api_url,
                    json=payload,
                    timeout=timeout,
                    headers={"Content-Type": "application/json"}
                )
                response.raise_for_status()
                raw_result = response.json()
                # If the result field is a string, parse it into a JSON object
                if 'result' in raw_result and isinstance(raw_result['result'], str):
                    try:
                        raw_result['result'] = json.loads(raw_result['result'])
                    except json.JSONDecodeError:
                        pass  # keep the string as-is if parsing fails
                # raw_result is the raw payload (result parsed, full structure kept)
                print("✓ Search succeeded!")
                return raw_result, False  # flag: freshly fetched
            except requests.exceptions.Timeout as e:
                last_exception = e
                print(f"✗ Request timed out: {e}")
            except requests.exceptions.ConnectionError as e:
                last_exception = e
                print(f"✗ Connection error: {e}")
            except requests.exceptions.HTTPError as e:
                last_exception = e
                # Note: a Response is falsy for 4xx/5xx statuses, so compare
                # against None explicitly rather than relying on truthiness.
                status_code = e.response.status_code if e.response is not None else "unknown"
                print(f"✗ HTTP error {status_code}: {e}")
                # Do not retry client errors (4xx)
                if e.response is not None and 400 <= e.response.status_code < 500:
                    print("Client error, aborting retries")
                    raise
            except requests.exceptions.RequestException as e:
                last_exception = e
                print(f"✗ Request failed: {e}")
        # All retries exhausted
        print(f"✗ Maximum attempts reached ({max_retries}), request failed")
        raise last_exception
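
    # With the defaults (max_retries=5, retry_delay=2), attempt 1 fires
    # immediately and attempts 2-5 wait 2s, 4s, 8s and 16s respectively
    # (retry_delay * 2 ** (attempt - 1)).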

    def _extract_clean_data(self, result: Dict[str, Any]) -> Dict[str, Any]:
        """
        Extract and clean the data into a flattened structure.

        Args:
            result: Processed result dict.

        Returns:
            Dict with the list of notes and pagination info.
        """
        result_data = result.get("result", {})
        if not isinstance(result_data, dict):
            return {"has_more": False, "next_cursor": "", "notes": []}
        data = result_data.get("data", {})
        notes = data.get("data", [])
        clean_notes = []
        for note in notes:
            note_card = note.get("note_card", {})
            user = note_card.get("user", {})
            interact_info = note_card.get("interact_info", {})
            # Normalize image_list: pull URLs out of dict entries
            image_list_raw = note_card.get("image_list", [])
            images = []
            for img in image_list_raw:
                if isinstance(img, dict) and "image_url" in img:
                    images.append(img["image_url"])
                elif isinstance(img, str):
                    images.append(img)
            # Missing fields are uniformly represented as None/null
            note_id = note.get("id")
            clean_note = {
                "channel_content_id": note_id or None,
                "link": f"https://www.xiaohongshu.com/explore/{note_id}" if note_id else None,
                "comment_count": interact_info.get("comment_count"),
                "images": images,
                "like_count": interact_info.get("liked_count"),
                "desc": note_card.get("desc") or None,  # summary (returned by the search endpoint)
                "body_text": None,  # the full body requires the note-detail endpoint
                "title": note_card.get("display_title") or None,
                "collect_count": interact_info.get("collected_count"),
                "channel_account_id": user.get("user_id") or None,
                "channel_account_name": user.get("nick_name") or None,
                "content_type": note_card.get("type") or None,
                "video": None,  # search results carry no video field
                "shared_count": interact_info.get("shared_count")
            }
            clean_notes.append(clean_note)
        # Return the clean data along with pagination info
        return {
            "has_more": data.get("has_more", False),
            "next_cursor": data.get("next_cursor", ""),
            "notes": clean_notes
        }
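
    # Minimal illustration (hypothetical input, traced by hand through the
    # code above): a raw result of
    #   {"result": {"data": {"data": [{"id": "abc", "note_card":
    #       {"display_title": "t", "interact_info": {"liked_count": 5}}}],
    #       "has_more": False}}}
    # cleans to a single note with channel_content_id "abc", title "t",
    # like_count 5 and link "https://www.xiaohongshu.com/explore/abc".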

    def _get_filename_suffix(
        self,
        content_type: str,
        sort_type: str,
        publish_time: str
    ) -> str:
        """
        Build a filename suffix from the search parameters.

        Args:
            content_type: Content type.
            sort_type: Sort order.
            publish_time: Publish-time filter.

        Returns:
            Filename suffix string.
        """
        # Use the raw parameter values directly, unmapped, all included
        parts = [content_type, sort_type, publish_time]
        return "_" + "_".join(parts)
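
    # e.g. _get_filename_suffix("视频", "最新", "不限") -> "_视频_最新_不限",
    # giving saved files names like 20240101_120000_page1_视频_最新_不限.json
    # (the timestamp shown here is illustrative).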

    def save_result(
        self,
        keyword: str,
        raw_result: Dict[str, Any],
        page: int = 1,
        content_type: str = "不限",
        sort_type: str = "综合",
        publish_time: str = "不限",
        cursor: str = ""
    ) -> Tuple[str, str]:
        """
        Save the raw and the cleaned data into separate directories.

        Directory layout:
            data/search/xiaohongshu_search/
            ├── {keyword}/
            │   ├── raw/    # raw data (full API response, incl. pagination info)
            │   │   └── {timestamp}_page{page}_{params}.json
            │   └── clean/  # cleaned data (flattened note array)
            │       └── {timestamp}_page{page}_{params}.json

        Args:
            keyword: Search keyword.
            raw_result: Raw data (with the result field already parsed).
            page: Page number.
            content_type: Content type.
            sort_type: Sort order.
            publish_time: Publish-time filter.
            cursor: Pagination cursor.

        Returns:
            Tuple of (raw data path, cleaned data path).
        """
        # Sanitize the keyword for use as a folder name
        safe_keyword = self._sanitize_keyword(keyword)
        # Create the directory structure
        base_dir = os.path.join(self.results_base_dir, "xiaohongshu_search", safe_keyword)
        raw_dir = os.path.join(base_dir, "raw")
        clean_dir = os.path.join(base_dir, "clean")
        os.makedirs(raw_dir, exist_ok=True)
        os.makedirs(clean_dir, exist_ok=True)
        # Build the file name (embedding the parameter values)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        param_suffix = self._get_filename_suffix(content_type, sort_type, publish_time)
        filename = f"{timestamp}_page{page}{param_suffix}.json"
        raw_filepath = os.path.join(raw_dir, filename)
        clean_filepath = os.path.join(clean_dir, filename)
        # Attach the search parameters to the raw data
        raw_data_with_meta = {
            "search_params": {
                "keyword": keyword,
                "content_type": content_type,
                "sort_type": sort_type,
                "publish_time": publish_time,
                "cursor": cursor,
                "page": page,
                "timestamp": timestamp
            },
            "api_response": raw_result
        }
        # Save the raw result (with metadata)
        with open(raw_filepath, 'w', encoding='utf-8') as f:
            json.dump(raw_data_with_meta, f, ensure_ascii=False, indent=2)
        # Extract and save the cleaned data
        clean_data = self._extract_clean_data(raw_result)
        # Attach the search parameters to the clean data
        clean_data_with_meta = {
            "search_params": {
                "keyword": keyword,
                "content_type": content_type,
                "sort_type": sort_type,
                "publish_time": publish_time,
                "cursor": cursor,
                "page": page,
                "timestamp": timestamp
            },
            "has_more": clean_data["has_more"],
            "next_cursor": clean_data["next_cursor"],
            "notes": clean_data["notes"]
        }
        with open(clean_filepath, 'w', encoding='utf-8') as f:
            json.dump(clean_data_with_meta, f, ensure_ascii=False, indent=2)
        return raw_filepath, clean_filepath
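

# Minimal programmatic usage sketch (illustrative keyword; issues a live
# request against BASE_URL unless a cache entry exists):
#   client = XiaohongshuSearch(use_cache=True)
#   raw, from_cache = client.search("露营装备")
#   if not from_cache:
#       raw_path, clean_path = client.save_result("露营装备", raw)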


def main():
    """Example CLI usage."""
    # Parse command-line arguments
    parser = argparse.ArgumentParser(description='Xiaohongshu note search tool')
    parser.add_argument(
        '--results-dir',
        type=str,
        default='data/search',
        help='Results output directory (default: data/search)'
    )
    parser.add_argument(
        '--keyword',
        type=str,
        required=True,
        help='Search keyword (required)'
    )
    parser.add_argument(
        '--content-type',
        type=str,
        default='不限',
        choices=['不限', '视频', '图文'],
        help='Content type (default: 不限)'
    )
    parser.add_argument(
        '--sort-type',
        type=str,
        default='综合',
        choices=['综合', '最新', '最多点赞', '最多评论'],
        help='Sort order (default: 综合)'
    )
    parser.add_argument(
        '--publish-time',
        type=str,
        default='不限',
        choices=['不限', '一天内', '一周内', '半年内'],
        help='Publish-time filter (default: 不限)'
    )
    parser.add_argument(
        '--cursor',
        type=str,
        default='',
        help='Pagination cursor (empty for the first page)'
    )
    parser.add_argument(
        '--page',
        type=int,
        default=1,
        help='Page marker embedded in saved file names (default: 1)'
    )
    parser.add_argument(
        '--max-retries',
        type=int,
        default=5,
        help='Maximum number of attempts (default: 5)'
    )
    parser.add_argument(
        '--retry-delay',
        type=int,
        default=2,
        help='Retry delay in seconds (default: 2)'
    )
    parser.add_argument(
        '--timeout',
        type=int,
        default=30,
        help='Request timeout in seconds (default: 30)'
    )
    parser.add_argument(
        '--force',
        action='store_true',
        help='Force a fresh API request, ignoring the cache'
    )
    parser.add_argument(
        '--no-cache',
        action='store_true',
        help='Disable caching'
    )
    args = parser.parse_args()
    # Create the API client
    use_cache = not args.no_cache
    client = XiaohongshuSearch(results_dir=args.results_dir, use_cache=use_cache)
    # Run the search and save the results
    try:
        raw_result, from_cache = client.search(
            args.keyword,
            args.content_type,
            args.sort_type,
            args.publish_time,
            args.cursor,
            timeout=args.timeout,
            max_retries=args.max_retries,
            retry_delay=args.retry_delay,
            force=args.force
        )
        # Only freshly fetched data needs saving
        if not from_cache:
            raw_filepath, clean_filepath = client.save_result(
                args.keyword,
                raw_result,
                args.page,
                args.content_type,
                args.sort_type,
                args.publish_time,
                args.cursor
            )
            print(f"Raw data saved to: {raw_filepath}")
            print(f"Clean data saved to: {clean_filepath}")
        else:
            print("Used cached data, no new files saved")
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


def search_xiaohongshu(
    keyword: str,
    content_type: str = "不限",
    sort_type: str = "综合",
    publish_time: str = "不限",
    page: int = 1,
    force: bool = False
) -> Dict[str, Any]:
    """
    Search Xiaohongshu notes.

    Args:
        keyword: Search keyword.
        content_type: Content type; one of 不限, 视频, 图文.
        sort_type: Sort order; one of 综合, 最新, 最多点赞, 最多评论.
        publish_time: Publish-time filter; one of 不限, 一天内, 一周内, 半年内.
        page: Page number (pagination handled automatically).
        force: Force a refresh (ignore the cache).

    Returns:
        {
            "search_params": {...},
            "has_more": bool,
            "next_cursor": str,
            "notes": [...]
        }

    Examples:
        >>> # Basic usage
        >>> data = search_xiaohongshu("产品测试")
        >>> for note in data['notes']:
        ...     print(f"{note['title']} - {note['like_count']} likes")

        >>> # With filters
        >>> data = search_xiaohongshu(
        ...     keyword="产品测试",
        ...     content_type="视频",
        ...     sort_type="最新"
        ... )

        >>> # Pagination (cursor handled automatically)
        >>> page1 = search_xiaohongshu("产品测试", page=1)
        >>> page2 = search_xiaohongshu("产品测试", page=2)
        >>> page3 = search_xiaohongshu("产品测试", page=3)
    """
    # Create a client (default configuration)
    client = XiaohongshuSearch(use_cache=True)
    # Resolve the pagination cursor automatically
    cursor = ""
    if page > 1:
        # Fetch the previous page to read its cursor
        prev_page_result = search_xiaohongshu(
            keyword=keyword,
            content_type=content_type,
            sort_type=sort_type,
            publish_time=publish_time,
            page=page - 1,
            force=False  # previous pages may come from the cache
        )
        cursor = prev_page_result.get('next_cursor', '')
    # Search (retries, timeouts etc. handled internally)
    raw_result, from_cache = client.search(
        keyword=keyword,
        content_type=content_type,
        sort_type=sort_type,
        publish_time=publish_time,
        cursor=cursor,
        force=force
    )
    # Only freshly fetched data needs saving
    if not from_cache:
        _, clean_filepath = client.save_result(
            keyword=keyword,
            raw_result=raw_result,
            page=page,
            content_type=content_type,
            sort_type=sort_type,
            publish_time=publish_time,
            cursor=cursor
        )
        # Read the saved clean file back and return it
        with open(clean_filepath, 'r', encoding='utf-8') as f:
            return json.load(f)
    else:
        # Cached data: extract the clean data directly
        clean_data = client._extract_clean_data(raw_result)
        # Attach the search parameters to the clean data. (The cached API
        # response carries no search_params wrapper, so this lookup falls
        # back to an empty string.)
        timestamp = raw_result.get("search_params", {}).get("timestamp", "")
        clean_data_with_meta = {
            "search_params": {
                "keyword": keyword,
                "content_type": content_type,
                "sort_type": sort_type,
                "publish_time": publish_time,
                "cursor": cursor,
                "page": page,
                "timestamp": timestamp
            },
            "has_more": clean_data["has_more"],
            "next_cursor": clean_data["next_cursor"],
            "notes": clean_data["notes"]
        }
        return clean_data_with_meta
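

# Note on pagination: requesting page N first resolves pages 1..N-1
# recursively (normally served from the cache) to chain next_cursor values,
# so each earlier page costs one cached lookup.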


if __name__ == "__main__":
    main()