# gzh_spider.py
  1. from __future__ import annotations
  2. import re
  3. import json
  4. import requests
  5. from fake_useragent import FakeUserAgent
  6. from tenacity import retry
  7. from applications.api import log
  8. from applications.utils import request_retry
# Shared retry policy for every crawler call: up to 3 attempts, with retry
# delays bounded between 2s and 30s (exact backoff strategy is defined by
# applications.utils.request_retry — confirm there).
retry_desc = request_retry(retry_times=3, min_retry_delay=2, max_retry_delay=30)
# Base endpoint of the WeChat crawler service (url from aigc)
base_url = "http://crawler-cn.aiddit.com/crawler/wei_xin"
# All crawler endpoints accept JSON-encoded request bodies.
headers = {"Content-Type": "application/json"}
  13. @retry(**retry_desc)
  14. def get_article_detail(
  15. article_link: str, is_count: bool = False, is_cache: bool = True
  16. ) -> dict | None:
  17. """
  18. get official article detail
  19. """
  20. target_url = f"{base_url}/detail"
  21. payload = json.dumps(
  22. {
  23. "content_link": article_link,
  24. "is_count": is_count,
  25. "is_ad": False,
  26. "is_cache": is_cache,
  27. }
  28. )
  29. try:
  30. response = requests.post(
  31. url=target_url, headers=headers, data=payload, timeout=120
  32. )
  33. response.raise_for_status()
  34. return response.json()
  35. except requests.exceptions.RequestException as e:
  36. log(
  37. task="get_official_article_detail",
  38. function="get_official_article_detail",
  39. message=f"API请求失败: {e}",
  40. data={"link": article_link},
  41. )
  42. except json.JSONDecodeError as e:
  43. log(
  44. task="get_official_article_detail",
  45. function="get_official_article_detail",
  46. message=f"响应解析失败: {e}",
  47. data={"link": article_link},
  48. )
  49. return None
  50. @retry(**retry_desc)
  51. def get_article_list_from_account(account_id: str, index=None) -> dict | None:
  52. target_url = f"{base_url}/blogger"
  53. payload = json.dumps({"account_id": account_id, "cursor": index})
  54. try:
  55. response = requests.post(
  56. url=target_url, headers=headers, data=payload, timeout=120
  57. )
  58. response.raise_for_status()
  59. return response.json()
  60. except requests.exceptions.RequestException as e:
  61. log(
  62. task="get_official_account_article_list",
  63. function="get_official_account_article_list",
  64. message=f"API请求失败: {e}",
  65. data={"gh_id": account_id},
  66. )
  67. except json.JSONDecodeError as e:
  68. log(
  69. task="get_official_account_article_list",
  70. function="get_official_account_article_list",
  71. message=f"响应解析失败: {e}",
  72. data={"gh_id": account_id},
  73. )
  74. return None
  75. @retry(**retry_desc)
  76. def get_source_account_from_article(article_link) -> dict | None:
  77. """
  78. get account info from official article
  79. :param article_link:
  80. :return:
  81. """
  82. try:
  83. response = requests.get(
  84. url=article_link,
  85. headers={"User-Agent": FakeUserAgent().random},
  86. timeout=120,
  87. )
  88. response.raise_for_status()
  89. html_text = response.text
  90. regex_nickname = r"hit_nickname:\s*'([^']+)'"
  91. regex_username = r"hit_username:\s*'([^']+)'"
  92. nickname = re.search(regex_nickname, html_text)
  93. username = re.search(regex_username, html_text)
  94. # 输出提取的结果
  95. if nickname and username:
  96. return {"name": nickname.group(1), "gh_id": username.group(1)}
  97. else:
  98. return {}
  99. except requests.exceptions.RequestException as e:
  100. log(
  101. task="get_source_account_from_article",
  102. function="get_source_account_from_article",
  103. message=f"API请求失败: {e}",
  104. data={"link": article_link},
  105. )
  106. except json.JSONDecodeError as e:
  107. log(
  108. task="get_source_account_from_article",
  109. function="get_source_account_from_article",
  110. message=f"响应解析失败: {e}",
  111. data={"link": article_link},
  112. )
  113. return None