xiaoniangao_h5_schduling.py 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254
  1. # -*- coding: utf-8 -*-
  2. # @Author: luojunhui
  3. # @Time: 2023/09/25
  4. import json
  5. import os
  6. import random
  7. import shutil
  8. import sys
  9. import time
  10. from hashlib import md5
  11. import requests
  12. import urllib3
  13. from fake_useragent import FakeUserAgent
  14. from common.mq import MQ
  15. sys.path.append(os.getcwd())
  16. from common.common import Common
  17. from common.feishu import Feishu
  18. from common.publish import Publish
  19. from common.scheduling_db import MysqlHelper
  20. from common.public import get_config_from_mysql, download_rule
  21. proxies = {"http": None, "https": None}
  22. class XiaoNianGaoH5Scheduling:
  23. platform = "小年糕"
  24. words = "abcdefghijklmnopqrstuvwxyz0123456789"
  25. uid_token_dict = {
  26. "uid": f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}""",
  27. "token": "".join(random.sample(words, 32)),
  28. }
  29. # 获取列表
  30. @classmethod
  31. def get_videoList(cls, log_type, crawler, rule_dict, our_uid, env):
  32. mq = MQ(topic_name="topic_crawler_etl_" + env)
  33. for page in range(1, 2):
  34. try:
  35. Common.logger(log_type, crawler).info(f"正在抓取第{page}页")
  36. # Common.logging(log_type, crawler, env, f"正在抓取第{page}页")
  37. url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
  38. payload = {"tag_id": 101}
  39. headers = {
  40. "Host": "kapi.xiaoniangao.cn",
  41. "accept": "application/json, text/plain, */*",
  42. "user-agent": FakeUserAgent().random,
  43. "content-type": "application/x-www-form-urlencoded",
  44. "origin": "https://wx.xiaoniangao.cn",
  45. "sec-fetch-site": "same-site",
  46. "sec-fetch-mode": "cors",
  47. "sec-fetch-dest": "empty",
  48. "referer": "https://wx.xiaoniangao.cn/",
  49. "accept-language": "en",
  50. }
  51. urllib3.disable_warnings()
  52. # r = requests.post(url=url, headers=headers, data=json.dumps(payload), proxies=proxies, verify=False)
  53. r = requests.post(
  54. url=url, headers=headers, data=json.dumps(payload), verify=False
  55. )
  56. if "data" not in r.text or r.status_code != 200:
  57. Common.logger(log_type, crawler).warning(
  58. f"get_videoList:{r.text}\n"
  59. )
  60. # Common.logging(log_type, crawler, env, f"get_videoList:{r.text}\n")
  61. return
  62. elif "data" not in r.json():
  63. Common.logger(log_type, crawler).info(f"get_videoList:{r.json()}\n")
  64. # Common.logging(log_type, crawler, env, f"get_videoList:{r.json()}\n")
  65. return
  66. elif "list" not in r.json()["data"]:
  67. Common.logger(log_type, crawler).warning(
  68. f"get_videoList:{r.json()['data']}\n"
  69. )
  70. # Common.logging(log_type, crawler, env, f"get_videoList:{r.json()['data']}\n")
  71. return
  72. elif len(r.json()["data"]["list"]) == 0:
  73. Common.logger(log_type, crawler).warning(
  74. f"get_videoList:{r.json()['data']['list']}\n"
  75. )
  76. # Common.logging(log_type, crawler, env, f"get_videoList:{r.json()['data']['list']}\n")
  77. return
  78. else:
  79. # 视频列表数据
  80. feeds = r.json()["data"]["list"]
  81. for i in range(len(feeds)):
  82. try:
  83. # 标题,表情随机加在片头、片尾,或替代句子中间的标点符号
  84. xiaoniangao_title = (
  85. feeds[i]
  86. .get("title", "")
  87. .strip()
  88. .replace("\n", "")
  89. .replace("/", "")
  90. .replace("\r", "")
  91. .replace("#", "")
  92. .replace(".", "。")
  93. .replace("\\", "")
  94. .replace("&NBSP", "")
  95. .replace(":", "")
  96. .replace("*", "")
  97. .replace("?", "")
  98. .replace("?", "")
  99. .replace('"', "")
  100. .replace("<", "")
  101. .replace(">", "")
  102. .replace("|", "")
  103. .replace(" ", "")
  104. .replace('"', "")
  105. .replace("'", "")
  106. )
  107. # 随机取一个表情/符号
  108. emoji = random.choice(
  109. get_config_from_mysql(log_type, crawler, env, "emoji")
  110. )
  111. # 生成最终标题,标题list[表情+title, title+表情]随机取一个
  112. video_title = random.choice(
  113. [
  114. f"{emoji}{xiaoniangao_title}",
  115. f"{xiaoniangao_title}{emoji}",
  116. ]
  117. )
  118. # 发布时间
  119. publish_time_stamp = int(int(feeds[i].get("t", 0)) / 1000)
  120. publish_time_str = time.strftime(
  121. "%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp)
  122. )
  123. # 用户名 / 头像
  124. user_name = (
  125. feeds[i]
  126. .get("user", {})
  127. .get("nick", "")
  128. .strip()
  129. .replace("\n", "")
  130. .replace("/", "")
  131. .replace("快手", "")
  132. .replace(" ", "")
  133. .replace(" ", "")
  134. .replace("&NBSP", "")
  135. .replace("\r", "")
  136. )
  137. video_dict = {
  138. "video_title": video_title,
  139. "video_id": feeds[i].get("vid", ""),
  140. "duration": int(feeds[i].get("du", 0) / 1000),
  141. "play_cnt": feeds[i].get("play_pv", 0),
  142. "like_cnt": feeds[i].get("favor", {}).get("total", 0),
  143. "comment_cnt": feeds[i].get("comment_count", 0),
  144. "share_cnt": feeds[i].get("share", 0),
  145. "user_name": user_name,
  146. "publish_time_stamp": publish_time_stamp,
  147. "publish_time_str": publish_time_str,
  148. "video_width": int(feeds[i].get("vw", 0)),
  149. "video_height": int(feeds[i].get("vh", 0)),
  150. "avatar_url": feeds[i].get("user", {}).get("hurl", ""),
  151. "profile_id": feeds[i]["id"],
  152. "profile_mid": feeds[i]["user"]["mid"],
  153. "cover_url": feeds[i].get("url", ""),
  154. "video_url": feeds[i].get("v_url", ""),
  155. "session": f"xiaoniangao-h5-{int(time.time())}",
  156. }
  157. for k, v in video_dict.items():
  158. Common.logger(log_type, crawler).info(f"{k}:{v}")
  159. # Common.logging(log_type, crawler, env, f"{video_dict}")
  160. # 过滤无效视频
  161. if (
  162. video_title == ""
  163. or video_dict["video_id"] == ""
  164. or video_dict["video_url"] == ""
  165. ):
  166. Common.logger(log_type, crawler).warning("无效视频\n")
  167. # Common.logging(log_type, crawler, env, "无效视频\n")
  168. # 抓取基础规则过滤
  169. elif (
  170. download_rule(
  171. log_type=log_type,
  172. crawler=crawler,
  173. video_dict=video_dict,
  174. rule_dict=rule_dict,
  175. )
  176. is False
  177. ):
  178. Common.logger(log_type, crawler).info("不满足抓取规则\n")
  179. # Common.logging(log_type, crawler, env, "不满足抓取规则\n")
  180. elif (
  181. any(
  182. str(word)
  183. if str(word) in video_dict["video_title"]
  184. else False
  185. for word in get_config_from_mysql(
  186. log_type=log_type,
  187. source=crawler,
  188. env=env,
  189. text="filter",
  190. action="",
  191. )
  192. )
  193. is True
  194. ):
  195. Common.logger(log_type, crawler).info("已中过滤词\n")
  196. # Common.logging(log_type, crawler, env, '已中过滤词\n')
  197. elif (
  198. cls.repeat_video(
  199. log_type, crawler, video_dict["video_id"], env
  200. )
  201. != 0
  202. ):
  203. Common.logger(log_type, crawler).info("视频已下载\n")
  204. # Common.logging(log_type, crawler, env, '视频已下载\n')
  205. else:
  206. # cls.download_publish(log_type=log_type,
  207. # crawler=crawler,
  208. # video_dict=video_dict,
  209. # rule_dict=rule_dict,
  210. # our_uid=our_uid,
  211. # env=env)
  212. video_dict["out_user_id"] = video_dict["profile_id"]
  213. video_dict["platform"] = crawler
  214. video_dict["strategy"] = log_type
  215. video_dict["out_video_id"] = video_dict["video_id"]
  216. video_dict["width"] = video_dict["video_width"]
  217. video_dict["height"] = video_dict["video_height"]
  218. video_dict["crawler_rule"] = json.dumps(rule_dict)
  219. video_dict["user_id"] = our_uid
  220. video_dict["publish_time"] = video_dict[
  221. "publish_time_str"
  222. ]
  223. video_dict["strategy_type"] = "play"
  224. # print(video_dict)
  225. mq.send_msg(video_dict)
  226. # break
  227. except Exception as e:
  228. Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
  229. # Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
  230. except Exception as e:
  231. Common.logger(log_type, crawler).error(f"抓取第{page}页时异常:{e}\n")
  232. # Common.logging(log_type, crawler, env, f"抓取第{page}页时异常:{e}\n")
  233. @classmethod
  234. def repeat_video(cls, log_type, crawler, video_id, env):
  235. # sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
  236. sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
  237. repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
  238. return len(repeat_video)
  239. if __name__ == "__main__":
  240. XNG_H5 = XiaoNianGaoH5Scheduling
  241. XNG_H5.get_videoList(
  242. log_type="H5", crawler="xiaoniangao", rule_dict={}, our_uid="ljh", env="dev"
  243. )