# -*- coding: utf-8 -*-
# @Author: luojunhui
# @Time: 2023/09/25
import json
import os
import random
import sys
import time

import requests
import urllib3
from fake_useragent import FakeUserAgent

sys.path.append(os.getcwd())
from common.mq import MQ
from common.common import Common
from common.feishu import Feishu
from common.publish import Publish
from common.scheduling_db import MysqlHelper
from common.public import get_config_from_mysql, download_rule

# proxies = {"http": None, "https": None}


class XiaoNianGaoH5Scheduling:
    platform = "小年糕"  # display name, as stored in crawler_video.platform
    words = "abcdefghijklmnopqrstuvwxyz0123456789"
    uid_token_dict = {
        "uid": f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}""",
        "token": "".join(random.sample(words, 32)),
    }
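
    # The fake device identity above is generated once, at class-definition
    # time: "uid" mimics a UUID-style 8-4-4-4-12 layout and "token" is a
    # 32-character random string. random.sample draws without replacement,
    # so each segment contains no repeated characters from `words`.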

    # Fetch the recommend-feed video list
    @classmethod
    def get_videoList(cls, log_type, crawler, rule_dict, our_uid, env):
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        for page in range(1, 101):
            try:
                Common.logger(log_type, crawler).info(f"Crawling page {page}")
                Common.logging(log_type, crawler, env, f"Crawling page {page}")
                url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
                payload = {
                    "rec_ab_config": {
                        "ban_ab": 1,
                        "city_slot": 0,
                        "multi_ab": 1,
                        "region_ab": {
                            "num": 4,
                            "position": {"0": 1, "1": 2, "2": 3, "3": 4},
                        },
                    },
                    "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!750x500r/crop/750x500/interlace/1/format/jpg/quality/75",
                    "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!80x80r/crop/80x80/interlace/1/format/jpg/quality/75",
                    "limit": 4,
                    # "tag_id": 116,
                    "share_height": 500,
                    "share_width": 625,
                    "log_params": {
                        "proj": "in",
                        "page": "discover_rec",
                        "common": {
                            "os": "OS X 10.15.7",
                            "device": "",
                            "weixinver": "6.8.0",
                            "srcver": "5.71.11",
                        },
                    },
                    "token": cls.uid_token_dict["token"],
                    "code_ver": "5.71.11",
                    "uid": cls.uid_token_dict["uid"],
                    "proj": "in",
                }
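                # The payload above mirrors the request the Xiaoniangao H5
                # page sends for its recommend feed; "limit" appears to cap
                # the number of videos returned per call, and the uid/token
                # pair is the randomly generated identity from uid_token_dict.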
                headers = {
                    "Host": "kapi.xiaoniangao.cn",
                    "accept": "application/json, text/plain, */*",
                    "user-agent": FakeUserAgent().random,
                    "content-type": "application/x-www-form-urlencoded",
                    "origin": "https://wx.xiaoniangao.cn",
                    "sec-fetch-site": "same-site",
                    "sec-fetch-mode": "cors",
                    "sec-fetch-dest": "empty",
                    "referer": "https://wx.xiaoniangao.cn/",
                    "accept-language": "en",
                }
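                # These headers spoof a browser request coming from the
                # https://wx.xiaoniangao.cn H5 page (matching Origin/Referer
                # and CORS Sec-Fetch-* values), with a fresh random
                # user-agent from fake_useragent on every page fetch.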
                urllib3.disable_warnings()
                proxies = Common.tunnel_proxies()
                r = requests.post(
                    url=url,
                    headers=headers,
                    data=json.dumps(payload),
                    proxies=proxies,
                    verify=False,
                )
                # r = requests.post(
                #     url=url, headers=headers, data=json.dumps(payload), verify=False
                # )
                # A non-200 status or a malformed/empty response ends this crawl run
                if r.status_code != 200 or "data" not in r.text:
                    Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
                    Common.logging(log_type, crawler, env, f"get_videoList:{r.text}\n")
                    return
                elif "data" not in r.json():
                    Common.logger(log_type, crawler).info(f"get_videoList:{r.json()}\n")
                    Common.logging(log_type, crawler, env, f"get_videoList:{r.json()}\n")
                    return
                elif "list" not in r.json()["data"]:
                    Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']}\n")
                    Common.logging(log_type, crawler, env, f"get_videoList:{r.json()['data']}\n")
                    return
                elif len(r.json()["data"]["list"]) == 0:
                    Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']['list']}\n")
                    Common.logging(log_type, crawler, env, f"get_videoList:{r.json()['data']['list']}\n")
                    return
                else:
                    # Video list data
                    feeds = r.json()["data"]["list"]
                    for i in range(len(feeds)):
                        try:
                            # Title: the emoji is randomly added at the head or
                            # tail, or replaces punctuation within the sentence
                            xiaoniangao_title = (
                                feeds[i]
                                .get("title", "")
                                .strip()
                                .replace("\n", "")
                                .replace("/", "")
                                .replace("\r", "")
                                .replace("#", "")
                                .replace(".", "。")
                                .replace("\\", "")
                                .replace("&NBSP", "")
                                .replace(":", "")
                                .replace("*", "")
                                .replace("?", "")
                                .replace("？", "")
                                .replace('"', "")
                                .replace("<", "")
                                .replace(">", "")
                                .replace("|", "")
                                .replace(" ", "")
                                .replace("'", "")
                            )
                            # Pick a random emoji/symbol from the MySQL config
                            emoji = random.choice(
                                get_config_from_mysql(log_type, crawler, env, "emoji")
                            )
                            # Build the final title: choose randomly between
                            # [emoji+title, title+emoji]
                            video_title = random.choice(
                                [
                                    f"{emoji}{xiaoniangao_title}",
                                    f"{xiaoniangao_title}{emoji}",
                                ]
                            )
                            # Publish time ("t" is a millisecond timestamp)
                            publish_time_stamp = int(int(feeds[i].get("t", 0)) / 1000)
                            publish_time_str = time.strftime(
                                "%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp)
                            )
                            # Username / avatar
                            user_name = (
                                feeds[i]
                                .get("user", {})
                                .get("nick", "")
                                .strip()
                                .replace("\n", "")
                                .replace("/", "")
                                .replace("快手", "")
                                .replace(" ", "")
                                .replace("&NBSP", "")
                                .replace("\r", "")
                            )
                            video_dict = {
                                "video_title": video_title,
                                "video_id": feeds[i].get("vid", ""),
                                "duration": int(feeds[i].get("du", 0) / 1000),
                                "play_cnt": feeds[i].get("play_pv", 0),
                                "like_cnt": feeds[i].get("favor", {}).get("total", 0),
                                "comment_cnt": feeds[i].get("comment_count", 0),
                                "share_cnt": feeds[i].get("share", 0),
                                "user_name": user_name,
                                "publish_time_stamp": publish_time_stamp,
                                "publish_time_str": publish_time_str,
                                "video_width": int(feeds[i].get("vw", 0)),
                                "video_height": int(feeds[i].get("vh", 0)),
                                "avatar_url": feeds[i].get("user", {}).get("hurl", ""),
                                "profile_id": feeds[i]["id"],
                                "profile_mid": feeds[i]["user"]["mid"],
                                "cover_url": feeds[i].get("url", ""),
                                "video_url": feeds[i].get("v_url", ""),
                                "session": f"xiaoniangao-h5-{int(time.time())}",
                            }
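                            # Field mapping follows the feed schema used above:
                            # "du" (duration) and "t" (publish time) are in
                            # milliseconds, "vw"/"vh" are the video's pixel
                            # dimensions, and "play_pv"/"favor.total" carry
                            # the play and like counts.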
                            for k, v in video_dict.items():
                                Common.logger(log_type, crawler).info(f"{k}:{v}")
                            Common.logging(log_type, crawler, env, f"{video_dict}")
                            # Filter out invalid videos
                            if (
                                video_title == ""
                                or video_dict["video_id"] == ""
                                or video_dict["video_url"] == ""
                            ):
                                Common.logger(log_type, crawler).warning("Invalid video\n")
                                Common.logging(log_type, crawler, env, "Invalid video\n")
                            # Filter by the basic crawl rules
                            elif (
                                download_rule(
                                    log_type=log_type,
                                    crawler=crawler,
                                    video_dict=video_dict,
                                    rule_dict=rule_dict,
                                )
                                is False
                            ):
                                Common.logger(log_type, crawler).info("Does not satisfy the crawl rules\n")
                                Common.logging(log_type, crawler, env, "Does not satisfy the crawl rules\n")
                            # Filter-word check against the title
                            elif any(
                                str(word) in video_dict["video_title"]
                                for word in get_config_from_mysql(
                                    log_type=log_type,
                                    source=crawler,
                                    env=env,
                                    text="filter",
                                    action="",
                                )
                            ):
                                Common.logger(log_type, crawler).info("Title hit a filter word\n")
                                Common.logging(log_type, crawler, env, "Title hit a filter word\n")
                            elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                                Common.logger(log_type, crawler).info("Video already downloaded\n")
                                Common.logging(log_type, crawler, env, "Video already downloaded\n")
                            else:
                                # cls.download_publish(log_type=log_type,
                                #                      crawler=crawler,
                                #                      video_dict=video_dict,
                                #                      rule_dict=rule_dict,
                                #                      our_uid=our_uid,
                                #                      env=env)
                                video_dict["out_user_id"] = video_dict["profile_id"]
                                video_dict["platform"] = crawler
                                video_dict["strategy"] = log_type
                                video_dict["out_video_id"] = video_dict["video_id"]
                                video_dict["width"] = video_dict["video_width"]
                                video_dict["height"] = video_dict["video_height"]
                                video_dict["crawler_rule"] = json.dumps(rule_dict)
                                video_dict["user_id"] = our_uid
                                video_dict["publish_time"] = video_dict["publish_time_str"]
                                video_dict["strategy_type"] = "play"
                                # print(video_dict)
                                mq.send_msg(video_dict)
                                # break
                        except Exception as e:
                            Common.logger(log_type, crawler).error(f"Exception while crawling a single video: {e}\n")
                            Common.logging(log_type, crawler, env, f"Exception while crawling a single video: {e}\n")
            except Exception as e:
                Common.logger(log_type, crawler).error(f"Exception while crawling page {page}: {e}\n")
                Common.logging(log_type, crawler, env, f"Exception while crawling page {page}: {e}\n")

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        # Count existing crawler_video rows for this video; the platform
        # filter matches both the crawler source name and the display name.
        # sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
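
# Note: repeat_video returns a plain row count; get_videoList treats any
# nonzero value as "video already downloaded" and skips the item.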


if __name__ == "__main__":
    XNG_H5 = XiaoNianGaoH5Scheduling
    XNG_H5.get_videoList(
        log_type="H5", crawler="xiaoniangao", rule_dict={}, our_uid="ljh", env="dev"
    )