xiaoniangao_play_scheduling.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/3/16
import json
import os
import random
import shutil
import sys
import time
import requests
import urllib3

sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.publish import Publish
from common.scheduling_db import MysqlHelper
from common.public import get_config_from_mysql

proxies = {"http": None, "https": None}

class XiaoniangaoPlayScheduling:
    platform = "小年糕"
    words = "abcdefghijklmnopqrstuvwxyz0123456789"
    uid = f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}"""
    token = "".join(random.sample(words, 32))
    uid_token_dict = {
        "uid": uid,
        "token": token
    }
    # Generate uid and token
    @classmethod
    def get_uid_token(cls):
        words = "abcdefghijklmnopqrstuvwxyz0123456789"
        uid = f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}"""
        token = "".join(random.sample(words, 32))
        uid_token_dict = {
            "uid": uid,
            "token": token
        }
        return uid_token_dict
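
    # Illustrative output of get_uid_token (values are random on every call;
    # shown only to document the 8-4-4-4-12 UUID-like format this generator mimics):
    # {"uid": "a1b2c3d4-e5f6-07g8-9hij-klmnopqrs012", "token": "..."}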

    # Base threshold rules
    @staticmethod
    def download_rule(log_type, crawler, video_dict, rule_dict):
        """
        Basic rules for deciding whether to download a video.
        :param log_type: log type
        :param crawler: which crawler
        :param video_dict: video info, as a dict
        :param rule_dict: rule info, as a dict
        :return: True if the rules are satisfied, otherwise False
        """
        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
        if rule_play_cnt_max == 0:
            rule_play_cnt_max = 100000000

        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
        if rule_duration_max == 0:
            rule_duration_max = 100000000

        rule_period_min = rule_dict.get('period', {}).get('min', 0)
        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
        # if rule_period_max == 0:
        #     rule_period_max = 100000000

        rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
        rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
        if rule_fans_cnt_max == 0:
            rule_fans_cnt_max = 100000000

        rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
        rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
        if rule_videos_cnt_max == 0:
            rule_videos_cnt_max = 100000000

        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
        if rule_like_cnt_max == 0:
            rule_like_cnt_max = 100000000

        rule_width_min = rule_dict.get('width', {}).get('min', 0)
        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
        if rule_width_max == 0:
            rule_width_max = 100000000

        rule_height_min = rule_dict.get('height', {}).get('min', 0)
        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
        if rule_height_max == 0:
            rule_height_max = 100000000

        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
        if rule_share_cnt_max == 0:
            rule_share_cnt_max = 100000000

        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
        if rule_comment_cnt_max == 0:
            rule_comment_cnt_max = 100000000

        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 0)
        if rule_publish_time_max == 0:
            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
        Common.logger(log_type, crawler).info(
            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
        Common.logger(log_type, crawler).info(
            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
        # Note: rule_period_min acts as a recency window here -- the video must
        # have been published within the last rule_period_min days.
        return (int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min)
                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min)
                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min)
                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min)
                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min)
                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min)
                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min)
                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min)
                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min))
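
    # A minimal sketch of the rule_dict shape this method expects, inferred from
    # the .get() chains above (the threshold values are illustrative assumptions;
    # a max of 0 means "uncapped"):
    # rule_dict = {
    #     "play_cnt": {"min": 500, "max": 0},
    #     "duration": {"min": 40, "max": 0},
    #     "period": {"min": 3, "max": 0},
    #     "width": {"min": 0, "max": 0},
    #     "height": {"min": 0, "max": 0},
    # }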

    # Fetch the recommended video list
    @classmethod
    def get_videoList(cls, log_type, crawler, rule_dict, strategy, oss_endpoint, env):
        uid_token_dict = cls.uid_token_dict
        url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
        headers = {
            "x-b3-traceid": '1dc0a6d0929a2b',
            "X-Token-Id": 'ae99a4953804085ebb0ae36fa138031d-1146052582',
            "uid": uid_token_dict['uid'],
            "content-type": "application/json",
            "Accept-Encoding": "gzip,compress,br,deflate",
            "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
                          ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
                          'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
            "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/620/page-frame.html'
        }
        data = {
            "log_params": {
                "page": "discover_rec",
                "common": {
                    "brand": "iPhone",
                    "device": "iPhone 11",
                    "os": "iOS 14.7.1",
                    "weixinver": "8.0.20",
                    "srcver": "2.24.2",
                    "net": "wifi",
                    "scene": 1089
                }
            },
            "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!750x500r/crop/750x500/interlace/1/format/jpg",
            "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!80x80r/crop/80x80/interlace/1/format/jpg",
            "share_width": 625,
            "share_height": 500,
            "ext": {
                "fmid": 0,
                "items": {}
            },
            "app": "xng",
            "rec_scene": "discover_rec",
            "log_common_params": {
                "e": [{
                    "data": {
                        "page": "discoverIndexPage",
                        "topic": "recommend"
                    },
                    "ab": {}
                }],
                "ext": {
                    "brand": "iPhone",
                    "device": "iPhone 11",
                    "os": "iOS 14.7.1",
                    "weixinver": "8.0.20",
                    "srcver": "2.24.3",
                    "net": "wifi",
                    "scene": "1089"
                },
                "pj": "1",
                "pf": "2",
                "session_id": "7bcce313-b57d-4305-8d14-6ebd9a1bad29"
            },
            "refresh": False,
            # "token": cls.play_token,
            "token": uid_token_dict['token'],
            # "uid": cls.play_uid,
            "uid": uid_token_dict['uid'],
            "proj": "ma",
            "wx_ver": "8.0.20",
            "code_ver": "3.62.0"
        }
        urllib3.disable_warnings()
        r = requests.post(url=url, headers=headers, json=data, proxies=proxies, verify=False)
        if "data" not in r.text or r.status_code != 200:
            Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}")
            return
        elif "data" not in r.json():
            Common.logger(log_type, crawler).info(f"get_videoList:{r.json()}")
            return
        elif "list" not in r.json()["data"]:
            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']}")
            return
        elif len(r.json()["data"]["list"]) == 0:
            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']['list']}")
            return
        else:
            # Video list data
            feeds = r.json()["data"]["list"]
            for feed in feeds:
                # Title: an emoji is randomly added at the head or tail, or
                # replaces punctuation in the middle of the sentence
                xiaoniangao_title = feed.get("title", "").strip().replace("\n", "") \
                    .replace("/", "").replace("\r", "").replace("#", "") \
                    .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
                    .replace(":", "").replace("*", "").replace("?", "") \
                    .replace("?", "").replace('"', "").replace("<", "") \
                    .replace(">", "").replace("|", "").replace(" ", "") \
                    .replace('"', '').replace("'", '')
                # Pick a random emoji/symbol
                emoji = random.choice(get_config_from_mysql(log_type, crawler, env, "emoji"))
                # Build the final title: choose randomly from [emoji+title, title+emoji]
                video_title = random.choice([f"{emoji}{xiaoniangao_title}", f"{xiaoniangao_title}{emoji}"])
                # Video ID
                video_id = feed.get("vid", "")
                # Play count
                play_cnt = feed.get("play_pv", 0)
                # Like count
                like_cnt = feed.get("favor", {}).get("total", 0)
                # Comment count
                comment_cnt = feed.get("comment_count", 0)
                # Share count
                share_cnt = feed.get("share", 0)
                # Duration in seconds ("du" is in milliseconds)
                duration = int(feed.get("du", 0) / 1000)
                # Width and height
                video_width = int(feed.get("w", 0))
                video_height = int(feed.get("h", 0))
                # Publish time ("t" is a millisecond timestamp)
                publish_time_stamp = int(int(feed.get("t", 0)) / 1000)
                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                # User name / avatar
                user_name = feed.get("user", {}).get("nick", "").strip().replace("\n", "") \
                    .replace("/", "").replace("快手", "").replace(" ", "") \
                    .replace(" ", "").replace("&NBSP", "").replace("\r", "")
                avatar_url = feed.get("user", {}).get("hurl", "")
                # User ID
                profile_id = feed.get("id", "")
                # User mid
                profile_mid = feed.get("user", {}).get("mid", "")
                # Video cover
                cover_url = feed.get("url", "")
                # Video playback URL
                video_url = feed.get("v_url", "")
                video_dict = {
                    "video_title": video_title,
                    "video_id": video_id,
                    "duration": duration,
                    "play_cnt": play_cnt,
                    "like_cnt": like_cnt,
                    "comment_cnt": comment_cnt,
                    "share_cnt": share_cnt,
                    "user_name": user_name,
                    "publish_time_stamp": publish_time_stamp,
                    "publish_time_str": publish_time_str,
                    "video_width": video_width,
                    "video_height": video_height,
                    "avatar_url": avatar_url,
                    "profile_id": profile_id,
                    "profile_mid": profile_mid,
                    "cover_url": cover_url,
                    "video_url": video_url,
                    "session": f"xiaoniangao-play-{int(time.time())}"
                }
                for k, v in video_dict.items():
                    Common.logger(log_type, crawler).info(f"{k}:{v}")
                # Filter out invalid videos
                if video_title == "" or video_id == "" or video_url == "":
                    Common.logger(log_type, crawler).warning("无效视频\n")
                # Filter by the base crawl rules
                elif cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
                    Common.logger(log_type, crawler).info("不满足抓取规则\n")
                elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
                    Common.logger(log_type, crawler).info('视频已下载\n')
                # Filter sensitive words
                elif any(str(word) in video_title for word in get_config_from_mysql(log_type, crawler, env, "filter", action="")):
                    Common.logger(log_type, crawler).info("视频已中过滤词\n")
                else:
                    cls.download_publish(log_type=log_type,
                                         crawler=crawler,
                                         video_dict=video_dict,
                                         rule_dict=rule_dict,
                                         strategy=strategy,
                                         oss_endpoint=oss_endpoint,
                                         env=env)
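
    # Assumed shape of a single feeds[] item, inferred from the field reads in
    # get_videoList above (keys only; the values here are placeholders):
    # {
    #     "vid": "...", "title": "...", "play_pv": 0, "comment_count": 0,
    #     "share": 0, "du": 0, "w": 0, "h": 0, "t": 0, "id": "...",
    #     "url": "...", "v_url": "...",
    #     "favor": {"total": 0},
    #     "user": {"nick": "...", "hurl": "...", "mid": "..."}
    # }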

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
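
    # Note: video_id is interpolated directly into the SQL string above; if
    # MysqlHelper supports parameter binding, passing video_id as a bound
    # parameter would be safer (the helper's API is not shown in this file).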

    @classmethod
    def download_publish(cls, log_type, crawler, video_dict, rule_dict, strategy, oss_endpoint, env):
        # Download the cover
        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_dict["video_title"], url=video_dict["cover_url"])
        # Download the video
        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
        # Save the video info to "./videos/{download_video_title}/info.txt"
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
        # Upload the video
        Common.logger(log_type, crawler).info("开始上传视频...")
        our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                  crawler=crawler,
                                                  strategy=strategy,
                                                  our_uid="play",
                                                  env=env,
                                                  oss_endpoint=oss_endpoint)
        if env == "dev":
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        Common.logger(log_type, crawler).info("视频上传完成")
        if our_video_id is None:
            # Upload failed: delete the local video folder and stop
            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
            return
        insert_sql = f""" insert into crawler_video(video_id,
                                                    out_user_id,
                                                    platform,
                                                    strategy,
                                                    out_video_id,
                                                    video_title,
                                                    cover_url,
                                                    video_url,
                                                    duration,
                                                    publish_time,
                                                    play_cnt,
                                                    crawler_rule,
                                                    width,
                                                    height)
                          values({our_video_id},
                                 "{video_dict['profile_id']}",
                                 "{cls.platform}",
                                 "播放量榜爬虫策略",
                                 "{video_dict['video_id']}",
                                 "{video_dict['video_title']}",
                                 "{video_dict['cover_url']}",
                                 "{video_dict['video_url']}",
                                 {int(video_dict['duration'])},
                                 "{video_dict['publish_time_str']}",
                                 {int(video_dict['play_cnt'])},
                                 '{json.dumps(rule_dict)}',
                                 {int(video_dict['video_width'])},
                                 {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
        # Write the video to Feishu
        Feishu.insert_columns(log_type, crawler, "c85k1C", "ROWS", 1, 2)
        # Video-ID sheet: write the data into the first row
        upload_time = int(time.time())
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "播放量榜爬虫策略",
                   str(video_dict['video_id']),
                   str(video_dict['video_title']),
                   our_video_link,
                   video_dict['play_cnt'],
                   video_dict['comment_cnt'],
                   video_dict['like_cnt'],
                   video_dict['share_cnt'],
                   video_dict['duration'],
                   f"{video_dict['video_width']}*{video_dict['video_height']}",
                   str(video_dict['publish_time_str']),
                   str(video_dict['user_name']),
                   str(video_dict['profile_id']),
                   str(video_dict['profile_mid']),
                   str(video_dict['avatar_url']),
                   str(video_dict['cover_url']),
                   str(video_dict['video_url'])]]
        time.sleep(1)
        Feishu.update_values(log_type, crawler, "c85k1C", "F2:Z2", values)
        Common.logger(log_type, crawler).info('视频信息写入飞书成功\n')


if __name__ == '__main__':
    pass
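
    # Minimal usage sketch (illustrative only -- the argument values below are
    # assumptions for local testing, not values confirmed by this repository):
    # XiaoniangaoPlayScheduling.get_videoList(log_type="play",
    #                                         crawler="xiaoniangao",
    #                                         rule_dict={"play_cnt": {"min": 500, "max": 0},
    #                                                    "duration": {"min": 40, "max": 0},
    #                                                    "period": {"min": 3, "max": 0}},
    #                                         strategy="播放量榜爬虫策略",
    #                                         oss_endpoint="out",
    #                                         env="dev")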