xiaoniangao_play_scheduling.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/3/16
import json
import os
import random
import shutil
import sys
import time
import requests
import urllib3
sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.publish import Publish
from common.scheduling_db import MysqlHelper
from common.public import get_config_from_mysql

proxies = {"http": None, "https": None}

class XiaoniangaoPlayScheduling:
    platform = "小年糕"

    # Generate a random uid and token for each request
    @classmethod
    def get_uid_token(cls):
        words = "abcdefghijklmnopqrstuvwxyz0123456789"
        uid = f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}"""
        token = "".join(random.sample(words, 32))
        uid_token_dict = {
            "uid": uid,
            "token": token
        }
        return uid_token_dict
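
    # Note: the uid mimics a UUID-like 8-4-4-4-12 layout. random.sample draws
    # without replacement, so characters never repeat within a segment -- a
    # looser format than a real UUID, but the endpoint appears to accept it.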

    # Basic threshold rules
    @staticmethod
    def download_rule(log_type, crawler, video_dict, rule_dict):
        """
        Basic rules a video must satisfy before download.
        :param log_type: log type
        :param crawler: which crawler
        :param video_dict: video info, as a dict
        :param rule_dict: rule info, as a dict
        :return: True if the rules are satisfied, otherwise False
        """
        def rule_range(key):
            # Read a {min, max} threshold from rule_dict; max == 0 means "no upper bound".
            rule_min = rule_dict.get(key, {}).get('min', 0)
            rule_max = rule_dict.get(key, {}).get('max', 100000000)
            if rule_max == 0:
                rule_max = 100000000
            return int(rule_min), int(rule_max)

        rule_playCnt_min, rule_playCnt_max = rule_range('playCnt')
        rule_duration_min, rule_duration_max = rule_range('duration')
        rule_like_min, rule_like_max = rule_range('like')
        rule_videoWidth_min, rule_videoWidth_max = rule_range('videoWidth')
        rule_videoHeight_min, rule_videoHeight_max = rule_range('videoHeight')
        rule_shareCnt_min, rule_shareCnt_max = rule_range('shareCnt')
        rule_commentCnt_min, rule_commentCnt_max = rule_range('commentCnt')
        # Only the lower bound of "period" (publish recency, in days) is enforced.
        rule_period_min = rule_dict.get('period', {}).get('min', 0)
        # Disabled rules, kept for reference:
        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
        # if rule_period_max == 0:
        #     rule_period_max = 100000000
        #
        # rule_fans_min = rule_dict.get('fans', {}).get('min', 0)
        # rule_fans_max = rule_dict.get('fans', {}).get('max', 100000000)
        # if rule_fans_max == 0:
        #     rule_fans_max = 100000000
        #
        # rule_videos_min = rule_dict.get('videos', {}).get('min', 0)
        # rule_videos_max = rule_dict.get('videos', {}).get('max', 100000000)
        # if rule_videos_max == 0:
        #     rule_videos_max = 100000000

        Common.logger(log_type, crawler).info(
            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_playCnt_max:{int(rule_playCnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_playCnt_min:{int(rule_playCnt_min)}')
        Common.logger(log_type, crawler).info(
            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_like_max:{int(rule_like_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_min:{int(rule_like_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_commentCnt_max:{int(rule_commentCnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_commentCnt_min:{int(rule_commentCnt_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_shareCnt_max:{int(rule_shareCnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_shareCnt_min:{int(rule_shareCnt_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_videoWidth_max:{int(rule_videoWidth_max)} >= video_width:{int(video_dict["video_width"])} >= rule_videoWidth_min:{int(rule_videoWidth_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_videoHeight_max:{int(rule_videoHeight_max)} >= video_height:{int(video_dict["video_height"])} >= rule_videoHeight_min:{int(rule_videoHeight_min)}')
        return (int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min)
                and int(rule_playCnt_max) >= int(video_dict['play_cnt']) >= int(rule_playCnt_min)
                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min)
                and int(rule_like_max) >= int(video_dict['like_cnt']) >= int(rule_like_min)
                and int(rule_commentCnt_max) >= int(video_dict['comment_cnt']) >= int(rule_commentCnt_min)
                and int(rule_shareCnt_max) >= int(video_dict['share_cnt']) >= int(rule_shareCnt_min)
                and int(rule_videoWidth_max) >= int(video_dict['video_width']) >= int(rule_videoWidth_min)
                and int(rule_videoHeight_max) >= int(video_dict['video_height']) >= int(rule_videoHeight_min))
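
    # A minimal sketch of the rule_dict shape download_rule expects, inferred
    # from the .get() calls above. Real values come from the scheduling DB;
    # the numbers below are illustrative only:
    #
    #     rule_dict = {
    #         "playCnt": {"min": 1000, "max": 0},   # max == 0 -> unbounded
    #         "duration": {"min": 40, "max": 300},  # seconds
    #         "period": {"min": 3},                 # published within N days
    #         "like": {"min": 0, "max": 0},
    #         "commentCnt": {"min": 0, "max": 0},
    #         "shareCnt": {"min": 0, "max": 0},
    #         "videoWidth": {"min": 0, "max": 0},
    #         "videoHeight": {"min": 0, "max": 0},
    #     }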

    # Fetch the recommended-feed video list
    @classmethod
    def get_videoList(cls, log_type, crawler, rule_dict, strategy, oss_endpoint, env):
        uid_token_dict = cls.get_uid_token()
        url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
        headers = {
            "x-b3-traceid": '1dc0a6d0929a2b',
            "X-Token-Id": 'ae99a4953804085ebb0ae36fa138031d-1146052582',
            "uid": uid_token_dict['uid'],
            "content-type": "application/json",
            "Accept-Encoding": "gzip,compress,br,deflate",
            "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
                          ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
                          'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
            "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/620/page-frame.html'
        }
        data = {
            "log_params": {
                "page": "discover_rec",
                "common": {
                    "brand": "iPhone",
                    "device": "iPhone 11",
                    "os": "iOS 14.7.1",
                    "weixinver": "8.0.20",
                    "srcver": "2.24.2",
                    "net": "wifi",
                    "scene": 1089
                }
            },
            "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!750x500r/crop/750x500/interlace/1/format/jpg",
            "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!80x80r/crop/80x80/interlace/1/format/jpg",
            "share_width": 625,
            "share_height": 500,
            "ext": {
                "fmid": 0,
                "items": {}
            },
            "app": "xng",
            "rec_scene": "discover_rec",
            "log_common_params": {
                "e": [{
                    "data": {
                        "page": "discoverIndexPage",
                        "topic": "recommend"
                    },
                    "ab": {}
                }],
                "ext": {
                    "brand": "iPhone",
                    "device": "iPhone 11",
                    "os": "iOS 14.7.1",
                    "weixinver": "8.0.20",
                    "srcver": "2.24.3",
                    "net": "wifi",
                    "scene": "1089"
                },
                "pj": "1",
                "pf": "2",
                "session_id": "7bcce313-b57d-4305-8d14-6ebd9a1bad29"
            },
            "refresh": False,
            # "token": cls.play_token,
            "token": uid_token_dict['token'],
            # "uid": cls.play_uid,
            "uid": uid_token_dict['uid'],
            "proj": "ma",
            "wx_ver": "8.0.20",
            "code_ver": "3.62.0"
        }
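        # The headers and payload above impersonate the xiaoniangao WeChat
        # mini-program client (MicroMessenger User-Agent, servicewechat.com
        # Referer); the device/version fields are static fingerprints, and
        # only uid/token vary between requests.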
        urllib3.disable_warnings()
        r = requests.post(url=url, headers=headers, json=data, proxies=proxies, verify=False)
        # Bail out early on any malformed or empty response
        if "data" not in r.text or r.status_code != 200:
            Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}")
            return
        elif "data" not in r.json():
            Common.logger(log_type, crawler).info(f"get_videoList:{r.json()}")
            return
        elif "list" not in r.json()["data"]:
            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']}")
            return
        elif len(r.json()["data"]["list"]) == 0:
            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']['list']}")
            return
        else:
            # Video list payload
            feeds = r.json()["data"]["list"]
            for feed in feeds:
                # Title: an emoji is randomly added at the head or tail, or
                # replaces punctuation inside the sentence
                xiaoniangao_title = feed.get("title", "").strip().replace("\n", "") \
                    .replace("/", "").replace("\r", "").replace("#", "") \
                    .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
                    .replace(":", "").replace("*", "").replace("?", "") \
                    .replace("?", "").replace('"', "").replace("<", "") \
                    .replace(">", "").replace("|", "").replace(" ", "") \
                    .replace('"', '').replace("'", '')
                # Pick one random emoji/symbol from config
                emoji = random.choice(get_config_from_mysql(log_type, crawler, env, "emoji"))
                # Final title: a random choice of [emoji+title, title+emoji]
                video_title = random.choice([f"{emoji}{xiaoniangao_title}", f"{xiaoniangao_title}{emoji}"])
                # Video ID
                video_id = feed.get("vid", "")
                # Play count
                play_cnt = feed.get("play_pv", 0)
                # Like count
                like_cnt = feed.get("favor", {}).get("total", 0)
                # Comment count
                comment_cnt = feed.get("comment_count", 0)
                # Share count
                share_cnt = feed.get("share", 0)
                # Duration, ms -> s
                duration = int(feed.get("du", 0) / 1000)
                # Width and height
                video_width = int(feed.get("w", 0))
                video_height = int(feed.get("h", 0))
                # Publish time, ms -> s
                publish_time_stamp = int(int(feed.get("t", 0)) / 1000)
                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                # User name / avatar
                user_name = feed.get("user", {}).get("nick", "").strip().replace("\n", "") \
                    .replace("/", "").replace("快手", "").replace(" ", "") \
                    .replace(" ", "").replace("&NBSP", "").replace("\r", "")
                avatar_url = feed.get("user", {}).get("hurl", "")
                # User ID
                profile_id = feed.get("id", "")
                # User mid
                profile_mid = feed.get("user", {}).get("mid", "")
                # Cover URL
                cover_url = feed.get("url", "")
                # Video playback URL
                video_url = feed.get("v_url", "")
                video_dict = {
                    "video_title": video_title,
                    "video_id": video_id,
                    "duration": duration,
                    "play_cnt": play_cnt,
                    "like_cnt": like_cnt,
                    "comment_cnt": comment_cnt,
                    "share_cnt": share_cnt,
                    "user_name": user_name,
                    "publish_time_stamp": publish_time_stamp,
                    "publish_time_str": publish_time_str,
                    "video_width": video_width,
                    "video_height": video_height,
                    "avatar_url": avatar_url,
                    "profile_id": profile_id,
                    "profile_mid": profile_mid,
                    "cover_url": cover_url,
                    "video_url": video_url,
                    "session": f"xiaoniangao-play-{int(time.time())}"
                }
                for k, v in video_dict.items():
                    Common.logger(log_type, crawler).info(f"{k}:{v}")
                # Drop invalid videos
                if video_title == "" or video_id == "" or video_url == "":
                    Common.logger(log_type, crawler).warning("Invalid video\n")
                # Basic crawl-rule filter
                elif cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
                    Common.logger(log_type, crawler).info("Video does not satisfy the crawl rules\n")
                elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
                    Common.logger(log_type, crawler).info('Video already downloaded\n')
                # Sensitive-word filter
                elif any(str(word) in video_title for word in get_config_from_mysql(log_type, crawler, env, "filter", action="")):
                    Common.logger(log_type, crawler).info("Video title hit a filter word\n")
                else:
                    cls.download_publish(log_type=log_type,
                                         crawler=crawler,
                                         video_dict=video_dict,
                                         rule_dict=rule_dict,
                                         strategy=strategy,
                                         oss_endpoint=oss_endpoint,
                                         env=env)

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        # Count existing rows for this platform video id in crawler_video
        sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
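
    # De-duplication gate used by get_videoList: any non-zero row count means
    # the video was already crawled. The SQL is built via f-string
    # interpolation, which holds up here only because vid values come from
    # the platform API; a parameterized query would be the safer choice if
    # MysqlHelper supports one (not verified).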

    @classmethod
    def download_publish(cls, log_type, crawler, video_dict, rule_dict, strategy, oss_endpoint, env):
        # Download the cover image
        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_dict["video_title"], url=video_dict["cover_url"])
        # Download the video file
        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
        # Save video info to "./videos/{download_video_title}/info.txt"
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
        # Upload the video
        Common.logger(log_type, crawler).info("Uploading video...")
        our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                  crawler=crawler,
                                                  strategy=strategy,
                                                  our_uid="play",
                                                  env=env,
                                                  oss_endpoint=oss_endpoint)
        Common.logger(log_type, crawler).info("Video upload finished")
        if our_video_id is None:
            # Upload failed: remove the local video folder and stop
            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
            return
        if env == "dev":
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        # Insert the video record. Plain interpolation is tolerable here only
        # because quotes were stripped from the title in get_videoList.
        insert_sql = f""" insert into crawler_video(video_id,
                        out_user_id,
                        platform,
                        strategy,
                        out_video_id,
                        video_title,
                        cover_url,
                        video_url,
                        duration,
                        publish_time,
                        play_cnt,
                        crawler_rule,
                        width,
                        height)
                        values({our_video_id},
                        "{video_dict['profile_id']}",
                        "{cls.platform}",
                        "播放量榜爬虫策略",
                        "{video_dict['video_id']}",
                        "{video_dict['video_title']}",
                        "{video_dict['cover_url']}",
                        "{video_dict['video_url']}",
                        {int(video_dict['duration'])},
                        "{video_dict['publish_time_str']}",
                        {int(video_dict['play_cnt'])},
                        '{json.dumps(rule_dict)}',
                        {int(video_dict['video_width'])},
                        {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
        Common.logger(log_type, crawler).info('Video record inserted into DB!')
        # Write the video record to Feishu
        Feishu.insert_columns(log_type, crawler, "c85k1C", "ROWS", 1, 2)
        # Video-ID sheet: write data into the first row
        upload_time = int(time.time())
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "播放量榜爬虫策略",
                   str(video_dict['video_id']),
                   str(video_dict['video_title']),
                   our_video_link,
                   video_dict['play_cnt'],
                   video_dict['comment_cnt'],
                   video_dict['like_cnt'],
                   video_dict['share_cnt'],
                   video_dict['duration'],
                   f"{video_dict['video_width']}*{video_dict['video_height']}",
                   str(video_dict['publish_time_str']),
                   str(video_dict['user_name']),
                   str(video_dict['profile_id']),
                   str(video_dict['profile_mid']),
                   str(video_dict['avatar_url']),
                   str(video_dict['cover_url']),
                   str(video_dict['video_url'])]]
        time.sleep(1)
        Feishu.update_values(log_type, crawler, "c85k1C", "F2:Z2", values)
        Common.logger(log_type, crawler).info('Video info written to Feishu\n')


if __name__ == '__main__':
    # rule_dict is a placeholder here; with an empty dict every threshold
    # falls back to its default (and period.min defaults to 0 days).
    XiaoniangaoPlayScheduling.get_videoList(log_type="play",
                                            crawler="xiaoniangao",
                                            rule_dict={},
                                            strategy="播放量榜爬虫策略",
                                            oss_endpoint="out",
                                            env="dev")