xiaoniangao_hour_scheduling.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/3/15
import datetime
import json
import os
import random
import shutil
import sys
import time
import requests
import urllib3
sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.publish import Publish
from common.scheduling_db import MysqlHelper
from common.public import get_config_from_mysql

proxies = {"http": None, "https": None}


class XiaoniangaoHourScheduling:
    platform = "小年糕"

    # Generate a random uid / token pair
    @classmethod
    def get_uid_token(cls):
        words = "abcdefghijklmnopqrstuvwxyz0123456789"
        uid = f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}"""
        token = "".join(random.sample(words, 32))
        uid_token_dict = {
            "uid": uid,
            "token": token
        }
        return uid_token_dict
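
    # Shape of the result (hedged illustration; actual values are random): a
    # UUID-style uid of 8-4-4-4-12 characters and a 32-character token, all drawn
    # from [a-z0-9] (random.sample draws without replacement within each segment):
    #   {"uid": "x3k9qzma-7bf2-c0d8-e5a1-ky4w6u08s2t1", "token": "..."}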

    # Baseline gating rules
    @staticmethod
    def download_rule(log_type, crawler, video_dict, rule_dict):
        """
        Baseline rules a video must satisfy before it is downloaded.
        :param log_type: log type
        :param crawler: which crawler
        :param video_dict: video info, dict
        :param rule_dict: rule info, dict
        :return: True if every rule is satisfied, otherwise False
        """
        rule_playCnt_min = rule_dict.get('playCnt', {}).get('min', 0)
        rule_playCnt_max = rule_dict.get('playCnt', {}).get('max', 100000000)
        if rule_playCnt_max == 0:
            rule_playCnt_max = 100000000

        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
        if rule_duration_max == 0:
            rule_duration_max = 100000000

        # period.min is used below as the maximum age of a video, in days
        rule_period_min = rule_dict.get('period', {}).get('min', 0)
        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
        # if rule_period_max == 0:
        #     rule_period_max = 100000000
        #
        # rule_fans_min = rule_dict.get('fans', {}).get('min', 0)
        # rule_fans_max = rule_dict.get('fans', {}).get('max', 100000000)
        # if rule_fans_max == 0:
        #     rule_fans_max = 100000000
        #
        # rule_videos_min = rule_dict.get('videos', {}).get('min', 0)
        # rule_videos_max = rule_dict.get('videos', {}).get('max', 100000000)
        # if rule_videos_max == 0:
        #     rule_videos_max = 100000000

        rule_like_min = rule_dict.get('like', {}).get('min', 0)
        rule_like_max = rule_dict.get('like', {}).get('max', 100000000)
        if rule_like_max == 0:
            rule_like_max = 100000000

        rule_videoWidth_min = rule_dict.get('videoWidth', {}).get('min', 0)
        rule_videoWidth_max = rule_dict.get('videoWidth', {}).get('max', 100000000)
        if rule_videoWidth_max == 0:
            rule_videoWidth_max = 100000000

        rule_videoHeight_min = rule_dict.get('videoHeight', {}).get('min', 0)
        rule_videoHeight_max = rule_dict.get('videoHeight', {}).get('max', 100000000)
        if rule_videoHeight_max == 0:
            rule_videoHeight_max = 100000000

        rule_shareCnt_min = rule_dict.get('shareCnt', {}).get('min', 0)
        rule_shareCnt_max = rule_dict.get('shareCnt', {}).get('max', 100000000)
        if rule_shareCnt_max == 0:
            rule_shareCnt_max = 100000000

        rule_commentCnt_min = rule_dict.get('commentCnt', {}).get('min', 0)
        rule_commentCnt_max = rule_dict.get('commentCnt', {}).get('max', 100000000)
        if rule_commentCnt_max == 0:
            rule_commentCnt_max = 100000000

        Common.logger(log_type, crawler).info(
            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_playCnt_max:{int(rule_playCnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_playCnt_min:{int(rule_playCnt_min)}')
        Common.logger(log_type, crawler).info(
            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_like_max:{int(rule_like_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_min:{int(rule_like_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_commentCnt_max:{int(rule_commentCnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_commentCnt_min:{int(rule_commentCnt_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_shareCnt_max:{int(rule_shareCnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_shareCnt_min:{int(rule_shareCnt_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_videoWidth_max:{int(rule_videoWidth_max)} >= video_width:{int(video_dict["video_width"])} >= rule_videoWidth_min:{int(rule_videoWidth_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_videoHeight_max:{int(rule_videoHeight_max)} >= video_height:{int(video_dict["video_height"])} >= rule_videoHeight_min:{int(rule_videoHeight_min)}')

        return (int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min)
                and int(rule_playCnt_max) >= int(video_dict['play_cnt']) >= int(rule_playCnt_min)
                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min)
                and int(rule_like_max) >= int(video_dict['like_cnt']) >= int(rule_like_min)
                and int(rule_commentCnt_max) >= int(video_dict['comment_cnt']) >= int(rule_commentCnt_min)
                and int(rule_shareCnt_max) >= int(video_dict['share_cnt']) >= int(rule_shareCnt_min)
                and int(rule_videoWidth_max) >= int(video_dict['video_width']) >= int(rule_videoWidth_min)
                and int(rule_videoHeight_max) >= int(video_dict['video_height']) >= int(rule_videoHeight_min))
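
    # Refactor sketch (illustration only; `_rule_range` is a hypothetical helper,
    # not used by download_rule above): the repeated min/max extraction collapses
    # into a single function.
    @staticmethod
    def _rule_range(rule_dict, key):
        rule = rule_dict.get(key, {})
        rule_min = rule.get('min', 0)
        # `or 100000000` keeps the convention above that max == 0 means "no upper bound"
        rule_max = rule.get('max', 100000000) or 100000000
        return rule_min, rule_max
    # Usage sketch: rule_like_min, rule_like_max = cls._rule_range(rule_dict, 'like')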

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    @classmethod
    def repeat_hour(cls, log_type, crawler, video_id, env):
        sql = f""" select * from crawler_xiaoniangao_hour where platform="小年糕" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
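
    # Hardening sketch (assumption: the project already depends on pymysql, whose
    # pymysql.converters.escape_string escapes quotes and backslashes). The f-string
    # interpolation above is open to SQL injection if out_video_id ever carries a
    # quote character; escaping the value before interpolation is the minimal fix.
    @staticmethod
    def _escape_sql_value(value):
        from pymysql.converters import escape_string  # assumed to be available
        return escape_string(str(value))
    # Usage sketch: sql = f""" ... out_video_id="{cls._escape_sql_value(video_id)}"; """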

    # Fetch the recommend-feed list
    @classmethod
    def get_videoList(cls, log_type, crawler, rule_dict, env):
        uid_token_dict = cls.get_uid_token()
        url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
        headers = {
            "x-b3-traceid": '1c403a4aa72e3c',
            "X-Token-Id": 'ab619e96d801f1567388629260aa68ec-1202200806',
            "uid": uid_token_dict['uid'],
            "content-type": "application/json",
            "Accept-Encoding": "gzip,compress,br,deflate",
            "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
                          ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
                          'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
            "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/624/page-frame.html'
        }
        data = {
            "log_params": {
                "page": "discover_rec",
                "common": {
                    "brand": "iPhone",
                    "device": "iPhone 11",
                    "os": "iOS 14.7.1",
                    "weixinver": "8.0.20",
                    "srcver": "2.24.2",
                    "net": "wifi",
                    "scene": 1089
                }
            },
            "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!750x500r/crop/750x500/interlace/1/format/jpg",
            "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!80x80r/crop/80x80/interlace/1/format/jpg",
            "share_width": 625,
            "share_height": 500,
            "ext": {
                "fmid": 0,
                "items": {}
            },
            "app": "xng",
            "rec_scene": "discover_rec",
            "log_common_params": {
                "e": [{
                    "data": {
                        "page": "discoverIndexPage",
                        "topic": "recommend"
                    },
                    "ab": {}
                }],
                "ext": {
                    "brand": "iPhone",
                    "device": "iPhone 11",
                    "os": "iOS 14.7.1",
                    "weixinver": "8.0.20",
                    "srcver": "2.24.3",
                    "net": "wifi",
                    "scene": "1089"
                },
                "pj": "1",
                "pf": "2",
                "session_id": "7bcce313-b57d-4305-8d14-6ebd9a1bad29"
            },
            "refresh": False,
            "token": uid_token_dict["token"],
            "uid": uid_token_dict["uid"],
            "proj": "ma",
            "wx_ver": "8.0.20",
            "code_ver": "3.62.0"
        }
        urllib3.disable_warnings()
        r = requests.post(url=url, headers=headers, json=data, proxies=proxies, verify=False)
        if r.status_code != 200 or 'data' not in r.text:
            Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
            return
        elif "data" not in r.json():
            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()}\n")
            return
        elif "list" not in r.json()["data"]:
            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']}\n")
            return
        elif len(r.json()['data']['list']) == 0:
            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']['list']}\n")
            return
        else:
            # Feed list
            feeds = r.json()["data"]["list"]
            for feed in feeds:
                # Title: strip newlines, slashes and other illegal characters; a random
                # emoji is then placed at the head or tail of the title
                xiaoniangao_title = feed.get("title", "").strip().replace("\n", "") \
                    .replace("/", "").replace("\r", "").replace("#", "") \
                    .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
                    .replace(":", "").replace("*", "").replace("?", "") \
                    .replace("?", "").replace('"', "").replace("<", "") \
                    .replace(">", "").replace("|", "").replace(" ", "") \
                    .replace('"', '').replace("'", '')
                # Pick one random emoji / symbol
                emoji = random.choice(get_config_from_mysql(log_type, crawler, env, "emoji"))
                # Final title: randomly either emoji+title or title+emoji
                video_title = random.choice([f"{emoji}{xiaoniangao_title}", f"{xiaoniangao_title}{emoji}"])
                # Video ID
                video_id = feed.get("vid", "")
                # Play count
                play_cnt = feed.get("play_pv", 0)
                # Like count
                like_cnt = feed.get("favor", {}).get("total", 0)
                # Comment count
                comment_cnt = feed.get("comment_count", 0)
                # Share count
                share_cnt = feed.get("share", 0)
                # Duration, ms -> s
                duration = int(feed.get("du", 0) / 1000)
                # Width and height
                video_width = int(feed.get("w", 0))
                video_height = int(feed.get("h", 0))
                # Publish time, ms -> s
                publish_time_stamp = int(int(feed.get("t", 0)) / 1000)
                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                # User name / avatar
                user_name = feed.get("user", {}).get("nick", "").strip().replace("\n", "") \
                    .replace("/", "").replace("快手", "").replace(" ", "") \
                    .replace(" ", "").replace("&NBSP", "").replace("\r", "")
                avatar_url = feed.get("user", {}).get("hurl", "")
                # User ID
                profile_id = feed["id"]
                # User mid
                profile_mid = feed["user"]["mid"]
                # Cover
                cover_url = feed.get("url", "")
                # Playback URL
                video_url = feed.get("v_url", "")
                video_dict = {
                    "video_title": video_title,
                    "video_id": video_id,
                    "duration": duration,
                    "play_cnt": play_cnt,
                    "like_cnt": like_cnt,
                    "comment_cnt": comment_cnt,
                    "share_cnt": share_cnt,
                    "user_name": user_name,
                    "publish_time_stamp": publish_time_stamp,
                    "publish_time_str": publish_time_str,
                    "video_width": video_width,
                    "video_height": video_height,
                    "avatar_url": avatar_url,
                    "profile_id": profile_id,
                    "profile_mid": profile_mid,
                    "cover_url": cover_url,
                    "video_url": video_url,
                    "session": f"xiaoniangao-hour-{int(time.time())}"
                }
                for k, v in video_dict.items():
                    Common.logger(log_type, crawler).info(f"{k}:{v}")
                # Drop invalid videos
                if video_title == "" or video_id == "" or video_url == "":
                    Common.logger(log_type, crawler).warning("无效视频\n")
                # Baseline crawl rules
                elif cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
                    Common.logger(log_type, crawler).info("不满足抓取规则\n")
                elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
                    Common.logger(log_type, crawler).info('视频已下载\n')
                # Filter words
                elif any(str(word) in video_title for word in get_config_from_mysql(log_type, crawler, env, "filter", action="")):
                    Common.logger(log_type, crawler).info("视频已中过滤词\n")
                else:
                    # Write the record into the hourly feeds table
                    insert_sql = f""" insert into crawler_xiaoniangao_hour(profile_id,
                                            profile_mid,
                                            platform,
                                            out_video_id,
                                            video_title,
                                            user_name,
                                            cover_url,
                                            video_url,
                                            duration,
                                            publish_time,
                                            play_cnt,
                                            crawler_time_stamp,
                                            crawler_time)
                                            values({profile_id},
                                            {profile_mid},
                                            "{cls.platform}",
                                            "{video_id}",
                                            "{video_title}",
                                            "{user_name}",
                                            "{cover_url}",
                                            "{video_url}",
                                            {duration},
                                            "{publish_time_str}",
                                            {play_cnt},
                                            {int(time.time())},
                                            "{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time())))}"
                                            )"""
                    Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
                    MysqlHelper.update_values(log_type, crawler, insert_sql, env)
                    Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
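
    # Sanitizer sketch (illustration only; `_clean_title` is a hypothetical name,
    # not wired into get_videoList above): the long .replace() chain compresses to
    # one regex pass plus the "." -> "。" mapping and the "&NBSP" removal.
    @staticmethod
    def _clean_title(title):
        import re
        title = title.strip().replace(".", "。").replace("&NBSP", "")
        # \s also covers \r, \n and the full-width space removed by the chain above
        return re.sub(r'[\s\\/#:*??"<>|\'"]', "", title)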

    @classmethod
    def get_video_info(cls, log_type, crawler, p_id, p_mid, v_title, v_id):
        uid_token_dict = cls.get_uid_token()
        url = "https://kapi.xiaoniangao.cn/profile/get_profile_by_id"
        headers = {
            "x-b3-traceid": '1c403a4aa72e3c',
            "X-Token-Id": 'ab619e96d801f1567388629260aa68ec-1202200806',
            "uid": uid_token_dict['uid'],
            "content-type": "application/json",
            "Accept-Encoding": "gzip,compress,br,deflate",
            "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
                          ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
                          'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
            "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/624/page-frame.html'
        }
        data = {
            "play_src": "1",
            "profile_id": int(p_id),
            "profile_mid": int(p_mid),
            "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/"
                  "!400x400r/crop/400x400/interlace/1/format/jpg",
            "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail"
                    "/!80x80r/crop/80x80/interlace/1/format/jpg",
            "share_width": 625,
            "share_height": 500,
            "no_comments": True,
            "no_follow": True,
            "vid": v_id,
            "hot_l1_comment": True,
            "token": uid_token_dict['token'],
            "uid": uid_token_dict['uid'],
            "proj": "ma",
            "wx_ver": "8.0.20",
            "code_ver": "3.62.0",
            "log_common_params": {
                "e": [{
                    "data": {
                        "page": "dynamicSharePage"
                    }
                }],
                "ext": {
                    "brand": "iPhone",
                    "device": "iPhone 11",
                    "os": "iOS 14.7.1",
                    "weixinver": "8.0.20",
                    "srcver": "2.24.3",
                    "net": "wifi",
                    "scene": "1089"
                },
                "pj": "1",
                "pf": "2",
                "session_id": "7bcce313-b57d-4305-8d14-6ebd9a1bad29"
            }
        }
        urllib3.disable_warnings()
        r = requests.post(headers=headers, url=url, json=data, proxies=proxies, verify=False)
        if r.status_code != 200 or 'data' not in r.text:
            # NOTE: falls through and implicitly returns None; callers must guard against that
            Common.logger(log_type, crawler).warning(f"get_videoInfo:{r.text}\n")
        else:
            info = r.json()["data"]  # parse the body once instead of re-parsing per field
            hour_play_cnt = info["play_pv"]
            hour_cover_url = info["url"]
            hour_video_url = info["v_url"]
            hour_video_duration = info["du"]
            hour_video_comment_cnt = info["comment_count"]
            hour_video_like_cnt = info["favor"]["total"]
            hour_video_share_cnt = info["share"]
            hour_video_width = info["w"]
            hour_video_height = info["h"]
            hour_video_send_time = info["t"]
            publish_time_stamp = int(int(hour_video_send_time) / 1000)
            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
            hour_user_name = info["user"]["nick"]
            hour_head_url = info["user"]["hurl"]
            video_info_dict = {
                "video_id": v_id,
                "video_title": v_title,
                "duration": hour_video_duration,
                "play_cnt": hour_play_cnt,
                "like_cnt": hour_video_like_cnt,
                "comment_cnt": hour_video_comment_cnt,
                "share_cnt": hour_video_share_cnt,
                "user_name": hour_user_name,
                "publish_time_stamp": publish_time_stamp,
                "publish_time_str": publish_time_str,
                "video_width": hour_video_width,
                "video_height": hour_video_height,
                "avatar_url": hour_head_url,
                "profile_id": p_id,
                "profile_mid": p_mid,
                "cover_url": hour_cover_url,
                "video_url": hour_video_url,
                "session": f"xiaoniangao-hour-{int(time.time())}"
            }
            return video_info_dict

    # Update the hourly-ranking data
    @classmethod
    def update_videoList(cls, log_type, crawler, rule_dict, strategy, oss_endpoint, env):
        """
        Re-fetch play counts for videos crawled within the last three days and
        record them in the 10:00 / 15:00 / 20:00 columns of the hourly table.
        """
        three_days_ago = (datetime.date.today() + datetime.timedelta(days=-3)).strftime("%Y-%m-%d %H:%M:%S")
        update_time_stamp = int(time.mktime(time.strptime(three_days_ago, "%Y-%m-%d %H:%M:%S")))
        select_sql = f""" select * from crawler_xiaoniangao_hour where crawler_time_stamp >= {update_time_stamp} GROUP BY out_video_id """
        update_video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env)
        if len(update_video_list) == 0:
            Common.logger(log_type, crawler).info("暂无需要更新的小时榜数据\n")
            return
        for update_video_info in update_video_list:
            profile_id = update_video_info["profile_id"]
            profile_mid = update_video_info["profile_mid"]
            video_title = update_video_info["video_title"]
            video_id = update_video_info["out_video_id"]
            now = datetime.datetime.now()
            # Only refresh within the first ten minutes after 10:00 / 15:00 / 20:00
            if now.hour == 10 and now.minute <= 10:
                video_info_dict = cls.get_video_info(log_type=log_type,
                                                     crawler=crawler,
                                                     p_id=profile_id,
                                                     p_mid=profile_mid,
                                                     v_title=video_title,
                                                     v_id=video_id)
                ten_play_cnt = video_info_dict['play_cnt']
                Common.logger(log_type, crawler).info(f"ten_play_cnt:{ten_play_cnt}")
                update_sql = f""" update crawler_xiaoniangao_hour set ten_play_cnt={ten_play_cnt} WHERE out_video_id="{video_id}"; """
                # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
                MysqlHelper.update_values(log_type, crawler, update_sql, env)
                cls.download_publish(log_type=log_type,
                                     crawler=crawler,
                                     video_info_dict=video_info_dict,
                                     rule_dict=rule_dict,
                                     update_video_info=update_video_info,
                                     strategy=strategy,
                                     oss_endpoint=oss_endpoint,
                                     env=env)
            elif now.hour == 15 and now.minute <= 10:
                video_info_dict = cls.get_video_info(log_type=log_type,
                                                     crawler=crawler,
                                                     p_id=profile_id,
                                                     p_mid=profile_mid,
                                                     v_title=video_title,
                                                     v_id=video_id)
                fifteen_play_cnt = video_info_dict['play_cnt']
                Common.logger(log_type, crawler).info(f"fifteen_play_cnt:{fifteen_play_cnt}")
                update_sql = f""" update crawler_xiaoniangao_hour set fifteen_play_cnt={fifteen_play_cnt} WHERE out_video_id="{video_id}"; """
                # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
                MysqlHelper.update_values(log_type, crawler, update_sql, env)
                cls.download_publish(log_type=log_type,
                                     crawler=crawler,
                                     video_info_dict=video_info_dict,
                                     rule_dict=rule_dict,
                                     update_video_info=update_video_info,
                                     strategy=strategy,
                                     oss_endpoint=oss_endpoint,
                                     env=env)
            elif now.hour == 20 and now.minute <= 10:
                video_info_dict = cls.get_video_info(log_type=log_type,
                                                     crawler=crawler,
                                                     p_id=profile_id,
                                                     p_mid=profile_mid,
                                                     v_title=video_title,
                                                     v_id=video_id)
                twenty_play_cnt = video_info_dict['play_cnt']
                Common.logger(log_type, crawler).info(f"twenty_play_cnt:{twenty_play_cnt}")
                update_sql = f""" update crawler_xiaoniangao_hour set twenty_play_cnt={twenty_play_cnt} WHERE out_video_id="{video_id}"; """
                # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
                MysqlHelper.update_values(log_type, crawler, update_sql, env)
                cls.download_publish(log_type=log_type,
                                     crawler=crawler,
                                     video_info_dict=video_info_dict,
                                     rule_dict=rule_dict,
                                     update_video_info=update_video_info,
                                     strategy=strategy,
                                     oss_endpoint=oss_endpoint,
                                     env=env)
            else:
                pass
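
    # Refactor sketch (illustration only; `HOUR_COLUMNS` is a hypothetical name):
    # the three branches above differ only in the hour and the column they write,
    # so a mapping would collapse them:
    #     HOUR_COLUMNS = {10: "ten_play_cnt", 15: "fifteen_play_cnt", 20: "twenty_play_cnt"}
    #     column = HOUR_COLUMNS.get(now.hour)
    #     if column is not None and now.minute <= 10:
    #         video_info_dict = cls.get_video_info(log_type=log_type, crawler=crawler, p_id=profile_id,
    #                                              p_mid=profile_mid, v_title=video_title, v_id=video_id)
    #         update_sql = f""" update crawler_xiaoniangao_hour set {column}={video_info_dict['play_cnt']} WHERE out_video_id="{video_id}"; """
    #         MysqlHelper.update_values(log_type, crawler, update_sql, env)
    #         cls.download_publish(log_type=log_type, crawler=crawler, video_info_dict=video_info_dict,
    #                              rule_dict=rule_dict, update_video_info=update_video_info,
    #                              strategy=strategy, oss_endpoint=oss_endpoint, env=env)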

    @classmethod
    def download(cls, log_type, crawler, video_info_dict, rule_dict, strategy, oss_endpoint, env):
        # Download the cover
        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_info_dict["video_title"],
                               url=video_info_dict["cover_url"])
        # Download the video
        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_info_dict["video_title"],
                               url=video_info_dict["video_url"])
        # Save the video info to "./videos/{download_video_title}/info.txt"
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_info_dict)

        # Upload the video
        Common.logger(log_type, crawler).info("开始上传视频...")
        our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                  crawler=crawler,
                                                  strategy=strategy,
                                                  our_uid="hour",
                                                  env=env,
                                                  oss_endpoint=oss_endpoint)
        if our_video_id is None:
            # Upload failed: clean up the local video folder and stop here
            shutil.rmtree(f"./{crawler}/videos/{video_info_dict['video_title']}")
            return
        if env == "dev":
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        Common.logger(log_type, crawler).info("视频上传完成")

        # # Persist the video info to the database
        # rule_dict = {
        #     "duration": {"min": 40},
        #     "play_cnt": {"min": 4000},
        #     "publish_day": {"min": 10}
        # }
        insert_sql = f""" insert into crawler_video(video_id,
                                out_user_id,
                                platform,
                                strategy,
                                out_video_id,
                                video_title,
                                cover_url,
                                video_url,
                                duration,
                                publish_time,
                                play_cnt,
                                crawler_rule,
                                width,
                                height)
                                values({our_video_id},
                                "{video_info_dict['profile_id']}",
                                "{cls.platform}",
                                "小时榜爬虫策略",
                                "{video_info_dict['video_id']}",
                                "{video_info_dict['video_title']}",
                                "{video_info_dict['cover_url']}",
                                "{video_info_dict['video_url']}",
                                {int(video_info_dict['duration'])},
                                "{video_info_dict['publish_time_str']}",
                                {int(video_info_dict['play_cnt'])},
                                '{json.dumps(rule_dict)}',
                                {int(video_info_dict['video_width'])},
                                {int(video_info_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')

        # Write the video row to Feishu
        Feishu.insert_columns(log_type, crawler, "yatRv2", "ROWS", 1, 2)
        # Video-ID sheet: write the data into the first row
        upload_time = int(time.time())
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "小时级上升榜",
                   str(video_info_dict['video_id']),
                   str(video_info_dict['video_title']),
                   our_video_link,
                   video_info_dict['play_cnt'],
                   video_info_dict['comment_cnt'],
                   video_info_dict['like_cnt'],
                   video_info_dict['share_cnt'],
                   video_info_dict['duration'],
                   f"{video_info_dict['video_width']}*{video_info_dict['video_height']}",
                   str(video_info_dict['publish_time_str'].replace("-", "/")),
                   str(video_info_dict['user_name']),
                   str(video_info_dict['profile_id']),
                   str(video_info_dict['profile_mid']),
                   str(video_info_dict['avatar_url']),
                   str(video_info_dict['cover_url']),
                   str(video_info_dict['video_url'])]]
        time.sleep(1)
        Feishu.update_values(log_type, crawler, "yatRv2", "F2:Z2", values)
        Common.logger(log_type, crawler).info('视频信息写入飞书成功\n')

    # Download / upload
    @classmethod
    def download_publish(cls, log_type, crawler, video_info_dict, rule_dict, update_video_info, strategy, oss_endpoint, env):
        if cls.repeat_video(log_type, crawler, video_info_dict["video_id"], env) != 0:
            Common.logger(log_type, crawler).info('视频已下载\n')
        # Play count >= 50000: download straight away
        elif int(video_info_dict["play_cnt"]) >= 50000:
            Common.logger(log_type, crawler).info(
                f"播放量:{video_info_dict['play_cnt']} >= 50000,满足下载规则,开始下载视频")
            cls.download(log_type=log_type,
                         crawler=crawler,
                         video_info_dict=video_info_dict,
                         rule_dict=rule_dict,
                         strategy=strategy,
                         oss_endpoint=oss_endpoint,
                         env=env)
        # Rising-rank rule: any single window gained >= 5000, or any two of the
        # three windows each gained >= 2000
        elif int(update_video_info['ten_play_cnt']) >= 5000 or int(
                update_video_info['fifteen_play_cnt']) >= 5000 or int(update_video_info['twenty_play_cnt']) >= 5000:
            Common.logger(log_type, crawler).info(
                f"10:00 or 15:00 or 20:00 数据上升量:{int(update_video_info['ten_play_cnt'])} or {int(update_video_info['fifteen_play_cnt'])} or {int(update_video_info['twenty_play_cnt'])} >= 5000")
            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
            cls.download(log_type=log_type,
                         crawler=crawler,
                         video_info_dict=video_info_dict,
                         rule_dict=rule_dict,
                         strategy=strategy,
                         oss_endpoint=oss_endpoint,
                         env=env)
        elif int(update_video_info['ten_play_cnt']) >= 2000 and int(update_video_info['fifteen_play_cnt']) >= 2000:
            Common.logger(log_type, crawler).info(
                f"10:00 and 15:00 数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['fifteen_play_cnt'])} >= 2000")
            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
            cls.download(log_type=log_type,
                         crawler=crawler,
                         video_info_dict=video_info_dict,
                         rule_dict=rule_dict,
                         strategy=strategy,
                         oss_endpoint=oss_endpoint,
                         env=env)
        elif int(update_video_info['fifteen_play_cnt']) >= 2000 and int(update_video_info['twenty_play_cnt']) >= 2000:
            Common.logger(log_type, crawler).info(
                f"15:00 and 20:00 数据上升量:{int(update_video_info['fifteen_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 2000")
            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
            cls.download(log_type=log_type,
                         crawler=crawler,
                         video_info_dict=video_info_dict,
                         rule_dict=rule_dict,
                         strategy=strategy,
                         oss_endpoint=oss_endpoint,
                         env=env)
        elif int(update_video_info['ten_play_cnt']) >= 2000 and int(update_video_info['twenty_play_cnt']) >= 2000:
            Common.logger(log_type, crawler).info(
                f"今日10:00 / 20:00数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 2000")
            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
            cls.download(log_type=log_type,
                         crawler=crawler,
                         video_info_dict=video_info_dict,
                         rule_dict=rule_dict,
                         strategy=strategy,
                         oss_endpoint=oss_endpoint,
                         env=env)
        else:
            Common.logger(log_type, crawler).info("上升量不满足下载规则")
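
    # Equivalence sketch (illustration only; not called by download_publish above):
    # the rising-rank branch cascade amounts to "some window gained >= 5000, or at
    # least two of the three windows each gained >= 2000".
    @staticmethod
    def _rising_rule_holds(ten_play_cnt, fifteen_play_cnt, twenty_play_cnt):
        windows = [int(ten_play_cnt), int(fifteen_play_cnt), int(twenty_play_cnt)]
        # any(...) covers the single >= 5000 branches; sum(...) counts how many
        # windows reached 2000, which subsumes all three pairwise branches
        return any(w >= 5000 for w in windows) or sum(w >= 2000 for w in windows) >= 2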


if __name__ == "__main__":
    print(get_config_from_mysql(log_type='hour', source='xiaoniangao', env='dev', text='filter'))
    # print(XiaoniangaoHourScheduling.get_uid_token())
    # XiaoniangaoHourScheduling.get_videoList("test", "xiaoniangao", "dev")
    # XiaoniangaoHourScheduling.update_videoList("test", "xiaoniangao", "小时榜爬虫策略", "out", "dev")
    pass