xiaoniangao_hour.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/3/15
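"""
Xiaoniangao (小年糕) hourly-ranking crawler.

Flow: fetch the recommend feed, store qualifying videos in the
crawler_xiaoniangao_hour MySQL table, re-check play counts at 10:00,
15:00 and 20:00, and download/publish videos whose play-count rise
meets the ranking rules.
"""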
import datetime
import json
import os
import random
import shutil
import sys
import time

import requests
import urllib3

sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.publish import Publish
from common.scheduling_db import MysqlHelper

proxies = {"http": None, "https": None}


class XiaoniangaoHour:
    platform = "小年糕"

    # Generate a uid / token pair
    @classmethod
    def get_uid_token(cls):
        words = "abcdefghijklmnopqrstuvwxyz0123456789"
        uid = f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}"""
        token = "".join(random.sample(words, 32))
        uid_token_dict = {
            "uid": uid,
            "token": token
        }
        return uid_token_dict
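
    # Illustrative output shape (values are random on every call):
    #   {"uid": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", "token": "<32 chars>"}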

    # Filter sensitive words
    @classmethod
    def filter_words(cls, log_type, crawler):
        try:
            while True:
                # Sensitive-word list
                word_list = []
                # Read all sensitive words from the Feishu sheet into the list
                filter_sheet = Feishu.get_values_batch(log_type, "xiaoniangao", "DRAnZh")
                if filter_sheet is None:
                    Common.logger(log_type, crawler).info(f"filter_sheet:{filter_sheet}")
                    continue
                for row in filter_sheet:
                    for cell in row:
                        # Skip empty cells
                        if cell is not None:
                            word_list.append(cell)
                return word_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f"filter_words:{e}\n")

    # Basic threshold rules
    @staticmethod
    def download_rule(video_dict):
        """
        Basic rules a video must satisfy before it is downloaded
        :param video_dict: video info, as a dict
        :return: True if every rule is satisfied, otherwise False
        """
        return (
            # Duration >= 40 seconds
            int(float(video_dict["duration"])) >= 40
            # Width or height >= 0
            and (int(video_dict["video_width"]) >= 0 or int(video_dict["video_height"]) >= 0)
            # Play count >= 4000
            and int(video_dict["play_cnt"]) >= 4000
            # Like count >= 0
            and int(video_dict["like_cnt"]) >= 0
            # Share count >= 0
            and int(video_dict["share_cnt"]) >= 0
            # Published within the last 10 days
            and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * 10
        )
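
    # Example with illustrative values: a 60-second video with 5,000 plays
    # published yesterday passes every check; a 30-second video fails on
    # duration alone.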

    # Fetch emoji expressions and punctuation characters
    @classmethod
    def get_expression(cls):
        while True:
            expression_list = []
            char_list = []
            char_sheet = Feishu.get_values_batch("hour", "xiaoniangao", "BhlbST")
            if char_sheet is None:
                continue
            for i in range(len(char_sheet)):
                if char_sheet[i][0] is not None:
                    expression_list.append(char_sheet[i][0])
                if char_sheet[i][1] is not None:
                    char_list.append(char_sheet[i][1])
            return expression_list, char_list

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    @classmethod
    def repeat_hour(cls, log_type, crawler, video_id, env):
        sql = f""" select * from crawler_xiaoniangao_hour where platform="小年糕" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
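
    # Both helpers return the number of matching rows, so a non-zero result
    # means the video is already recorded in the corresponding table.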

    # Fetch the recommend-feed video list
    @classmethod
    def get_videoList(cls, log_type, crawler, env):
        # try:
        uid_token_dict = cls.get_uid_token()
        url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
        headers = {
            # "x-b3-traceid": cls.hour_x_b3_traceid,
            "x-b3-traceid": '1c403a4aa72e3c',
            # "X-Token-Id": cls.hour_x_token_id,
            "X-Token-Id": 'ab619e96d801f1567388629260aa68ec-1202200806',
            # "uid": cls.hour_uid,
            "uid": uid_token_dict['uid'],
            "content-type": "application/json",
            "Accept-Encoding": "gzip,compress,br,deflate",
            "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
                          ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
                          'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
            # "Referer": cls.hour_referer
            "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/624/page-frame.html'
        }
        data = {
            "log_params": {
                "page": "discover_rec",
                "common": {
                    "brand": "iPhone",
                    "device": "iPhone 11",
                    "os": "iOS 14.7.1",
                    "weixinver": "8.0.20",
                    "srcver": "2.24.2",
                    "net": "wifi",
                    "scene": 1089
                }
            },
            "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!750x500r/crop/750x500/interlace/1/format/jpg",
            "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!80x80r/crop/80x80/interlace/1/format/jpg",
            "share_width": 625,
            "share_height": 500,
            "ext": {
                "fmid": 0,
                "items": {}
            },
            "app": "xng",
            "rec_scene": "discover_rec",
            "log_common_params": {
                "e": [{
                    "data": {
                        "page": "discoverIndexPage",
                        "topic": "recommend"
                    },
                    "ab": {}
                }],
                "ext": {
                    "brand": "iPhone",
                    "device": "iPhone 11",
                    "os": "iOS 14.7.1",
                    "weixinver": "8.0.20",
                    "srcver": "2.24.3",
                    "net": "wifi",
                    "scene": "1089"
                },
                "pj": "1",
                "pf": "2",
                "session_id": "7bcce313-b57d-4305-8d14-6ebd9a1bad29"
            },
            "refresh": False,
            "token": uid_token_dict["token"],
            "uid": uid_token_dict["uid"],
            "proj": "ma",
            "wx_ver": "8.0.20",
            "code_ver": "3.62.0"
        }
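        # The headers and payload above imitate the Xiaoniangao WeChat
        # mini-program client (iPhone User-Agent, weixinver, scene 1089); the
        # uid/token pair is the randomly generated one from get_uid_token().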
        urllib3.disable_warnings()
        r = requests.post(url=url, headers=headers, json=data, proxies=proxies, verify=False)
        if 'data' not in r.text or r.status_code != 200:
            Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
        elif "data" not in r.json():
            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()}\n")
        elif "list" not in r.json()["data"]:
            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']}\n")
        elif len(r.json()['data']['list']) == 0:
            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']['list']}\n")
        else:
            # Video list payload
            feeds = r.json()["data"]["list"]
            for i in range(len(feeds)):
                # Title: an emoji is randomly added at the head or tail, or a
                # punctuation mark stands in for the one stripped from the text
                if "title" in feeds[i]:
                    before_video_title = feeds[i]["title"].strip().replace("\n", "") \
                        .replace("/", "").replace("\r", "").replace("#", "") \
                        .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
                        .replace(":", "").replace("*", "").replace("?", "") \
                        .replace("?", "").replace('"', "").replace("<", "") \
                        .replace(">", "").replace("|", "").replace(" ", "").replace("#表情", "").replace("#符号", "")
                    expression = cls.get_expression()
                    expression_list = expression[0]
                    char_list = expression[1]
                    # Pick one emoji at random
                    expression = random.choice(expression_list)
                    # Candidate list: [emoji + title, title + emoji]
                    expression_title_list = [expression + before_video_title, before_video_title + expression]
                    # Pick one emoji variant at random
                    title_list1 = random.choice(expression_title_list)
                    # Build the punctuation variant: original title + mark
                    title_list2 = before_video_title + random.choice(char_list)
                    # Pool the emoji variant and the punctuation variant
                    title_list4 = [title_list2, title_list1]
                    # Final title
                    video_title = random.choice(title_list4)
                else:
                    video_title = 0
                # Video ID
                if "vid" in feeds[i]:
                    video_id = feeds[i]["vid"]
                else:
                    video_id = 0
                # Play count
                if "play_pv" in feeds[i]:
                    video_play_cnt = feeds[i]["play_pv"]
                else:
                    video_play_cnt = 0
                # Like count
                if "favor" in feeds[i]:
                    video_like_cnt = feeds[i]["favor"]["total"]
                else:
                    video_like_cnt = 0
                # Comment count
                if "comment_count" in feeds[i]:
                    video_comment_cnt = feeds[i]["comment_count"]
                else:
                    video_comment_cnt = 0
                # Share count
                if "share" in feeds[i]:
                    video_share_cnt = feeds[i]["share"]
                else:
                    video_share_cnt = 0
                # Duration (ms -> s)
                if "du" in feeds[i]:
                    video_duration = int(feeds[i]["du"] / 1000)
                else:
                    video_duration = 0
                # Width and height
                if "w" in feeds[i] and "h" in feeds[i]:
                    video_width = feeds[i]["w"]
                    video_height = feeds[i]["h"]
                else:
                    video_width = 0
                    video_height = 0
                # Publish time
                if "t" in feeds[i]:
                    video_send_time = feeds[i]["t"]
                else:
                    video_send_time = 0
                publish_time_stamp = int(int(video_send_time) / 1000)
                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                # User name / avatar
                if "user" in feeds[i]:
                    user_name = feeds[i]["user"]["nick"].strip().replace("\n", "") \
                        .replace("/", "").replace("快手", "").replace(" ", "") \
                        .replace(" ", "").replace("&NBSP", "").replace("\r", "")
                    head_url = feeds[i]["user"]["hurl"]
                else:
                    user_name = 0
                    head_url = 0
                # User ID
                profile_id = feeds[i]["id"]
                # User mid
                profile_mid = feeds[i]["user"]["mid"]
                # Cover URL
                if "url" in feeds[i]:
                    cover_url = feeds[i]["url"]
                else:
                    cover_url = 0
                # Video playback URL
                if "v_url" in feeds[i]:
                    video_url = feeds[i]["v_url"]
                else:
                    video_url = 0
                video_dict = {
                    "video_title": video_title,
                    "video_id": video_id,
                    "duration": video_duration,
                    "play_cnt": video_play_cnt,
                    "like_cnt": video_like_cnt,
                    "comment_cnt": video_comment_cnt,
                    "share_cnt": video_share_cnt,
                    "user_name": user_name,
                    "publish_time_stamp": publish_time_stamp,
                    "publish_time_str": publish_time_str,
                    "video_width": video_width,
                    "video_height": video_height,
                    "avatar_url": head_url,
                    "profile_id": profile_id,
                    "profile_mid": profile_mid,
                    "cover_url": cover_url,
                    "video_url": video_url,
                    "session": f"xiaoniangao-hour-{int(time.time())}"
                }
                for k, v in video_dict.items():
                    Common.logger(log_type, crawler).info(f"{k}:{v}")
                # Drop invalid videos (any required field missing)
                if video_title == 0 or video_id == 0 or video_duration == 0 \
                        or video_send_time == 0 or user_name == 0 or head_url == 0 \
                        or cover_url == 0 or video_url == 0:
                    Common.logger(log_type, crawler).warning("无效视频\n")
                # Basic threshold rules
                elif cls.download_rule(video_dict) is False:
                    Common.logger(log_type, crawler).info("不满足基础门槛规则\n")
                elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
                    Common.logger(log_type, crawler).info('视频已下载\n')
                # Sensitive-word filter
                elif any(str(word) in video_title for word in cls.filter_words(log_type, crawler)):
                    Common.logger(log_type, crawler).info("视频已中过滤词\n")
                    time.sleep(1)
                else:
                    # Write the record into the crawler_xiaoniangao_hour table
                    insert_sql = f""" insert into crawler_xiaoniangao_hour(profile_id,
                                    profile_mid,
                                    platform,
                                    out_video_id,
                                    video_title,
                                    user_name,
                                    cover_url,
                                    video_url,
                                    duration,
                                    publish_time,
                                    play_cnt,
                                    crawler_time_stamp,
                                    crawler_time)
                                    values({profile_id},
                                    {profile_mid},
                                    "{cls.platform}",
                                    "{video_id}",
                                    "{video_title}",
                                    "{user_name}",
                                    "{cover_url}",
                                    "{video_url}",
                                    {video_duration},
                                    "{publish_time_str}",
                                    {video_play_cnt},
                                    {int(time.time())},
                                    "{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time())))}"
                                    )"""
                    Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
                    MysqlHelper.update_values(log_type, crawler, insert_sql, env)
                    Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
        # except Exception as e:
        #     Common.logger(log_type, crawler).error(f"get_videoList:{e}\n")

    @classmethod
    def get_video_info(cls, log_type, crawler, p_id, p_mid, v_title, v_id):
        try:
            uid_token_dict = cls.get_uid_token()
            url = "https://kapi.xiaoniangao.cn/profile/get_profile_by_id"
            headers = {
                # "x-b3-traceid": cls.hour_x_b3_traceid,
                "x-b3-traceid": '1c403a4aa72e3c',
                # "X-Token-Id": cls.hour_x_token_id,
                "X-Token-Id": 'ab619e96d801f1567388629260aa68ec-1202200806',
                "uid": uid_token_dict['uid'],
                "content-type": "application/json",
                "Accept-Encoding": "gzip,compress,br,deflate",
                "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
                              ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
                              'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
                # "Referer": cls.hour_referer
                "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/624/page-frame.html'
            }
            data = {
                "play_src": "1",
                "profile_id": int(p_id),
                "profile_mid": int(p_mid),
                "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/"
                      "!400x400r/crop/400x400/interlace/1/format/jpg",
                "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail"
                        "/!80x80r/crop/80x80/interlace/1/format/jpg",
                "share_width": 625,
                "share_height": 500,
                "no_comments": True,
                "no_follow": True,
                "vid": v_id,
                "hot_l1_comment": True,
                # "token": cls.hour_token,
                "token": uid_token_dict['token'],
                # "uid": cls.hour_uid,
                "uid": uid_token_dict['uid'],
                "proj": "ma",
                "wx_ver": "8.0.20",
                "code_ver": "3.62.0",
                "log_common_params": {
                    "e": [{
                        "data": {
                            "page": "dynamicSharePage"
                        }
                    }],
                    "ext": {
                        "brand": "iPhone",
                        "device": "iPhone 11",
                        "os": "iOS 14.7.1",
                        "weixinver": "8.0.20",
                        "srcver": "2.24.3",
                        "net": "wifi",
                        "scene": "1089"
                    },
                    "pj": "1",
                    "pf": "2",
                    "session_id": "7bcce313-b57d-4305-8d14-6ebd9a1bad29"
                }
            }
            urllib3.disable_warnings()
            r = requests.post(headers=headers, url=url, json=data, proxies=proxies, verify=False)
            if r.status_code != 200 or 'data' not in r.text:
                Common.logger(log_type, crawler).warning(f"get_videoInfo:{r.text}\n")
            else:
                hour_play_cnt = r.json()["data"]["play_pv"]
                hour_cover_url = r.json()["data"]["url"]
                hour_video_url = r.json()["data"]["v_url"]
                hour_video_duration = r.json()["data"]["du"]
                hour_video_comment_cnt = r.json()["data"]["comment_count"]
                hour_video_like_cnt = r.json()["data"]["favor"]["total"]
                hour_video_share_cnt = r.json()["data"]["share"]
                hour_video_width = r.json()["data"]["w"]
                hour_video_height = r.json()["data"]["h"]
                hour_video_send_time = r.json()["data"]["t"]
                publish_time_stamp = int(int(hour_video_send_time) / 1000)
                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                hour_user_name = r.json()["data"]["user"]["nick"]
                hour_head_url = r.json()["data"]["user"]["hurl"]
                video_info_dict = {
                    "video_id": v_id,
                    "video_title": v_title,
                    "duration": hour_video_duration,
                    "play_cnt": hour_play_cnt,
                    "like_cnt": hour_video_like_cnt,
                    "comment_cnt": hour_video_comment_cnt,
                    "share_cnt": hour_video_share_cnt,
                    "user_name": hour_user_name,
                    "publish_time_stamp": publish_time_stamp,
                    "publish_time_str": publish_time_str,
                    "video_width": hour_video_width,
                    "video_height": hour_video_height,
                    "avatar_url": hour_head_url,
                    "profile_id": p_id,
                    "profile_mid": p_mid,
                    "cover_url": hour_cover_url,
                    "video_url": hour_video_url,
                    "session": f"xiaoniangao-hour-{int(time.time())}"
                }
                return video_info_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_video_info:{e}\n")
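
    # The returned video_info_dict is a fresh snapshot of the video's stats;
    # update_videoList stores its play_cnt into that hour's snapshot column.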

    # Update the hourly-ranking data
    @classmethod
    def update_videoList(cls, log_type, crawler, strategy, oss_endpoint, env):
        """
        Update the hourly-ranking data
        """
        # Only refresh records crawled within the last three days
        three_days_ago = (datetime.date.today() + datetime.timedelta(days=-3)).strftime("%Y-%m-%d %H:%M:%S")
        update_time_stamp = int(time.mktime(time.strptime(three_days_ago, "%Y-%m-%d %H:%M:%S")))
        select_sql = f""" select * from crawler_xiaoniangao_hour where crawler_time_stamp >= {update_time_stamp} """
        update_video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env)
        if len(update_video_list) == 0:
            Common.logger(log_type, crawler).info("暂无需要更新的小时榜数据\n")
            return
        for update_video_info in update_video_list:
            profile_id = update_video_info["profile_id"]
            profile_mid = update_video_info["profile_mid"]
            video_title = update_video_info["video_title"]
            video_id = update_video_info["out_video_id"]
            if datetime.datetime.now().hour == 10 and datetime.datetime.now().minute <= 10:
                video_info_dict = cls.get_video_info(log_type=log_type,
                                                     crawler=crawler,
                                                     p_id=profile_id,
                                                     p_mid=profile_mid,
                                                     v_title=video_title,
                                                     v_id=video_id)
                ten_play_cnt = video_info_dict['play_cnt']
                Common.logger(log_type, crawler).info(f"ten_play_cnt:{ten_play_cnt}")
                update_sql = f""" update crawler_xiaoniangao_hour set ten_play_cnt={ten_play_cnt} WHERE out_video_id="{video_id}"; """
                # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
                MysqlHelper.update_values(log_type, crawler, update_sql, env)
                cls.download_publish(log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint, env)
            elif datetime.datetime.now().hour == 15 and datetime.datetime.now().minute <= 10:
                video_info_dict = cls.get_video_info(log_type=log_type,
                                                     crawler=crawler,
                                                     p_id=profile_id,
                                                     p_mid=profile_mid,
                                                     v_title=video_title,
                                                     v_id=video_id)
                fifteen_play_cnt = video_info_dict['play_cnt']
                Common.logger(log_type, crawler).info(f"fifteen_play_cnt:{fifteen_play_cnt}")
                update_sql = f""" update crawler_xiaoniangao_hour set fifteen_play_cnt={fifteen_play_cnt} WHERE out_video_id="{video_id}"; """
                # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
                MysqlHelper.update_values(log_type, crawler, update_sql, env)
                cls.download_publish(log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint, env)
            elif datetime.datetime.now().hour == 20 and datetime.datetime.now().minute <= 10:
                video_info_dict = cls.get_video_info(log_type=log_type,
                                                     crawler=crawler,
                                                     p_id=profile_id,
                                                     p_mid=profile_mid,
                                                     v_title=video_title,
                                                     v_id=video_id)
                twenty_play_cnt = video_info_dict['play_cnt']
                Common.logger(log_type, crawler).info(f"twenty_play_cnt:{twenty_play_cnt}")
                update_sql = f""" update crawler_xiaoniangao_hour set twenty_play_cnt={twenty_play_cnt} WHERE out_video_id="{video_id}"; """
                # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
                MysqlHelper.update_values(log_type, crawler, update_sql, env)
                cls.download_publish(log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint, env)
            else:
                pass
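
    # Design note: the three branches above differ only in which snapshot
    # column they write (ten_play_cnt / fifteen_play_cnt / twenty_play_cnt);
    # mapping the hour to the column name would collapse them into one path.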

    @classmethod
    def download(cls, log_type, crawler, video_info_dict, strategy, oss_endpoint, env):
        # Download the cover image
        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_info_dict["video_title"], url=video_info_dict["cover_url"])
        # Download the video
        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_info_dict["video_title"], url=video_info_dict["video_url"])
        # Save the video info to "./videos/{download_video_title}/info.txt"
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_info_dict)
        # Upload the video
        Common.logger(log_type, crawler).info("开始上传视频...")
        our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                  crawler=crawler,
                                                  strategy=strategy,
                                                  our_uid="hour",
                                                  env=env,
                                                  oss_endpoint=oss_endpoint)
        if env == "dev":
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        Common.logger(log_type, crawler).info("视频上传完成")
        if our_video_id is None:
            # Upload failed: delete the video folder
            shutil.rmtree(f"./{crawler}/videos/{video_info_dict['video_title']}")
            return
        # Save the video info to the database
        rule_dict = {
            "duration": {"min": 40},
            "play_cnt": {"min": 4000},
            "publish_day": {"min": 10}
        }
        insert_sql = f""" insert into crawler_video(video_id,
                        out_user_id,
                        platform,
                        strategy,
                        out_video_id,
                        video_title,
                        cover_url,
                        video_url,
                        duration,
                        publish_time,
                        play_cnt,
                        crawler_rule,
                        width,
                        height)
                        values({our_video_id},
                        "{video_info_dict['profile_id']}",
                        "{cls.platform}",
                        "小时榜爬虫策略",
                        "{video_info_dict['video_id']}",
                        "{video_info_dict['video_title']}",
                        "{video_info_dict['cover_url']}",
                        "{video_info_dict['video_url']}",
                        {int(video_info_dict['duration'])},
                        "{video_info_dict['publish_time_str']}",
                        {int(video_info_dict['play_cnt'])},
                        '{json.dumps(rule_dict)}',
                        {int(video_info_dict['video_width'])},
                        {int(video_info_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
        # Write the video to the Feishu sheet
        Feishu.insert_columns(log_type, crawler, "yatRv2", "ROWS", 1, 2)
        # Video-ID sheet: write the data into the first row
        upload_time = int(time.time())
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "小时级上升榜",
                   str(video_info_dict['video_id']),
                   str(video_info_dict['video_title']),
                   our_video_link,
                   video_info_dict['play_cnt'],
                   video_info_dict['comment_cnt'],
                   video_info_dict['like_cnt'],
                   video_info_dict['share_cnt'],
                   video_info_dict['duration'],
                   f"{video_info_dict['video_width']}*{video_info_dict['video_height']}",
                   str(video_info_dict['publish_time_str'].replace("-", "/")),
                   str(video_info_dict['user_name']),
                   str(video_info_dict['profile_id']),
                   str(video_info_dict['profile_mid']),
                   str(video_info_dict['avatar_url']),
                   str(video_info_dict['cover_url']),
                   str(video_info_dict['video_url'])]]
        time.sleep(1)
        Feishu.update_values(log_type, crawler, "yatRv2", "F2:Z2", values)
        Common.logger(log_type, crawler).info('视频信息写入飞书成功\n')

    # Download / upload
    @classmethod
    def download_publish(cls, log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint, env):
        # try:
        if cls.repeat_video(log_type, crawler, video_info_dict["video_id"], env) != 0:
            Common.logger(log_type, crawler).info('视频已下载\n')
        # Play count >= 50000: download directly
        elif int(video_info_dict["play_cnt"]) >= 50000:
            Common.logger(log_type, crawler).info(f"播放量:{video_info_dict['play_cnt']} >= 50000,满足下载规则,开始下载视频")
            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
        # Rising-rank logic: a rise >= 5000 in any single period, or >= 2000
        # in each of two periods
        elif int(update_video_info['ten_play_cnt']) >= 5000 or int(update_video_info['fifteen_play_cnt']) >= 5000 or int(update_video_info['twenty_play_cnt']) >= 5000:
            Common.logger(log_type, crawler).info(f"10:00 or 15:00 or 20:00 数据上升量:{int(update_video_info['ten_play_cnt'])} or {int(update_video_info['fifteen_play_cnt'])} or {int(update_video_info['twenty_play_cnt'])} >= 5000")
            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
        elif int(update_video_info['ten_play_cnt']) >= 2000 and int(update_video_info['fifteen_play_cnt']) >= 2000:
            Common.logger(log_type, crawler).info(f"10:00 and 15:00 数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['fifteen_play_cnt'])} >= 2000")
            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
        elif int(update_video_info['fifteen_play_cnt']) >= 2000 and int(update_video_info['twenty_play_cnt']) >= 2000:
            Common.logger(log_type, crawler).info(f"15:00 and 20:00 数据上升量:{int(update_video_info['fifteen_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 2000")
            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
        elif int(update_video_info['ten_play_cnt']) >= 2000 and int(update_video_info['twenty_play_cnt']) >= 2000:
            Common.logger(log_type, crawler).info(f"今日10:00 / 20:00数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 2000")
            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
        else:
            Common.logger(log_type, crawler).info("上升量不满足下载规则")
        # except Exception as e:
        #     Common.logger(log_type, crawler).error(f"download_publish:{e}\n")
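
    # Illustrative application of the rules above: a rise of 6,000 in any one
    # snapshot qualifies on its own; rises of 2,000 in two snapshots also
    # qualify; 1,999 in every snapshot does not.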


if __name__ == "__main__":
    # print(XiaoniangaoHour.filter_words("hour", "xiaoniangao"))
    # print(XiaoniangaoHour.get_uid_token())
    # XiaoniangaoHour.get_videoList("test", "xiaoniangao", "dev")
    XiaoniangaoHour.update_videoList("test", "xiaoniangao", "小时榜爬虫策略", "out", "dev")
    pass