
# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/3/15
import datetime
import json
import os
import random
import shutil
import sys
import time
from hashlib import md5

import requests
import urllib3

sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.mq import MQ
from common.publish import Publish
from common.scheduling_db import MysqlHelper
from common.public import get_config_from_mysql, download_rule

# Requests in this module go direct, without a proxy.
proxies = {"http": None, "https": None}
class XiaoniangaoHourScheduling:
    platform = "小年糕"
    words = "abcdefghijklmnopqrstuvwxyz0123456789"
    # Fake device identity, generated once per process: a UUID-shaped uid
    # (8-4-4-4-12 characters) and a 32-character token, both drawn from `words`.
    uid_token_dict = {
        "uid": f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}""",
        "token": "".join(random.sample(words, 32))
    }
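
    # NB: random.sample draws without replacement, so no segment above ever
    # repeats a character, unlike a real UUID. A stdlib alternative, shown for
    # illustration only (uuid is not used anywhere in this crawler):
    #
    #   import uuid
    #   fake_uid = str(uuid.uuid4())    # e.g. "2f310d09-5e32-5985-8644-3bcb6920e76f"
    #   fake_token = uuid.uuid4().hex   # 32 hex characters, repeats allowed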
    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        # Dedup against the main crawler_video table; a count > 0 means already ingested.
        # sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    @classmethod
    def repeat_hour(cls, log_type, crawler, video_id, env):
        # Dedup against the hourly-chart table.
        sql = f""" select * from crawler_xiaoniangao_hour where platform="小年糕" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
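
    # Both queries interpolate values into SQL via f-strings. A parameterized
    # sketch, shown for illustration only (it assumes a raw DB-API cursor, not
    # the MysqlHelper wrapper, whose interface may not accept bind parameters):
    #
    #   cursor.execute(
    #       "select 1 from crawler_video where platform in (%s, %s) and out_video_id = %s",
    #       (crawler, cls.platform, video_id),
    #   )
    #   already_seen = cursor.fetchone() is not None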
    # Fetch the recommend feed list
    @classmethod
    def get_videoList(cls, log_type, crawler, rule_dict, env):
        url = "https://kapi-xng-app.xiaoniangao.cn/v2/trends/recommend"
        # NB: this payload carries a hardcoded device identity (uid/session_id),
        # so cls.uid_token_dict is not needed for this endpoint.
        payload = "{\"topic_name\":\"recommend\",\"ext\":{\"current_item\":0,\"items\":[]},\"tag_id\":0,\"refresh\":false,\"share_width\":300,\"share_height\":240,\"staggered_style\":0,\"qs\":\"imageMogr2\\/gravity\\/center\\/rotate\\/$\\/thumbnail\\/!750x500r\\/interlace\\/1\\/format\\/jpg\",\"topic_id\":2,\"h_qs\":\"imageMogr2\\/gravity\\/center\\/rotate\\/$\\/thumbnail\\/!80x80r\\/interlace\\/1\\/format\\/jpg\",\"log\":{\"net\":\"wifi\",\"product\":\"xng\",\"uid\":\"2F310D09-5E32-5985-8644-3BCB6920E76F\",\"brand\":\"iPad\",\"page\":\"\",\"session_id\":\"71C77648-3224-4083-894C-B6282131F286\",\"resolution\":\"750*1334\",\"pf\":\"4\",\"app_version\":\"1.22.5\",\"device\":\"iPad Pro (12.9-inch) (3rd generation)\",\"os_version\":\"15.7\",\"idfa\":\"\",\"channel\":\"ios_app_store\"},\"token\":\"\"}"
        headers = {
            'Host': 'kapi-xng-app.xiaoniangao.cn',
            'content-type': 'application/json; charset=utf-8',
            'accept': 'application/json',
            'authorization': 'PsrUTBCQ5G7UVZdgx+JxymPHcKU=',
            'verb': 'POST',
            'content-md5': '08fa0e6bf725fd6ef83c16d2ceb8a544',
            'x-b3-traceid': '45a6c5b4c471eecc',
            'accept-language': 'zh-Hans-CN;q=1.0',
            'date': 'Mon, 19 Jun 2023 09:47:40 GMT',
            'x-token-id': '',
            'x-signaturemethod': 'hmac-sha1',
            'user-agent': 'xngapp/1.22.5 (cn.xiaoniangao.xngapp; build:157; iOS 15.7.0) Alamofire/5.2.2'
        }
        urllib3.disable_warnings()
        r = requests.post(url=url, headers=headers, data=payload, proxies=proxies, verify=False)
        # Validate the response shape step by step before touching the feed list.
        if 'data' not in r.text or r.status_code != 200:
            Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
            Common.logging(log_type, crawler, env, f"get_videoList:{r.text}\n")
            return
        elif "data" not in r.json():
            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()}\n")
            Common.logging(log_type, crawler, env, f"get_videoList:{r.json()}\n")
            return
        elif "list" not in r.json()["data"]:
            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']}\n")
            Common.logging(log_type, crawler, env, f"get_videoList:{r.json()['data']}\n")
            return
        elif len(r.json()['data']['list']) == 0:
            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']['list']}\n")
            Common.logging(log_type, crawler, env, f"get_videoList:{r.json()['data']['list']}\n")
            return
        else:
            # Feed list payload
            feeds = r.json()["data"]["list"]
            for i in range(len(feeds)):
                try:
                    # Title: strip newlines and punctuation that break filenames/SQL;
                    # an emoji is attached at the head or tail of the title below.
                    xiaoniangao_title = feeds[i].get("title", "").strip().replace("\n", "") \
                        .replace("/", "").replace("\r", "").replace("#", "") \
                        .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
                        .replace(":", "").replace("*", "").replace("?", "") \
                        .replace("?", "").replace('"', "").replace("<", "") \
                        .replace(">", "").replace("|", "").replace(" ", "") \
                        .replace('"', '').replace("'", '')
                    # Pick one emoji/symbol at random from config
                    emoji = random.choice(get_config_from_mysql(log_type, crawler, env, "emoji"))
                    # Final title: choose randomly from [emoji+title, title+emoji]
                    video_title = random.choice([f"{emoji}{xiaoniangao_title}", f"{xiaoniangao_title}{emoji}"])
                    # Publish time (the feed timestamp is in milliseconds)
                    publish_time_stamp = int(int(feeds[i].get("t", 0)) / 1000)
                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                    # User nickname / avatar
                    user_name = feeds[i].get("user", {}).get("nick", "").strip().replace("\n", "") \
                        .replace("/", "").replace(" ", "") \
                        .replace(" ", "").replace("&NBSP", "").replace("\r", "")
                    video_dict = {
                        "video_title": video_title,
                        "video_id": feeds[i].get("vid", ""),
                        "duration": int(feeds[i].get("du", 0) / 1000),
                        "play_cnt": feeds[i].get("play_pv", 0),
                        "like_cnt": feeds[i].get("favor", {}).get("total", 0),
                        "comment_cnt": feeds[i].get("comment_count", 0),
                        "share_cnt": feeds[i].get("share", 0),
                        "user_name": user_name,
                        "publish_time_stamp": publish_time_stamp,
                        "publish_time_str": publish_time_str,
                        "video_width": int(feeds[i].get("vw", 0)),
                        "video_height": int(feeds[i].get("vh", 0)),
                        "avatar_url": feeds[i].get("user", {}).get("hurl", ""),
                        # Direct indexing here; a KeyError is caught by the enclosing try.
                        "profile_id": feeds[i]["id"],
                        "profile_mid": feeds[i]["user"]["mid"],
                        "cover_url": feeds[i].get("url", ""),
                        "video_url": feeds[i].get("v_url", ""),
                        "session": f"xiaoniangao-hour-{int(time.time())}"
                    }
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    Common.logging(log_type, crawler, env, f"{video_dict}")
                    # Drop invalid items
                    if video_title == "" or video_dict["video_id"] == "" or video_dict["video_url"] == "":
                        Common.logger(log_type, crawler).warning("无效视频\n")
                        Common.logging(log_type, crawler, env, "无效视频\n")
                    # Baseline crawl-rule filter
                    elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
                        Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                    # Filter-word check: skip if any configured word appears in the title
                    elif any(str(word) in video_dict["video_title"]
                             for word in get_config_from_mysql(log_type=log_type,
                                                               source=crawler,
                                                               env=env,
                                                               text="filter",
                                                               action="")):
                        Common.logger(log_type, crawler).info('已中过滤词\n')
                        Common.logging(log_type, crawler, env, '已中过滤词\n')
                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
                        Common.logger(log_type, crawler).info('视频已下载\n')
                        Common.logging(log_type, crawler, env, '视频已下载\n')
                    else:
                        # Write the item into the hourly-chart MySQL table
                        insert_sql = f""" insert into crawler_xiaoniangao_hour(profile_id,
                        profile_mid,
                        platform,
                        out_video_id,
                        video_title,
                        user_name,
                        cover_url,
                        video_url,
                        duration,
                        publish_time,
                        play_cnt,
                        crawler_time_stamp,
                        crawler_time)
                        values({video_dict["profile_id"]},
                        {video_dict["profile_mid"]},
                        "{cls.platform}",
                        "{video_dict["video_id"]}",
                        "{video_title}",
                        "{user_name}",
                        "{video_dict["cover_url"]}",
                        "{video_dict["video_url"]}",
                        {video_dict["duration"]},
                        "{publish_time_str}",
                        {video_dict["play_cnt"]},
                        {int(time.time())},
                        "{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time())))}"
                        )"""
                        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
                        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
                        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
                        Common.logger(log_type, crawler).info('视频信息写入小时级数据库成功!\n')
                        Common.logging(log_type, crawler, env, '视频信息写入小时级数据库成功!\n')
                except Exception as e:
                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                    Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
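
    # Usage sketch, with illustrative arguments (rule_dict's shape is whatever
    # common.public.download_rule expects; the empty dict is just a placeholder):
    #
    #   XiaoniangaoHourScheduling.get_videoList(
    #       log_type="hour", crawler="xiaoniangao", rule_dict={}, env="dev")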
    @classmethod
    def get_video_info(cls, log_type, crawler, p_id, p_mid, v_title, v_id):
        uid_token_dict = cls.uid_token_dict
        url = "https://kapi.xiaoniangao.cn/profile/get_profile_by_id"
        headers = {
            "x-b3-traceid": '1c403a4aa72e3c',
            "X-Token-Id": 'ab619e96d801f1567388629260aa68ec-1202200806',
            "uid": uid_token_dict['uid'],
            "content-type": "application/json",
            "Accept-Encoding": "gzip,compress,br,deflate",
            "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
                          ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
                          'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
            "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/624/page-frame.html'
        }
        data = {
            "play_src": "1",
            "profile_id": int(p_id),
            "profile_mid": int(p_mid),
            "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/"
                  "!400x400r/crop/400x400/interlace/1/format/jpg",
            "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail"
                    "/!80x80r/crop/80x80/interlace/1/format/jpg",
            "share_width": 625,
            "share_height": 500,
            "no_comments": True,
            "no_follow": True,
            "vid": v_id,
            "hot_l1_comment": True,
            "token": uid_token_dict['token'],
            "uid": uid_token_dict['uid'],
            "proj": "ma",
            "wx_ver": "8.0.20",
            "code_ver": "3.62.0",
            "log_common_params": {
                "e": [{
                    "data": {
                        "page": "dynamicSharePage"
                    }
                }],
                "ext": {
                    "brand": "iPhone",
                    "device": "iPhone 11",
                    "os": "iOS 14.7.1",
                    "weixinver": "8.0.20",
                    "srcver": "2.24.3",
                    "net": "wifi",
                    "scene": "1089"
                },
                "pj": "1",
                "pf": "2",
                "session_id": "7bcce313-b57d-4305-8d14-6ebd9a1bad29"
            }
        }
        urllib3.disable_warnings()
        r = requests.post(headers=headers, url=url, json=data, proxies=proxies, verify=False)
        if r.status_code != 200 or 'data' not in r.text:
            Common.logger(log_type, crawler).warning(f"get_videoInfo:{r.text}\n")
            # NB: falls through and returns None implicitly; callers must handle it.
        else:
            hour_play_cnt = r.json()["data"]["play_pv"]
            hour_cover_url = r.json()["data"]["url"]
            hour_video_url = r.json()["data"]["v_url"]
            hour_video_duration = r.json()["data"]["du"]
            hour_video_comment_cnt = r.json()["data"]["comment_count"]
            hour_video_like_cnt = r.json()["data"]["favor"]["total"]
            hour_video_share_cnt = r.json()["data"]["share"]
            hour_video_width = r.json()["data"]["w"]
            hour_video_height = r.json()["data"]["h"]
            hour_video_send_time = r.json()["data"]["t"]
            publish_time_stamp = int(int(hour_video_send_time) / 1000)
            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
            hour_user_name = r.json()["data"]["user"]["nick"]
            hour_head_url = r.json()["data"]["user"]["hurl"]
            video_info_dict = {
                "video_id": v_id,
                "video_title": v_title,
                "duration": hour_video_duration,
                "play_cnt": hour_play_cnt,
                "like_cnt": hour_video_like_cnt,
                "comment_cnt": hour_video_comment_cnt,
                "share_cnt": hour_video_share_cnt,
                "user_name": hour_user_name,
                "publish_time_stamp": publish_time_stamp,
                "publish_time_str": publish_time_str,
                "video_width": hour_video_width,
                "video_height": hour_video_height,
                "avatar_url": hour_head_url,
                "profile_id": p_id,
                "profile_mid": p_mid,
                "cover_url": hour_cover_url,
                "video_url": hour_video_url,
                "session": f"xiaoniangao-hour-{int(time.time())}"
            }
            return video_info_dict
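
    # Defensive-usage sketch: on a failed request, the warning branch above falls
    # through without a return statement, so the method yields None (the argument
    # values below are illustrative):
    #
    #   info = XiaoniangaoHourScheduling.get_video_info(
    #       "hour", "xiaoniangao", p_id=1, p_mid=1, v_title="标题", v_id="vid123")
    #   if info is None:
    #       ...  # skip this item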
    # Update the hourly-chart data
    @classmethod
    def update_videoList(cls, log_type, crawler, rule_dict, our_uid, env):
        """
        Refresh hourly-chart rows crawled within the last 3 days: snapshot the
        current play count into the 10:00 / 15:00 / 20:00 column for the current
        window, then re-check the download rules via download_publish.
        """
        three_days_ago = (datetime.date.today() + datetime.timedelta(days=-3)).strftime("%Y-%m-%d %H:%M:%S")
        update_time_stamp = int(time.mktime(time.strptime(three_days_ago, "%Y-%m-%d %H:%M:%S")))
        # NB: "GROUP BY ... DESC" relies on legacy MySQL (pre-8.0) syntax to
        # deduplicate rows by out_video_id.
        select_sql = f""" select * from crawler_xiaoniangao_hour where crawler_time_stamp>={update_time_stamp} GROUP BY out_video_id DESC """
        update_video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env)
        if len(update_video_list) == 0:
            Common.logger(log_type, crawler).info("暂无需要更新的小时榜数据\n")
            Common.logging(log_type, crawler, env, "暂无需要更新的小时榜数据\n")
            return
        # Map each snapshot hour to the column it updates.
        hour_columns = {10: "ten_play_cnt", 15: "fifteen_play_cnt", 20: "twenty_play_cnt"}
        for update_video_info in update_video_list:
            try:
                profile_id = update_video_info["profile_id"]
                profile_mid = update_video_info["profile_mid"]
                video_title = update_video_info["video_title"]
                video_id = update_video_info["out_video_id"]
                now = datetime.datetime.now()
                column = hour_columns.get(now.hour)
                # Only act within the first 10 minutes of a snapshot hour.
                if column is None or now.minute > 10:
                    continue
                video_info_dict = cls.get_video_info(log_type=log_type,
                                                     crawler=crawler,
                                                     p_id=profile_id,
                                                     p_mid=profile_mid,
                                                     v_title=video_title,
                                                     v_id=video_id)
                play_cnt = video_info_dict['play_cnt']
                Common.logger(log_type, crawler).info(f"{column}:{play_cnt}")
                Common.logging(log_type, crawler, env, f"{column}:{play_cnt}")
                update_sql = f""" update crawler_xiaoniangao_hour set {column}={play_cnt} WHERE out_video_id="{video_id}"; """
                MysqlHelper.update_values(log_type, crawler, update_sql, env)
                cls.download_publish(log_type=log_type,
                                     crawler=crawler,
                                     video_info_dict=video_info_dict,
                                     rule_dict=rule_dict,
                                     update_video_info=update_video_info,
                                     our_uid=our_uid,
                                     env=env)
            except Exception as e:
                Common.logger(log_type, crawler).error(f'更新{update_video_info["video_title"]}时异常:{e}\n')
                Common.logging(log_type, crawler, env, f'更新{update_video_info["video_title"]}时异常:{e}\n')
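
    # Scheduling assumption (the actual scheduler lives outside this file):
    # running update_videoList every few minutes lets each snapshot window
    # (10:00-10:10, 15:00-15:10, 20:00-20:10) be hit at least once, e.g. a cron
    # sketch with a hypothetical entry-point script:
    #
    #   */5 * * * * cd /path/to/crawler && python3 run_xiaoniangao_hour.py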
    @classmethod
    def send_to_mq(cls, log_type, crawler, video_info_dict, rule_dict, env):
        # Rename/augment fields to the ETL message schema before publishing.
        video_info_dict["out_user_id"] = video_info_dict["profile_id"]
        video_info_dict["platform"] = crawler
        video_info_dict["strategy"] = log_type
        video_info_dict["out_video_id"] = video_info_dict["video_id"]
        video_info_dict["width"] = video_info_dict["video_width"]
        video_info_dict["height"] = video_info_dict["video_height"]
        video_info_dict["crawler_rule"] = json.dumps(rule_dict)
        # The dicts built upstream carry no "uid" key, so default to "" to avoid a KeyError.
        video_info_dict["user_id"] = video_info_dict.get("uid", "")
        video_info_dict["publish_time"] = video_info_dict["publish_time_str"]
        video_info_dict["strategy_type"] = "hour"
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        mq.send_msg(video_info_dict)
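
    # Shape of the resulting MQ message, abridged (values illustrative; the topic
    # is "topic_crawler_etl_" + env, e.g. "topic_crawler_etl_dev"):
    #
    #   {"out_user_id": 123, "platform": "xiaoniangao", "strategy": "hour",
    #    "out_video_id": "vid123", "width": 720, "height": 1280,
    #    "crawler_rule": "{...}", "user_id": "", "publish_time": "2023-06-19 09:47:40",
    #    "strategy_type": "hour", ...plus the original video_info_dict fields}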
    @classmethod
    def download(cls, log_type, crawler, video_info_dict, rule_dict, our_uid, env):
        # Download the video file
        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_info_dict["video_title"],
                               url=video_info_dict["video_url"])
        md_title = md5(video_info_dict['video_title'].encode('utf8')).hexdigest()
        try:
            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                # Zero-byte download: remove the video folder and bail out
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                Common.logging(log_type, crawler, env, "视频size=0,删除成功\n")
                return
        except FileNotFoundError:
            # File never landed: remove the video folder and bail out
            shutil.rmtree(f"./{crawler}/videos/{md_title}")
            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
            Common.logging(log_type, crawler, env, "视频文件不存在,删除文件夹成功\n")
            return
        # Download the cover image
        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_info_dict["video_title"],
                               url=video_info_dict["cover_url"])
        # Save video metadata to "./videos/{download_video_title}/info.txt"
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_info_dict)
        # Upload the video
        Common.logger(log_type, crawler).info("开始上传视频...")
        Common.logging(log_type, crawler, env, "开始上传视频...")
        # dev publishes through the external OSS endpoint and the test CMS domain;
        # prod uses the internal endpoint and the production domain.
        if env == "dev":
            oss_endpoint = "out"
            link_domain = "testadmin.piaoquantv.com"
        else:
            oss_endpoint = "inner"
            link_domain = "admin.piaoquantv.com"
        our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                  crawler=crawler,
                                                  strategy="上升榜抓取策略",
                                                  our_uid=our_uid,
                                                  env=env,
                                                  oss_endpoint=oss_endpoint)
        our_video_link = f"https://{link_domain}/cms/post-detail/{our_video_id}/info"
        if our_video_id is None:
            try:
                # Upload failed: clean up the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                return
            except FileNotFoundError:
                return
        insert_sql = f""" insert into crawler_video(video_id,
                        out_user_id,
                        platform,
                        strategy,
                        out_video_id,
                        video_title,
                        cover_url,
                        video_url,
                        duration,
                        publish_time,
                        play_cnt,
                        crawler_rule,
                        width,
                        height)
                        values({our_video_id},
                        "{video_info_dict['profile_id']}",
                        "{cls.platform}",
                        "上升榜抓取策略",
                        "{video_info_dict['video_id']}",
                        "{video_info_dict['video_title']}",
                        "{video_info_dict['cover_url']}",
                        "{video_info_dict['video_url']}",
                        {int(video_info_dict['duration'])},
                        "{video_info_dict['publish_time_str']}",
                        {int(video_info_dict['play_cnt'])},
                        '{json.dumps(rule_dict)}',
                        {int(video_info_dict['video_width'])},
                        {int(video_info_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
        Common.logging(log_type, crawler, env, '视频信息插入数据库成功!')
        # Write the video to the Feishu sheet
        Feishu.insert_columns(log_type, crawler, "yatRv2", "ROWS", 1, 2)
        # Video-ID worksheet: write the new row at the top
        upload_time = int(time.time())
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "上升榜抓取策略",
                   str(video_info_dict['video_id']),
                   str(video_info_dict['video_title']),
                   our_video_link,
                   video_info_dict['play_cnt'],
                   video_info_dict['comment_cnt'],
                   video_info_dict['like_cnt'],
                   video_info_dict['share_cnt'],
                   video_info_dict['duration'],
                   f"{video_info_dict['video_width']}*{video_info_dict['video_height']}",
                   str(video_info_dict['publish_time_str'].replace("-", "/")),
                   str(video_info_dict['user_name']),
                   str(video_info_dict['profile_id']),
                   str(video_info_dict['profile_mid']),
                   str(video_info_dict['avatar_url']),
                   str(video_info_dict['cover_url']),
                   str(video_info_dict['video_url'])]]
        time.sleep(1)
        Feishu.update_values(log_type, crawler, "yatRv2", "F2:Z2", values)
        Common.logger(log_type, crawler).info('视频信息写入飞书成功\n')
        Common.logging(log_type, crawler, env, '视频信息写入飞书成功\n')
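
    # NB: download() is currently bypassed. Every qualifying branch in
    # download_publish() below hands the item to send_to_mq() for the ETL
    # pipeline instead; the direct cls.download(...) calls are kept commented
    # out for reference.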
    # Download / upload decision
    @classmethod
    def download_publish(cls, log_type, crawler, video_info_dict, rule_dict, update_video_info, our_uid, env):
        if cls.repeat_video(log_type, crawler, video_info_dict["video_id"], env) != 0:
            Common.logger(log_type, crawler).info('视频已下载\n')
            Common.logging(log_type, crawler, env, '视频已下载\n')
        # Play count >= 30000: publish directly
        elif int(video_info_dict["play_cnt"]) >= 30000:
            Common.logger(log_type, crawler).info(f"播放量:{video_info_dict['play_cnt']} >= 30000,满足下载规则,开始下载视频")
            Common.logging(log_type, crawler, env, f"播放量:{video_info_dict['play_cnt']} >= 30000,满足下载规则,开始下载视频")
            # cls.download(log_type=log_type,
            #              crawler=crawler,
            #              video_info_dict=video_info_dict,
            #              rule_dict=rule_dict,
            #              our_uid=our_uid,
            #              env=env)
            cls.send_to_mq(log_type=log_type, crawler=crawler, video_info_dict=video_info_dict, rule_dict=rule_dict, env=env)
        # Rising-chart rule: any single window gained >= 3000 plays,
        # or two windows each gained >= 1000 (checked pairwise below)
        elif int(update_video_info['ten_play_cnt']) >= 3000 or int(
                update_video_info['fifteen_play_cnt']) >= 3000 or int(update_video_info['twenty_play_cnt']) >= 3000:
            Common.logger(log_type, crawler).info(f"10:00 or 15:00 or 20:00 数据上升量:{int(update_video_info['ten_play_cnt'])} or {int(update_video_info['fifteen_play_cnt'])} or {int(update_video_info['twenty_play_cnt'])} >= 3000")
            Common.logging(log_type, crawler, env, f"10:00 or 15:00 or 20:00 数据上升量:{int(update_video_info['ten_play_cnt'])} or {int(update_video_info['fifteen_play_cnt'])} or {int(update_video_info['twenty_play_cnt'])} >= 3000")
            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
            Common.logging(log_type, crawler, env, "满足下载规则,开始下载视频")
            # cls.download(log_type=log_type,
            #              crawler=crawler,
            #              video_info_dict=video_info_dict,
            #              rule_dict=rule_dict,
            #              our_uid=our_uid,
            #              env=env)
            cls.send_to_mq(log_type=log_type, crawler=crawler, video_info_dict=video_info_dict, rule_dict=rule_dict, env=env)
        elif int(update_video_info['ten_play_cnt']) >= 1000 and int(update_video_info['fifteen_play_cnt']) >= 1000:
            Common.logger(log_type, crawler).info(f"10:00 and 15:00 数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['fifteen_play_cnt'])} >= 1000")
            Common.logging(log_type, crawler, env, f"10:00 and 15:00 数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['fifteen_play_cnt'])} >= 1000")
            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
            Common.logging(log_type, crawler, env, "满足下载规则,开始下载视频")
            # cls.download(log_type=log_type,
            #              crawler=crawler,
            #              video_info_dict=video_info_dict,
            #              rule_dict=rule_dict,
            #              our_uid=our_uid,
            #              env=env)
            cls.send_to_mq(log_type=log_type, crawler=crawler, video_info_dict=video_info_dict, rule_dict=rule_dict, env=env)
        elif int(update_video_info['fifteen_play_cnt']) >= 1000 and int(update_video_info['twenty_play_cnt']) >= 1000:
            Common.logger(log_type, crawler).info(
                f"15:00 and 20:00 数据上升量:{int(update_video_info['fifteen_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 1000")
            Common.logging(log_type, crawler, env, f"15:00 and 20:00 数据上升量:{int(update_video_info['fifteen_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 1000")
            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
            Common.logging(log_type, crawler, env, "满足下载规则,开始下载视频")
            # cls.download(log_type=log_type,
            #              crawler=crawler,
            #              video_info_dict=video_info_dict,
            #              rule_dict=rule_dict,
            #              our_uid=our_uid,
            #              env=env)
            cls.send_to_mq(log_type=log_type, crawler=crawler, video_info_dict=video_info_dict, rule_dict=rule_dict, env=env)
        elif int(update_video_info['ten_play_cnt']) >= 1000 and int(update_video_info['twenty_play_cnt']) >= 1000:
            Common.logger(log_type, crawler).info(
                f"今日10:00 / 20:00数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 1000")
            Common.logging(log_type, crawler, env, f"今日10:00 / 20:00数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 1000")
            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
            Common.logging(log_type, crawler, env, "满足下载规则,开始下载视频")
            # cls.download(log_type=log_type,
            #              crawler=crawler,
            #              video_info_dict=video_info_dict,
            #              rule_dict=rule_dict,
            #              our_uid=our_uid,
            #              env=env)
            cls.send_to_mq(log_type=log_type, crawler=crawler, video_info_dict=video_info_dict, rule_dict=rule_dict, env=env)
        else:
            Common.logger(log_type, crawler).info("上升量不满足下载规则")
            Common.logging(log_type, crawler, env, "上升量不满足下载规则")
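
    # Decision summary for download_publish, with the thresholds as coded above:
    #
    #   already in crawler_video              -> skip
    #   play_cnt >= 30000                     -> send_to_mq
    #   any one window gain >= 3000           -> send_to_mq
    #   10:00 and 15:00 gains both >= 1000    -> send_to_mq
    #   15:00 and 20:00 gains both >= 1000    -> send_to_mq
    #   10:00 and 20:00 gains both >= 1000    -> send_to_mq
    #   otherwise                             -> skip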

if __name__ == "__main__":
    print(get_config_from_mysql(log_type='hour', source='xiaoniangao', env='dev', text='filter'))