suisuiniannianyingfuqi_recommend_scheduling.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/4/13
import json
import os
import shutil
import sys
import time
from hashlib import md5

import requests
import urllib3
from requests.adapters import HTTPAdapter

# Make the project root importable before pulling in the common package
sys.path.append(os.getcwd())
from common.mq import MQ
from common.common import Common
from common.feishu import Feishu
from common.publish import Publish
from common.public import download_rule
from common.scheduling_db import MysqlHelper


class SuisuiniannianyingfuqiRecommendScheduling:
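    """Recommend-feed crawler for the "岁岁年年迎福气" WeChat mini-program.

    Pages through the feed API, de-duplicates against crawler_video, applies
    the download rules, and pushes qualifying items to the MQ topic
    "topic_crawler_etl_<env>" for downstream ETL.
    """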
    platform = "岁岁年年迎福气"

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
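        """Return the number of crawler_video rows with this out_video_id.

        Used for de-duplication: a non-zero count means the video was already
        collected, so the caller skips it.
        """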
        # sql = f""" select * from crawler_video where platform="岁岁年年迎福气" and out_video_id="{video_id}"; """
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    @classmethod
    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
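        """Page through the recommend feed and enqueue new videos.

        Each feed item is normalized into a video_dict, logged, and dropped if
        it is invalid, fails the download rules in rule_dict, or already
        exists in crawler_video; the rest are published to MQ.
        """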
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        page = 1
        while True:
            try:
                url = 'https://www.jzkksp.com/index/home/get_home_list.html'
                headers = {
                    'content-type': 'application/x-www-form-urlencoded',
                    'Accept-Encoding': 'gzip,compress,br,deflate',
                    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_3_1 like Mac OS X) '
                                  'AppleWebKit/605.1.15 (KHTML, like Gecko) '
                                  'Mobile/15E148 MicroMessenger/8.0.25(0x1800192b) NetType/WIFI Language/zh_CN',
                    'Referer': 'https://servicewechat.com/wxd4c54f60812f6f36/1/page-frame.html',
                }
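                # Form payload as captured from the mini-program. token, time,
                # str_data and openid look like fixed values recorded from a
                # real session (assumption); only page varies between requests.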
                data = {
                    'token': '851ae159fd33f955bf433e7c47a4a298',
                    'time': '1667905857000',
                    'str_data': 'uT551tU8',
                    'page': str(page),
                    'limit': '10',
                    'appid': 'wxd4c54f60812f6f36',
                    'version': '1.4.1',
                    'openid': 'oDAjy5SCFe7Ml3PNgiow3ncozL1o'
                }
                urllib3.disable_warnings()
                s = requests.session()
                # max_retries=3: retry each request up to 3 times
                s.mount('http://', HTTPAdapter(max_retries=3))
                s.mount('https://', HTTPAdapter(max_retries=3))
                response = s.post(url=url, headers=headers, data=data, verify=False, timeout=5)
                page += 1
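                # Three early exits: a non-200 status, a body without 'data',
                # or an empty page, which is treated as the end of the feed.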
                if response.status_code != 200:
                    Common.logger(log_type, crawler).warning(f'get_videoList:{response.status_code}, {response.text}\n')
                    return
                elif 'data' not in response.json():
                    Common.logger(log_type, crawler).warning(f'get_videoList:{response.status_code}, {response.json()}\n')
                    return
                elif len(response.json()['data']['video_list']['data']) == 0:
                    Common.logger(log_type, crawler).info(f'No more data~ {response.json()}\n')
                    return
                else:
                    feeds = response.json()['data']['video_list']['data']
                    for i in range(len(feeds)):
                        try:
                            publish_time_str = feeds[i].get('createtime', '')
                            publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y-%m-%d")))
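                            # Normalize one feed item into the crawler's common
                            # video_dict schema; missing fields default to ''/0.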
                            video_dict = {'video_title': feeds[i].get('title', "").replace("'", "").replace('"', ''),
                                          'video_id': str(feeds[i].get('id', '')),
                                          'play_cnt': feeds[i].get('browse', 0),
                                          'comment_cnt': 0,
                                          'like_cnt': 0,
                                          'share_cnt': 0,
                                          'publish_time_stamp': publish_time_stamp,
                                          'publish_time_str': publish_time_str,
                                          'user_name': "岁岁年年迎福气",
                                          'user_id': "suisuiniannianyingfuqi",
                                          'avatar_url': feeds[i].get('thumb', ''),
                                          'cover_url': feeds[i].get('thumb', ''),
                                          'video_url': feeds[i].get('url', ''),
                                          'session': f"suisuiniannianyingfuqi-{int(time.time())}"}
                            for k, v in video_dict.items():
                                Common.logger(log_type, crawler).info(f"{k}:{v}")
                            if video_dict["video_id"] == '' or video_dict["video_title"] == '' or video_dict["cover_url"] == '' or video_dict["video_url"] == '':
                                Common.logger(log_type, crawler).info('Invalid video\n')
                            elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                                Common.logger(log_type, crawler).info("Does not match the download rules\n")
                            elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                                Common.logger(log_type, crawler).info('Video already downloaded\n')
                            else:
                                # cls.download_publish(log_type=log_type,
                                #                      crawler=crawler,
                                #                      our_uid=our_uid,
                                #                      video_dict=video_dict,
                                #                      rule_dict=rule_dict,
                                #                      env=env)
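                                # Direct download/publish is disabled; instead the
                                # fields are remapped to the ETL schema and the item
                                # is handed to MQ, whose consumer does the download
                                # and upload.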
                                video_dict["out_user_id"] = video_dict["user_id"]
                                video_dict["platform"] = crawler
                                video_dict["strategy"] = log_type
                                video_dict["out_video_id"] = video_dict["video_id"]
                                video_dict["width"] = 0
                                video_dict["height"] = 0
                                video_dict["crawler_rule"] = json.dumps(rule_dict)
                                video_dict["user_id"] = our_uid
                                video_dict["publish_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                                mq.send_msg(video_dict)
                        except Exception as e:
                            Common.logger(log_type, crawler).error(f"Exception while handling one video: {e}\n")
            except Exception as e:
                Common.logger(log_type, crawler).error(f"Exception while crawling page {page}: {e}\n")

    # Download / upload
    @classmethod
    def download_publish(cls, log_type, crawler, our_uid, video_dict, rule_dict, env):
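        """Download a video and its cover, upload it, and record the result.

        Legacy direct download-and-publish path; get_videoList currently
        bypasses it in favor of MQ, so this is presumably kept for manual runs.
        """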
        # Download the video
        Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
        try:
            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                # Remove the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).info("Video size is 0, folder removed\n")
                return
        except FileNotFoundError:
            # Remove the video folder
            shutil.rmtree(f"./{crawler}/videos/{md_title}")
            Common.logger(log_type, crawler).info("Video file missing, folder removed\n")
            return
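        # Probe the downloaded file with ffmpeg for duration and resolution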
        # Use the md5-hashed folder name here too, consistent with the download path above
        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{md_title}/video.mp4")
        video_dict["duration"] = ffmpeg_dict["duration"]
        video_dict["video_width"] = ffmpeg_dict["width"]
        video_dict["video_height"] = ffmpeg_dict["height"]
        # Download the cover
        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
        # Save the video info to a local txt file
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
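        # OSS endpoint: "out" (public) in dev, "inner" (intranet) otherwise,
        # presumably because prod runs inside the same network as OSS.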
        if env == "dev":
            oss_endpoint = "out"
        else:
            oss_endpoint = "inner"
        # Upload the video
        Common.logger(log_type, crawler).info("Starting video upload...")
        our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                  crawler=crawler,
                                                  strategy="推荐榜爬虫策略",
                                                  our_uid=our_uid,
                                                  env=env,
                                                  oss_endpoint=oss_endpoint)
        if env == 'dev':
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        Common.logger(log_type, crawler).info("Video upload finished")
        if our_video_id is None:
            try:
                # Upload failed; remove the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                return
            except FileNotFoundError:
                return
        # Write the video to the Feishu sheet
        Feishu.insert_columns(log_type, crawler, "290bae", "ROWS", 1, 2)
        upload_time = int(time.time())
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "推荐榜爬虫策略",
                   video_dict['video_title'],
                   video_dict['video_id'],
                   our_video_link,
                   video_dict['play_cnt'],
                   video_dict['duration'],
                   f"{video_dict['video_width']}*{video_dict['video_height']}",
                   video_dict['cover_url'],
                   video_dict['video_url']]]
        time.sleep(0.5)
        Feishu.update_values(log_type, crawler, "290bae", "F2:Z2", values)
        Common.logger(log_type, crawler).info("Video saved to the Feishu sheet\n")
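        # NOTE: values are interpolated straight into the SQL string. Quote
        # characters are stripped from video_title at parse time, but a
        # parameterized query would be the safer pattern here.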
        # Save the video info to the database
        insert_sql = f""" insert into crawler_video(video_id,
                                                    out_user_id,
                                                    platform,
                                                    strategy,
                                                    out_video_id,
                                                    video_title,
                                                    cover_url,
                                                    video_url,
                                                    duration,
                                                    publish_time,
                                                    play_cnt,
                                                    crawler_rule,
                                                    width,
                                                    height)
                                                    values({our_video_id},
                                                    "{video_dict['user_id']}",
                                                    "{cls.platform}",
                                                    "推荐榜爬虫策略",
                                                    "{video_dict['video_id']}",
                                                    "{video_dict['video_title']}",
                                                    "{video_dict['cover_url']}",
                                                    "{video_dict['video_url']}",
                                                    {int(video_dict['duration'])},
                                                    "{video_dict['publish_time_str']}",
                                                    {int(video_dict['play_cnt'])},
                                                    '{json.dumps(rule_dict)}',
                                                    {int(video_dict['video_width'])},
                                                    {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action='')
        Common.logger(log_type, crawler).info('Video info inserted into the database\n')


if __name__ == '__main__':
    SuisuiniannianyingfuqiRecommendScheduling.get_videoList(log_type='recommend',
                                                            crawler='suisuiniannianyingfuqi',
                                                            our_uid=6267140,
                                                            rule_dict={},
                                                            env='dev')