benshanzhufu_recommend_scheduling.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/4/13
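"""Recommend-feed crawler for 本山祝福 (a WeChat mini-program): fetches the
paginated video list, filters items against crawl rules, filter words, and the
dedup table, then hands qualifying videos to MQ.
"""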
import json
import os
import shutil
import sys
import time
from hashlib import md5
from urllib import parse

import requests
import urllib3

sys.path.append(os.getcwd())
from common.mq import MQ
from common.common import Common
from common.scheduling_db import MysqlHelper
from common.feishu import Feishu
from common.publish import Publish
from common.public import get_config_from_mysql, download_rule

# Pass explicit None values so requests does not pick up system proxy settings
proxies = {"http": None, "https": None}


class BenshanzhufuRecommend:
    platform = "本山祝福"

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        # Count existing rows with this out_video_id (0 means the video is new)
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
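
    # For crawler="benshanzhufu" and video_id="123", the dedup query above expands to:
    #   select * from crawler_video where platform in ("benshanzhufu","本山祝福") and out_video_id="123";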

    # Fetch videos from the recommend list
    @classmethod
    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        # Pagination state
        visitor_key = ""
        page = 1
        while True:
            # try:
            now = int(time.time() * 1000)
            url = "https://bszf.wentingyou.cn/index.php/v111/index/index?parameter="
            header = {
                "content-time": str(now),
                "chatKey": "wx0fb8149da961d3b0",
                "cache-time": str(now),
                "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) "
                              "AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 "
                              "MicroMessenger/8.0.20(0x1800142d) NetType/WIFI Language/zh_CN",
                "Referer": "https://servicewechat.com/wx0fb8149da961d3b0/2/page-frame.html"
            }
            parameter = {
                "page": page,
                "ini_id": visitor_key
            }
            # URL-encode the JSON-serialized pagination parameters
            params = parse.quote(json.dumps(parameter))
            url = url + str(params)
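            # For page 1 with an empty visitor_key, the encoded query looks like:
            #   ...index?parameter=%7B%22page%22%3A%201%2C%20%22ini_id%22%3A%20%22%22%7D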
            urllib3.disable_warnings()
            r = requests.get(headers=header, url=url, proxies=proxies, verify=False)
            if r.status_code != 200:
                Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.text}\n")
                Common.logging(log_type, crawler, env, f"get_videoList:{r.status_code}, {r.text}\n")
                return
            elif r.json().get("message") != "list success":
                Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.json()}\n")
                Common.logging(log_type, crawler, env, f"get_videoList:{r.status_code}, {r.text}\n")
                return
            elif "data" not in r.json():
                Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.json()}\n")
                Common.logging(log_type, crawler, env, f"get_videoList:{r.status_code}, {r.text}\n")
                return
            elif len(r.json()["data"]["list"]) == 0:
                Common.logger(log_type, crawler).info(f"No more data~ {r.json()}\n")
                Common.logging(log_type, crawler, env, f"No more data~ {r.json()}\n")
                return
            else:
                # Advance the pagination cursor
                visitor_key = r.json()["data"]["visitor_key"]
                page += 1
                feeds = r.json()["data"]["list"]
                for i in range(len(feeds)):
                    # try:
                    publish_time_stamp = feeds[i].get("update_time", 0)
                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                    video_url = feeds[i].get("video_url", "")
                    if ".mp4" not in video_url:
                        video_url = ""
                    video_dict = {
                        'video_title': feeds[i].get("title", "").replace(" ", "").replace("'", "").replace('"', ""),
                        'video_id': str(feeds[i].get("nid", "")),
                        'play_cnt': 0,
                        'comment_cnt': feeds[i].get("commentCount", 0),
                        'like_cnt': 0,
                        'share_cnt': 0,
                        'publish_time_stamp': publish_time_stamp,
                        'publish_time_str': publish_time_str,
                        'user_name': "本山祝福",
                        'user_id': "benshanzhufu",
                        'avatar_url': feeds[i].get("video_cover", ""),
                        'cover_url': feeds[i].get("video_cover", ""),
                        'video_url': video_url,
                        'session': f"benshanzhufu-{int(time.time())}"
                    }
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
                    # Filter out invalid videos
                    if video_dict["video_id"] == "" or video_dict["cover_url"] == "" or video_dict["video_url"] == "":
                        Common.logger(log_type, crawler).info("Invalid video\n")
                        Common.logging(log_type, crawler, env, "Invalid video\n")
                    elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                        Common.logger(log_type, crawler).info("Video does not meet the crawl rules\n")
                        Common.logging(log_type, crawler, env, "Video does not meet the crawl rules\n")
                    elif any(str(word) in video_dict["video_title"]
                             for word in get_config_from_mysql(log_type=log_type,
                                                               source=crawler,
                                                               env=env,
                                                               text="filter",
                                                               action="")):
                        Common.logger(log_type, crawler).info('Title hit a filter word\n')
                        Common.logging(log_type, crawler, env, 'Title hit a filter word\n')
                    elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                        Common.logger(log_type, crawler).info('Video already downloaded\n')
                        Common.logging(log_type, crawler, env, 'Video already downloaded\n')
                    else:
                        video_dict["out_user_id"] = video_dict["user_id"]
                        video_dict["platform"] = crawler
                        video_dict["strategy"] = log_type
                        video_dict["out_video_id"] = video_dict["video_id"]
                        video_dict["width"] = 0
                        video_dict["height"] = 0
                        video_dict["crawler_rule"] = json.dumps(rule_dict)
                        video_dict["user_id"] = our_uid
                        video_dict["publish_time"] = video_dict["publish_time_str"]
                        video_dict["fans_cnt"] = 0
                        video_dict["videos_cnt"] = 0
                        mq.send_msg(video_dict)
                    # except Exception as e:
                    #     Common.logger(log_type, crawler).info(f"Exception while crawling a single video: {e}\n")
            # except Exception as e:
            #     Common.logger(log_type, crawler).error(f"Exception while crawling page {page}: {e}\n")
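
    # Note: get_videoList above hands qualifying videos to MQ via mq.send_msg;
    # download_publish below is a separate, direct download-and-publish path.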

    # Download / upload
    @classmethod
    def download_publish(cls, log_type, crawler, our_uid, video_dict, rule_dict, env):
        # Download the video
        Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
        try:
            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                # Remove the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).info("Video size is 0, folder deleted\n")
                return
        except FileNotFoundError:
            # Remove the video folder
            shutil.rmtree(f"./{crawler}/videos/{md_title}")
            Common.logger(log_type, crawler).info("Video file missing, folder deleted\n")
            return
        # Probe duration/resolution; the file lives under the md5(title) folder
        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{md_title}/video.mp4")
        video_dict["duration"] = ffmpeg_dict["duration"]
        video_dict["video_width"] = ffmpeg_dict["width"]
        video_dict["video_height"] = ffmpeg_dict["height"]
        # Download the cover
        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
        # Save video info to txt
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
        # Upload the video
        Common.logger(log_type, crawler).info("Starting video upload...")
        if env == "dev":
            oss_endpoint = "out"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy="推荐榜爬虫策略",
                                                      our_uid=our_uid,
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            oss_endpoint = "inner"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy="推荐榜爬虫策略",
                                                      our_uid=our_uid,
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        if our_video_id is None:
            try:
                # Upload failed; remove the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                return
            except FileNotFoundError:
                return
        # Save video info to the database
        insert_sql = f""" insert into crawler_video(video_id,
                                                    out_user_id,
                                                    platform,
                                                    strategy,
                                                    out_video_id,
                                                    video_title,
                                                    cover_url,
                                                    video_url,
                                                    duration,
                                                    publish_time,
                                                    play_cnt,
                                                    crawler_rule,
                                                    width,
                                                    height)
                          values({our_video_id},
                                 "{video_dict['user_id']}",
                                 "{cls.platform}",
                                 "推荐榜爬虫策略",
                                 "{video_dict['video_id']}",
                                 "{video_dict['video_title']}",
                                 "{video_dict['cover_url']}",
                                 "{video_dict['video_url']}",
                                 {int(video_dict['duration'])},
                                 "{video_dict['publish_time_str']}",
                                 {int(video_dict['play_cnt'])},
                                 '{json.dumps(rule_dict)}',
                                 {int(video_dict['video_width'])},
                                 {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action='')
        Common.logger(log_type, crawler).info('Video info written to the database')
        # Write the video to the Feishu sheet
        Feishu.insert_columns(log_type, crawler, "440018", "ROWS", 1, 2)
        upload_time = int(time.time())
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "推荐榜爬虫策略",
                   video_dict['video_id'],
                   video_dict['video_title'],
                   our_video_link,
                   video_dict['play_cnt'],
                   video_dict['comment_cnt'],
                   video_dict['like_cnt'],
                   video_dict['share_cnt'],
                   video_dict['duration'],
                   f"{video_dict['video_width']}*{video_dict['video_height']}",
                   video_dict['publish_time_str'],
                   video_dict['user_name'],
                   video_dict['user_id'],
                   video_dict['avatar_url'],
                   video_dict['cover_url'],
                   video_dict['video_url']]]
        time.sleep(0.5)
        Feishu.update_values(log_type, crawler, "440018", "E2:Z2", values)
        Common.logger(log_type, crawler).info("Video info saved to the cloud doc\n")


if __name__ == "__main__":
    print(get_config_from_mysql("recommend", "benshanzhufu", "dev", "filter"))
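    # A minimal local crawl might look like this (hypothetical placeholder
    # values for our_uid and rule_dict; real values come from the scheduler):
    # BenshanzhufuRecommend.get_videoList(log_type="recommend",
    #                                     crawler="benshanzhufu",
    #                                     our_uid=0,
    #                                     rule_dict={},
    #                                     env="dev")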