# benshanzhufu_recommend.py
# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/4/13
import json
import os
import random
import shutil
import sys
import time
from hashlib import md5
from urllib import parse
import requests
import urllib3
sys.path.append(os.getcwd())
from common.common import Common
from common.scheduling_db import MysqlHelper
from common.feishu import Feishu
from common.publish import Publish

proxies = {"http": None, "https": None}


class BenshanzhufuRecommend:
    # Pagination parameters
    visitor_key = ""
    page = 1
    platform = "本山祝福"

    # Title / filter word lists (read from the crawler_config table)
    @classmethod
    def benshanzhufu_config(cls, log_type, crawler, text, env):
        select_sql = f"""select * from crawler_config where source="benshanzhufu" """
        contents = MysqlHelper.get_values(log_type, crawler, select_sql, env, action='')
        title_list = []
        filter_list = []
        for content in contents:
            config = content['config']
            config_dict = eval(config)
            for k, v in config_dict.items():
                if k == "title":
                    title_list_config = v.split(",")
                    for title in title_list_config:
                        title_list.append(title)
                if k == "filter":
                    filter_list_config = v.split(",")
                    for filter_word in filter_list_config:
                        filter_list.append(filter_word)
        if text == "title":
            return title_list
        elif text == "filter":
            return filter_list
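    # A minimal sketch of the crawler_config row format this method assumes (the
    # "title" / "filter" keys come from the loop above; the example values are
    # hypothetical, not real production config):
    #   content['config'] = '{"title": "拜年,祝福", "filter": "测试,广告"}'
    #   cls.benshanzhufu_config(log_type, crawler, "filter", env)  # -> ["测试", "广告"]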

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        sql = f""" select * from crawler_video where platform="本山祝福" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    # Fetch videos from the recommend feed
    @classmethod
    def get_videoList(cls, log_type, crawler, oss_endpoint, env):
        while True:
            now = int(time.time() * 1000)
            url = "https://bszf.wentingyou.cn/index.php/v111/index/index?parameter="
            header = {
                "content-time": str(now),
                # "visitorKey": "165086930003741",
                "chatKey": "wx0fb8149da961d3b0",
                "cache-time": str(now),
                "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) "
                              "AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 "
                              "MicroMessenger/8.0.20(0x1800142d) NetType/WIFI Language/zh_CN",
                "Referer": "https://servicewechat.com/wx0fb8149da961d3b0/2/page-frame.html"
            }
            parameter = {
                "page": random.randint(1, 76),
                "ini_id": cls.visitor_key
            }
            params = parse.quote(json.dumps(parameter))
            url = url + str(params)
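            # The query parameter is a URL-encoded JSON object appended to the base URL.
            # For illustration (hypothetical page value, empty ini_id), the final URL ends in:
            #   ...?parameter=%7B%22page%22%3A%203%2C%20%22ini_id%22%3A%20%22%22%7D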
            # try:
            urllib3.disable_warnings()
            r = requests.get(headers=header, url=url, proxies=proxies, verify=False)
            if r.status_code != 200:
                Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.text}\n")
                cls.visitor_key = ""
                cls.page = 1
                return
            elif r.json()['message'] != "list success":
                Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.json()}\n")
                cls.visitor_key = ""
                cls.page = 1
                return
            elif "data" not in r.json():
                Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.json()}\n")
                cls.visitor_key = ""
                cls.page = 1
                return
            elif len(r.json()['data']["list"]) == 0:
                Common.logger(log_type, crawler).info(f"没有更多数据了~ {r.json()}\n")
                cls.visitor_key = ""
                cls.page = 1
                return
            else:
                # Pagination: remember the visitor_key returned by the server
                cls.visitor_key = r.json()["data"]["visitor_key"]
                cls.page += 1
                feeds = r.json()["data"]["list"]
                for i in range(len(feeds)):
                    video_title = feeds[i].get("title", "").strip().replace("\n", "")\
                        .replace("/", "").replace("本山祝福", "").replace(" ", "")\
                        .replace(" ", "").replace("&NBSP", "").replace("\r", "")\
                        .replace("#", "").replace(".", "。").replace("\\", "")\
                        .replace(":", "").replace("*", "").replace("?", "")\
                        .replace("?", "").replace('"', "").replace("<", "")\
                        .replace(">", "").replace("|", "").replace("'", "").replace('"', "")
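                    # The replace chain above strips whitespace, the platform name, and
                    # characters that are unsafe in file names, since the cleaned title is
                    # later used as the download directory name in download_publish().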
                    video_id = str(feeds[i].get("nid", ""))
                    play_cnt = 0
                    comment_cnt = feeds[i].get("commentCount", 0)
                    share_cnt = 0
                    like_cnt = 0
                    publish_time_stamp = feeds[i].get("update_time", 0)
                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                    user_name = "本山祝福"
                    user_id = "benshanzhufu"
                    cover_url = feeds[i].get("video_cover", "")
                    video_url = feeds[i].get("video_url", "")
                    if ".mp4" not in video_url:
                        video_url = ""

                    video_dict = {
                        'video_title': video_title,
                        'video_id': video_id,
                        'play_cnt': play_cnt,
                        'comment_cnt': comment_cnt,
                        'like_cnt': like_cnt,
                        'share_cnt': share_cnt,
                        'publish_time_stamp': publish_time_stamp,
                        'publish_time_str': publish_time_str,
                        'user_name': user_name,
                        'user_id': user_id,
                        'avatar_url': cover_url,
                        'cover_url': cover_url,
                        'video_url': video_url,
                        'session': f"benshanzhufu-{int(time.time())}"
                    }
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")

                    # Skip invalid videos, videos that hit a filter word, and duplicates
                    if video_id == "" or cover_url == "" or video_url == "":
                        Common.logger(log_type, crawler).info("无效视频\n")
                    elif any(str(word) if str(word) in video_title else False for word in cls.benshanzhufu_config(log_type, crawler, "filter", env)) is True:
                        Common.logger(log_type, crawler).info('已中过滤词\n')
                    elif cls.repeat_video(log_type, crawler, video_id, env) != 0:
                        Common.logger(log_type, crawler).info('视频已下载\n')
                    else:
                        cls.download_publish(log_type, crawler, video_dict, oss_endpoint, env)
            # except Exception as e:
            #     Common.logger(log_type, crawler).error(f"get_videoList异常:{e}\n")

    # Download / upload
    @classmethod
    def download_publish(cls, log_type, crawler, video_dict, oss_endpoint, env):
        # try:
        # Download the video
        Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
        if ffmpeg_dict is None:
            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
            return
        video_dict["duration"] = ffmpeg_dict["duration"]
        video_dict["video_width"] = ffmpeg_dict["width"]
        video_dict["video_height"] = ffmpeg_dict["height"]
        # Download the cover image
        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
        # Save video info to a txt file
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
        # Upload the video
        Common.logger(log_type, crawler).info("开始上传视频...")
        our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                  crawler=crawler,
                                                  strategy="推荐榜爬虫策略",
                                                  our_uid="recommend",
                                                  env=env,
                                                  oss_endpoint=oss_endpoint)
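        # our_video_id is the platform-side post id returned by Publish.upload_and_publish;
        # it is used to build the admin detail link below, and None is treated as a failed
        # upload (the local video folder is removed in that case).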
        if env == 'dev':
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        Common.logger(log_type, crawler).info("视频上传完成")
        if our_video_id is None:
            # Delete the video folder
            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
            return

        # Write the video info to the Feishu sheet
        Feishu.insert_columns(log_type, crawler, "440018", "ROWS", 1, 2)
        upload_time = int(time.time())
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "推荐榜爬虫策略",
                   video_dict['video_id'],
                   video_dict['video_title'],
                   our_video_link,
                   video_dict['play_cnt'],
                   video_dict['comment_cnt'],
                   video_dict['like_cnt'],
                   video_dict['share_cnt'],
                   video_dict['duration'],
                   f"{video_dict['video_width']}*{video_dict['video_height']}",
                   video_dict['publish_time_str'],
                   video_dict['user_name'],
                   video_dict['user_id'],
                   video_dict['avatar_url'],
                   video_dict['cover_url'],
                   video_dict['video_url']]]
        time.sleep(0.5)
        Feishu.update_values(log_type, crawler, "440018", "E2:Z2", values)
        Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
        rule_dict = {}
        # Save video info to the database
        insert_sql = f""" insert into crawler_video(video_id,
                        out_user_id,
                        platform,
                        strategy,
                        out_video_id,
                        video_title,
                        cover_url,
                        video_url,
                        duration,
                        publish_time,
                        play_cnt,
                        crawler_rule,
                        width,
                        height)
                        values({our_video_id},
                        "{video_dict['user_id']}",
                        "{cls.platform}",
                        "推荐榜爬虫策略",
                        "{video_dict['video_id']}",
                        "{video_dict['video_title']}",
                        "{video_dict['cover_url']}",
                        "{video_dict['video_url']}",
                        {int(video_dict['duration'])},
                        "{video_dict['publish_time_str']}",
                        {int(video_dict['play_cnt'])},
                        '{json.dumps(rule_dict)}',
                        {int(video_dict['video_width'])},
                        {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action='')
        Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
        # except Exception as e:
        #     Common.logger(log_type, crawler).error(f"download_publish异常:{e}\n")
        #     # Delete the video folder
        #     shutil.rmtree(f"./{crawler}/videos/")
        #     return


if __name__ == "__main__":
    BenshanzhufuRecommend.get_videoList("recommend", "benshanzhufu", "out", "dev")
    pass