kanyikan_recommend0627.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/6/21
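"""
Kanyikan (WeChat 看一看) recommend-feed crawler.

get_videoList() pulls the recommend feed from the WeChat search API, builds a
video_dict per item, filters it (invalid item / rule mismatch / already crawled
via repeat_video), and pushes accepted items to the ETL message queue.
download_publish() is the legacy pipeline kept in this file (its call inside
get_videoList is commented out): it downloads the video and cover, uploads
through Publish, and records the result in MySQL and a Feishu sheet.
"""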
import json
import os
import random
import shutil
import sys
import time
from hashlib import md5

import requests
import urllib3

sys.path.append(os.getcwd())
from common.mq import MQ
from common.common import Common
from common.feishu import Feishu
from common.publish import Publish
from common.scheduling_db import MysqlHelper
from common.public import get_config_from_mysql, download_rule

proxies = {"http": None, "https": None}
class KanyikanRecommend:
    platform = "看一看"
    strategy = "推荐抓取策略"

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        """Dedup check: return how many crawler_video rows already contain this out_video_id."""
        # sql = f""" select * from crawler_video where platform="{cls.platform}" and strategy="{cls.strategy}" and out_video_id="{video_id}" """
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    @classmethod
    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        try:
            Common.logger(log_type, crawler).info(f"正在抓取列表页")
            Common.logging(log_type, crawler, env, f"正在抓取列表页")
            session = Common.get_session(log_type, crawler, env)
            if session is None:
                # No session available: wait briefly, retry recursively, and stop this call.
                time.sleep(1)
                cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
                return
            url = 'https://search.weixin.qq.com/cgi-bin/recwxa/recwxavideolist?'
            header = {
                "Connection": "keep-alive",
                "content-type": "application/json",
                "Accept-Encoding": "gzip,compress,br,deflate",
                "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) "
                              "AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.18(0x18001236) "
                              "NetType/WIFI Language/zh_CN",
                "Referer": "https://servicewechat.com/wxbb9a805eb4f9533c/234/page-frame.html",
            }
            params = {
                'session': session,
                "offset": 0,
                "wxaVersion": "3.9.2",
                "count": "10",
                "channelid": "208",
                "scene": '310',
                "subscene": '1089',
                "clientVersion": '8.0.18',
                "sharesearchid": '0',
                "nettype": 'wifi',
                "switchprofile": "0",
                "switchnewuser": "0",
            }
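            # Expected response shape, inferred from the parsing below (illustrative only,
            # not an authoritative or exhaustive schema of the recwxavideolist API):
            #   {"data": {"items": [{"title": ..., "date": <unix ts>, "videoId": ...,
            #                        "playCount": ..., "liked_cnt": ..., "comment_cnt": ...,
            #                        "shared_cnt": ..., "mediaDuration": ...,
            #                        "short_video_info": {"width": ..., "height": ...},
            #                        "source": ..., "openid": ..., "bizIcon": ..., "thumbUrl": ...,
            #                        "videoInfo": {"videoCdnInfo": {"mpInfo"/"ctnInfo": {"urlInfo": [{"url": ...}, ...]}}}},
            #                       ...]}}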
            urllib3.disable_warnings()
            response = requests.get(url=url, headers=header, params=params, proxies=proxies, verify=False)
            if "data" not in response.text:
                Common.logger(log_type, crawler).info("获取视频list时,session过期,随机睡眠 31-40 秒")
                Common.logging(log_type, crawler, env, "获取视频list时,session过期,随机睡眠 31-40 秒")
                # Session expired / empty response: sleep 31-40 seconds at random, then retry.
                time.sleep(random.randint(31, 40))
                cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
                return
            elif "items" not in response.json()["data"]:
                Common.logger(log_type, crawler).info(f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
                Common.logging(log_type, crawler, env, f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
                # Empty item list: sleep 1-3 minutes at random, then retry.
                time.sleep(random.randint(60, 180))
                cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
                return
            feeds = response.json().get("data", {}).get("items", "")
            if feeds == "":
                Common.logger(log_type, crawler).info(f"feeds:{feeds}")
                Common.logging(log_type, crawler, env, f"feeds:{feeds}")
                return
            for i in range(len(feeds)):
                try:
                    # Sanitize the title: strip whitespace and characters that break file paths / sheets.
                    video_title = feeds[i].get("title", "").strip().replace("\n", "") \
                        .replace("/", "").replace("\\", "").replace("\r", "") \
                        .replace(":", "").replace("*", "").replace("?", "") \
                        .replace("?", "").replace('"', "").replace("<", "") \
                        .replace(">", "").replace("|", "").replace(" ", "") \
                        .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
                        .replace("'", "").replace("#", "").replace("Merge", "")
                    publish_time_stamp = feeds[i].get("date", 0)
                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                    # Get the playback URL
                    if "videoInfo" not in feeds[i]:
                        video_url = ""
                    elif "mpInfo" in feeds[i]["videoInfo"]["videoCdnInfo"]:
                        if len(feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"]) > 2:
                            video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][2]["url"]
                        else:
                            video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][0]["url"]
                    elif "ctnInfo" in feeds[i]["videoInfo"]["videoCdnInfo"]:
                        video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["ctnInfo"]["urlInfo"][0]["url"]
                    else:
                        video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["urlInfo"][0]["url"]
                    video_dict = {
                        "video_title": video_title,
                        "video_id": feeds[i].get("videoId", ""),
                        "play_cnt": feeds[i].get("playCount", 0),
                        "like_cnt": feeds[i].get("liked_cnt", 0),
                        "comment_cnt": feeds[i].get("comment_cnt", 0),
                        "share_cnt": feeds[i].get("shared_cnt", 0),
                        "duration": feeds[i].get("mediaDuration", 0),
                        "video_width": feeds[i].get("short_video_info", {}).get("width", 0),
                        "video_height": feeds[i].get("short_video_info", {}).get("height", 0),
                        "publish_time_stamp": publish_time_stamp,
                        "publish_time_str": publish_time_str,
                        "user_name": feeds[i].get("source", "").strip().replace("\n", ""),
                        "user_id": feeds[i].get("openid", ""),
                        "avatar_url": feeds[i].get("bizIcon", ""),
                        "cover_url": feeds[i].get("thumbUrl", ""),
                        "video_url": video_url,
                        "session": session,
                    }
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
                    if video_dict["video_id"] == "" or video_dict["video_title"] == "" or video_dict["video_url"] == "":
                        Common.logger(log_type, crawler).info("无效视频\n")
                        Common.logging(log_type, crawler, env, "无效视频\n")
                    elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
                        Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                    elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                        Common.logger(log_type, crawler).info('视频已下载\n')
                        Common.logging(log_type, crawler, env, '视频已下载\n')
                    else:
                        # cls.download_publish(log_type=log_type,
                        #                      crawler=crawler,
                        #                      our_uid=our_uid,
                        #                      video_dict=video_dict,
                        #                      rule_dict=rule_dict,
                        #                      env=env)
                        # Map the fields to the ETL schema and push the item to the message queue.
                        video_dict["out_user_id"] = video_dict["user_id"]
                        video_dict["platform"] = crawler
                        video_dict["strategy"] = log_type
                        video_dict["out_video_id"] = video_dict["video_id"]
                        video_dict["width"] = video_dict["video_width"]
                        video_dict["height"] = video_dict["video_height"]
                        video_dict["crawler_rule"] = json.dumps(rule_dict)
                        video_dict["user_id"] = our_uid
                        video_dict["publish_time"] = video_dict["publish_time_str"]
                        mq.send_msg(video_dict)
                except Exception as e:
                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                    Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
        except Exception as e:
            Common.logger(log_type, crawler).error(f"抓取列表页时异常:{e}\n")
            Common.logging(log_type, crawler, env, f"抓取列表页时异常:{e}\n")
    @classmethod
    def download_publish(cls, log_type, crawler, our_uid, video_dict, rule_dict, env):
        # Download the video
        Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
        try:
            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                # Empty download: delete the video folder and stop.
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                Common.logging(log_type, crawler, env, "视频size=0,删除成功\n")
                return
        except FileNotFoundError:
            # File missing: delete the video folder and stop.
            shutil.rmtree(f"./{crawler}/videos/{md_title}")
            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
            Common.logging(log_type, crawler, env, "视频文件不存在,删除文件夹成功\n")
            return
        # Download the cover image
        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
        # Save video info to a local txt file
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
        # Upload the video
        Common.logger(log_type, crawler).info("开始上传视频...")
        Common.logging(log_type, crawler, env, "开始上传视频...")
        if env == "dev":
            oss_endpoint = "out"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy=cls.strategy,
                                                      our_uid=our_uid,
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy=cls.strategy,
                                                      our_uid=our_uid,
                                                      env=env,
                                                      oss_endpoint="out")
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        if our_video_id is None:
            try:
                # Upload failed: delete the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                return
            except FileNotFoundError:
                return
        # Save video info to the database
        insert_sql = f""" insert into crawler_video(video_id,
                                                    user_id,
                                                    out_user_id,
                                                    platform,
                                                    strategy,
                                                    out_video_id,
                                                    video_title,
                                                    cover_url,
                                                    video_url,
                                                    duration,
                                                    publish_time,
                                                    play_cnt,
                                                    crawler_rule,
                                                    width,
                                                    height)
                                                    values({our_video_id},
                                                    {our_uid},
                                                    "{video_dict['user_id']}",
                                                    "{cls.platform}",
                                                    "{cls.strategy}",
                                                    "{video_dict['video_id']}",
                                                    "{video_dict['video_title']}",
                                                    "{video_dict['cover_url']}",
                                                    "{video_dict['video_url']}",
                                                    {int(video_dict['duration'])},
                                                    "{video_dict['publish_time_str']}",
                                                    {int(video_dict['play_cnt'])},
                                                    '{json.dumps(rule_dict)}',
                                                    {int(video_dict['video_width'])},
                                                    {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action="")
        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
        Common.logging(log_type, crawler, env, '视频信息写入数据库成功')
        # Save video info to the Feishu sheet:
        Feishu.insert_columns(log_type, crawler, "20ce0c", "ROWS", 1, 2)
        # Kanyikan+ video-ID worksheet, write the data into the first row
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
                   "推荐榜",
                   str(video_dict["video_id"]),
                   str(video_dict["video_title"]),
                   our_video_link,
                   video_dict["play_cnt"],
                   video_dict["comment_cnt"],
                   video_dict["like_cnt"],
                   video_dict["share_cnt"],
                   video_dict["duration"],
                   f'{video_dict["video_width"]}*{video_dict["video_height"]}',
                   video_dict["publish_time_str"],
                   video_dict["user_name"],
                   video_dict["user_id"],
                   video_dict["avatar_url"],
                   video_dict["cover_url"],
                   video_dict["video_url"]]]
        time.sleep(0.5)
        Feishu.update_values(log_type, crawler, "20ce0c", "F2:Z2", values)
        Common.logger(log_type, crawler).info("视频信息保存至云文档成功\n")
        Common.logging(log_type, crawler, env, "视频信息保存至云文档成功\n")

if __name__ == "__main__":
    print(get_config_from_mysql(log_type="recommend",
                                source="kanyikan",
                                env="dev",
                                text="filter",
                                action=""))
    pass
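
# Usage sketch for the crawler itself, kept commented out so the module's behaviour
# is unchanged. The our_uid value and rule_dict contents are hypothetical placeholders;
# the real rule format is whatever common.public.download_rule expects.
#
# if __name__ == "__main__":
#     KanyikanRecommend.get_videoList(log_type="recommend",
#                                     crawler="kanyikan",
#                                     our_uid=123456,   # hypothetical Piaoquan user id
#                                     rule_dict={},     # illustrative placeholder rule
#                                     env="dev")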