# download_weishi.py
  1. # -*- coding: utf-8 -*-
  2. # @Author: wangkun
  3. # @Time: 2022/4/8
  4. import json
  5. import time
  6. import requests
  7. import urllib3
  8. from main.common import Common
  9. from main.publish import Publish
  10. proxies = {"http": None, "https": None}
  11. class Weishi:
  12. @staticmethod
  13. def weishi_download_rule(d_duration, d_width, d_height, d_play_cnt):
  14. """
  15. 下载视频的基本规则
  16. :param d_duration: 时长
  17. :param d_width: 宽
  18. :param d_height: 高
  19. :param d_play_cnt: 播放量
  20. :return: 满足规则,返回 True;反之,返回 False
  21. """
  22. if 600 >= int(float(d_duration)) >= 60:
  23. if int(d_width) >= 720 or int(d_height) >= 720:
  24. if int(d_play_cnt) >= 100000:
  25. return True
  26. else:
  27. return False
  28. return False
  29. return False
  30. @classmethod
  31. def get_weishi_recommend(cls):
  32. """
  33. 从微视小程序首页推荐获取视频list:
  34. 1.在 weishi_videoid.txt 中去重
  35. 2.在 weishi_feeds.txt 中去重
  36. 3.添加视频信息到 weishi_feeds.txt
  37. """
  38. url = "https://api.weishi.qq.com/trpc.weishi.weishi_h5_proxy.weishi_h5_proxy/WxminiGetFeedList"
  39. cookies = {
  40. "wesee_authtype": "3",
  41. "wesee_openid": "oWGa05FrwkuUvT-4n1qGeQuhVsc8",
  42. "wesee_openkey": "8c3ec202f5d679fb5ee6d9f643640d9a2580ba504612e2d979a881d3169caf189e2a5c1d532eeff172bc21cf2"
  43. "6230941ccbc10243a7879e8165ca608c17060de606a6d08afe0a3abd5250629314f9a99e9d1003b201bf5ec",
  44. "wesee_personid": "1593522421826902",
  45. "wesee_refresh_token": "",
  46. "wesee_access_token": "8c3ec202f5d679fb5ee6d9f643640d9a2580ba504612e2d979a881d3169caf18"
  47. "9e2a5c1d532eeff172bc21cf26230941ccbc10243a7879e8165ca608c17060de6"
  48. "06a6d08afe0a3abd5250629314f9a99e9d1003b201bf5ec",
  49. "wesee_thr_appid": "wx75ee9f19b93e5c46",
  50. "wesee_ichid": "8"
  51. }
  52. json_data = {
  53. "req_body": {
  54. "requestType": 16,
  55. "isrefresh": 0,
  56. "isfirst": 0,
  57. "attachInfo": "",
  58. "scene_id": 22,
  59. "requestExt": {
  60. "mini_openid": "oWGa05FrwkuUvT-4n1qGeQuhVsc8",
  61. "notLogin-personid": "1593522421826902"
  62. }
  63. },
  64. "req_header": {
  65. "mapExt": "{\"imageSize\":\"480\",\"adaptScene\":\"PicHDWebpLimitScene\"}"
  66. }
  67. }
  68. try:
  69. urllib3.disable_warnings()
  70. r = requests.post(url=url, cookies=cookies, json=json_data, proxies=proxies, verify=False)
  71. response = json.loads(r.content.decode("utf8"))
  72. if "rsp_body" not in response:
  73. Common.crawler_log().info("获取微视视频 list 出错:{},休眠 10s".format(response))
  74. time.sleep(10)
  75. else:
  76. feeds = response["rsp_body"]["feeds"]
  77. for i in range(len(feeds)):
  78. if "video" not in feeds[i]:
  79. Common.crawler_log().info("无视频信息")
  80. else:
  81. # 视频 ID
  82. if "id" not in feeds[i]["video"]:
  83. video_id = "0"
  84. Common.crawler_log().info("video_id:{}".format(video_id))
  85. else:
  86. video_id = feeds[i]["video"]["id"]
  87. Common.crawler_log().info("video_id:{}".format(video_id))
  88. # 视频标题
  89. video_title = feeds[i]["desc"].strip().replace("\n", "") \
  90. .replace("/", "").replace("快手", "").replace(" ", "") \
  91. .replace(" ", "").replace("&NBSP", "").replace("\r", "")
  92. Common.crawler_log().info("video_title:{}".format(video_title))
  93. # 视频发布时间
  94. if "createTime" not in feeds[i]:
  95. video_send_time = "0"
  96. Common.crawler_log().info("video_send_time:不存在")
  97. else:
  98. video_send_time = int(feeds[i]["createTime"])*1000
  99. Common.crawler_log().info(
  100. "video_send_time:{}".format(time.strftime(
  101. "%Y-%m-%d %H:%M:%S", time.localtime(int(video_send_time)/1000))))
  102. # 视频封面地址
  103. if len(feeds[i]["images"]) == 0:
  104. cover_url = "0"
  105. Common.crawler_log().info("cover_url:不存在")
  106. else:
  107. cover_url = feeds[i]["images"][0]["url"]
  108. Common.crawler_log().info("cover_url:{}".format(cover_url))
  109. # 视频播放地址
  110. if "url" not in feeds[i]["video"]:
  111. video_url = "0"
  112. Common.crawler_log().info("video_url:不存在")
  113. else:
  114. video_url = feeds[i]["video"]["url"]
  115. Common.crawler_log().info("video_url:{}".format(video_url))
  116. # 视频分辨率
  117. if "width" not in feeds[i]["video"] or "height" not in feeds[i]["video"]:
  118. video_width = "0"
  119. video_height = "0"
  120. video_resolution = str(video_width) + "*" + str(video_height)
  121. Common.crawler_log().info("无分辨率")
  122. else:
  123. video_width = feeds[i]["video"]["width"]
  124. video_height = feeds[i]["video"]["height"]
  125. video_resolution = str(video_width) + "*" + str(video_height)
  126. Common.crawler_log().info("video_resolution:{}".format(video_resolution))
  127. # 视频时长
  128. if "duration" not in feeds[i]["video"]:
  129. video_duration = "0"
  130. Common.crawler_log().info("video_duration:不存在")
  131. else:
  132. video_duration = int(int(feeds[i]["video"]["duration"]) / 1000)
  133. Common.crawler_log().info("video_duration:{}秒".format(video_duration))
  134. # 播放数
  135. if "playNum" not in feeds[i]["ugcData"]:
  136. video_play_cnt = "0"
  137. Common.crawler_log().info("video_play_cnt:{}".format(video_play_cnt))
  138. else:
  139. video_play_cnt = feeds[i]["ugcData"]["playNum"]
  140. Common.crawler_log().info("video_play_cnt:{}".format(video_play_cnt))
  141. # 点赞数
  142. if "dingCount" not in feeds[i]["ugcData"]:
  143. video_like_cnt = "0"
  144. Common.crawler_log().info("video_like_cnt:{}".format(video_like_cnt))
  145. else:
  146. video_like_cnt = feeds[i]["ugcData"]["dingCount"]
  147. Common.crawler_log().info("video_like_cnt:{}".format(video_like_cnt))
  148. # 分享数
  149. if "shareNum" not in feeds[i]["ugcData"]:
  150. video_share_cnt = "0"
  151. Common.crawler_log().info("video_share_cnt:{}".format(video_share_cnt))
  152. else:
  153. video_share_cnt = feeds[i]["ugcData"]["shareNum"]
  154. Common.crawler_log().info("video_share_cnt:{}".format(video_share_cnt))
  155. # 评论数
  156. if "totalCommentNum" not in feeds[i]["ugcData"]:
  157. video_comment_cnt = "0"
  158. Common.crawler_log().info("video_comment_cnt:{}".format(video_comment_cnt))
  159. else:
  160. video_comment_cnt = feeds[i]["ugcData"]["totalCommentNum"]
  161. Common.crawler_log().info("video_comment_cnt:{}".format(video_comment_cnt))
  162. # 用户 ID
  163. user_id = feeds[i]["poster"]["id"]
  164. Common.crawler_log().info("user_id:{}".format(user_id))
  165. # 用户昵称
  166. user_name = feeds[i]["poster"]["nick"].strip().replace("\n", "") \
  167. .replace("/", "").replace("快手", "").replace(" ", "") \
  168. .replace(" ", "").replace("&NBSP", "").replace("\r", "")
  169. Common.crawler_log().info("user_name:{}".format(user_name))
  170. # 用户头像地址
  171. if "thumbURL" not in feeds[i]["material"] and "avatar" not in feeds[i]["poster"]:
  172. head_url = "0"
  173. Common.crawler_log().info("head_url:不存在")
  174. elif "thumbURL" in feeds[i]["material"]:
  175. head_url = feeds[i]["material"]["thumbURL"]
  176. Common.crawler_log().info("head_url:{}".format(head_url))
  177. else:
  178. head_url = feeds[i]["poster"]["avatar"]
  179. Common.crawler_log().info("head_url:{}".format(head_url))
  180. # 从 weishi_videoid.txt 中去重
  181. videos_ids = Common.read_txt("weishi_videoid.txt")
  182. if video_id in [v_id.strip() for v_id in videos_ids]:
  183. Common.crawler_log().info("该视频已下载:{}".format(video_title))
  184. pass
  185. else:
  186. Common.crawler_log().info("该视频未下载:{}".format(video_title))
  187. # 从 weishi_feeds.txt 中去重
  188. contents = Common.read_txt("weishi_feeds.txt")
  189. # 若 weishi_feeds.txt 为空时,直接保存
  190. if len(contents) == 0 and head_url != "0" \
  191. and cover_url != "0" and video_url != "0" \
  192. and video_duration != "0" and video_id != "0":
  193. basic_time = int(time.time())
  194. Common.crawler_log().info("添加视频信息至weishi_feeds.txt:{}".format(video_title))
  195. with open(r"./txt/weishi_feeds.txt", "a", encoding="UTF-8") as f_a:
  196. f_a.write(str(basic_time) + " + " +
  197. str(video_id) + " + " +
  198. str(video_play_cnt) + " + " +
  199. str(video_title) + " + " +
  200. str(video_duration) + " + " +
  201. str(video_comment_cnt) + " + " +
  202. str(video_like_cnt) + " + " +
  203. str(video_share_cnt) + " + " +
  204. str(video_resolution) + " + " +
  205. str(video_send_time) + " + " +
  206. str(user_name) + " + " +
  207. str(head_url) + " + " +
  208. str(cover_url) + " + " +
  209. str(video_url) + " + " +
  210. str(user_id) + " + " +
  211. str("oWGa05FrwkuUvT-4n1qGeQuhVsc8") + "\n")
  212. else:
  213. if video_id in [content.split(" + ")[1] for content in contents]:
  214. Common.crawler_log().info("该视频已在 weishi_feeds.txt 中:{}".format(video_title))
  215. elif head_url == "0" or cover_url == "0" \
  216. or video_url == "0" or video_duration == "0" or video_id == "0":
  217. Common.crawler_log().info("视频封面/播放地址/播放时长/用户头像不存在")
  218. else:
  219. basic_time = int(time.time())
  220. Common.crawler_log().info("添加视频信息至weishi_feeds.txt:{}".format(video_title))
  221. with open(r"./txt/weishi_feeds.txt", "a", encoding="UTF-8") as f_a:
  222. f_a.write(str(basic_time) + " + " +
  223. str(video_id) + " + " +
  224. str(video_play_cnt) + " + " +
  225. str(video_title) + " + " +
  226. str(video_duration) + " + " +
  227. str(video_comment_cnt) + " + " +
  228. str(video_like_cnt) + " + " +
  229. str(video_share_cnt) + " + " +
  230. str(video_resolution) + " + " +
  231. str(video_send_time) + " + " +
  232. str(user_name) + " + " +
  233. str(head_url) + " + " +
  234. str(cover_url) + " + " +
  235. str(video_url) + " + " +
  236. str(user_id) + " + " +
  237. str("oWGa05FrwkuUvT-4n1qGeQuhVsc8") + "\n")
  238. except Exception as e:
  239. Common.crawler_log().error("获取微视视频 list 异常:{}".format(e))
  240. @classmethod
  241. def download_weishi_play_video(cls, env):
  242. """
  243. 下载播放量视频
  244. 测试环境:env == dev
  245. 正式环境:env == prod
  246. """
  247. videos = Common.read_txt("weishi_feeds.txt")
  248. for video in videos:
  249. download_video_id = video.strip().split(" + ")[1]
  250. download_video_title = video.strip().split(" + ")[3]
  251. download_video_duration = video.strip().split(" + ")[4]
  252. download_video_play_cnt = video.strip().split(" + ")[2]
  253. download_video_comment_cnt = video.strip().split(" + ")[5]
  254. download_video_like_cnt = video.strip().split(" + ")[6]
  255. download_video_share_cnt = video.strip().split(" + ")[7]
  256. download_video_resolution = video.strip().split(" + ")[8]
  257. download_video_width = download_video_resolution.split("*")[0]
  258. download_video_height = download_video_resolution.split("*")[-1]
  259. download_video_send_time = video.strip().split(" + ")[9]
  260. download_user_name = video.strip().split(" + ")[10]
  261. download_head_url = video.strip().split(" + ")[11]
  262. download_cover_url = video.strip().split(" + ")[12]
  263. download_video_url = video.strip().split(" + ")[13]
  264. download_video_session = video.strip().split(" + ")[-1]
  265. if cls.weishi_download_rule(download_video_duration, download_video_width,
  266. download_video_height, download_video_play_cnt) is True:
  267. Common.crawler_log().info("开始下载视频:{}".format(download_video_title))
  268. # 下载封面
  269. Common.download_method(text="cover", d_name=download_video_title, d_url=download_cover_url)
  270. # 下载视频
  271. Common.download_method(text="video", d_name=download_video_title, d_url=download_video_url)
  272. # 保存视频信息至 weishi_videoid.txt
  273. with open("./txt/weishi_videoid.txt", "a", encoding="UTF-8") as fa:
  274. fa.write(download_video_id + "\n")
  275. # 保存视频信息至 "./videos/{download_video_title}/info.txt"
  276. with open("./videos/" + download_video_title + "/info.txt", "a", encoding="UTF-8") as f_a:
  277. f_a.write(str(download_video_id) + "\n" +
  278. str(download_video_title) + "\n" +
  279. str(download_video_duration) + "\n" +
  280. str(download_video_play_cnt) + "\n" +
  281. str(download_video_comment_cnt) + "\n" +
  282. str(download_video_like_cnt) + "\n" +
  283. str(download_video_share_cnt) + "\n" +
  284. str(download_video_resolution) + "\n" +
  285. str(download_video_send_time) + "\n" +
  286. str(download_user_name) + "\n" +
  287. str(download_head_url) + "\n" +
  288. str(download_video_url) + "\n" +
  289. str(download_cover_url) + "\n" +
  290. str(download_video_session))
  291. # 上传视频
  292. if env == "dev":
  293. Common.crawler_log().info("开始上传视频:{}".format(download_video_title))
  294. Publish.upload_and_publish("dev", "play")
  295. elif env == "prod":
  296. Common.crawler_log().info("开始上传视频:{}".format(download_video_title))
  297. Publish.upload_and_publish("prod", "play")
  298. # 删除该视频在weishi_feeds.txt中的信息
  299. Common.crawler_log().info("删除该视频在weishi_feeds.txt中的信息:{}".format(download_video_title))
  300. with open(r"./txt/weishi_feeds.txt", "r", encoding="UTF-8") as f_r:
  301. lines = f_r.readlines()
  302. with open(r"./txt/weishi_feeds.txt", "w", encoding="utf-8") as f_w:
  303. for line in lines:
  304. if download_video_id in line.split(" + ")[1]:
  305. continue
  306. f_w.write(line)
  307. else:
  308. # 删除该视频在weishi_feeds.txt中的信息
  309. Common.crawler_log().info("该视频不满足下载规则,删除在weishi_feeds.txt中的信息:{}".format(download_video_title))
  310. with open(r"./txt/weishi_feeds.txt", "r", encoding="UTF-8") as f_r:
  311. lines = f_r.readlines()
  312. with open(r"./txt/weishi_feeds.txt", "w", encoding="utf-8") as f_w:
  313. for line in lines:
  314. if download_video_id in line.split(" + ")[1]:
  315. continue
  316. f_w.write(line)
  317. if __name__ == "__main__":
  318. weishi = Weishi()
  319. weishi.get_weishi_recommend()