# download_weishi.py
  1. # -*- coding: utf-8 -*-
  2. # @Author: wangkun
  3. # @Time: 2022/4/8
  4. import json
  5. import time
  6. import requests
  7. import urllib3
  8. from main.common import Common
  9. from main.publish import Publish
  10. class Weishi:
  11. @staticmethod
  12. def weishi_download_rule(d_duration, d_width, d_height, d_play_cnt):
  13. """
  14. 下载视频的基本规则
  15. :param d_duration: 时长
  16. :param d_width: 宽
  17. :param d_height: 高
  18. :param d_play_cnt: 播放量
  19. :return: 满足规则,返回 True;反之,返回 False
  20. """
  21. if 600 >= int(float(d_duration)) >= 60:
  22. if int(d_width) >= 720 or int(d_height) >= 720:
  23. if int(d_play_cnt) >= 100000:
  24. return True
  25. else:
  26. return False
  27. return False
  28. return False
  29. @classmethod
  30. def get_weishi_recommend(cls):
  31. """
  32. 从微视小程序首页推荐获取视频list:
  33. 1.在 weishi_videoid.txt 中去重
  34. 2.在 weishi_feeds.txt 中去重
  35. 3.添加视频信息到 weishi_feeds.txt
  36. """
  37. url = "https://api.weishi.qq.com/trpc.weishi.weishi_h5_proxy.weishi_h5_proxy/WxminiGetFeedList"
  38. cookies = {
  39. "wesee_authtype": "3",
  40. "wesee_openid": "oWGa05FrwkuUvT-4n1qGeQuhVsc8",
  41. "wesee_openkey": "8c3ec202f5d679fb5ee6d9f643640d9a2580ba504612e2d979a881d3169caf189e2a5c1d532eeff172bc21cf2"
  42. "6230941ccbc10243a7879e8165ca608c17060de606a6d08afe0a3abd5250629314f9a99e9d1003b201bf5ec",
  43. "wesee_personid": "1593522421826902",
  44. "wesee_refresh_token": "",
  45. "wesee_access_token": "8c3ec202f5d679fb5ee6d9f643640d9a2580ba504612e2d979a881d3169caf18"
  46. "9e2a5c1d532eeff172bc21cf26230941ccbc10243a7879e8165ca608c17060de6"
  47. "06a6d08afe0a3abd5250629314f9a99e9d1003b201bf5ec",
  48. "wesee_thr_appid": "wx75ee9f19b93e5c46",
  49. "wesee_ichid": "8"
  50. }
  51. json_data = {
  52. "req_body": {
  53. "requestType": 16,
  54. "isrefresh": 0,
  55. "isfirst": 0,
  56. "attachInfo": "",
  57. "scene_id": 22,
  58. "requestExt": {
  59. "mini_openid": "oWGa05FrwkuUvT-4n1qGeQuhVsc8",
  60. "notLogin-personid": "1593522421826902"
  61. }
  62. },
  63. "req_header": {
  64. "mapExt": "{\"imageSize\":\"480\",\"adaptScene\":\"PicHDWebpLimitScene\"}"
  65. }
  66. }
  67. try:
  68. urllib3.disable_warnings()
  69. r = requests.post(url=url, cookies=cookies, json=json_data, verify=False)
  70. response = json.loads(r.content.decode("utf8"))
  71. if "rsp_body" not in response:
  72. Common.crawler_log().info("获取微视视频 list 出错:{},休眠 10s".format(response))
  73. time.sleep(10)
  74. else:
  75. feeds = response["rsp_body"]["feeds"]
  76. for i in range(len(feeds)):
  77. if "video" not in feeds[i]:
  78. Common.crawler_log().info("无视频信息")
  79. else:
  80. # 视频 ID
  81. if "id" not in feeds[i]["video"]:
  82. video_id = "0"
  83. Common.crawler_log().info("video_id:{}".format(video_id))
  84. else:
  85. video_id = feeds[i]["video"]["id"]
  86. Common.crawler_log().info("video_id:{}".format(video_id))
  87. # 视频标题
  88. video_title = feeds[i]["desc"].strip().replace("\n", "") \
  89. .replace("/", "").replace("快手", "").replace(" ", "") \
  90. .replace(" ", "").replace("&NBSP", "").replace("\r", "")
  91. Common.crawler_log().info("video_title:{}".format(video_title))
  92. # 视频发布时间
  93. if "createTime" not in feeds[i]:
  94. video_send_time = "0"
  95. Common.crawler_log().info("video_send_time:不存在")
  96. else:
  97. video_send_time = int(feeds[i]["createTime"])*1000
  98. Common.crawler_log().info(
  99. "video_send_time:{}".format(time.strftime(
  100. "%Y-%m-%d %H:%M:%S", time.localtime(int(video_send_time)/1000))))
  101. # 视频封面地址
  102. if len(feeds[i]["images"]) == 0:
  103. cover_url = "0"
  104. Common.crawler_log().info("cover_url:不存在")
  105. else:
  106. cover_url = feeds[i]["images"][0]["url"]
  107. Common.crawler_log().info("cover_url:{}".format(cover_url))
  108. # 视频播放地址
  109. if "url" not in feeds[i]["video"]:
  110. video_url = "0"
  111. Common.crawler_log().info("video_url:不存在")
  112. else:
  113. video_url = feeds[i]["video"]["url"]
  114. Common.crawler_log().info("video_url:{}".format(video_url))
  115. # 视频分辨率
  116. if "width" not in feeds[i]["video"] or "height" not in feeds[i]["video"]:
  117. video_width = "0"
  118. video_height = "0"
  119. video_resolution = str(video_width) + "*" + str(video_height)
  120. Common.crawler_log().info("无分辨率")
  121. else:
  122. video_width = feeds[i]["video"]["width"]
  123. video_height = feeds[i]["video"]["height"]
  124. video_resolution = str(video_width) + "*" + str(video_height)
  125. Common.crawler_log().info("video_resolution:{}".format(video_resolution))
  126. # 视频时长
  127. if "duration" not in feeds[i]["video"]:
  128. video_duration = "0"
  129. Common.crawler_log().info("video_duration:不存在")
  130. else:
  131. video_duration = int(int(feeds[i]["video"]["duration"]) / 1000)
  132. Common.crawler_log().info("video_duration:{}秒".format(video_duration))
  133. # 播放数
  134. if "playNum" not in feeds[i]["ugcData"]:
  135. video_play_cnt = "0"
  136. Common.crawler_log().info("video_play_cnt:{}".format(video_play_cnt))
  137. else:
  138. video_play_cnt = feeds[i]["ugcData"]["playNum"]
  139. Common.crawler_log().info("video_play_cnt:{}".format(video_play_cnt))
  140. # 点赞数
  141. if "dingCount" not in feeds[i]["ugcData"]:
  142. video_like_cnt = "0"
  143. Common.crawler_log().info("video_like_cnt:{}".format(video_like_cnt))
  144. else:
  145. video_like_cnt = feeds[i]["ugcData"]["dingCount"]
  146. Common.crawler_log().info("video_like_cnt:{}".format(video_like_cnt))
  147. # 分享数
  148. if "shareNum" not in feeds[i]["ugcData"]:
  149. video_share_cnt = "0"
  150. Common.crawler_log().info("video_share_cnt:{}".format(video_share_cnt))
  151. else:
  152. video_share_cnt = feeds[i]["ugcData"]["shareNum"]
  153. Common.crawler_log().info("video_share_cnt:{}".format(video_share_cnt))
  154. # 评论数
  155. if "totalCommentNum" not in feeds[i]["ugcData"]:
  156. video_comment_cnt = "0"
  157. Common.crawler_log().info("video_comment_cnt:{}".format(video_comment_cnt))
  158. else:
  159. video_comment_cnt = feeds[i]["ugcData"]["totalCommentNum"]
  160. Common.crawler_log().info("video_comment_cnt:{}".format(video_comment_cnt))
  161. # 用户 ID
  162. user_id = feeds[i]["poster"]["id"]
  163. Common.crawler_log().info("user_id:{}".format(user_id))
  164. # 用户昵称
  165. user_name = feeds[i]["poster"]["nick"].strip().replace("\n", "") \
  166. .replace("/", "").replace("快手", "").replace(" ", "") \
  167. .replace(" ", "").replace("&NBSP", "").replace("\r", "")
  168. Common.crawler_log().info("user_name:{}".format(user_name))
  169. # 用户头像地址
  170. if "thumbURL" not in feeds[i]["material"] and "avatar" not in feeds[i]["poster"]:
  171. head_url = "0"
  172. Common.crawler_log().info("head_url:不存在")
  173. elif "thumbURL" in feeds[i]["material"]:
  174. head_url = feeds[i]["material"]["thumbURL"]
  175. Common.crawler_log().info("head_url:{}".format(head_url))
  176. else:
  177. head_url = feeds[i]["poster"]["avatar"]
  178. Common.crawler_log().info("head_url:{}".format(head_url))
  179. # 从 weishi_videoid.txt 中去重
  180. videos_ids = Common.read_txt("weishi_videoid.txt")
  181. if video_id in [v_id.strip() for v_id in videos_ids]:
  182. Common.crawler_log().info("该视频已下载:{}".format(video_title))
  183. pass
  184. else:
  185. Common.crawler_log().info("该视频未下载:{}".format(video_title))
  186. # 从 weishi_feeds.txt 中去重
  187. contents = Common.read_txt("weishi_feeds.txt")
  188. # 若 weishi_feeds.txt 为空时,直接保存
  189. if len(contents) == 0 and head_url != "0" \
  190. and cover_url != "0" and video_url != "0" \
  191. and video_duration != "0" and video_id != "0":
  192. basic_time = int(time.time())
  193. Common.crawler_log().info("添加视频信息至weishi_feeds.txt:{}".format(video_title))
  194. with open("./txt/weishi_feeds.txt", "a", encoding="utf8") as f_a:
  195. f_a.write(str(basic_time) + " + " +
  196. str(video_id) + " + " +
  197. str(video_play_cnt) + " + " +
  198. str(video_title) + " + " +
  199. str(video_duration) + " + " +
  200. str(video_comment_cnt) + " + " +
  201. str(video_like_cnt) + " + " +
  202. str(video_share_cnt) + " + " +
  203. str(video_resolution) + " + " +
  204. str(video_send_time) + " + " +
  205. str(user_name) + " + " +
  206. str(head_url) + " + " +
  207. str(cover_url) + " + " +
  208. str(video_url) + " + " +
  209. str(user_id) + " + " +
  210. str("oWGa05FrwkuUvT-4n1qGeQuhVsc8") + "\n")
  211. else:
  212. if video_id in [content.split(" + ")[1] for content in contents]:
  213. Common.crawler_log().info("该视频已在 weishi_feeds.txt 中:{}".format(video_title))
  214. elif head_url == "0" or cover_url == "0" \
  215. or video_url == "0" or video_duration == "0" or video_id == "0":
  216. Common.crawler_log().info("视频封面/播放地址/播放时长/用户头像不存在")
  217. else:
  218. basic_time = int(time.time())
  219. Common.crawler_log().info("添加视频信息至weishi_feeds.txt:{}".format(video_title))
  220. with open("./txt/weishi_feeds.txt", "a", encoding="utf8") as f_a:
  221. f_a.write(str(basic_time) + " + " +
  222. str(video_id) + " + " +
  223. str(video_play_cnt) + " + " +
  224. str(video_title) + " + " +
  225. str(video_duration) + " + " +
  226. str(video_comment_cnt) + " + " +
  227. str(video_like_cnt) + " + " +
  228. str(video_share_cnt) + " + " +
  229. str(video_resolution) + " + " +
  230. str(video_send_time) + " + " +
  231. str(user_name) + " + " +
  232. str(head_url) + " + " +
  233. str(cover_url) + " + " +
  234. str(video_url) + " + " +
  235. str(user_id) + " + " +
  236. str("oWGa05FrwkuUvT-4n1qGeQuhVsc8") + "\n")
  237. except Exception as e:
  238. Common.crawler_log().error("获取微视视频 list 异常:{}".format(e))
  239. @classmethod
  240. def download_weishi_play_video(cls, env):
  241. """
  242. 下载播放量视频
  243. 测试环境:env == dev
  244. 正式环境:env == prod
  245. """
  246. videos = Common.read_txt("weishi_feeds.txt")
  247. for video in videos:
  248. download_video_id = video.strip().split(" + ")[1]
  249. download_video_title = video.strip().split(" + ")[3]
  250. download_video_duration = video.strip().split(" + ")[4]
  251. download_video_play_cnt = video.strip().split(" + ")[2]
  252. download_video_comment_cnt = video.strip().split(" + ")[5]
  253. download_video_like_cnt = video.strip().split(" + ")[6]
  254. download_video_share_cnt = video.strip().split(" + ")[7]
  255. download_video_resolution = video.strip().split(" + ")[8]
  256. download_video_width = download_video_resolution.split("*")[0]
  257. download_video_height = download_video_resolution.split("*")[-1]
  258. download_video_send_time = video.strip().split(" + ")[9]
  259. download_user_name = video.strip().split(" + ")[10]
  260. download_head_url = video.strip().split(" + ")[11]
  261. download_cover_url = video.strip().split(" + ")[12]
  262. download_video_url = video.strip().split(" + ")[13]
  263. download_video_session = video.strip().split(" + ")[-1]
  264. if cls.weishi_download_rule(download_video_duration, download_video_width,
  265. download_video_height, download_video_play_cnt) is True:
  266. Common.crawler_log().info("开始下载视频:{}".format(download_video_title))
  267. # 下载封面
  268. Common.download_method(text="cover", d_name=download_video_title, d_url=download_cover_url)
  269. # 下载视频
  270. Common.download_method(text="video", d_name=download_video_title, d_url=download_video_url)
  271. # 保存视频信息至 weishi_videoid.txt
  272. with open("./txt/weishi_videoid.txt", "a", encoding="utf8") as fa:
  273. fa.write(download_video_id + "\n")
  274. # 保存视频信息至 "./videos/{download_video_title}/info.txt"
  275. with open("./videos/" + download_video_title + "/info.txt", "a", encoding="utf8") as f_a:
  276. f_a.write(str(download_video_id) + "\n" +
  277. str(download_video_title) + "\n" +
  278. str(download_video_duration) + "\n" +
  279. str(download_video_play_cnt) + "\n" +
  280. str(download_video_comment_cnt) + "\n" +
  281. str(download_video_like_cnt) + "\n" +
  282. str(download_video_share_cnt) + "\n" +
  283. str(download_video_resolution) + "\n" +
  284. str(download_video_send_time) + "\n" +
  285. str(download_user_name) + "\n" +
  286. str(download_head_url) + "\n" +
  287. str(download_video_url) + "\n" +
  288. str(download_cover_url) + "\n" +
  289. str(download_video_session))
  290. # 上传视频
  291. if env == "dev":
  292. Common.crawler_log().info("开始上传视频:{}".format(download_video_title))
  293. Publish.upload_and_publish("dev", "play")
  294. elif env == "prod":
  295. Common.crawler_log().info("开始上传视频:{}".format(download_video_title))
  296. Publish.upload_and_publish("prod", "play")
  297. # 删除该视频在weishi_feeds.txt中的信息
  298. Common.crawler_log().info("删除该视频在weishi_feeds.txt中的信息:{}".format(download_video_title))
  299. with open("./txt/weishi_feeds.txt", "r", encoding="utf8") as f_r:
  300. lines = f_r.readlines()
  301. with open("./txt/weishi_feeds.txt", "w", encoding="utf-8") as f_w:
  302. for line in lines:
  303. if download_video_id in line.split(" + ")[1]:
  304. continue
  305. f_w.write(line)
  306. else:
  307. # 删除该视频在weishi_feeds.txt中的信息
  308. Common.crawler_log().info("该视频不满足下载规则,删除在weishi_feeds.txt中的信息:{}".format(download_video_title))
  309. with open("./txt/weishi_feeds.txt", "r", encoding="utf8") as f_r:
  310. lines = f_r.readlines()
  311. with open("./txt/weishi_feeds.txt", "w", encoding="utf-8") as f_w:
  312. for line in lines:
  313. if download_video_id in line.split(" + ")[1]:
  314. continue
  315. f_w.write(line)
  316. if __name__ == "__main__":
  317. weishi = Weishi()
  318. weishi.get_weishi_recommend()