# get_feeds.py
  1. # -*- coding: utf-8 -*-
  2. # @Author: wangkun
  3. # @Time: 2022/4/18
  4. """
  5. 获取看一看+小程序,首页推荐视频列表
  6. """
  7. import json
  8. import os
  9. import random
  10. import sys
  11. import time
  12. import requests
  13. import urllib3
  14. from main.feishu_lib import Feishu
  15. sys.path.append(os.getcwd())
  16. from main.common import Common
  17. proxies = {"http": None, "https": None}
  18. def get_feeds():
  19. """
  20. 1.从看一看+小程序首页推荐,获取视频列表
  21. 2.先在 https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=20ce0c 中去重
  22. 3.再从 https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=Y8N3Vl 中去重
  23. 4.添加视频信息至 https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=Y8N3Vl
  24. """
  25. host = "https://search.weixin.qq.com"
  26. url = '/cgi-bin/recwxa/recwxavideolist?'
  27. video_list_session = Common.get_session()
  28. Common.crawler_log().info("获取视频list时,session:{}".format(video_list_session))
  29. header = {
  30. "Connection": "keep-alive",
  31. "content-type": "application/json",
  32. "Accept-Encoding": "gzip,compress,br,deflate",
  33. "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) "
  34. "AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.18(0x18001236) "
  35. "NetType/WIFI Language/zh_CN",
  36. "Referer": "https://servicewechat.com/wxbb9a805eb4f9533c/234/page-frame.html",
  37. }
  38. params = {
  39. 'session': video_list_session,
  40. "offset": 0,
  41. "wxaVersion": "3.9.2",
  42. "count": "10",
  43. "channelid": "208",
  44. "scene": '310',
  45. "subscene": '1089',
  46. "clientVersion": '8.0.18',
  47. "sharesearchid": '0',
  48. "nettype": 'wifi',
  49. "switchprofile": "0",
  50. "switchnewuser": "0",
  51. }
  52. try:
  53. urllib3.disable_warnings()
  54. r = requests.get(host + url, headers=header, params=params, proxies=proxies, verify=False)
  55. response = json.loads(r.content.decode("utf8"))
  56. if "data" not in response:
  57. Common.crawler_log().info("获取视频list时,session过期,随机睡眠 31-50 秒")
  58. # 如果返回空信息,则随机睡眠 31-40 秒
  59. time.sleep(random.randint(31, 40))
  60. get_feeds()
  61. elif "items" not in response["data"]:
  62. Common.crawler_log().info("获取视频list时,返回空信息,随机睡眠 1-3 分钟")
  63. # 如果返回空信息,则随机睡眠 1-3 分钟
  64. time.sleep(random.randint(60, 180))
  65. get_feeds()
  66. else:
  67. items = response["data"]["items"]
  68. for i in range(len(items)):
  69. # 如果该视频没有视频信息,则忽略
  70. if "videoInfo" not in items[i]:
  71. Common.crawler_log().info("无视频信息")
  72. else:
  73. # 获取视频ID
  74. video_id = items[i]["videoId"]
  75. Common.crawler_log().info('视频ID:{}'.format(video_id))
  76. # 获取视频标题
  77. video_title = items[i]["title"].strip().replace("\n", "")\
  78. .replace("/", "").replace("\\", "").replace("\r", "")\
  79. .replace(":", "").replace("*", "").replace("?", "")\
  80. .replace("?", "").replace('"', "").replace("<", "")\
  81. .replace(">", "").replace("|", "").replace(" ", "")\
  82. .replace("&NBSP", "").replace(".", "。").replace(" ", "")
  83. Common.crawler_log().info('视频标题:{}'.format(video_title))
  84. # 获取视频播放次数
  85. video_play_cnt = items[i]["playCount"]
  86. Common.crawler_log().info('视频播放次数:{}'.format(video_play_cnt))
  87. # 获取视频点赞数
  88. video_liked_cnt = items[i]["liked_cnt"]
  89. Common.crawler_log().info('视频点赞数:{}'.format(video_liked_cnt))
  90. # 获取视频时长
  91. video_duration = items[i]["mediaDuration"]
  92. Common.crawler_log().info('视频时长:{}秒'.format(video_duration))
  93. # 获取视频评论数
  94. video_comment_cnt = items[i]["comment_cnt"]
  95. Common.crawler_log().info('视频评论数:{}'.format(video_comment_cnt))
  96. # 获取视频分享数
  97. video_shared_cnt = items[i]["shared_cnt"]
  98. Common.crawler_log().info('视频分享数:{}'.format(video_shared_cnt))
  99. # 获取视频发布时间
  100. video_send_date = items[i]["date"]
  101. Common.crawler_log().info('视频发布时间:{}'.format(
  102. time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(video_send_date))))
  103. # 获取视频用户名
  104. video_user = items[i]["source"].strip().replace("\n", "")
  105. Common.crawler_log().info('视频用户名:{}'.format(video_user))
  106. # 获取视频宽高
  107. if "short_video_info" not in items[i]:
  108. video_width = "0"
  109. video_height = "0"
  110. video_resolution = str(video_width) + "*" + str(video_height)
  111. Common.crawler_log().info("无分辨率:{}".format(video_resolution))
  112. elif len(items[i]["short_video_info"]) == 0:
  113. video_width = "0"
  114. video_height = "0"
  115. video_resolution = str(video_width) + "*" + str(video_height)
  116. Common.crawler_log().info("无分辨率:{}".format(video_resolution))
  117. else:
  118. # 视频宽
  119. video_width = items[i]["short_video_info"]["width"]
  120. # 视频高
  121. video_height = items[i]["short_video_info"]["height"]
  122. video_resolution = str(video_width) + "*" + str(video_height)
  123. Common.crawler_log().info('视频宽高:{}'.format(video_resolution))
  124. # 获取视频用户头像
  125. video_user_cover = items[i]["bizIcon"]
  126. Common.crawler_log().info('视频用户头像:{}'.format(video_user_cover))
  127. # 获取视频封面
  128. if "smartCoverUrl" in items[i]:
  129. video_cover = items[i]["smartCoverUrl"]
  130. Common.crawler_log().info('视频封面:{}'.format(video_cover))
  131. else:
  132. video_cover = items[i]["thumbUrl"]
  133. Common.crawler_log().info('视频封面:{}'.format(video_cover))
  134. # 获取播放地址
  135. if "mpInfo" in items[i]["videoInfo"]["videoCdnInfo"].keys():
  136. if len(items[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"]) > 2:
  137. url = items[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][2]["url"]
  138. Common.crawler_log().info('视频播放地址:{}'.format(url))
  139. else:
  140. url = items[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][0]["url"]
  141. Common.crawler_log().info('视频播放地址:{}'.format(url))
  142. elif "ctnInfo" in items[i]["videoInfo"]["videoCdnInfo"]:
  143. url = items[i]["videoInfo"]["videoCdnInfo"]["ctnInfo"]["urlInfo"][0]["url"]
  144. Common.crawler_log().info('视频播放地址:{}'.format(url))
  145. else:
  146. url = items[i]["videoInfo"]["videoCdnInfo"]["urlInfo"][0]["url"]
  147. Common.crawler_log().info('视频播放地址:{}'.format(url))
  148. # 过滤无效视频
  149. if video_id == "" \
  150. or video_send_date == "" \
  151. or video_title.strip() == "" \
  152. or video_play_cnt == "" \
  153. or video_liked_cnt == "" \
  154. or video_duration == "" \
  155. or video_comment_cnt == "" \
  156. or video_shared_cnt == "" \
  157. or video_user == "" \
  158. or video_user_cover == "" \
  159. or video_cover == "" \
  160. or url == "":
  161. Common.crawler_log().info("无效视频")
  162. else:
  163. # 从 云文档 去重:https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=20ce0c
  164. if video_id in [j for i in Feishu.get_values_batch("20ce0c") for j in i]:
  165. Common.crawler_log().info("该视频已下载:{}".format(video_title))
  166. else:
  167. Common.crawler_log().info("该视频未下载:{}".format(video_title))
  168. # 从 云文档 去重:https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=Y8N3Vl
  169. if video_id in [j for i in Feishu.get_values_batch("Y8N3Vl") for j in i]:
  170. Common.crawler_log().info("该视频已在kanyikan_feeds_1中:{}".format(video_title))
  171. else:
  172. Common.crawler_log().info("添加该视频信息至kanyikan_feeds_1:{}".format(video_title))
  173. # 看一看+工作表,插入首行
  174. print(Feishu.insert_columns("Y8N3Vl"))
  175. # 获取当前时间
  176. get_feeds_time = int(time.time())
  177. # 看一看云文档,工作表 kanyikan_feeds_1 中写入数据
  178. Feishu.update_values("Y8N3Vl",
  179. a1=str(get_feeds_time),
  180. b1=str(video_id),
  181. c1=str(video_play_cnt),
  182. d1=str(video_title),
  183. e1=str(video_duration),
  184. f1=str(video_comment_cnt),
  185. g1=str(video_liked_cnt),
  186. h1=str(video_shared_cnt),
  187. i1=str(video_resolution),
  188. j1=str(video_send_date),
  189. k1=str(video_user),
  190. l1=str(video_user_cover),
  191. m1=str(video_cover),
  192. n1=str(url),
  193. o1=str(video_list_session))
  194. except Exception as e:
  195. Common.crawler_log().error("获取视频 list 时异常:{}".format(e))
  196. if __name__ == "__main__":
  197. get_feeds()