kanyikan_recommend_plus.py

# -*- coding: utf-8 -*-
# @Time: 2023/10/26
import json
import os
import random
import sys
import time

import requests
import urllib3

sys.path.append(os.getcwd())
from common.mq import MQ
from common.common import Common
from common.scheduling_db import MysqlHelper
from common.public import get_config_from_mysql, download_rule

proxies = {"http": None, "https": None}


class KanyikanRecommend:
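    """
    Crawler for the WeChat "Kanyikan" (看一看) recommended-video feed.

    Fetches randomized recommendation pages, filters each item against dedup,
    quality, and keyword rules, and publishes qualifying videos to the ETL MQ topic.
    """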
    platform = "看一看"  # WeChat "Kanyikan"; also matched against rows in the dedup SQL below
    strategy = "随机数据抓取"  # "random data crawl"
    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
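        """Count rows in crawler_video (since 2023-10-09) matching this out_video_id; 0 means not yet crawled."""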
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and create_time>='2023-10-09' and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    @classmethod
    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
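        """
        Pull up to 20 pages of the recommend feed, validate each video against
        rule_dict and the keyword filter, and send new videos to the ETL queue.
        """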
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        try:
            Common.logger(log_type, crawler).info(f"Crawling list page: {crawler}")
            Common.logging(log_type, crawler, env, "Crawling list page")
            session = Common.get_session(log_type, crawler, env)
            if session is None:
                # Retry with a fresh session; return so we don't fall through with session=None
                time.sleep(1)
                cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
                return
            for _ in range(20):
                url = 'https://search.weixin.qq.com/cgi-bin/recwxa/recwxavideolist?'
                vid = random.choice(
                    ["wxv_2914137535516262402", "wxv_3043328250787430408", "wxv_2551550656600670208",
                     "wxv_2687527849242787845", "wxv_3022753845694726149", "wxv_3086921997647118337"])
                channelid = random.choice(["200201", "200", "208", "208201"])
                switchnewuser = random.choice(["0", "1"])
                switchprofile = random.choice(["0", "1"])
                subscene = random.choice(["1089", "1074", "208"])
                params = random.choice([{
                    'session': session,
                    "offset": 0,
                    "wxaVersion": "3.9.2",
                    "count": "10",
                    "channelid": channelid,
                    "scene": '310',
                    "subscene": subscene,
                    "clientVersion": '8.0.18',
                    "sharesearchid": '0',
                    "nettype": 'wifi',
                    "switchprofile": switchprofile,
                    "switchnewuser": switchnewuser,
                }, {
                    "session": session,
                    "wxaVersion": "3.17.8",
                    "channelid": channelid,
                    "vid": vid,
                    "offset": 0,
                    "count": "15",
                    "scene": '310',
                    "subscene": subscene,
                    "model": "MacBookPro14%2C111.6.7",
                    "nettype": 'wifi',
                    "clientVersion": '3.5.5',
                    "sharesearchid": '0',
                    "presearchid": "17530764723864413041",
                    "sharesource": "0",
                    "isFromUgc": "false",
                    "ad": 0,
                    "switchprofile": switchprofile,
                    "switchnewuser": switchnewuser,
                }])
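                # Static headers, apparently captured from a real WeChat
                # mini-program session (note the MiniProgramEnv User-Agent).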
                header = {
                    'Host': 'search.weixin.qq.com',
                    'Content-Type': 'application/json',
                    'X-WX-ClientVersion': '0x33050520',
                    'X-WECHAT-UIN': 'b2hfbTQ1WGNjSzQxemdfanpMSml1TEtfbEtsVQ==',
                    'Accept': '*/*',
                    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E217 MicroMessenger/6.8.0(0x16080000) NetType/WIFI Language/en Branch/Br_trunk MiniProgramEnv/Mac',
                    'Referer': 'https://servicewechat.com/wxbb9a805eb4f9533c/268/page-frame.html',
                    'Accept-Language': 'zh-cn'
                }
                urllib3.disable_warnings()
                response = requests.get(url=url, headers=header, params=params, proxies=proxies, verify=False)
                if "data" not in response.text:
                    Common.logger(log_type, crawler).info("Session expired while fetching the video list; sleeping 31-40s at random")
                    Common.logging(log_type, crawler, env, "Session expired while fetching the video list; sleeping 31-40s at random")
                    time.sleep(random.randint(31, 40))
                    cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
                    return
                elif "items" not in response.json()["data"]:
                    Common.logger(log_type, crawler).info(f"get_feeds:{response.json()}; sleeping 1-3 minutes at random")
                    Common.logging(log_type, crawler, env, f"get_feeds:{response.json()}; sleeping 1-3 minutes at random")
                    time.sleep(random.randint(60, 180))
                    cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
                    return
                feeds = response.json().get("data", {}).get("items")
                if feeds is None:
                    Common.logger(log_type, crawler).info(f"feeds:{feeds}")
                    Common.logging(log_type, crawler, env, f"feeds:{feeds}")
                    return
                for feed in feeds:
                    try:
                        # Strip characters that break file paths, logs, or titles downstream
                        video_title = feed.get("title", "").strip().replace("\n", "") \
                            .replace("/", "").replace("\\", "").replace("\r", "") \
                            .replace(":", "").replace("*", "").replace("?", "") \
                            .replace("?", "").replace('"', "").replace("<", "") \
                            .replace(">", "").replace("|", "").replace(" ", "") \
                            .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
                            .replace("'", "").replace("#", "").replace("Merge", "")
                        publish_time_stamp = feed.get("date", 0)
                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                        # Pick a playback URL from whichever CDN structure the item carries
                        if "videoInfo" not in feed:
                            video_url = ""
                        elif "mpInfo" in feed["videoInfo"]["videoCdnInfo"]:
                            if len(feed["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"]) > 2:
                                video_url = feed["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][2]["url"]
                            else:
                                video_url = feed["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][0]["url"]
                        elif "ctnInfo" in feed["videoInfo"]["videoCdnInfo"]:
                            video_url = feed["videoInfo"]["videoCdnInfo"]["ctnInfo"]["urlInfo"][0]["url"]
                        else:
                            video_url = feed["videoInfo"]["videoCdnInfo"]["urlInfo"][0]["url"]
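                        # Suffix the platform id so this crawler's records stay
                        # distinguishable in the shared dedup table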
                        videoId = "{}kyk_plus".format(feed.get("videoId", ""))
                        playCount = int(feed.get("playCount", 0))
                        shared_cnt = int(feed.get("shared_cnt", 0))
                        video_dict = {
                            "video_title": video_title,
                            "video_id": videoId,
                            "play_cnt": playCount,
                            "like_cnt": feed.get("liked_cnt", 0),
                            "comment_cnt": feed.get("comment_cnt", 0),
                            "share_cnt": shared_cnt,
                            "duration": feed.get("mediaDuration", 0),
                            "video_width": feed.get("short_video_info", {}).get("width", 0),
                            "video_height": feed.get("short_video_info", {}).get("height", 0),
                            "publish_time_stamp": publish_time_stamp,
                            "publish_time_str": publish_time_str,
                            "user_name": feed.get("source", "").strip().replace("\n", ""),
                            "user_id": feed.get("openid", ""),
                            "avatar_url": feed.get("bizIcon", ""),
                            "cover_url": feed.get("thumbUrl", ""),
                            "video_url": video_url,
                            "session": session,
                        }
                        for k, v in video_dict.items():
                            Common.logger(log_type, crawler).info(f"{k}:{v}")
                        Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
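                        # Quality gate: drop items whose share/play ratio is at or below 5%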
                        video_percent = '%.2f' % (shared_cnt / playCount) if playCount else '0.00'
                        if float(video_percent) <= 0.05:
                            Common.logger(log_type, crawler).info(f"share/play <= 0.05: {video_percent}\n")
                            Common.logging(log_type, crawler, env, f"share/play <= 0.05: {video_percent}\n")
                            continue
                        if video_dict["video_id"] == "" or video_dict["video_title"] == "" or video_dict["video_url"] == "":
                            Common.logger(log_type, crawler).info("Invalid video\n")
                            Common.logging(log_type, crawler, env, "Invalid video\n")
                        elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                            Common.logger(log_type, crawler).info("Does not satisfy the crawl rules\n")
                            Common.logging(log_type, crawler, env, "Does not satisfy the crawl rules\n")
                        elif any(str(word) in video_dict["video_title"]
                                 for word in get_config_from_mysql(log_type=log_type,
                                                                   source=crawler,
                                                                   env=env,
                                                                   text="filter",
                                                                   action="")):
                            Common.logger(log_type, crawler).info('Title hit a filter word\n')
                            Common.logging(log_type, crawler, env, 'Title hit a filter word\n')
                        elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                            Common.logger(log_type, crawler).info('Video already downloaded\n')
                            Common.logging(log_type, crawler, env, 'Video already downloaded\n')
                        else:
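                            # Remap to the ETL schema expected downstream, then publish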
  183. video_dict["out_user_id"] = video_dict["user_id"]
  184. video_dict["platform"] = crawler
  185. video_dict["strategy"] = log_type
  186. video_dict["strategy_type"] = "data"
  187. video_dict["out_video_id"] = video_dict["video_id"]
  188. video_dict["width"] = video_dict["video_width"]
  189. video_dict["height"] = video_dict["video_height"]
  190. video_dict["crawler_rule"] = json.dumps(rule_dict)
  191. video_dict["user_id"] = our_uid
  192. video_dict["publish_time"] = video_dict["publish_time_str"]
  193. mq.send_msg(video_dict)
  194. time.sleep(random.randint(10, 15))
  195. except Exception as e:
  196. Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
  197. Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
  198. except Exception as e:
  199. Common.logger(log_type, crawler).error(f"抓取列表页时异常:{e}\n")
  200. Common.logging(log_type, crawler, env, f"抓取列表页时异常:{e}\n")
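# Ad-hoc entry point: crawl the prod recommend feed with a minimum-share-count rule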
if __name__ == "__main__":
    KanyikanRecommend.get_videoList(
        log_type="recommend",
        crawler="kanyikan",
        env="prod",
        rule_dict={'share_cnt': {'min': 300, 'max': 0}},
        our_uid=64080779
    )