kanyikan_recommend_plus.py

# -*- coding: utf-8 -*-
# @Time: 2023/10/26
import json
import os
import random
import sys
import time

import requests
import urllib3

sys.path.append(os.getcwd())
from common.mq import MQ
from common.common import Common
from common.scheduling_db import MysqlHelper
from common import AliyunLogger
from common.public import get_config_from_mysql, download_rule

proxies = {"http": None, "https": None}
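

# Crawls the WeChat 看一看 (Kanyikan) recommended-video feed, screens each item
# against popularity thresholds and the configured download rules, and publishes
# qualifying videos to the "topic_crawler_etl_<env>" message queue.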
class KanyikanRecommend:
    platform = "看一看"
    strategy = "随机数据抓取"
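
    # Deduplication guard: returns the number of crawler_video rows (created
    # since 2023-10-09) that already carry this out_video_id for this crawler
    # or the 看一看 platform.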
    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and create_time>='2023-10-09' and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
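
    # Main entry: pulls up to 20 pages of recommendations with randomised
    # request parameters, filters each item, and hands survivors to the ETL
    # queue. Retries itself when the session expires or the feed comes back
    # empty.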
    @classmethod
    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        try:
            session = Common.get_session(log_type, crawler, env)
            if session is None:
                time.sleep(1)
                return cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
            for i in range(20):
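                # Randomise the seed vid, channel id, subscene, and A/B switches
                # on every iteration so successive requests look like distinct
                # feed pulls.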
                url = 'https://search.weixin.qq.com/cgi-bin/recwxa/recwxavideolist?'
                vid = random.choice(
                    ["wxv_3183841422983217154", "wxv_2930758110737334272", "wxv_2988109621326512134",
                     "wxv_2676332817823432706", "wxv_3176172124915433476", "wxv_2844480939899650049",
                     "wxv_2801905452978274308", "wxv_2946787506342117382", "wxv_2935943471797125120",
                     "wxv_2756464139115659264", "wxv_3174430452460453896", "wxv_3126758748858908674",
                     "wxv_3182262442043621385", "wxv_3058491263710314497", "wxv_2952726055449051140",
                     "wxv_3076106053748015108", "wxv_2074265064492040192", "wxv_2999570992006021122"])
                channelid = random.choice(["200201", "200", "208", "208201"])
                switchnewuser = random.choice(["0", "1"])
                switchprofile = random.choice(["0", "1"])
                subscene = random.choice(["1089", "1074", "208"])
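                # Two alternative query templates for the recwxavideolist call;
                # the second seeds the recommendation with a concrete vid and
                # mimics the macOS WeChat client.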
                params = random.choice([{
                    'session': session,
                    "offset": 0,
                    "wxaVersion": "3.9.2",
                    "count": "10",
                    "channelid": channelid,
                    "scene": '310',
                    "subscene": subscene,
                    "clientVersion": '8.0.18',
                    "sharesearchid": '0',
                    "nettype": 'wifi',
                    "switchprofile": switchprofile,
                    "switchnewuser": switchnewuser,
                }, {
                    "session": session,
                    "wxaVersion": "3.17.8",
                    "channelid": channelid,
                    "vid": vid,
                    "offset": 0,
                    "count": "15",
                    "scene": '310',
                    "subscene": subscene,
                    "model": "MacBookPro14%2C111.6.7",
                    "nettype": 'wifi',
                    "clientVersion": '3.5.5',
                    "sharesearchid": '0',
                    "presearchid": "17530764723864413041",
                    "sharesource": "0",
                    "isFromUgc": "false",
                    "ad": 0,
                    "switchprofile": switchprofile,
                    "switchnewuser": switchnewuser,
                }])
                header = {
                    'Host': 'search.weixin.qq.com',
                    'Content-Type': 'application/json',
                    'X-WX-ClientVersion': '0x33050520',
                    'X-WECHAT-UIN': 'b2hfbTQ1WGNjSzQxemdfanpMSml1TEtfbEtsVQ==',
                    'Accept': '*/*',
                    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E217 MicroMessenger/6.8.0(0x16080000) NetType/WIFI Language/en Branch/Br_trunk MiniProgramEnv/Mac',
                    'Referer': 'https://servicewechat.com/wxbb9a805eb4f9533c/268/page-frame.html',
                    'Accept-Language': 'zh-cn'
                }
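                # The endpoint is called without TLS verification, so suppress
                # urllib3's InsecureRequestWarning first.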
                urllib3.disable_warnings()
                response = requests.get(url=url, headers=header, params=params, proxies=proxies, verify=False)
                if "data" not in response.text:
                    Common.logger(log_type, crawler).info("获取视频list时,session过期,随机睡眠 31-40 秒")
                    Common.logging(log_type, crawler, env, "获取视频list时,session过期,随机睡眠 31-40 秒")
                    AliyunLogger.logging(
                        code="2000",
                        platform=crawler,
                        mode=log_type,
                        env=env,
                        message="获取视频list时,session过期,随机睡眠 31-40 秒"
                    )
                    # Empty payload: the session has expired. Back off 31-40 s,
                    # then retry with a fresh session.
                    time.sleep(random.randint(31, 40))
                    return cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
                elif "items" not in response.json()["data"]:
                    Common.logger(log_type, crawler).info(f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
                    Common.logging(log_type, crawler, env, f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
                    AliyunLogger.logging(
                        code="2000",
                        platform=crawler,
                        mode=log_type,
                        env=env,
                        message=f"get_feeds:{response.json()},随机睡眠 1-3 分钟"
                    )
                    # Feed returned no items: back off 1-3 minutes, then retry.
                    time.sleep(random.randint(60, 180))
                    return cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
                feeds = response.json().get("data", {}).get("items", "")
                if feeds == "":
                    Common.logger(log_type, crawler).info(f"feeds:{feeds}")
                    Common.logging(log_type, crawler, env, f"feeds:{feeds}")
                    return
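
                # Process each recommended item independently: a failure on one
                # video is logged and does not abort the rest of the page.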
                for i in range(len(feeds)):
                    try:
                        AliyunLogger.logging(
                            code="1001",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message='扫描到一条视频\n'
                        )
                        video_title = feeds[i].get("title", "").strip().replace("\n", "") \
                            .replace("/", "").replace("\\", "").replace("\r", "") \
                            .replace(":", "").replace("*", "").replace("?", "") \
                            .replace("?", "").replace('"', "").replace("<", "") \
                            .replace(">", "").replace("|", "").replace(" ", "") \
                            .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
                            .replace("'", "").replace("#", "").replace("Merge", "")
                        publish_time_stamp = feeds[i].get("date", 0)
                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                        # Resolve the playback URL from whichever CDN block is present.
                        if "videoInfo" not in feeds[i]:
                            video_url = ""
                        elif "mpInfo" in feeds[i]["videoInfo"]["videoCdnInfo"]:
                            if len(feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"]) > 2:
                                video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][2]["url"]
                            else:
                                video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][0]["url"]
                        elif "ctnInfo" in feeds[i]["videoInfo"]["videoCdnInfo"]:
                            video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["ctnInfo"]["urlInfo"][0]["url"]
                        else:
                            video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["urlInfo"][0]["url"]
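                        # The "kyk_plus" suffix namespaces IDs from this strategy;
                        # repeat_video matches out_video_id verbatim, so suffixed
                        # IDs do not collide with other Kanyikan crawls.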
                        videoId = feeds[i].get("videoId", "")
                        videoId = "{}kyk_plus".format(videoId)
                        playCount = int(feeds[i].get("playCount", 0))
                        shared_cnt = int(feeds[i].get("shared_cnt", 0))
                        video_dict = {
                            "video_title": video_title,
                            "video_id": videoId,
                            "play_cnt": feeds[i].get("playCount", 0),
                            "like_cnt": feeds[i].get("liked_cnt", 0),
                            "comment_cnt": feeds[i].get("comment_cnt", 0),
                            "share_cnt": feeds[i].get("shared_cnt", 0),
                            "duration": feeds[i].get("mediaDuration", 0),
                            "video_width": feeds[i].get("short_video_info", {}).get("width", 0),
                            "video_height": feeds[i].get("short_video_info", {}).get("height", 0),
                            "publish_time_stamp": publish_time_stamp,
                            "publish_time_str": publish_time_str,
                            "user_name": feeds[i].get("source", "").strip().replace("\n", ""),
                            "user_id": feeds[i].get("openid", ""),
                            "avatar_url": feeds[i].get("bizIcon", ""),
                            "cover_url": feeds[i].get("thumbUrl", ""),
                            "video_url": video_url,
                            "session": session,
                        }
                        for k, v in video_dict.items():
                            Common.logger(log_type, crawler).info(f"{k}:{v}")
                        Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
                        AliyunLogger.logging(
                            code="1000",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message=f"{video_dict}\n"
                        )
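                        # Popularity gates: require a share/play ratio of at
                        # least 5% and at least 800 shares before applying the
                        # generic download rules.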
                        # Guard against zero plays before computing the ratio.
                        video_percent = '%.2f' % (shared_cnt / playCount) if playCount else '0.00'
                        if float(video_percent) < 0.05:
                            Common.logger(log_type, crawler).info(f"分享/播放:{video_percent}\n")
                            Common.logging(log_type, crawler, env, f"分享/播放:{video_percent}\n")
                            AliyunLogger.logging(
                                code="2004",
                                platform=crawler,
                                mode=log_type,
                                env=env,
                                message=f"不符合抓取条件,分享/播放:{video_percent}\n"
                            )
                            continue
                        elif shared_cnt < 800:
                            Common.logger(log_type, crawler).info(f"分享量:{shared_cnt}\n")
                            Common.logging(log_type, crawler, env, f"分享量:{shared_cnt}\n")
                            AliyunLogger.logging(
                                code="2004",
                                platform=crawler,
                                mode=log_type,
                                env=env,
                                message=f"不符合抓取条件,分享量:{shared_cnt}\n"
                            )
                            continue
                        if video_dict["video_id"] == "" or video_dict["video_title"] == "" or video_dict["video_url"] == "":
                            Common.logger(log_type, crawler).info("无效视频\n")
                            Common.logging(log_type, crawler, env, "无效视频\n")
                            AliyunLogger.logging(
                                code="2004",
                                platform=crawler,
                                mode=log_type,
                                env=env,
                                message="无效视频"
                            )
                        elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                            Common.logger(log_type, crawler).info("不满足抓取规则\n")
                            Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                            AliyunLogger.logging(
                                code="2004",
                                platform=crawler,
                                mode=log_type,
                                env=env,
                                message='不满足抓取规则\n'
                            )
                        elif any(str(word) in video_dict["video_title"]
                                 for word in get_config_from_mysql(log_type=log_type,
                                                                   source=crawler,
                                                                   env=env,
                                                                   text="filter",
                                                                   action="")):
                            Common.logger(log_type, crawler).info('已中过滤词\n')
                            Common.logging(log_type, crawler, env, '已中过滤词\n')
                            AliyunLogger.logging(
                                code="2004",
                                platform=crawler,
                                mode=log_type,
                                env=env,
                                message='已中过滤词\n'
                            )
                        elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                            Common.logger(log_type, crawler).info('视频已下载\n')
                            Common.logging(log_type, crawler, env, '视频已下载\n')
                            AliyunLogger.logging(
                                code="2002",
                                platform=crawler,
                                mode=log_type,
                                env=env,
                                message='视频已下载\n'
                            )
                        else:
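                            # Passed every check: reshape the record for the ETL
                            # consumer, publish it, then pause 10-15 s between
                            # sends.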
                            video_dict["out_user_id"] = video_dict["user_id"]
                            video_dict["platform"] = crawler
                            video_dict["strategy"] = log_type
                            video_dict["strategy_type"] = "data"
                            video_dict["out_video_id"] = video_dict["video_id"]
                            video_dict["width"] = video_dict["video_width"]
                            video_dict["height"] = video_dict["video_height"]
                            video_dict["crawler_rule"] = json.dumps(rule_dict)
                            video_dict["user_id"] = our_uid
                            video_dict["publish_time"] = video_dict["publish_time_str"]
                            mq.send_msg(video_dict)
                            time.sleep(random.randint(10, 15))
                    except Exception as e:
                        Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                        Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
                        AliyunLogger.logging(
                            code="3000",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message=f"抓取单条视频异常:{e}\n"
                        )
        except Exception as e:
            Common.logger(log_type, crawler).error(f"抓取列表页时异常:{e}\n")
            Common.logging(log_type, crawler, env, f"抓取列表页时异常:{e}\n")
            AliyunLogger.logging(
                code="3000",
                platform=crawler,
                mode=log_type,
                env=env,
                message=f"抓取列表页时异常:{e}\n"
            )
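

# Ad-hoc production run. Note: 'max': 0 in rule_dict presumably means "no upper
# bound" (an assumption; the semantics live in common.public.download_rule).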
if __name__ == "__main__":
    KanyikanRecommend.get_videoList(
        log_type="recommend",
        crawler="kanyikan",
        env="prod",
        rule_dict={'share_cnt': {'min': 300, 'max': 0}},
        our_uid=64080779
    )