# kanyikan_recommend_feed.py

# -*- coding: utf-8 -*-
# @Time: 2023/11/03
import json
import os
import random
import sys
import time

import requests
import urllib3

sys.path.append(os.getcwd())
from common import AliyunLogger
from common.mq import MQ
from common.common import Common
from common.scheduling_db import MysqlHelper
from common.public import get_config_from_mysql, download_rule

proxies = {"http": None, "https": None}
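

# Crawler for the WeChat "看一看" (Kanyikan) recommend feed: pulls the
# recommendation list, expands each item into its related-video list, filters
# items by play/like/share thresholds and configured filter words, deduplicates
# against MySQL, and publishes qualifying videos to the crawler ETL MQ topic.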
class KanyikanRecommend:
    platform = "看一看-feed流"
    strategy = "feed流"

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and create_time>='2023-10-09' and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
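
    # Fetch one page (count=10) of recommended videos from the WeChat search
    # recwxavideolist endpoint. Returns the raw "items" list, or "" when the
    # session has expired or no items came back (after scheduling a retry).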
    @classmethod
    def get_vid(cls, session, log_type, crawler, env, our_uid, rule_dict):
        url = "https://search.weixin.qq.com/cgi-bin/recwxa/recwxavideolist?"
        header = {
            'Host': 'search.weixin.qq.com',
            'Content-Type': 'application/json',
            'X-WX-ClientVersion': '0x33050520',
            'X-WECHAT-UIN': 'b2hfbTQ1WGNjSzQxemdfanpMSml1TEtfbEtsVQ==',
            'Accept': '*/*',
            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E217 MicroMessenger/6.8.0(0x16080000) NetType/WIFI Language/en Branch/Br_trunk MiniProgramEnv/Mac',
            'Referer': 'https://servicewechat.com/wxbb9a805eb4f9533c/268/page-frame.html',
            'Accept-Language': 'zh-cn'
        }
        params = {
            "session": session,
            "offset": 0,
            "count": "10",
            "channelid": "200",
            "scene": '310',
            "subscene": '1074',
            "sharesearchid": '0',
            "nettype": 'wifi',
            "switchprofile": "0",
            "switchnewuser": "0",
            "ad": 0
        }
        urllib3.disable_warnings()
        response = requests.get(url=url, headers=header, params=params, proxies=proxies, verify=False)
        if "data" not in response.text:
            Common.logger(log_type, crawler).info("获取视频list时,session过期,随机睡眠 31-40 秒")
            Common.logging(log_type, crawler, env, "获取视频list时,session过期,随机睡眠 31-40 秒")
            AliyunLogger.logging(
                code="2000",
                platform=crawler,
                mode=log_type,
                env=env,
                message="获取视频list时,session过期,随机睡眠 31-40 秒"
            )
            # Session expired: sleep a random 31-40 seconds, restart the list job,
            # then return "" so the stale response below is never parsed.
            time.sleep(random.randint(31, 40))
            cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
            return ""
        elif "items" not in response.json()["data"]:
            Common.logger(log_type, crawler).info(f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
            Common.logging(log_type, crawler, env, f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
            AliyunLogger.logging(
                code="2000",
                platform=crawler,
                mode=log_type,
                env=env,
                message=f"get_feeds:{response.json()},随机睡眠 1-3 分钟"
            )
            # No items returned: sleep a random 1-3 minutes, restart the list job.
            time.sleep(random.randint(60, 180))
            cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
            return ""
        feeds = response.json().get("data", {}).get("items", "")
        return feeds
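
    # Main list job: get a session, pull the recommend feed, then for each feed
    # item fetch its related-video list (channelid=200201) and push every item
    # that passes the rules to the ETL queue.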
    @classmethod
    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        try:
            session = Common.get_session(log_type, crawler, env)
            if session is None:
                time.sleep(1)
                cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
                # Return here so this frame does not continue with session=None.
                return
            feeds = cls.get_vid(session, log_type, crawler, env, our_uid, rule_dict)
            if feeds == "":
                Common.logger(log_type, crawler).info(f"feeds:{feeds}")
                Common.logging(log_type, crawler, env, f"feeds:{feeds}")
                return
            for i in range(len(feeds)):
                vid = feeds[i].get("videoId", "")
                shared_cnt = int(feeds[i].get("shared_cnt", 0))
                liked_cnt = int(feeds[i].get("liked_cnt", 0))
                playCount = int(feeds[i].get("playCount", 0))
                # Guard against division by zero for items with no plays.
                video_percent = '%.2f' % (liked_cnt / playCount) if playCount else '0.00'
                if playCount < 100000:
                    if float(video_percent) < 0.01 and playCount < 10000:
                        Common.logger(log_type, crawler).info(f"点赞/播放:{video_percent},播放量:{playCount}\n")
                        Common.logging(log_type, crawler, env, f"点赞/播放:{video_percent},播放量:{playCount}\n")
                        continue
                time.sleep(random.randint(4, 10))
                url1 = "https://search.weixin.qq.com/cgi-bin/recwxa/recwxavideolist?"
                header = {
                    'Host': 'search.weixin.qq.com',
                    'Content-Type': 'application/json',
                    'X-WX-ClientVersion': '0x33050520',
                    'X-WECHAT-UIN': 'b2hfbTQ1WGNjSzQxemdfanpMSml1TEtfbEtsVQ==',
                    'Accept': '*/*',
                    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E217 MicroMessenger/6.8.0(0x16080000) NetType/WIFI Language/en Branch/Br_trunk MiniProgramEnv/Mac',
                    'Referer': 'https://servicewechat.com/wxbb9a805eb4f9533c/269/page-frame.html',
                    'Accept-Language': 'zh-cn'
                }
                params = {
                    "session": session,
                    "offset": 0,
                    "count": "30",
                    "channelid": "200201",
                    "vid": vid,
                    "scene": "310",
                    "subscene": '1098'
                }
                urllib3.disable_warnings()
                response = requests.get(url=url1, headers=header, params=params, proxies=proxies, verify=False)
                if "data" not in response.text:
                    Common.logger(log_type, crawler).info("获取视频list时,session过期,随机睡眠 31-40 秒")
                    Common.logging(log_type, crawler, env, "获取视频list时,session过期,随机睡眠 31-40 秒")
                    AliyunLogger.logging(
                        code="2000",
                        platform=crawler,
                        mode=log_type,
                        env=env,
                        message="获取视频list时,session过期,随机睡眠 31-40 秒"
                    )
                    # Session expired: sleep a random 31-40 seconds, restart the
                    # list job, and return instead of parsing the stale response.
                    time.sleep(random.randint(31, 40))
                    cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
                    return
                elif "items" not in response.json()["data"]:
                    Common.logger(log_type, crawler).info(f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
                    Common.logging(log_type, crawler, env, f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
                    AliyunLogger.logging(
                        code="2000",
                        platform=crawler,
                        mode=log_type,
                        env=env,
                        message=f"get_feeds:{response.json()},随机睡眠 1-3 分钟"
                    )
                    # No items returned: sleep a random 1-3 minutes and restart.
                    time.sleep(random.randint(60, 180))
                    cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
                    return
                feeds = response.json().get("data", {}).get("items", "")
                if feeds == "":
                    Common.logger(log_type, crawler).info(f"feeds:{feeds}")
                    Common.logging(log_type, crawler, env, f"feeds:{feeds}")
                    AliyunLogger.logging(
                        code="2001",
                        platform=crawler,
                        mode=log_type,
                        env=env,
                        message="没有更多视频啦 ~\n"
                    )
                    return
                for j in range(len(feeds)):
                    try:
                        AliyunLogger.logging(
                            code="1001",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message='扫描到一条视频\n'
                        )
                        # Strip characters that break file names / downstream storage.
                        video_title = feeds[j].get("title", "").strip().replace("\n", "") \
                            .replace("/", "").replace("\\", "").replace("\r", "") \
                            .replace(":", "").replace("*", "").replace("?", "") \
                            .replace("?", "").replace('"', "").replace("<", "") \
                            .replace(">", "").replace("|", "").replace(" ", "") \
                            .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
                            .replace("'", "").replace("#", "").replace("Merge", "")
                        publish_time_stamp = feeds[j].get("date", 0)
                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                        # Resolve the playable URL from whichever CDN structure is present.
                        if "videoInfo" not in feeds[j]:
                            video_url = ""
                        elif "mpInfo" in feeds[j]["videoInfo"]["videoCdnInfo"]:
                            if len(feeds[j]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"]) > 2:
                                video_url = feeds[j]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][2]["url"]
                            else:
                                video_url = feeds[j]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][0]["url"]
                        elif "ctnInfo" in feeds[j]["videoInfo"]["videoCdnInfo"]:
                            video_url = feeds[j]["videoInfo"]["videoCdnInfo"]["ctnInfo"]["urlInfo"][0]["url"]
                        else:
                            video_url = feeds[j]["videoInfo"]["videoCdnInfo"]["urlInfo"][0]["url"]
                        video_id = feeds[j].get("videoId", "") + "feed"
                        s_cnt = int(feeds[j].get("shared_cnt", 0))
                        p_count = int(feeds[j].get("playCount", 0))
                        l_cnt = int(feeds[j].get("liked_cnt", 0))
                        if p_count < 100000:
                            # Skip low-traction items: under 200 shares and under 15k plays.
                            if s_cnt < 200 and p_count < 15000:
                                Common.logger(log_type, crawler).info(f"分享:{s_cnt},播放量:{p_count}\n")
                                Common.logging(log_type, crawler, env, f"分享:{s_cnt},播放量:{p_count}\n")
                                continue
                        video_dict = {
                            "video_title": video_title,
                            "video_id": video_id,
                            "play_cnt": feeds[j].get("playCount", 0),
                            "like_cnt": feeds[j].get("liked_cnt", 0),
                            "comment_cnt": feeds[j].get("comment_cnt", 0),
                            "share_cnt": feeds[j].get("shared_cnt", 0),
                            "duration": feeds[j].get("mediaDuration", 0),
                            "video_width": feeds[j].get("short_video_info", {}).get("width", 0),
                            "video_height": feeds[j].get("short_video_info", {}).get("height", 0),
                            "publish_time_stamp": publish_time_stamp,
                            "publish_time_str": publish_time_str,
                            "user_name": feeds[j].get("source", "").strip().replace("\n", ""),
                            "user_id": feeds[j].get("openid", ""),
                            "avatar_url": feeds[j].get("bizIcon", ""),
                            "cover_url": feeds[j].get("thumbUrl", ""),
                            "video_url": video_url,
                            "session": session,
                        }
                        for k, v in video_dict.items():
                            Common.logger(log_type, crawler).info(f"{k}:{v}")
                        Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
                        AliyunLogger.logging(
                            code="1000",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message=f"{video_dict}\n"
                        )
                        if video_dict["video_id"] == "" or video_dict["video_title"] == "" or video_dict["video_url"] == "":
                            Common.logger(log_type, crawler).info("无效视频\n")
                            Common.logging(log_type, crawler, env, "无效视频\n")
                            AliyunLogger.logging(
                                code="2004",
                                platform=crawler,
                                mode=log_type,
                                env=env,
                                message='无效视频\n'
                            )
                        elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict,
                                           rule_dict=rule_dict) is False:
                            Common.logger(log_type, crawler).info("不满足抓取规则\n")
                            Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                            AliyunLogger.logging(
                                code="2004",
                                platform=crawler,
                                mode=log_type,
                                env=env,
                                message='不满足抓取规则\n'
                            )
                        elif any(str(word) in video_dict["video_title"]
                                 for word in get_config_from_mysql(log_type=log_type,
                                                                   source=crawler,
                                                                   env=env,
                                                                   text="filter",
                                                                   action="")):
                            Common.logger(log_type, crawler).info('已中过滤词\n')
                            Common.logging(log_type, crawler, env, '已中过滤词\n')
                            AliyunLogger.logging(
                                code="2004",
                                platform=crawler,
                                mode=log_type,
                                env=env,
                                message='已中过滤词\n'
                            )
                        elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                            Common.logger(log_type, crawler).info('视频已下载\n')
                            Common.logging(log_type, crawler, env, '视频已下载\n')
                            AliyunLogger.logging(
                                code="2002",
                                platform=crawler,
                                mode=log_type,
                                env=env,
                                message='视频已下载\n'
                            )
                        else:
                            # Normalize fields to the ETL schema and publish to MQ.
                            video_dict["out_user_id"] = video_dict["user_id"]
                            video_dict["platform"] = crawler
                            video_dict["strategy"] = log_type
                            video_dict["strategy_type"] = "hcm"
                            video_dict["out_video_id"] = video_dict["video_id"]
                            video_dict["width"] = video_dict["video_width"]
                            video_dict["height"] = video_dict["video_height"]
                            video_dict["crawler_rule"] = json.dumps(rule_dict)
                            video_dict["user_id"] = our_uid
                            video_dict["publish_time"] = video_dict["publish_time_str"]
                            mq.send_msg(video_dict)
                    except Exception as e:
                        Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                        Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
                        AliyunLogger.logging(
                            code="3000",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message=f"抓取单条视频异常:{e}\n"
                        )
        except Exception as e:
            Common.logger(log_type, crawler).error(f"抓取列表页时异常:{e}\n")
            Common.logging(log_type, crawler, env, f"抓取列表页时异常:{e}\n")
            AliyunLogger.logging(
                code="3000",
                platform=crawler,
                mode=log_type,
                env=env,
                message=f"抓取列表页时异常:{e}\n"
            )
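

# Ad-hoc entry point: runs a single recommend-feed pass against the prod environment.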
if __name__ == "__main__":
    KanyikanRecommend.get_videoList(
        log_type="recommend",
        crawler="kanyikan",
        env="prod",
        rule_dict={'share_cnt': {'min': 300, 'max': 0}},
        our_uid=64080779
    )