kyk_recommend.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/6/21
"""
Kanyikan ("看一看") recommendation feed.
Crawl mode for the 207 server.
The WeChat session is fetched from the crawler platform.
"""
import json
import os
import random
import sys
import time

import requests
import urllib3

sys.path.append(os.getcwd())
from common.mq import MQ
from common.common import Common
from common.scheduling_db import MysqlHelper
from common.public import get_config_from_mysql, download_rule

proxies = {"http": None, "https": None}
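

# KanyikanRecommend crawls the WeChat "看一看" (Kanyikan) recommendation feed
# and pushes videos that pass the download rules to the ETL message queue.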
class KanyikanRecommend:
    platform = "看一看"
    strategy = "推荐抓取策略"
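
    # Look up the WeChat mini-program session token in crawler_config: rows
    # whose title is "看一看推荐" store a JSON config with a "token" field.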
    @classmethod
    def get_session(cls, log_type, crawler, env):
        session_sql = """ SELECT * FROM crawler_config WHERE `source` in ('kanyikan', '看一看'); """
        session_response = MysqlHelper.get_values(log_type, crawler, session_sql, env, action="")
        for config in session_response:
            if "token" in config["config"] and config["title"] == "看一看推荐":
                token_str = config["config"]
                token = json.loads(token_str)["token"]
                return token
        return None
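
    # Count existing crawler_video rows for this out_video_id so callers can
    # skip videos that were already crawled.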
    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and create_time>='2023-06-26' and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
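
    # Fetch one page of the recommendation feed, normalise each item into a
    # video_dict, apply the filters, and publish survivors to the ETL queue.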
    @classmethod
    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        try:
            Common.logger(log_type, crawler).info("正在抓取列表页")
            Common.logging(log_type, crawler, env, "正在抓取列表页")
            session = cls.get_session(log_type, crawler, env)
            Common.logger(log_type, crawler).info(f"session:{session}")
            if session is None:
                Common.logger(log_type, crawler).info("session is None!")
                time.sleep(1)
                cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
                # Bail out after the retry; without this return the request
                # below would run with session=None.
                return
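            # Query the mini program's recommendation-list endpoint with the
            # headers and parameters captured from a real WeChat client.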
            url = 'https://search.weixin.qq.com/cgi-bin/recwxa/recwxavideolist?'
            header = {
                "Connection": "keep-alive",
                "content-type": "application/json",
                "Accept-Encoding": "gzip,compress,br,deflate",
                "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) "
                              "AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.18(0x18001236) "
                              "NetType/WIFI Language/zh_CN",
                "Referer": "https://servicewechat.com/wxbb9a805eb4f9533c/234/page-frame.html",
            }
            params = {
                'session': session,
                "offset": 0,
                "wxaVersion": "3.9.2",
                "count": "10",
                "channelid": "208",
                "scene": '310',
                "subscene": '1089',
                "clientVersion": '8.0.18',
                "sharesearchid": '0',
                "nettype": 'wifi',
                "switchprofile": "0",
                "switchnewuser": "0",
            }
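            # verify=False skips TLS verification (presumably to tolerate the
            # packet-capture/proxy environment this crawler runs in), so the
            # matching urllib3 warning is silenced below.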
            urllib3.disable_warnings()
            response = requests.get(url=url, headers=header, params=params, proxies=proxies, verify=False)
            if "data" not in response.text:
                Common.logger(log_type, crawler).info("获取视频list时,session过期,随机睡眠 31-40 秒")
                Common.logging(log_type, crawler, env, "获取视频list时,session过期,随机睡眠 31-40 秒")
                # The session has expired: sleep 31-40 seconds at random, retry,
                # and return so the stale response is not parsed below.
                time.sleep(random.randint(31, 40))
                cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
                return
            elif "items" not in response.json()["data"]:
                Common.logger(log_type, crawler).info(f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
                Common.logging(log_type, crawler, env, f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
                # The feed came back without items: sleep 1-3 minutes at random
                # and retry.
                time.sleep(random.randint(60, 180))
                cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
                return
            feeds = response.json().get("data", {}).get("items", "")
            if feeds == "":
                Common.logger(log_type, crawler).info(f"feeds:{feeds}")
                Common.logging(log_type, crawler, env, f"feeds:{feeds}")
                return
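            # Process feed items one by one; a failure on a single item is
            # logged and skipped so it cannot abort the whole page.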
            for feed in feeds:
                try:
                    # Strip newlines plus characters that are illegal in file
                    # names or break downstream processing out of the title.
                    video_title = feed.get("title", "").strip().replace("\n", "") \
                        .replace("/", "").replace("\\", "").replace("\r", "") \
                        .replace(":", "").replace("*", "").replace("?", "") \
                        .replace("?", "").replace('"', "").replace("<", "") \
                        .replace(">", "").replace("|", "").replace(" ", "") \
                        .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
                        .replace("'", "").replace("#", "").replace("Merge", "")
                    publish_time_stamp = feed.get("date", 0)
                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                    # Resolve the playback URL: prefer mpInfo CDN entries (the
                    # third one when more than two exist), then ctnInfo, then
                    # the top-level urlInfo.
                    if "videoInfo" not in feed:
                        video_url = ""
                    elif "mpInfo" in feed["videoInfo"]["videoCdnInfo"]:
                        if len(feed["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"]) > 2:
                            video_url = feed["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][2]["url"]
                        else:
                            video_url = feed["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][0]["url"]
                    elif "ctnInfo" in feed["videoInfo"]["videoCdnInfo"]:
                        video_url = feed["videoInfo"]["videoCdnInfo"]["ctnInfo"]["urlInfo"][0]["url"]
                    else:
                        video_url = feed["videoInfo"]["videoCdnInfo"]["urlInfo"][0]["url"]
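                    # Normalise the raw feed item into the crawler's common
                    # video_dict shape.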
                    video_dict = {
                        "video_title": video_title,
                        "video_id": feed.get("videoId", ""),
                        "play_cnt": feed.get("playCount", 0),
                        "like_cnt": feed.get("liked_cnt", 0),
                        "comment_cnt": feed.get("comment_cnt", 0),
                        "share_cnt": feed.get("shared_cnt", 0),
                        "duration": feed.get("mediaDuration", 0),
                        "video_width": feed.get("short_video_info", {}).get("width", 0),
                        "video_height": feed.get("short_video_info", {}).get("height", 0),
                        "publish_time_stamp": publish_time_stamp,
                        "publish_time_str": publish_time_str,
                        "user_name": feed.get("source", "").strip().replace("\n", ""),
                        "user_id": feed.get("openid", ""),
                        "avatar_url": feed.get("bizIcon", ""),
                        "cover_url": feed.get("thumbUrl", ""),
                        "video_url": video_url,
                        "session": session,
                    }
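                    # Log every field for traceability, then run the filters.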
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
                    if video_dict["video_id"] == "" or video_dict["video_title"] == "" or video_dict["video_url"] == "":
                        Common.logger(log_type, crawler).info("无效视频\n")
                        Common.logging(log_type, crawler, env, "无效视频\n")
                    elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
                        Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                    elif any(str(word) in video_dict["video_title"]
                             for word in get_config_from_mysql(log_type=log_type,
                                                               source=crawler,
                                                               env=env,
                                                               text="filter",
                                                               action="")):
                        Common.logger(log_type, crawler).info('已中过滤词\n')
                        Common.logging(log_type, crawler, env, '已中过滤词\n')
                    elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                        Common.logger(log_type, crawler).info('视频已下载\n')
                        Common.logging(log_type, crawler, env, '视频已下载\n')
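                    # Passed all filters: map the dict onto the ETL schema and
                    # publish it to the message queue.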
                    else:
                        video_dict["out_user_id"] = video_dict["user_id"]
                        video_dict["platform"] = crawler
                        video_dict["strategy"] = log_type
                        video_dict["out_video_id"] = video_dict["video_id"]
                        video_dict["width"] = video_dict["video_width"]
                        video_dict["height"] = video_dict["video_height"]
                        video_dict["crawler_rule"] = json.dumps(rule_dict)
                        video_dict["user_id"] = our_uid
                        video_dict["publish_time"] = video_dict["publish_time_str"]
                        mq.send_msg(video_dict)
                except Exception as e:
                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                    Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
        except Exception as e:
            Common.logger(log_type, crawler).error(f"抓取列表页时异常:{e}\n")
            Common.logging(log_type, crawler, env, f"抓取列表页时异常:{e}\n")
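

# Manual smoke test: fetch the session token from the dev environment.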
if __name__ == "__main__":
    # print(get_config_from_mysql(log_type="recommend",
    #                             source="kanyikan",
    #                             env="dev",
    #                             text="filter",
    #                             action=""))
    KanyikanRecommend.get_session(log_type="recommend", crawler="kanyikan", env="dev")