kuaishou_author_scheduling_new.py

# -*- coding: utf-8 -*-
# @Time: 2023/11/07
import os
import sys
import time
from datetime import date, timedelta
import requests
import json
import urllib3

sys.path.append(os.getcwd())
from common.common import Common
from common import AliyunLogger
from common.mq import MQ
from requests.adapters import HTTPAdapter
from common.scheduling_db import MysqlHelper
from common.public import random_title, get_config_from_mysql, download_rule


class KuaishouauthorScheduling:
    platform = "快手"
    download_cnt = 0

    @classmethod
    def videos_cnt(cls, rule_dict):
        videos_cnt = rule_dict.get("videos_cnt", {}).get("min", 0)
        if videos_cnt == 0:
            videos_cnt = 1000
        return videos_cnt

    @classmethod
    def video_title(cls, log_type, crawler, env, title):
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]

        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]

        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]

        video_title = title3.strip().replace("\n", "") \
            .replace("/", "").replace("快手", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace("#", "").replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return random_title(log_type, crawler, env, text='title')
        else:
            return video_title

    @classmethod
    def get_cookie(cls, log_type, crawler, env):
        select_sql = f""" select * from crawler_config where source="{crawler}" """
        configs = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
        for config in configs:
            if "cookie" in config["config"]:
                cookie_dict = {
                    "cookie_id": config["id"],
                    "title": config["title"].strip(),
                    # config["config"] is stored as a Python-literal string; eval it and pull out the cookie value
                    "cookie": dict(eval(config["config"]))["cookie"].strip(),
                    "update_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(config["update_time"] / 1000))),
                    "operator": config["operator"].strip()
                }
                return cookie_dict

    @classmethod
    def get_videoList(cls, log_type, crawler, user_dict, rule_dict, env):
        pcursor = ""
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        while True:
            url = "https://www.kuaishou.com/graphql"
            payload = json.dumps({
                "operationName": "visionProfilePhotoList",
                "variables": {
                    "userId": user_dict["link"].replace("https://www.kuaishou.com/profile/", ""),
                    "pcursor": pcursor,
                    "page": "profile"
                },
                "query": "fragment photoContent on PhotoEntity {\n id\n duration\n caption\n originCaption\n likeCount\n viewCount\n commentCount\n realLikeCount\n coverUrl\n photoUrl\n photoH265Url\n manifest\n manifestH265\n videoResource\n coverUrls {\n url\n __typename\n }\n timestamp\n expTag\n animatedCoverUrl\n distance\n videoRatio\n liked\n stereoType\n profileUserTopPhoto\n musicBlocked\n __typename\n}\n\nfragment feedContent on Feed {\n type\n author {\n id\n name\n headerUrl\n following\n headerUrls {\n url\n __typename\n }\n __typename\n }\n photo {\n ...photoContent\n __typename\n }\n canAddComment\n llsid\n status\n currentPcursor\n tags {\n type\n name\n __typename\n }\n __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n result\n llsid\n webPageArea\n feeds {\n ...feedContent\n __typename\n }\n hostName\n pcursor\n __typename\n }\n}\n"
            })
            cookie = cls.get_cookie(log_type, crawler, env)["cookie"]
            headers = {
                'Accept': '*/*',
                'Content-Type': 'application/json',
                'Origin': 'https://www.kuaishou.com',
                'Cookie': cookie,
                'Content-Length': '1260',
                'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
                'Host': 'www.kuaishou.com',
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6.1 Safari/605.1.15',
                'Referer': f'https://www.kuaishou.com/profile/{user_dict["link"].replace("https://www.kuaishou.com/profile/", "")}',
                'Accept-Encoding': 'gzip, deflate, br',
                'Connection': 'keep-alive'
            }
            urllib3.disable_warnings()
            s = requests.session()
            # max_retries=3: retry each connection up to 3 times
            s.mount('http://', HTTPAdapter(max_retries=3))
            s.mount('https://', HTTPAdapter(max_retries=3))
            response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False, timeout=10)
            response.close()
            # Common.logger(log_type, crawler).info(f"response:{response.text}\n")
            if response.status_code != 200:
                Common.logger(log_type, crawler).warning(f"response:{response.text}\n")
                AliyunLogger.logging(
                    code="2000",
                    platform=crawler,
                    mode=log_type,
                    env=env,
                    message=f"response:{response.json()}\n"
                )
                return
            elif "data" not in response.json():
                Common.logger(log_type, crawler).warning(f"response:{response.json()}\n")
                AliyunLogger.logging(
                    code="2000",
                    platform=crawler,
                    mode=log_type,
                    env=env,
                    message=f"response:{response.json()}\n"
                )
                return
            elif "visionProfilePhotoList" not in response.json()["data"]:
                Common.logger(log_type, crawler).warning(f"response:{response.json()}\n")
                AliyunLogger.logging(
                    code="2000",
                    platform=crawler,
                    mode=log_type,
                    env=env,
                    message=f"response:{response.json()}\n"
                )
                return
            elif "feeds" not in response.json()["data"]["visionProfilePhotoList"]:
                Common.logger(log_type, crawler).warning(f"response:{response.json()}\n")
                AliyunLogger.logging(
                    code="2000",
                    platform=crawler,
                    mode=log_type,
                    env=env,
                    message=f"response:{response.json()}\n"
                )
                return
            elif len(response.json()["data"]["visionProfilePhotoList"]["feeds"]) == 0:
                Common.logger(log_type, crawler).warning(f"没有更多视频啦 ~\n")
                AliyunLogger.logging(
                    code="2001",
                    platform=crawler,
                    mode=log_type,
                    env=env,
                    message=f"没有更多视频啦 ~\n"
                )
                return
            pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
            feeds = response.json()['data']['visionProfilePhotoList']['feeds']
            for i in range(len(feeds)):
                try:
                    if cls.download_cnt >= cls.videos_cnt(rule_dict):
                        Common.logger(log_type, crawler).info(f"已下载视频数:{cls.download_cnt}\n")
                        AliyunLogger.logging(
                            code="2002",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message=f"已下载视频数:{cls.download_cnt}\n"
                        )
                        return
                    video_title = feeds[i].get("photo", {}).get("caption", random_title(log_type, crawler, env, text='title'))
                    video_title = cls.video_title(log_type, crawler, env, video_title)
                    try:
                        video_id = feeds[i].get("photo", {}).get("videoResource").get("h264", {}).get("videoId", "")
                        video_width = feeds[i].get("photo", {}).get("videoResource").get("h264", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("width", 0)
                        video_height = feeds[i].get("photo", {}).get("videoResource").get("h264", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("height", 0)
                    except KeyError:
                        # fall back to the HEVC resource when the H.264 entry is missing
                        video_id = feeds[i].get("photo", {}).get("videoResource").get("hevc", {}).get("videoId", "")
                        video_width = feeds[i].get("photo", {}).get("videoResource").get("hevc", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("width", 0)
                        video_height = feeds[i].get("photo", {}).get("videoResource").get("hevc", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("height", 0)
                    publish_time_stamp = int(int(feeds[i].get('photo', {}).get('timestamp', 0)) / 1000)
                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                    # skip videos published more than 60 days ago
                    date_sixty_days_ago_string = (date.today() + timedelta(days=-60)).strftime("%Y-%m-%d %H:%M:%S")
                    rule = publish_time_str > date_sixty_days_ago_string
                    if not rule:
                        Common.logger(log_type, crawler).info(f"发布时间超过60天,发布时间:{publish_time_str}\n")
                        AliyunLogger.logging(
                            code="2004",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message=f"发布时间超过60天,发布时间:{publish_time_str}\n"
                        )
                        continue
                    realLikeCount = int(feeds[i].get('photo', {}).get('realLikeCount', 0))
                    if realLikeCount < 10000:
                        Common.logger(log_type, crawler).info(f"点赞量:{realLikeCount}\n")
                        AliyunLogger.logging(
                            code="2004",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message=f"点赞量:{realLikeCount}\n"
                        )
                        continue
                    video_dict = {
                        'video_title': video_title,
                        'video_id': video_id,
                        'play_cnt': int(feeds[i].get('photo', {}).get('viewCount', 0)),
                        'like_cnt': int(feeds[i].get('photo', {}).get('realLikeCount', 0)),
                        'comment_cnt': 0,
                        'share_cnt': 0,
                        'video_width': video_width,
                        'video_height': video_height,
                        'duration': int(int(feeds[i].get('photo', {}).get('duration', 0)) / 1000),
                        'publish_time_stamp': publish_time_stamp,
                        'publish_time_str': publish_time_str,
                        'user_name': feeds[i].get('author', {}).get('name', ""),
                        'user_id': feeds[i].get('author', {}).get('id', ""),
                        'avatar_url': feeds[i].get('author', {}).get('headerUrl', ""),
                        'cover_url': feeds[i].get('photo', {}).get('coverUrl', ""),
                        'video_url': feeds[i].get('photo', {}).get('photoUrl', ""),
                        'session': f"kuaishou-{int(time.time())}"
                    }
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    AliyunLogger.logging(
                        code="1000",
                        platform=crawler,
                        mode=log_type,
                        env=env,
                        message=f"{video_dict}\n"
                    )
                    if int((int(time.time()) - int(publish_time_stamp)) / (3600*24)) > int(rule_dict.get("period", {}).get("max", 1000)):
                        Common.logger(log_type, crawler).info(f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n')
                        AliyunLogger.logging(
                            code="2004",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message=f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n'
                        )
                        return
                    if video_dict["video_id"] == '' or video_dict["cover_url"] == '' or video_dict["video_url"] == '':
                        Common.logger(log_type, crawler).info('无效视频\n')
                        AliyunLogger.logging(
                            code="2004",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message='无效视频\n'
                        )
                    elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
                        AliyunLogger.logging(
                            code="2004",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message='不满足抓取规则\n'
                        )
                    elif any(str(word) in video_dict["video_title"]
                             for word in get_config_from_mysql(log_type=log_type,
                                                               source=crawler,
                                                               env=env,
                                                               text="filter",
                                                               action="")):
                        Common.logger(log_type, crawler).info('已中过滤词\n')
                        AliyunLogger.logging(
                            code="2004",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message='已中过滤词\n'
                        )
                    elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                        Common.logger(log_type, crawler).info('视频已下载\n')
                        AliyunLogger.logging(
                            code="2002",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message='视频已下载\n'
                        )
                    else:
                        video_dict["out_user_id"] = video_dict["user_id"]
                        video_dict["platform"] = crawler
                        video_dict["strategy"] = log_type
                        video_dict["out_video_id"] = video_dict["video_id"]
                        video_dict["width"] = video_dict["video_width"]
                        video_dict["height"] = video_dict["video_height"]
                        video_dict["crawler_rule"] = json.dumps(rule_dict)
                        video_dict["user_id"] = user_dict["uid"]
                        video_dict["publish_time"] = video_dict["publish_time_str"]
                        video_dict["strategy_type"] = log_type
                        mq.send_msg(video_dict)
                        cls.download_cnt += 1
                except Exception as e:
                    Common.logger(log_type, crawler).warning(f"抓取单条视频异常:{e}\n")
                    AliyunLogger.logging(
                        code="3000",
                        platform=crawler,
                        mode=log_type,
                        env=env,
                        message=f"抓取单条视频异常:{e}\n"
                    )

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    @classmethod
    def get_author_videos(cls, log_type, crawler, user_list, rule_dict, env):
        for user_dict in user_list:
            try:
                Common.logger(log_type, crawler).info(f"开始抓取 {user_dict['nick_name']} 主页视频")
                AliyunLogger.logging(
                    code="2000",
                    platform=crawler,
                    mode=log_type,
                    env=env,
                    message=f"开始抓取 {user_dict['nick_name']} 主页视频"
                )
                cls.download_cnt = 0
                cls.get_videoList(
                    log_type=log_type,
                    crawler=crawler,
                    user_dict=user_dict,
                    rule_dict=rule_dict,
                    env=env
                )
            except Exception as e:
                Common.logger(log_type, crawler).warning(f"抓取用户{user_dict['nick_name']}主页视频时异常:{e}\n")
                AliyunLogger.logging(
                    code="3000",
                    platform=crawler,
                    mode=log_type,
                    env=env,
                    message=f"抓取用户{user_dict['nick_name']}主页视频时异常:{e}\n"
                )


if __name__ == "__main__":
    print(KuaishouauthorScheduling.get_cookie("author", "kuaishou", "prod")["cookie"])
    pass
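
    # --- Hypothetical usage sketch (assumption, not part of the original file) ---
    # Shows how get_author_videos might be driven end to end. The user_list and
    # rule_dict field values below are illustrative placeholders; in production
    # they come from the scheduling database. Flip run_demo to True to try it.
    run_demo = False
    if run_demo:
        sample_user_list = [{
            "uid": 123456,                                    # internal user id (assumed)
            "nick_name": "示例账号",                           # display name (assumed)
            "link": "https://www.kuaishou.com/profile/xxx",   # author profile link (placeholder)
        }]
        sample_rule_dict = {
            "videos_cnt": {"min": 10, "max": 0},   # videos_cnt() uses "min" as the per-run download cap
            "period": {"min": 0, "max": 60},       # only keep videos from the last 60 days
        }
        KuaishouauthorScheduling.get_author_videos(
            log_type="author",
            crawler="kuaishou",
            user_list=sample_user_list,
            rule_dict=sample_rule_dict,
            env="dev",
        )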