kuaishou_author_scheduling_new.py

# -*- coding: utf-8 -*-
# @Time: 2023/11/07
import os
import random
import sys
import time
from datetime import date, timedelta
import requests
import json
import urllib3

sys.path.append(os.getcwd())
from common.common import Common
from common import AliyunLogger
from common.mq import MQ
from requests.adapters import HTTPAdapter
from common.scheduling_db import MysqlHelper
from common.public import random_title, get_config_from_mysql, download_rule


class KuaishouauthorScheduling:
    platform = "快手"
    download_cnt = 0

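    # Resolve the per-user download quota from rule_dict; a missing or zero
    # "videos_cnt.min" is treated as effectively unlimited (1000).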
    @classmethod
    def videos_cnt(cls, rule_dict):
        videos_cnt = rule_dict.get("videos_cnt", {}).get("min", 0)
        if videos_cnt == 0:
            videos_cnt = 1000
        return videos_cnt

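    # Clean a raw caption into a usable title: cut trailing hashtag/@ segments,
    # strip filesystem-unfriendly characters, cap at 40 chars, and fall back to
    # a random title when nothing usable remains.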
    @classmethod
    def video_title(cls, log_type, crawler, env, title):
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]

        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]

        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]

        video_title = title3.strip().replace("\n", "") \
            .replace("/", "").replace("快手", "").replace(" ", "") \
            .replace("　", "").replace("&NBSP", "").replace("\r", "") \
            .replace("#", "").replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("？", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return random_title(log_type, crawler, env, text='title')
        else:
            return video_title

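    # Load the crawler cookie from the crawler_config table; returns the first
    # config row whose config blob contains a "cookie" field.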
    @classmethod
    def get_cookie(cls, log_type, crawler, env):
        select_sql = f""" select * from crawler_config where source="{crawler}" """
        configs = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
        for config in configs:
            if "cookie" in config["config"]:
                cookie_dict = {
                    "cookie_id": config["id"],
                    "title": config["title"].strip(),
                    "cookie": dict(eval(config["config"]))["cookie"].strip(),
                    "update_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(config["update_time"] / 1000))),
                    "operator": config["operator"].strip()
                }
                return cookie_dict

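    # Page through a user's profile feed via the web GraphQL endpoint, filter
    # each video against rule_dict, and publish qualifying items to MQ.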
    @classmethod
    def get_videoList(cls, log_type, crawler, user_dict, rule_dict, env):
        pcursor = ""
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        while True:
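            # A "V1"/"V2"/"V3" prefix on the user link selects a preset rule
            # tier: higher tiers demand more plays over a longer window and a
            # higher like/view ratio ("special").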
            flag = user_dict["link"].split("_")[0]
            if flag == "V1":
                rule_dict = {
                    "play_cnt": {"min": 100000, "max": 0},
                    "period": {"min": 15, "max": 15},
                    "special": 0.03
                }
            elif flag == "V2":
                rule_dict = {
                    "play_cnt": {"min": 80000, "max": 0},
                    "period": {"min": 7, "max": 7},
                    "special": 0.02
                }
            elif flag == "V3":
                rule_dict = {
                    "play_cnt": {"min": 10000, "max": 0},
                    "period": {"min": 3, "max": 3},
                    "special": 0.01
                }
            time.sleep(random.randint(10, 50))
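            # Build the visionProfilePhotoList GraphQL request; the userId is
            # the profile id with any full-URL prefix stripped.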
            url = "https://www.kuaishou.com/graphql"
            if user_dict['link'][0] == "V":
                link = user_dict["link"][3:]
            else:
                link = user_dict["link"]
            payload = json.dumps({
                "operationName": "visionProfilePhotoList",
                "variables": {
                    "userId": str(link.replace("https://www.kuaishou.com/profile/", "")),
                    "pcursor": pcursor,
                    "page": "profile"
                },
                "query": "fragment photoContent on PhotoEntity {\n id\n duration\n caption\n originCaption\n likeCount\n viewCount\n commentCount\n realLikeCount\n coverUrl\n photoUrl\n photoH265Url\n manifest\n manifestH265\n videoResource\n coverUrls {\n url\n __typename\n }\n timestamp\n expTag\n animatedCoverUrl\n distance\n videoRatio\n liked\n stereoType\n profileUserTopPhoto\n musicBlocked\n __typename\n}\n\nfragment feedContent on Feed {\n type\n author {\n id\n name\n headerUrl\n following\n headerUrls {\n url\n __typename\n }\n __typename\n }\n photo {\n ...photoContent\n __typename\n }\n canAddComment\n llsid\n status\n currentPcursor\n tags {\n type\n name\n __typename\n }\n __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n result\n llsid\n webPageArea\n feeds {\n ...feedContent\n __typename\n }\n hostName\n pcursor\n __typename\n }\n}\n"
            })
            cookie = cls.get_cookie(log_type, crawler, env)["cookie"]
            headers = {
                'Accept': '*/*',
                'Content-Type': 'application/json',
                'Origin': 'https://www.kuaishou.com',
                'Cookie': cookie,
                # Content-Length is computed by requests from the body; a
                # hardcoded value would be wrong for variable-length payloads
                'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
                'Host': 'www.kuaishou.com',
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6.1 Safari/605.1.15',
                'Referer': f'https://www.kuaishou.com/profile/{link.replace("https://www.kuaishou.com/profile/", "")}',
                'Accept-Encoding': 'gzip, deflate, br',
                'Connection': 'keep-alive'
            }
            urllib3.disable_warnings()
            s = requests.session()
            # retry each request up to 3 times
            s.mount('http://', HTTPAdapter(max_retries=3))
            s.mount('https://', HTTPAdapter(max_retries=3))
            response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False, timeout=10)
            response.close()
            # Common.logger(log_type, crawler).info(f"response:{response.text}\n")
            if response.status_code != 200:
                # a non-200 body may not be JSON, so log the raw text
                Common.logger(log_type, crawler).warning(f"response:{response.text}\n")
                AliyunLogger.logging(
                    code="2000",
                    platform=crawler,
                    mode=log_type,
                    env=env,
                    message=f"response:{response.text}\n"
                )
                return
            elif "data" not in response.json():
                Common.logger(log_type, crawler).warning(f"response:{response.json()}\n")
                AliyunLogger.logging(
                    code="2000",
                    platform=crawler,
                    mode=log_type,
                    env=env,
                    message=f"response:{response.json()}\n"
                )
                return
            elif "visionProfilePhotoList" not in response.json()["data"]:
                Common.logger(log_type, crawler).warning(f"response:{response.json()}\n")
                AliyunLogger.logging(
                    code="2000",
                    platform=crawler,
                    mode=log_type,
                    env=env,
                    message=f"response:{response.json()}\n"
                )
                return
            elif "feeds" not in response.json()["data"]["visionProfilePhotoList"]:
                Common.logger(log_type, crawler).warning(f"response:{response.json()}\n")
                AliyunLogger.logging(
                    code="2000",
                    platform=crawler,
                    mode=log_type,
                    env=env,
                    message=f"response:{response.json()}\n"
                )
                return
            elif len(response.json()["data"]["visionProfilePhotoList"]["feeds"]) == 0:
                Common.logger(log_type, crawler).warning("No more videos ~\n")
                AliyunLogger.logging(
                    code="2001",
                    platform=crawler,
                    mode=log_type,
                    env=env,
                    message="No more videos ~\n"
                )
                return
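            # advance the page cursor before iterating this page's feed items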
            pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
            feeds = response.json()['data']['visionProfilePhotoList']['feeds']
            for i in range(len(feeds)):
                try:
                    Common.logger(log_type, crawler).info('Scanned one video\n')
                    AliyunLogger.logging(
                        code="1001",
                        platform=crawler,
                        mode=log_type,
                        env=env,
                        message='Scanned one video\n'
                    )
                    if cls.download_cnt >= cls.videos_cnt(rule_dict):
                        Common.logger(log_type, crawler).info(f"Downloaded videos: {cls.download_cnt}\n")
                        AliyunLogger.logging(
                            code="2002",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message=f"Downloaded videos: {cls.download_cnt}\n"
                        )
                        return
                    video_title = feeds[i].get("photo", {}).get("caption", random_title(log_type, crawler, env, text='title'))
                    video_title = cls.video_title(log_type, crawler, env, video_title)
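                    # prefer the h264 rendition; fall back to hevc when the
                    # h264 keys are missing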
                    try:
                        video_id = feeds[i].get("photo", {}).get("videoResource").get("h264", {}).get("videoId", "")
                        video_width = feeds[i].get("photo", {}).get("videoResource").get("h264", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("width", 0)
                        video_height = feeds[i].get("photo", {}).get("videoResource").get("h264", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("height", 0)
                    except KeyError:
                        video_id = feeds[i].get("photo", {}).get("videoResource").get("hevc", {}).get("videoId", "")
                        video_width = feeds[i].get("photo", {}).get("videoResource").get("hevc", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("width", 0)
                        video_height = feeds[i].get("photo", {}).get("videoResource").get("hevc", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("height", 0)
                    publish_time_stamp = int(int(feeds[i].get('photo', {}).get('timestamp', 0)) / 1000)
                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                    viewCount = int(feeds[i].get('photo', {}).get('viewCount', 0))
                    realLikeCount = int(feeds[i].get('photo', {}).get('realLikeCount', 0))
                    # guard against division by zero when a video has no views
                    video_percent = '%.2f' % (realLikeCount / viewCount) if viewCount else '0.00'
                    special = float(rule_dict.get("special"))
                    if float(video_percent) < special:
                        Common.logger(log_type, crawler).info(f"Below rule threshold: like/view ratio {video_percent}\n")
                        AliyunLogger.logging(
                            code="2004",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message=f"Like count: {realLikeCount}\n"
                        )
                        continue
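                    # normalize the feed item into the schema downstream ETL expects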
                    video_dict = {
                        'video_title': video_title,
                        'video_id': video_id,
                        'play_cnt': int(feeds[i].get('photo', {}).get('viewCount', 0)),
                        'like_cnt': int(feeds[i].get('photo', {}).get('realLikeCount', 0)),
                        'comment_cnt': 0,
                        'share_cnt': 0,
                        'video_width': video_width,
                        'video_height': video_height,
                        'duration': int(int(feeds[i].get('photo', {}).get('duration', 0)) / 1000),
                        'publish_time_stamp': publish_time_stamp,
                        'publish_time_str': publish_time_str,
                        'user_name': feeds[i].get('author', {}).get('name', ""),
                        'user_id': feeds[i].get('author', {}).get('id', ""),
                        'avatar_url': feeds[i].get('author', {}).get('headerUrl', ""),
                        'cover_url': feeds[i].get('photo', {}).get('coverUrl', ""),
                        'video_url': feeds[i].get('photo', {}).get('photoUrl', ""),
                        'session': f"kuaishou-{int(time.time())}"
                    }
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    AliyunLogger.logging(
                        code="1000",
                        platform=crawler,
                        mode=log_type,
                        env=env,
                        message=f"{video_dict}\n"
                    )
                    if int((int(time.time()) - int(publish_time_stamp)) / (3600 * 24)) > int(rule_dict.get("period", {}).get("max", 1000)):
                        Common.logger(log_type, crawler).info(f'Published more than {int(rule_dict.get("period", {}).get("max", 1000))} days ago\n')
                        AliyunLogger.logging(
                            code="2004",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message=f'Published more than {int(rule_dict.get("period", {}).get("max", 1000))} days ago\n'
                        )
                        return
                    if video_dict["video_id"] == '' or video_dict["cover_url"] == '' or video_dict["video_url"] == '':
                        Common.logger(log_type, crawler).info('Invalid video\n')
                        AliyunLogger.logging(
                            code="2004",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message='Invalid video\n'
                        )
                    elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                        Common.logger(log_type, crawler).info("Does not meet the download rules\n")
                        AliyunLogger.logging(
                            code="2004",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message='Does not meet the download rules\n'
                        )
                    # true when a non-empty filter word appears in the title
                    elif any(str(word) and str(word) in video_dict["video_title"]
                             for word in get_config_from_mysql(log_type=log_type,
                                                               source=crawler,
                                                               env=env,
                                                               text="filter",
                                                               action="")):
                        Common.logger(log_type, crawler).info('Title contains a filtered word\n')
                        AliyunLogger.logging(
                            code="2004",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message='Title contains a filtered word\n'
                        )
                    elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                        Common.logger(log_type, crawler).info('Video already downloaded\n')
                        AliyunLogger.logging(
                            code="2002",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message='Video already downloaded\n'
                        )
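                    # passed every check: enrich with ETL fields and publish to MQ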
                    else:
                        video_dict["out_user_id"] = video_dict["user_id"]
                        video_dict["platform"] = crawler
                        video_dict["strategy"] = log_type
                        video_dict["out_video_id"] = video_dict["video_id"]
                        video_dict["width"] = video_dict["video_width"]
                        video_dict["height"] = video_dict["video_height"]
                        video_dict["crawler_rule"] = json.dumps(rule_dict)
                        video_dict["user_id"] = user_dict["uid"]
                        video_dict["publish_time"] = video_dict["publish_time_str"]
                        video_dict["strategy_type"] = log_type
                        mq.send_msg(video_dict)
                        cls.download_cnt += 1
                except Exception as e:
                    Common.logger(log_type, crawler).warning(f"Exception while crawling a single video: {e}\n")
                    AliyunLogger.logging(
                        code="3000",
                        platform=crawler,
                        mode=log_type,
                        env=env,
                        message=f"Exception while crawling a single video: {e}\n"
                    )

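    # Count prior crawler_video rows for this out_video_id; a non-zero result
    # means the video was already collected on this platform.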
    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

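    # Entry point for the scheduler: reset the per-user counter, then crawl
    # each user's profile in turn, isolating failures per user.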
    @classmethod
    def get_author_videos(cls, log_type, crawler, user_list, rule_dict, env):
        for user_dict in user_list:
            try:
                Common.logger(log_type, crawler).info(f"Start crawling {user_dict['nick_name']}'s profile videos")
                AliyunLogger.logging(
                    code="2000",
                    platform=crawler,
                    mode=log_type,
                    env=env,
                    message=f"Start crawling {user_dict['nick_name']}'s profile videos"
                )
                cls.download_cnt = 0
                cls.get_videoList(log_type=log_type,
                                  crawler=crawler,
                                  user_dict=user_dict,
                                  rule_dict=rule_dict,
                                  env=env)
            except Exception as e:
                Common.logger(log_type, crawler).warning(f"Exception while crawling {user_dict['nick_name']}'s profile videos: {e}\n")
                AliyunLogger.logging(
                    code="3000",
                    platform=crawler,
                    mode=log_type,
                    env=env,
                    message=f"Exception while crawling {user_dict['nick_name']}'s profile videos: {e}\n"
                )


if __name__ == "__main__":
    print(KuaishouauthorScheduling.get_cookie("author", "kuaishou", "prod")["cookie"])
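    # A hedged usage sketch; the user_list / rule_dict shapes below are
    # assumptions inferred from how get_videoList reads them, not a confirmed
    # schema:
    # user_list = [{"uid": 123456, "nick_name": "example_user",
    #               "link": "V1_https://www.kuaishou.com/profile/xxx"}]
    # rule_dict = {"videos_cnt": {"min": 10}, "period": {"max": 30}, "special": 0.02}
    # KuaishouauthorScheduling.get_author_videos("author", "kuaishou", user_list, rule_dict, "dev")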