# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/2/24
import os
import random
import shutil
import sys
import time
from hashlib import md5
import requests
import json
import urllib3
from requests.adapters import HTTPAdapter
from selenium import webdriver
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver.chrome.service import Service
sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.getuser import getUser
from common.db import MysqlHelper
from common.publish import Publish


class Follow:
    platform = "快手"
    tag = "快手爬虫,定向爬虫策略"
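
    # Read the two download-rule sets from Feishu sheet "3iqG4z": index=1 uses sheet rows 2-8,
    # index=2 uses rows 10-16. Each value concatenates two cells into a comparison string
    # (e.g. ">=" and "1000") that download_rule later evals against the video's metrics.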
    @classmethod
    def get_rule(cls, log_type, crawler, index):
        try:
            while True:
                rule_sheet = Feishu.get_values_batch(log_type, crawler, "3iqG4z")
                if rule_sheet is None:
                    Common.logger(log_type, crawler).warning("rule_sheet is None! Retrying in 10 seconds")
                    time.sleep(10)
                    continue
                if index == 1:
                    rule_dict = {
                        "play_cnt": f"{rule_sheet[1][1]}{rule_sheet[1][2]}",
                        "video_width": f"{rule_sheet[2][1]}{rule_sheet[2][2]}",
                        "video_height": f"{rule_sheet[3][1]}{rule_sheet[3][2]}",
                        "like_cnt": f"{rule_sheet[4][1]}{rule_sheet[4][2]}",
                        "duration": f"{rule_sheet[5][1]}{rule_sheet[5][2]}",
                        "download_cnt": f"{rule_sheet[6][1]}{rule_sheet[6][2]}",
                        "publish_time": f"{rule_sheet[7][1]}{rule_sheet[7][2]}",
                    }
                    # for k, v in rule_dict.items():
                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
                    return rule_dict
                elif index == 2:
                    rule_dict = {
                        "play_cnt": f"{rule_sheet[9][1]}{rule_sheet[9][2]}",
                        "video_width": f"{rule_sheet[10][1]}{rule_sheet[10][2]}",
                        "video_height": f"{rule_sheet[11][1]}{rule_sheet[11][2]}",
                        "like_cnt": f"{rule_sheet[12][1]}{rule_sheet[12][2]}",
                        "duration": f"{rule_sheet[13][1]}{rule_sheet[13][2]}",
                        "download_cnt": f"{rule_sheet[14][1]}{rule_sheet[14][2]}",
                        "publish_time": f"{rule_sheet[15][1]}{rule_sheet[15][2]}",
                    }
                    # for k, v in rule_dict.items():
                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
                    return rule_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
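
    # A video passes a rule set only if every metric satisfies its comparison string;
    # each check concatenates the value and the rule (e.g. "1200" + ">=1000") and evals it.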
    @classmethod
    def download_rule(cls, video_dict, rule_dict):
        if eval(f"{video_dict['play_cnt']}{rule_dict['play_cnt']}") is True \
                and eval(f"{video_dict['video_width']}{rule_dict['video_width']}") is True \
                and eval(f"{video_dict['video_height']}{rule_dict['video_height']}") is True \
                and eval(f"{video_dict['like_cnt']}{rule_dict['like_cnt']}") is True \
                and eval(f"{video_dict['duration']}{rule_dict['duration']}") is True \
                and eval(f"{video_dict['publish_time']}{rule_dict['publish_time']}") is True:
            return True
        else:
            return False

    # Filter-word list
    @classmethod
    def filter_words(cls, log_type, crawler):
        try:
            while True:
                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, 'HIKVvs')
                if filter_words_sheet is None:
                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet}, retrying in 10 seconds")
                    time.sleep(10)
                    continue
                filter_words_list = []
                for x in filter_words_sheet:
                    for y in x:
                        if y is None:
                            pass
                        else:
                            filter_words_list.append(y)
                return filter_words_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'filter_words exception:{e}\n')

    # Generic fallback titles
    @classmethod
    def random_title(cls, log_type, crawler):
        try:
            while True:
                random_title_sheet = Feishu.get_values_batch(log_type, crawler, '0DiyXe')
                if random_title_sheet is None:
                    Common.logger(log_type, crawler).warning(f"random_title_sheet:{random_title_sheet}, retrying in 10 seconds")
                    time.sleep(10)
                    continue
                random_title_list = []
                for x in random_title_sheet:
                    for y in x:
                        if y is None:
                            pass
                        else:
                            random_title_list.append(y)
                return random.choice(random_title_list)
        except Exception as e:
            Common.logger(log_type, crawler).error(f'random_title:{e}\n')

    # Fetch the off-platform (Kuaishou) user profile
    @classmethod
    def get_out_user_info(cls, log_type, crawler, out_uid):
        try:
            url = "https://www.kuaishou.com/graphql"
            payload = json.dumps({
                "operationName": "visionProfile",
                "variables": {
                    "userId": str(out_uid)
                },
                "query": "query visionProfile($userId: String) {\n visionProfile(userId: $userId) {\n result\n hostName\n userProfile {\n ownerCount {\n fan\n photo\n follow\n photo_public\n __typename\n }\n profile {\n gender\n user_name\n user_id\n headurl\n user_text\n user_profile_bg_url\n __typename\n }\n isFollowing\n __typename\n }\n __typename\n }\n}\n"
            })
            headers = {
                # 'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId={"".join(str(random.choice(range(1, 10))) for _ in range(10))}; kpn=KUAISHOU_VISION; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABE4wGjnJauApJelOpl9Xqo8TVDAyra7Pvo0rZtVgMSZxgVuw4Z6P2UtHv_CHOk2Ne2el1hdE_McCptWs8tRdtYlhXFlVOu8rQX7CwexzOBudJAfB3lDN8LPc4o4qHNwqFxy5J5j_WzdllbqMmaDUK9yUxX6XA-JFezzq9jvBwtGv7_hzB7pFrUcH39z0EYOQaZo5lDl-pE09Gw7wr8NvlZRoSdWlbobCW6oJxuQLJTUr9oj_uIiBhkeb1psaIIc3VwfYQ1UfvobrXAP_WpnRabE_3UZUBOygFMAE; kuaishou.server.web_ph=2b981e2051d7130c977fd31df97fe6f5ad54',
                'Cookie': 'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABszwASP0eGT2ym0NUdGf1PTi8_gDYrDms9otf5FKMUrx0V7MOTT1hhvpVJijmgku8KYtu3a6g3X1hZODMpX0ebZUip44txWPoY3VRqbsnBKEOs9Qkmx3uLaX33nq8KevOKLoO0cIE8nfwMJISe_BtCHr22cbJkofI0xfJXBt_ZgPqJIqWsUwdgwKzqRYn47ROkIqzeDfEjbxaZxiXy22ZhhoSlCobbmtjkvjpY9x730BPP_C5IiAl9EBaCTyvMw5IIcnkmcjMeWC8w6LKzeFMCNhcqrfkgCgFMAE; kuaishou.server.web_ph=19ae269a54f619c49def39ef5b54ae6d4353; kpn=KUAISHOU_VISION',
                'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
                'content-type': 'application/json',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
                'Cache-Control': 'no-cache',
                'Connection': 'keep-alive',
                'Origin': 'https://www.kuaishou.com',
                'Pragma': 'no-cache',
                'Sec-Fetch-Dest': 'empty',
                'Sec-Fetch-Mode': 'cors',
                'Sec-Fetch-Site': 'same-origin',
                'accept': '*/*',
                'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"macOS"'
            }
            urllib3.disable_warnings()
            s = requests.session()
            # max_retries=3: retry each request up to 3 times
            s.mount('http://', HTTPAdapter(max_retries=3))
            s.mount('https://', HTTPAdapter(max_retries=3))
            response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False,
                              timeout=5)
            response.close()
            # Common.logger(log_type, crawler).info(f"get_out_user_info_response:{response.text}")
            if response.status_code != 200:
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.text}\n")
                return
            elif 'data' not in response.json():
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()}\n")
                return
            elif 'visionProfile' not in response.json()['data']:
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()['data']}\n")
                return
            elif 'userProfile' not in response.json()['data']['visionProfile']:
                Common.logger(log_type, crawler).warning(
                    f"get_out_user_info_response:{response.json()['data']['visionProfile']}\n")
                return
            else:
                userProfile = response.json()['data']['visionProfile']['userProfile']
                # Common.logger(log_type, crawler).info(f"userProfile:{userProfile}")
                try:
                    out_fans_str = str(userProfile['ownerCount']['fan'])
                except Exception:
                    out_fans_str = "0"
                try:
                    out_follow_str = str(userProfile['ownerCount']['follow'])
                except Exception:
                    out_follow_str = "0"
                try:
                    out_avatar_url = userProfile['profile']['headurl']
                except Exception:
                    out_avatar_url = ""
                Common.logger(log_type, crawler).info(f"out_fans_str:{out_fans_str}")
                Common.logger(log_type, crawler).info(f"out_follow_str:{out_follow_str}")
                Common.logger(log_type, crawler).info(f"out_avatar_url:{out_avatar_url}")
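                # Counts may come back as strings like "1.2万"; convert 万 (10,000) suffixed values to plain integers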
  183. if "万" in out_fans_str:
  184. out_fans = int(float(out_fans_str.split("万")[0]) * 10000)
  185. else:
  186. out_fans = int(out_fans_str.replace(",", ""))
  187. if "万" in out_follow_str:
  188. out_follow = int(float(out_follow_str.split("万")[0]) * 10000)
  189. else:
  190. out_follow = int(out_follow_str.replace(",", ""))
  191. out_user_dict = {
  192. "out_fans": out_fans,
  193. "out_follow": out_follow,
  194. "out_avatar_url": out_avatar_url
  195. }
  196. Common.logger(log_type, crawler).info(f"out_user_dict:{out_user_dict}")
  197. return out_user_dict
  198. except Exception as e:
  199. Common.logger(log_type, crawler).error(f"get_out_user_info:{e}\n")

    # Fetch the user list; create an in-platform user for any row that does not have one yet
    @classmethod
    def get_user_list(cls, log_type, crawler, sheetid, env, machine):
        try:
            while True:
                user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
                if user_sheet is None:
                    Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, retrying in 10 seconds")
                    time.sleep(10)
                    continue
                our_user_list = []
                for i in range(1, len(user_sheet)):
                    # for i in range(1, 2):
                    out_uid = user_sheet[i][2]
                    user_name = user_sheet[i][3]
                    our_uid = user_sheet[i][6]
                    our_user_link = user_sheet[i][7]
                    if out_uid is None or user_name is None:
                        Common.logger(log_type, crawler).info("empty row\n")
                    else:
                        Common.logger(log_type, crawler).info(f"updating user info for {user_name}\n")
                        if our_uid is None:
                            out_user_info = cls.get_out_user_info(log_type, crawler, out_uid)
                            out_user_dict = {
                                "out_uid": out_uid,
                                "user_name": user_name,
                                "out_avatar_url": out_user_info["out_avatar_url"],
                                "out_create_time": '',
                                "out_tag": '',
                                "out_play_cnt": 0,
                                "out_fans": out_user_info["out_fans"],
                                "out_follow": out_user_info["out_follow"],
                                "out_friend": 0,
                                "out_like": 0,
                                "platform": cls.platform,
                                "tag": cls.tag,
                            }
                            our_user_dict = getUser.create_user(log_type=log_type, crawler=crawler,
                                                                out_user_dict=out_user_dict, env=env, machine=machine)
                            our_uid = our_user_dict['our_uid']
                            our_user_link = our_user_dict['our_user_link']
                            Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
                                                 [[our_uid, our_user_link]])
                            Common.logger(log_type, crawler).info('in-platform user info written back to Feishu\n')
                            our_user_list.append(our_user_dict)
                        else:
                            our_user_dict = {
                                'out_uid': out_uid,
                                'user_name': user_name,
                                'our_uid': our_uid,
                                'our_user_link': our_user_link,
                            }
                            our_user_list.append(our_user_dict)
                return our_user_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'get_user_list:{e}\n')

    # Clean up the video title
    @classmethod
    def video_title(cls, log_type, crawler, title):
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]

        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]

        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]

        video_title = title3.strip().replace("\n", "") \
            .replace("/", "").replace("快手", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace("#", "").replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "")[:40]
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return cls.random_title(log_type, crawler)
        else:
            return video_title
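
    # Open the user's profile page in headless Chrome and pull the Cookie header sent to
    # www.kuaishou.com out of the browser's performance (network) log.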
    @classmethod
    def get_cookie(cls, log_type, crawler, out_uid, machine):
        try:
            # enable performance (network) logging
            ca = DesiredCapabilities.CHROME
            ca["goog:loggingPrefs"] = {"performance": "ALL"}
            # run Chrome without opening a window
            chrome_options = webdriver.ChromeOptions()
            chrome_options.add_argument("headless")
            chrome_options.add_argument(
                'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
            chrome_options.add_argument("--no-sandbox")
            # initialize the driver
            if machine == "aliyun":
                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
            elif machine == "macpro":
                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options,
                                          service=Service('/Users/lieyunye/Downloads/chromedriver_v107/chromedriver'))
            elif machine == "macair":
                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options,
                                          service=Service('/Users/piaoquan/Downloads/chromedriver_v108/chromedriver'))
            else:
                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
                    '/Users/wangkun/Downloads/chromedriver/chromedriver_v109/chromedriver'))
            driver.implicitly_wait(10)
            # print('open the profile page')
            driver.get(f'https://www.kuaishou.com/profile/{out_uid}')
            time.sleep(1)
            # print('parse the cookies')
            logs = driver.get_log("performance")
            # Common.logger(log_type, crawler).info('got logs:{}\n', logs)
            # print('quit the browser')
            driver.quit()
            for line in logs:
                msg = json.loads(line['message'])
                # Common.logger(log_type, crawler).info(f"{msg}\n\n")
                if 'message' not in msg:
                    pass
                elif 'params' not in msg['message']:
                    pass
                elif 'headers' not in msg['message']['params']:
                    pass
                elif 'Cookie' not in msg['message']['params']['headers']:
                    pass
                elif msg['message']['params']['headers']['Host'] != 'www.kuaishou.com':
                    pass
                else:
                    cookie = msg['message']['params']['headers']['Cookie']
                    # Common.logger(log_type, crawler).info(f"{cookie}")
                    return cookie
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_cookie:{e}\n")
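
    # Page through a user's profile feed via the visionProfilePhotoList GraphQL query,
    # evaluate every video against both rule sets, and hand qualifying videos to download_publish.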
    @classmethod
    def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine):
        try:
            download_cnt_1, download_cnt_2 = 0, 0
            pcursor = ""
            while True:
                rule_dict_1 = cls.get_rule(log_type, crawler, 1)
                rule_dict_2 = cls.get_rule(log_type, crawler, 2)
                if rule_dict_1 is None or rule_dict_2 is None:
                    Common.logger(log_type, crawler).warning("rule_dict is None, retrying in 10 seconds")
                    time.sleep(10)
                else:
                    break
            while True:
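                # Stop paging once both rule sets have reached their download quota
                # (the number embedded in the download_cnt rule, e.g. "<=3")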
                if download_cnt_1 >= int(rule_dict_1['download_cnt'].replace("=", "").replace("<", "").replace(">", "")) \
                        and download_cnt_2 >= int(rule_dict_2['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
                    Common.logger(log_type, crawler).info(
                        f"Rule 1 has downloaded {download_cnt_1} videos, rule 2 has downloaded {download_cnt_2} videos\n")
                    return
                url = "https://www.kuaishou.com/graphql"
                payload = json.dumps({
                    "operationName": "visionProfilePhotoList",
                    "variables": {
                        "userId": out_uid,
                        "pcursor": pcursor,
                        "page": "profile"
                    },
                    "query": "fragment photoContent on PhotoEntity {\n id\n duration\n caption\n originCaption\n likeCount\n viewCount\n realLikeCount\n coverUrl\n photoUrl\n photoH265Url\n manifest\n manifestH265\n videoResource\n coverUrls {\n url\n __typename\n }\n timestamp\n expTag\n animatedCoverUrl\n distance\n videoRatio\n liked\n stereoType\n profileUserTopPhoto\n musicBlocked\n __typename\n}\n\nfragment feedContent on Feed {\n type\n author {\n id\n name\n headerUrl\n following\n headerUrls {\n url\n __typename\n }\n __typename\n }\n photo {\n ...photoContent\n __typename\n }\n canAddComment\n llsid\n status\n currentPcursor\n tags {\n type\n name\n __typename\n }\n __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n result\n llsid\n webPageArea\n feeds {\n ...feedContent\n __typename\n }\n hostName\n pcursor\n __typename\n }\n}\n"
                })
                # get_cookie = cls.get_cookie(log_type, crawler, out_uid, machine)
                # if get_cookie is None:
                #     cookie = 'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABszwASP0eGT2ym0NUdGf1PTi8_gDYrDms9otf5FKMUrx0V7MOTT1hhvpVJijmgku8KYtu3a6g3X1hZODMpX0ebZUip44txWPoY3VRqbsnBKEOs9Qkmx3uLaX33nq8KevOKLoO0cIE8nfwMJISe_BtCHr22cbJkofI0xfJXBt_ZgPqJIqWsUwdgwKzqRYn47ROkIqzeDfEjbxaZxiXy22ZhhoSlCobbmtjkvjpY9x730BPP_C5IiAl9EBaCTyvMw5IIcnkmcjMeWC8w6LKzeFMCNhcqrfkgCgFMAE; kuaishou.server.web_ph=19ae269a54f619c49def39ef5b54ae6d4353; kpn=KUAISHOU_VISION'
                # else:
                #     cookie = get_cookie
                # Common.logger(log_type, crawler).info(f"cookie:{cookie}")
                headers = {
                    # 'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId={"".join(str(random.choice(range(1, 10))) for _ in range(10))}; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABOLgYYcIJ5ilxU46Jc-HLWThY8sppX3V0htC_KhSGOzAjP2hAOdegzfkZGAxS5rf6rCBS487FkxfYzLkV__I6b1lK16rDjvv94Kkoo4z7mgf8y8rFgWoqrp81JAWTtx00y-wrc1XXPf9RAVQoET70wWaeNG2r5bxtZEiNwpK_zPi0ZdUo0BW13dFKfVssAy2xKYh0UlJ8VSd_vBvyMKSxVBoSf061Kc3w5Nem7YdpVBmH39ceIiBpiGioLzbZqlHiSbwkH_LhUhNXz3o7LITj098KUytk2CgFMAE; kuaishou.server.web_ph=f1033957981996a7d50e849a9ded4cf4adff; kpn=KUAISHOU_VISION',
                    # 'Cookie': cookie,
                    'Cookie': 'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABszwASP0eGT2ym0NUdGf1PTi8_gDYrDms9otf5FKMUrx0V7MOTT1hhvpVJijmgku8KYtu3a6g3X1hZODMpX0ebZUip44txWPoY3VRqbsnBKEOs9Qkmx3uLaX33nq8KevOKLoO0cIE8nfwMJISe_BtCHr22cbJkofI0xfJXBt_ZgPqJIqWsUwdgwKzqRYn47ROkIqzeDfEjbxaZxiXy22ZhhoSlCobbmtjkvjpY9x730BPP_C5IiAl9EBaCTyvMw5IIcnkmcjMeWC8w6LKzeFMCNhcqrfkgCgFMAE; kuaishou.server.web_ph=19ae269a54f619c49def39ef5b54ae6d4353; kpn=KUAISHOU_VISION',
                    'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
                    'content-type': 'application/json',
                    # 'accept': '*/*',
                    # 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
                    # 'Cache-Control': 'no-cache',
                    # 'Connection': 'keep-alive',
                    # 'Origin': 'https://www.kuaishou.com',
                    # 'Pragma': 'no-cache',
                    # 'Sec-Fetch-Dest': 'empty',
                    # 'Sec-Fetch-Mode': 'cors',
                    # 'Sec-Fetch-Site': 'same-origin',
                    # 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
                    # 'sec-ch-ua-mobile': '?0',
                    # 'sec-ch-ua-platform': '"macOS"'
                }
                urllib3.disable_warnings()
                s = requests.session()
                # max_retries=3: retry each request up to 3 times
                s.mount('http://', HTTPAdapter(max_retries=3))
                s.mount('https://', HTTPAdapter(max_retries=3))
                response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False,
                                  timeout=5)
                response.close()
                # Common.logger(log_type, crawler).info(f"get_videoList:{response.text}\n")
                if response.status_code != 200:
                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.text}\n")
                    return
                elif 'data' not in response.json():
                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
                    return
                elif 'visionProfilePhotoList' not in response.json()['data']:
                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
                    return
                elif 'feeds' not in response.json()['data']['visionProfilePhotoList']:
                    Common.logger(log_type, crawler).warning(
                        f"get_videoList_response:{response.json()['data']['visionProfilePhotoList']}\n")
                    return
                elif len(response.json()['data']['visionProfilePhotoList']['feeds']) == 0:
                    Common.logger(log_type, crawler).info("No more videos ~\n")
                    return
                else:
                    feeds = response.json()['data']['visionProfilePhotoList']['feeds']
                    pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
                    # Common.logger(log_type, crawler).info(f"feeds0: {feeds}\n")
                    for i in range(len(feeds)):
                        if 'photo' not in feeds[i]:
                            Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]}\n")
                            break
                        # video_title
                        if 'caption' not in feeds[i]['photo']:
                            video_title = cls.random_title(log_type, crawler)
                        elif feeds[i]['photo']['caption'].strip() == "":
                            video_title = cls.random_title(log_type, crawler)
                        else:
                            video_title = cls.video_title(log_type, crawler, feeds[i]['photo']['caption'])

                        if 'videoResource' not in feeds[i]['photo'] \
                                and 'manifest' not in feeds[i]['photo'] \
                                and 'manifestH265' not in feeds[i]['photo']:
                            Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
                            break
                        videoResource = feeds[i]['photo']['videoResource']
                        if 'h264' not in videoResource and 'hevc' not in videoResource:
                            Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
                            break

                        # video_id
                        if 'h264' in videoResource and 'videoId' in videoResource['h264']:
                            video_id = videoResource['h264']['videoId']
                        elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
                            video_id = videoResource['hevc']['videoId']
                        else:
                            video_id = ""

                        # play_cnt
                        if 'viewCount' not in feeds[i]['photo']:
                            play_cnt = 0
                        else:
                            play_cnt = int(feeds[i]['photo']['viewCount'])

                        # like_cnt
                        if 'realLikeCount' not in feeds[i]['photo']:
                            like_cnt = 0
                        else:
                            like_cnt = feeds[i]['photo']['realLikeCount']

                        # publish_time
                        if 'timestamp' not in feeds[i]['photo']:
                            publish_time_stamp = 0
                            publish_time_str = ''
                            publish_time = 0
                        else:
                            publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
                            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                            publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))

                        # duration
                        if 'duration' not in feeds[i]['photo']:
                            duration = 0
                        else:
                            duration = int(int(feeds[i]['photo']['duration']) / 1000)
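                        # Only one codec's representation list may be present, so try h264 first and fall back to hevc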
                        # video_width / video_height / video_url
                        mapping = {}
                        for item in ['width', 'height', 'url']:
                            try:
                                val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
                            except Exception:
                                try:
                                    val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
                                except Exception:
                                    val = ''
                            mapping[item] = val
                        video_width = int(mapping['width']) if mapping['width'] != '' else 0
                        video_height = int(mapping['height']) if mapping['height'] != '' else 0
                        video_url = mapping['url']
                        # cover_url
                        if 'coverUrl' not in feeds[i]['photo']:
                            cover_url = ""
                        else:
                            cover_url = feeds[i]['photo']['coverUrl']

                        # user_name / avatar_url
                        try:
                            user_name = feeds[i]['author']['name']
                            avatar_url = feeds[i]['author']['headerUrl']
                        except Exception:
                            user_name = ''
                            avatar_url = ''

                        video_dict = {'video_title': video_title,
                                      'video_id': video_id,
                                      'play_cnt': play_cnt,
                                      'comment_cnt': 0,
                                      'like_cnt': like_cnt,
                                      'share_cnt': 0,
                                      'video_width': video_width,
                                      'video_height': video_height,
                                      'duration': duration,
                                      'publish_time': publish_time,
                                      'publish_time_stamp': publish_time_stamp,
                                      'publish_time_str': publish_time_str,
                                      'user_name': user_name,
                                      'user_id': out_uid,
                                      'avatar_url': avatar_url,
                                      'cover_url': cover_url,
                                      'video_url': video_url,
                                      'session': f"kuaishou{int(time.time())}"}
                        rule_1 = cls.download_rule(video_dict, rule_dict_1)
                        Common.logger(log_type, crawler).info(f"video_title:{video_title}")
                        Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
                        Common.logger(log_type, crawler).info(
                            f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
                        Common.logger(log_type, crawler).info(
                            f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
                        Common.logger(log_type, crawler).info(
                            f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
                        Common.logger(log_type, crawler).info(
                            f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
                        Common.logger(log_type, crawler).info(
                            f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
                        Common.logger(log_type, crawler).info(
                            f"publish_time:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
                        Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")

                        rule_2 = cls.download_rule(video_dict, rule_dict_2)
                        Common.logger(log_type, crawler).info(
                            f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
                        Common.logger(log_type, crawler).info(
                            f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
                        Common.logger(log_type, crawler).info(
                            f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
                        Common.logger(log_type, crawler).info(
                            f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
                        Common.logger(log_type, crawler).info(
                            f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
                        Common.logger(log_type, crawler).info(
                            f"publish_time:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
                        Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
                        if video_title == "" or video_url == "":
                            Common.logger(log_type, crawler).info("invalid video\n")
                            break
                        elif rule_1 is True:
                            if download_cnt_1 < int(rule_dict_1['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
                                download_finished = cls.download_publish(log_type=log_type,
                                                                         crawler=crawler,
                                                                         strategy=strategy,
                                                                         video_dict=video_dict,
                                                                         rule_dict=rule_dict_1,
                                                                         our_uid=our_uid,
                                                                         oss_endpoint=oss_endpoint,
                                                                         env=env,
                                                                         machine=machine)
                                if download_finished is True:
                                    download_cnt_1 += 1
                        elif rule_2 is True:
                            if download_cnt_2 < int(rule_dict_2['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
                                download_finished = cls.download_publish(log_type=log_type,
                                                                         crawler=crawler,
                                                                         strategy=strategy,
                                                                         video_dict=video_dict,
                                                                         rule_dict=rule_dict_2,
                                                                         our_uid=our_uid,
                                                                         oss_endpoint=oss_endpoint,
                                                                         env=env,
                                                                         machine=machine)
                                if download_finished is True:
                                    download_cnt_2 += 1
                        else:
                            Common.logger(log_type, crawler).info("download rules not met\n")
                    # Common.logger(log_type, crawler).info(f"feeds: {feeds}\n")
                    if pcursor == "no_more":
                        Common.logger(log_type, crawler).info("Reached the end, no more content\n")
                        return
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_videoList:{e}\n")
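
    # Deduplication check: count crawler_video rows matching this out_video_id, or the same title and publish_time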
    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, video_title, publish_time, env, machine):
        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}" or (platform="{cls.platform}" and video_title="{video_title}" and publish_time="{publish_time}") """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
        return len(repeat_video)
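
    # Download the video and cover, upload them to our platform, then record the result in MySQL
    # and the Feishu sheet; returns True only when the whole pipeline succeeded.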
    @classmethod
    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
        try:
            download_finished = False
            if cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'],
                                video_dict['publish_time_str'], env, machine) != 0:
                Common.logger(log_type, crawler).info('video already downloaded\n')
            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, crawler, "3cd128") for x in y]:
                Common.logger(log_type, crawler).info('video already downloaded\n')
            elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type, crawler)):
                Common.logger(log_type, crawler).info('title matched a filter word\n')
            else:
                # download the video
                Common.download_method(log_type=log_type, crawler=crawler, text='video',
                                       title=video_dict['video_title'], url=video_dict['video_url'])
                if os.path.getsize(f"./{crawler}/videos/{video_dict['video_title']}/video.mp4") == 0:
                    # delete the video folder
                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
                    Common.logger(log_type, crawler).info("video size is 0, folder deleted\n")
                    return
                # ffmpeg_dict = Common.ffmpeg(log_type, crawler,
                #                             f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
                # if ffmpeg_dict is None or ffmpeg_dict['size'] == 0:
                #     Common.logger(log_type, crawler).warning(f"downloaded video is invalid, deleted\n")
                #     # delete the video folder
                #     shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
                #     return download_finished
                # download the cover
                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
                                       title=video_dict['video_title'], url=video_dict['cover_url'])
                # save the video info to a txt file
                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
                # upload the video
                Common.logger(log_type, crawler).info("start uploading video...")
                our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                          crawler=crawler,
                                                          strategy=strategy,
                                                          our_uid=our_uid,
                                                          env=env,
                                                          oss_endpoint=oss_endpoint)
                if env == 'dev':
                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                else:
                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                Common.logger(log_type, crawler).info("video upload finished")
                if our_video_id is None:
                    Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id}, deleting the video folder")
                    # delete the video folder
                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
                    return download_finished
                # save the video info to the database
                insert_sql = f""" insert into crawler_video(video_id,
                                                            user_id,
                                                            out_user_id,
                                                            platform,
                                                            strategy,
                                                            out_video_id,
                                                            video_title,
                                                            cover_url,
                                                            video_url,
                                                            duration,
                                                            publish_time,
                                                            play_cnt,
                                                            crawler_rule,
                                                            width,
                                                            height)
                                                            values({our_video_id},
                                                            {our_uid},
                                                            "{video_dict['user_id']}",
                                                            "{cls.platform}",
                                                            "定向爬虫策略",
                                                            "{video_dict['video_id']}",
                                                            "{video_dict['video_title']}",
                                                            "{video_dict['cover_url']}",
                                                            "{video_dict['video_url']}",
                                                            {int(video_dict['duration'])},
                                                            "{video_dict['publish_time_str']}",
                                                            {int(video_dict['play_cnt'])},
                                                            '{json.dumps(rule_dict)}',
                                                            {int(video_dict['video_width'])},
                                                            {int(video_dict['video_height'])}) """
                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
                Common.logger(log_type, crawler).info('video info inserted into the database\n')
                # write the video to the Feishu sheet
                Feishu.insert_columns(log_type, 'kuaishou', "fYdA8F", "ROWS", 1, 2)
                upload_time = int(time.time())
                values = [[our_video_id,
                           time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                           "定向榜",
                           str(video_dict['video_id']),
                           video_dict['video_title'],
                           our_video_link,
                           video_dict['play_cnt'],
                           video_dict['comment_cnt'],
                           video_dict['like_cnt'],
                           video_dict['share_cnt'],
                           video_dict['duration'],
                           f"{video_dict['video_width']}*{video_dict['video_height']}",
                           video_dict['publish_time_str'],
                           video_dict['user_name'],
                           video_dict['user_id'],
                           video_dict['avatar_url'],
                           video_dict['cover_url'],
                           video_dict['video_url']]]
                time.sleep(1)
                Feishu.update_values(log_type, 'kuaishou', "fYdA8F", "E2:Z2", values)
                Common.logger(log_type, crawler).info("video saved to the cloud sheet\n")
                download_finished = True
            return download_finished
        except Exception as e:
            Common.logger(log_type, crawler).error(f"download_publish:{e}\n")
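
    # Entry point for the follow strategy: refresh the user list, then crawl each user's profile in turn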
    @classmethod
    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
        user_list = cls.get_user_list(log_type=log_type, crawler=crawler, sheetid="bTSzxW", env=env, machine=machine)
        for user in user_list:
            out_uid = user["out_uid"]
            user_name = user["user_name"]
            our_uid = user["our_uid"]
            Common.logger(log_type, crawler).info(f"start crawling videos from {user_name}'s profile\n")
            cls.get_videoList(log_type=log_type,
                              crawler=crawler,
                              strategy=strategy,
                              our_uid=our_uid,
                              out_uid=out_uid,
                              oss_endpoint=oss_endpoint,
                              env=env,
                              machine=machine)
            sleep_time = 120
            Common.logger(log_type, crawler).info(f"sleeping for {sleep_time} seconds\n")
            time.sleep(sleep_time)


if __name__ == "__main__":
    # Follow.get_videoList(log_type="follow",
    #                      crawler="kuaishou",
    #                      strategy="定向爬虫策略",
    #                      our_uid="6282431",
    #                      out_uid="3xws7ydsnmp5mgq",
    #                      oss_endpoint="out",
    #                      env="dev",
    #                      machine="local")
    # print(Follow.get_out_user_info("follow", "kuaishou", "3xgh4ja9be3wcaw"))
    # print(Follow.get_out_user_info("follow", "kuaishou", "3x5wgjhfc7tx8ue"))
    print(Follow.get_cookie("cookies", "kuaishou", "3xvp5w6twj77xeq", "local"))
    print(Follow.get_cookie("cookies", "kuaishou", "3xgh4ja9be3wcaw", "local"))
    print(Follow.get_cookie("cookies", "kuaishou", "3x5wgjhfc7tx8ue", "local"))
    pass