# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/2/24
import os
import random
import shutil
import sys
import time
import requests
import json
import urllib3
from requests.adapters import HTTPAdapter
sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.users import Users
from common.db import MysqlHelper
from common.publish import Publish


class Follow:
    platform = "快手"
    tag = "快手爬虫,定向爬虫策略"

    @classmethod
    def get_rule(cls, log_type, crawler, index):
        try:
            while True:
                rule_sheet = Feishu.get_values_batch(log_type, crawler, "3iqG4z")
                if rule_sheet is None:
                    Common.logger(log_type, crawler).warning("rule_sheet is None! 10秒后重新获取")
                    time.sleep(10)
                    continue
                if index == 1:
                    rule_dict = {
                        "play_cnt": f"{rule_sheet[1][1]}{rule_sheet[1][2]}",
                        "video_width": f"{rule_sheet[2][1]}{rule_sheet[2][2]}",
                        "video_height": f"{rule_sheet[3][1]}{rule_sheet[3][2]}",
                        "like_cnt": f"{rule_sheet[4][1]}{rule_sheet[4][2]}",
                        "duration": f"{rule_sheet[5][1]}{rule_sheet[5][2]}",
                        "download_cnt": f"{rule_sheet[6][1]}{rule_sheet[6][2]}",
                        "publish_time": f"{rule_sheet[7][1]}{rule_sheet[7][2]}",
                    }
                    # for k, v in rule_dict.items():
                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
                    return rule_dict
                elif index == 2:
                    rule_dict = {
                        "play_cnt": f"{rule_sheet[9][1]}{rule_sheet[9][2]}",
                        "video_width": f"{rule_sheet[10][1]}{rule_sheet[10][2]}",
                        "video_height": f"{rule_sheet[11][1]}{rule_sheet[11][2]}",
                        "like_cnt": f"{rule_sheet[12][1]}{rule_sheet[12][2]}",
                        "duration": f"{rule_sheet[13][1]}{rule_sheet[13][2]}",
                        "download_cnt": f"{rule_sheet[14][1]}{rule_sheet[14][2]}",
                        "publish_time": f"{rule_sheet[15][1]}{rule_sheet[15][2]}",
                    }
                    # for k, v in rule_dict.items():
                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
                    return rule_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
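
    # Note on the rule format (a reading of the code above, not an official
    # spec): each rule value is assembled from two adjacent sheet cells --
    # presumably a comparison operator plus a threshold, e.g. ">=" + "1000"
    # -> ">=1000". download_rule() below simply concatenates a video's metric
    # with that string and eval()s it, so eval("5000" + ">=1000") yields True.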

    @classmethod
    def download_rule(cls, video_dict, rule_dict):
        if eval(f"{video_dict['play_cnt']}{rule_dict['play_cnt']}") is True \
                and eval(f"{video_dict['video_width']}{rule_dict['video_width']}") is True \
                and eval(f"{video_dict['video_height']}{rule_dict['video_height']}") is True \
                and eval(f"{video_dict['like_cnt']}{rule_dict['like_cnt']}") is True \
                and eval(f"{video_dict['duration']}{rule_dict['duration']}") is True \
                and eval(f"{video_dict['publish_time']}{rule_dict['publish_time']}") is True:
            return True
        else:
            return False

    # Filter-word list
    @classmethod
    def filter_words(cls, log_type, crawler):
        try:
            while True:
                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, 'HIKVvs')
                if filter_words_sheet is None:
                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
                    time.sleep(10)
                    continue
                filter_words_list = []
                for x in filter_words_sheet:
                    for y in x:
                        if y is not None:
                            filter_words_list.append(y)
                return filter_words_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')

    # Fallback titles, used when a video has no usable caption
    @classmethod
    def random_title(cls, log_type, crawler):
        try:
            while True:
                random_title_sheet = Feishu.get_values_batch(log_type, crawler, '0DiyXe')
                if random_title_sheet is None:
                    Common.logger(log_type, crawler).warning(f"random_title_sheet:{random_title_sheet} 10秒钟后重试")
                    time.sleep(10)
                    continue
                random_title_list = []
                for x in random_title_sheet:
                    for y in x:
                        if y is not None:
                            random_title_list.append(y)
                return random.choice(random_title_list)
        except Exception as e:
            Common.logger(log_type, crawler).error(f'random_title:{e}\n')
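
    # The lookups below go through Kuaishou's public web GraphQL endpoint
    # (https://www.kuaishou.com/graphql). The hard-coded Cookie / User-Agent
    # pair imitates a PC_WEB browser session, and every request is sent
    # through Common.tunnel_proxies() with a 3-retry HTTPAdapter.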

    # Fetch the off-site (Kuaishou) user profile
    @classmethod
    def get_out_user_info(cls, log_type, crawler, out_uid):
        try:
            url = "https://www.kuaishou.com/graphql"
            payload = json.dumps({
                "operationName": "visionProfile",
                "variables": {
                    "userId": str(out_uid)
                },
                "query": "query visionProfile($userId: String) {\n visionProfile(userId: $userId) {\n result\n hostName\n userProfile {\n ownerCount {\n fan\n photo\n follow\n photo_public\n __typename\n }\n profile {\n gender\n user_name\n user_id\n headurl\n user_text\n user_profile_bg_url\n __typename\n }\n isFollowing\n __typename\n }\n __typename\n }\n}\n"
            })
            headers = {
                # 'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId={"".join(str(random.choice(range(1, 10))) for _ in range(10))}; kpn=KUAISHOU_VISION; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABE4wGjnJauApJelOpl9Xqo8TVDAyra7Pvo0rZtVgMSZxgVuw4Z6P2UtHv_CHOk2Ne2el1hdE_McCptWs8tRdtYlhXFlVOu8rQX7CwexzOBudJAfB3lDN8LPc4o4qHNwqFxy5J5j_WzdllbqMmaDUK9yUxX6XA-JFezzq9jvBwtGv7_hzB7pFrUcH39z0EYOQaZo5lDl-pE09Gw7wr8NvlZRoSdWlbobCW6oJxuQLJTUr9oj_uIiBhkeb1psaIIc3VwfYQ1UfvobrXAP_WpnRabE_3UZUBOygFMAE; kuaishou.server.web_ph=2b981e2051d7130c977fd31df97fe6f5ad54',
                'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABszwASP0eGT2ym0NUdGf1PTi8_gDYrDms9otf5FKMUrx0V7MOTT1hhvpVJijmgku8KYtu3a6g3X1hZODMpX0ebZUip44txWPoY3VRqbsnBKEOs9Qkmx3uLaX33nq8KevOKLoO0cIE8nfwMJISe_BtCHr22cbJkofI0xfJXBt_ZgPqJIqWsUwdgwKzqRYn47ROkIqzeDfEjbxaZxiXy22ZhhoSlCobbmtjkvjpY9x730BPP_C5IiAl9EBaCTyvMw5IIcnkmcjMeWC8w6LKzeFMCNhcqrfkgCgFMAE; kuaishou.server.web_ph=19ae269a54f619c49def39ef5b54ae6d4353; kpn=KUAISHOU_VISION',
                'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
                'content-type': 'application/json',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
                'Cache-Control': 'no-cache',
                'Connection': 'keep-alive',
                'Origin': 'https://www.kuaishou.com',
                'Pragma': 'no-cache',
                'Sec-Fetch-Dest': 'empty',
                'Sec-Fetch-Mode': 'cors',
                'Sec-Fetch-Site': 'same-origin',
                'accept': '*/*',
                'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"macOS"'
            }
            urllib3.disable_warnings()
            s = requests.session()
            # max_retries=3: retry each request up to 3 times
            s.mount('http://', HTTPAdapter(max_retries=3))
            s.mount('https://', HTTPAdapter(max_retries=3))
            response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False, timeout=5)
            response.close()
            # Common.logger(log_type, crawler).info(f"get_out_user_info_response:{response.text}")
            if response.status_code != 200:
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.text}\n")
                return
            elif 'data' not in response.json():
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()}\n")
                return
            elif 'visionProfile' not in response.json()['data']:
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()['data']}\n")
                return
            elif 'userProfile' not in response.json()['data']['visionProfile']:
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()['data']['visionProfile']}\n")
                return
            else:
                userProfile = response.json()['data']['visionProfile']['userProfile']
                # Common.logger(log_type, crawler).info(f"userProfile:{userProfile}")
                try:
                    out_fans_str = str(userProfile['ownerCount']['fan'])
                except Exception:
                    out_fans_str = "0"
                try:
                    out_follow_str = str(userProfile['ownerCount']['follow'])
                except Exception:
                    out_follow_str = "0"
                try:
                    out_avatar_url = userProfile['profile']['headurl']
                except Exception:
                    out_avatar_url = ""
                Common.logger(log_type, crawler).info(f"out_fans_str:{out_fans_str}")
                Common.logger(log_type, crawler).info(f"out_follow_str:{out_follow_str}")
                Common.logger(log_type, crawler).info(f"out_avatar_url:{out_avatar_url}")
                # Counts come back either as plain numbers ("1,234") or with the
                # 万 (10,000) suffix ("3.5万"); normalise both to integers
                if "万" in out_fans_str:
                    out_fans = int(float(out_fans_str.split("万")[0]) * 10000)
                else:
                    out_fans = int(out_fans_str.replace(",", ""))
                if "万" in out_follow_str:
                    out_follow = int(float(out_follow_str.split("万")[0]) * 10000)
                else:
                    out_follow = int(out_follow_str.replace(",", ""))
                out_user_dict = {
                    "out_fans": out_fans,
                    "out_follow": out_follow,
                    "out_avatar_url": out_avatar_url
                }
                Common.logger(log_type, crawler).info(f"out_user_dict:{out_user_dict}")
                return out_user_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_out_user_info:{e}\n")
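
    # The user sheet read below appears to lay out one account per row, with
    # (0-based) column 2 = out_uid, 3 = user_name, 6 = our_uid and
    # 7 = our_user_link -- the same G/H columns the method writes back to once
    # a station-side account has been created.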

    # Fetch the list of users to crawl
    @classmethod
    def get_user_list(cls, log_type, crawler, sheetid, env, machine):
        try:
            while True:
                user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
                if user_sheet is None:
                    Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet} 10秒钟后重试")
                    time.sleep(10)
                    continue
                our_user_list = []
                for i in range(1, len(user_sheet)):
                    # for i in range(1, 2):
                    out_uid = user_sheet[i][2]
                    user_name = user_sheet[i][3]
                    our_uid = user_sheet[i][6]
                    our_user_link = user_sheet[i][7]
                    if out_uid is None or user_name is None:
                        Common.logger(log_type, crawler).info("空行\n")
                    else:
                        Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息\n")
                        if our_uid is None:
                            out_user_info = cls.get_out_user_info(log_type, crawler, out_uid)
                            out_user_dict = {
                                "out_uid": out_uid,
                                "user_name": user_name,
                                "out_avatar_url": out_user_info["out_avatar_url"],
                                "out_create_time": '',
                                "out_tag": '',
                                "out_play_cnt": 0,
                                "out_fans": out_user_info["out_fans"],
                                "out_follow": out_user_info["out_follow"],
                                "out_friend": 0,
                                "out_like": 0,
                                "platform": cls.platform,
                                "tag": cls.tag,
                            }
                            our_user_dict = Users.create_user(log_type=log_type, crawler=crawler,
                                                              out_user_dict=out_user_dict, env=env, machine=machine)
                            our_uid = our_user_dict['our_uid']
                            our_user_link = our_user_dict['our_user_link']
                            Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
                                                 [[our_uid, our_user_link]])
                            Common.logger(log_type, crawler).info(f'站内用户信息写入飞书成功!\n')
                            our_user_list.append(our_user_dict)
                        else:
                            our_user_dict = {
                                'out_uid': out_uid,
                                'user_name': user_name,
                                'our_uid': our_uid,
                                'our_user_link': our_user_link,
                            }
                            our_user_list.append(our_user_dict)
                return our_user_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'get_user_list:{e}\n')
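
    # A hypothetical illustration of the cleaning done below: a caption such
    # as "早安 #晨练 @好友" keeps only the text before the first " #" / "@"
    # marker and comes out as "早安"; captions that clean down to nothing fall
    # back to random_title().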

    # Clean up the video title
    @classmethod
    def video_title(cls, log_type, crawler, title):
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]
        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]
        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]
        video_title = title3.strip().replace("\n", "") \
            .replace("/", "").replace("快手", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace("#", "").replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "")[:40]
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return cls.random_title(log_type, crawler)
        else:
            return video_title
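
    # get_videoList() below pages through a profile feed with the GraphQL
    # cursor (pcursor) returned by each response, and stops when both rules'
    # download quotas are filled, a response fails validation, or the server
    # reports pcursor == "no_more".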

    @classmethod
    def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine):
        try:
            download_cnt_1, download_cnt_2 = 0, 0
            pcursor = ""
            while True:
                rule_dict_1 = cls.get_rule(log_type, crawler, 1)
                rule_dict_2 = cls.get_rule(log_type, crawler, 2)
                if rule_dict_1 is None or rule_dict_2 is None:
                    Common.logger(log_type, crawler).warning(f"rule_dict is None, 10秒后重试")
                    time.sleep(10)
                else:
                    break
            while True:
                # Note: this chain keeps only the last character of the rule
                # string, so it assumes a single-digit download limit (e.g. "<=3" -> 3)
                if download_cnt_1 >= int(rule_dict_1['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[-1]) and download_cnt_2 >= int(rule_dict_2['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[-1]):
                    Common.logger(log_type, crawler).info(f"规则1已下载{download_cnt_1}条视频,规则2已下载{download_cnt_2}条视频\n")
                    return
                url = "https://www.kuaishou.com/graphql"
                payload = json.dumps({
                    "operationName": "visionProfilePhotoList",
                    "variables": {
                        "userId": out_uid,
                        "pcursor": pcursor,
                        "page": "profile"
                    },
                    "query": "fragment photoContent on PhotoEntity {\n id\n duration\n caption\n originCaption\n likeCount\n viewCount\n realLikeCount\n coverUrl\n photoUrl\n photoH265Url\n manifest\n manifestH265\n videoResource\n coverUrls {\n url\n __typename\n }\n timestamp\n expTag\n animatedCoverUrl\n distance\n videoRatio\n liked\n stereoType\n profileUserTopPhoto\n musicBlocked\n __typename\n}\n\nfragment feedContent on Feed {\n type\n author {\n id\n name\n headerUrl\n following\n headerUrls {\n url\n __typename\n }\n __typename\n }\n photo {\n ...photoContent\n __typename\n }\n canAddComment\n llsid\n status\n currentPcursor\n tags {\n type\n name\n __typename\n }\n __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n result\n llsid\n webPageArea\n feeds {\n ...feedContent\n __typename\n }\n hostName\n pcursor\n __typename\n }\n}\n"
                })
                headers = {
                    # 'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId={"".join(str(random.choice(range(1, 10))) for _ in range(10))}; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABOLgYYcIJ5ilxU46Jc-HLWThY8sppX3V0htC_KhSGOzAjP2hAOdegzfkZGAxS5rf6rCBS487FkxfYzLkV__I6b1lK16rDjvv94Kkoo4z7mgf8y8rFgWoqrp81JAWTtx00y-wrc1XXPf9RAVQoET70wWaeNG2r5bxtZEiNwpK_zPi0ZdUo0BW13dFKfVssAy2xKYh0UlJ8VSd_vBvyMKSxVBoSf061Kc3w5Nem7YdpVBmH39ceIiBpiGioLzbZqlHiSbwkH_LhUhNXz3o7LITj098KUytk2CgFMAE; kuaishou.server.web_ph=f1033957981996a7d50e849a9ded4cf4adff; kpn=KUAISHOU_VISION',
                    'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABszwASP0eGT2ym0NUdGf1PTi8_gDYrDms9otf5FKMUrx0V7MOTT1hhvpVJijmgku8KYtu3a6g3X1hZODMpX0ebZUip44txWPoY3VRqbsnBKEOs9Qkmx3uLaX33nq8KevOKLoO0cIE8nfwMJISe_BtCHr22cbJkofI0xfJXBt_ZgPqJIqWsUwdgwKzqRYn47ROkIqzeDfEjbxaZxiXy22ZhhoSlCobbmtjkvjpY9x730BPP_C5IiAl9EBaCTyvMw5IIcnkmcjMeWC8w6LKzeFMCNhcqrfkgCgFMAE; kuaishou.server.web_ph=19ae269a54f619c49def39ef5b54ae6d4353; kpn=KUAISHOU_VISION',
                    'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
                    'content-type': 'application/json',
                    # 'accept': '*/*',
                    # 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
                    # 'Cache-Control': 'no-cache',
                    # 'Connection': 'keep-alive',
                    # 'Origin': 'https://www.kuaishou.com',
                    # 'Pragma': 'no-cache',
                    # 'Sec-Fetch-Dest': 'empty',
                    # 'Sec-Fetch-Mode': 'cors',
                    # 'Sec-Fetch-Site': 'same-origin',
                    # 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
                    # 'sec-ch-ua-mobile': '?0',
                    # 'sec-ch-ua-platform': '"macOS"'
                }
                urllib3.disable_warnings()
                s = requests.session()
                # max_retries=3: retry each request up to 3 times
                s.mount('http://', HTTPAdapter(max_retries=3))
                s.mount('https://', HTTPAdapter(max_retries=3))
                response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False, timeout=5)
                response.close()
                # Common.logger(log_type, crawler).info(f"get_videoList:{response.text}\n")
                if response.status_code != 200:
                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.text}\n")
                    return
                elif 'data' not in response.json():
                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
                    return
                elif 'visionProfilePhotoList' not in response.json()['data']:
                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
                    return
                elif 'feeds' not in response.json()['data']['visionProfilePhotoList']:
                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']['visionProfilePhotoList']}\n")
                    return
                elif len(response.json()['data']['visionProfilePhotoList']['feeds']) == 0:
                    Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
                    return
                else:
                    feeds = response.json()['data']['visionProfilePhotoList']['feeds']
                    pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
                    # Common.logger(log_type, crawler).info(f"feeds0: {feeds}\n")
                    for i in range(len(feeds)):
                        if 'photo' not in feeds[i]:
                            Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]}\n")
                            break
                        # video_title
                        if 'caption' not in feeds[i]['photo']:
                            video_title = cls.random_title(log_type, crawler)
                        elif feeds[i]['photo']['caption'].strip() == "":
                            video_title = cls.random_title(log_type, crawler)
                        else:
                            video_title = cls.video_title(log_type, crawler, feeds[i]['photo']['caption'])
                        if 'videoResource' not in feeds[i]['photo'] \
                                and 'manifest' not in feeds[i]['photo'] \
                                and 'manifestH265' not in feeds[i]['photo']:
                            Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
                            break
                        videoResource = feeds[i]['photo']['videoResource']
                        if 'h264' not in videoResource and 'hevc' not in videoResource:
                            Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
                            break
                        # video_id
                        if 'h264' in videoResource and 'videoId' in videoResource['h264']:
                            video_id = videoResource['h264']['videoId']
                        elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
                            video_id = videoResource['hevc']['videoId']
                        else:
                            video_id = ""
                        # play_cnt
                        if 'viewCount' not in feeds[i]['photo']:
                            play_cnt = 0
                        else:
                            play_cnt = int(feeds[i]['photo']['viewCount'])
                        # like_cnt
                        if 'realLikeCount' not in feeds[i]['photo']:
                            like_cnt = 0
                        else:
                            like_cnt = feeds[i]['photo']['realLikeCount']
                        # publish_time
                        if 'timestamp' not in feeds[i]['photo']:
                            publish_time_stamp = 0
                            publish_time_str = ''
                            publish_time = 0
                        else:
                            publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
                            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                            publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))
                        # duration
                        if 'duration' not in feeds[i]['photo']:
                            duration = 0
                        else:
                            duration = int(int(feeds[i]['photo']['duration']) / 1000)
                        # video_width / video_height / video_url
                        # Prefer the h264 rendition; fall back to hevc, then to ''
                        mapping = {}
                        for item in ['width', 'height', 'url']:
                            try:
                                val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
                            except Exception:
                                try:
                                    val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
                                except Exception:
                                    val = ''
                            mapping[item] = val
                        video_width = int(mapping['width']) if mapping['width'] != '' else 0
                        video_height = int(mapping['height']) if mapping['height'] != '' else 0
                        video_url = mapping['url']
                        # cover_url
                        if 'coverUrl' not in feeds[i]['photo']:
                            cover_url = ""
                        else:
                            cover_url = feeds[i]['photo']['coverUrl']
                        # user_name / avatar_url
                        try:
                            user_name = feeds[i]['author']['name']
                            avatar_url = feeds[i]['author']['headerUrl']
                        except Exception:
                            user_name = ''
                            avatar_url = ''
                        video_dict = {'video_title': video_title,
                                      'video_id': video_id,
                                      'play_cnt': play_cnt,
                                      'comment_cnt': 0,
                                      'like_cnt': like_cnt,
                                      'share_cnt': 0,
                                      'video_width': video_width,
                                      'video_height': video_height,
                                      'duration': duration,
                                      'publish_time': publish_time,
                                      'publish_time_stamp': publish_time_stamp,
                                      'publish_time_str': publish_time_str,
                                      'user_name': user_name,
                                      'user_id': out_uid,
                                      'avatar_url': avatar_url,
                                      'cover_url': cover_url,
                                      'video_url': video_url,
                                      'session': f"kuaishou{int(time.time())}"}
                        rule_1 = cls.download_rule(video_dict, rule_dict_1)
                        Common.logger(log_type, crawler).info(f"video_title:{video_title}")
                        Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
                        Common.logger(log_type, crawler).info(f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
                        Common.logger(log_type, crawler).info(f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
                        Common.logger(log_type, crawler).info(f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
                        Common.logger(log_type, crawler).info(f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
                        Common.logger(log_type, crawler).info(f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
                        Common.logger(log_type, crawler).info(f"publish_time:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
                        Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")
                        rule_2 = cls.download_rule(video_dict, rule_dict_2)
                        Common.logger(log_type, crawler).info(f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
                        Common.logger(log_type, crawler).info(f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
                        Common.logger(log_type, crawler).info(f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
                        Common.logger(log_type, crawler).info(f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
                        Common.logger(log_type, crawler).info(f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
                        Common.logger(log_type, crawler).info(f"publish_time:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
                        Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
                        if video_title == "" or video_url == "":
                            Common.logger(log_type, crawler).info("无效视频\n")
                            break
                        elif rule_1 is True:
                            if download_cnt_1 < int(rule_dict_1['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[-1]):
                                download_finished = cls.download_publish(log_type=log_type,
                                                                         crawler=crawler,
                                                                         strategy=strategy,
                                                                         video_dict=video_dict,
                                                                         rule_dict=rule_dict_1,
                                                                         our_uid=our_uid,
                                                                         oss_endpoint=oss_endpoint,
                                                                         env=env,
                                                                         machine=machine)
                                if download_finished is True:
                                    download_cnt_1 += 1
                        elif rule_2 is True:
                            if download_cnt_2 < int(rule_dict_2['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[-1]):
                                download_finished = cls.download_publish(log_type=log_type,
                                                                         crawler=crawler,
                                                                         strategy=strategy,
                                                                         video_dict=video_dict,
                                                                         rule_dict=rule_dict_2,
                                                                         our_uid=our_uid,
                                                                         oss_endpoint=oss_endpoint,
                                                                         env=env,
                                                                         machine=machine)
                                if download_finished is True:
                                    download_cnt_2 += 1
                        else:
                            Common.logger(log_type, crawler).info("不满足下载规则\n")
                    # Common.logger(log_type, crawler).info(f"feeds: {feeds}\n")
                    if pcursor == "no_more":
                        Common.logger(log_type, crawler).info("已经到底了,没有更多内容了\n")
                        return
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_videoList:{e}\n")
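
    # Duplicate detection: a video is considered already crawled when the same
    # out_video_id exists for this platform, or when the same title and
    # publish_time pair has been recorded before (presumably to catch
    # re-uploads under a new id).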

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, video_title, publish_time, env, machine):
        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}" or (platform="{cls.platform}" and video_title="{video_title}" and publish_time="{publish_time}") """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
        return len(repeat_video)

    @classmethod
    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
        try:
            download_finished = False
            if cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'], video_dict['publish_time_str'], env, machine) != 0:
                Common.logger(log_type, crawler).info('视频已下载\n')
            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, crawler, "3cd128") for x in y]:
                Common.logger(log_type, crawler).info('视频已下载\n')
            elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type, crawler)):
                Common.logger(log_type, crawler).info('标题已中过滤词\n')
            else:
                # Download the video
                Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
                ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
                if ffmpeg_dict is None or ffmpeg_dict['size'] == 0:
                    Common.logger(log_type, crawler).warning(f"下载的视频无效,已删除\n")
                    # Remove the video folder
                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
                    return download_finished
                # Download the cover image
                Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
                # Save the video info to a txt file
                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
                # Upload the video
                Common.logger(log_type, crawler).info("开始上传视频...")
                our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                          crawler=crawler,
                                                          strategy=strategy,
                                                          our_uid=our_uid,
                                                          env=env,
                                                          oss_endpoint=oss_endpoint)
                if env == 'dev':
                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                else:
                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                Common.logger(log_type, crawler).info("视频上传完成")
                if our_video_id is None:
                    Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id} 删除该视频文件夹")
                    # Remove the video folder
                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
                    return download_finished
                # Save the video record to the database
                insert_sql = f""" insert into crawler_video(video_id,
                                                            user_id,
                                                            out_user_id,
                                                            platform,
                                                            strategy,
                                                            out_video_id,
                                                            video_title,
                                                            cover_url,
                                                            video_url,
                                                            duration,
                                                            publish_time,
                                                            play_cnt,
                                                            crawler_rule,
                                                            width,
                                                            height)
                                                            values({our_video_id},
                                                            {our_uid},
                                                            "{video_dict['user_id']}",
                                                            "{cls.platform}",
                                                            "定向爬虫策略",
                                                            "{video_dict['video_id']}",
                                                            "{video_dict['video_title']}",
                                                            "{video_dict['cover_url']}",
                                                            "{video_dict['video_url']}",
                                                            {int(video_dict['duration'])},
                                                            "{video_dict['publish_time_str']}",
                                                            {int(video_dict['play_cnt'])},
                                                            '{json.dumps(rule_dict)}',
                                                            {int(video_dict['video_width'])},
                                                            {int(video_dict['video_height'])}) """
                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
                Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
                # Write the video record to the Feishu sheet
                Feishu.insert_columns(log_type, 'kuaishou', "fYdA8F", "ROWS", 1, 2)
                upload_time = int(time.time())
                values = [[our_video_id,
                           time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                           "定向榜",
                           str(video_dict['video_id']),
                           video_dict['video_title'],
                           our_video_link,
                           video_dict['play_cnt'],
                           video_dict['comment_cnt'],
                           video_dict['like_cnt'],
                           video_dict['share_cnt'],
                           video_dict['duration'],
                           f"{video_dict['video_width']}*{video_dict['video_height']}",
                           video_dict['publish_time_str'],
                           video_dict['user_name'],
                           video_dict['user_id'],
                           video_dict['avatar_url'],
                           video_dict['cover_url'],
                           video_dict['video_url']]]
                time.sleep(1)
                Feishu.update_values(log_type, 'kuaishou', "fYdA8F", "E2:Z2", values)
                Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
                download_finished = True
            return download_finished
        except Exception as e:
            Common.logger(log_type, crawler).error(f"download_publish:{e}\n")

    @classmethod
    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
        user_list = cls.get_user_list(log_type=log_type, crawler=crawler, sheetid="bTSzxW", env=env, machine=machine)
        for user in user_list:
            out_uid = user["out_uid"]
            user_name = user["user_name"]
            our_uid = user["our_uid"]
            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
            cls.get_videoList(log_type=log_type,
                              crawler=crawler,
                              strategy=strategy,
                              our_uid=our_uid,
                              out_uid=out_uid,
                              oss_endpoint=oss_endpoint,
                              env=env,
                              machine=machine)
            time.sleep(3)


if __name__ == "__main__":
    # print(Follow.filter_words("follow", "kuaishou"))
    # print(Follow.random_title("follow", "kuaishou"))
    # Follow.get_user_list("follow", "kuaishou", "2OLxLr", "dev", "local")
    # Follow.get_videoList(log_type="follow",
    #                      crawler="kuaishou",
    #                      strategy="定向爬虫策略",
    #                      our_uid="6282431",
    #                      out_uid="3xws7ydsnmp5mgq",
    #                      oss_endpoint="out",
    #                      env="dev",
    #                      machine="local")
    # Follow.get_rule("follow", "kuaishou", 1)
    # Follow.get_rule("follow", "kuaishou", 2)
    print(Follow.get_out_user_info("follow", "kuaishou", "3xgh4ja9be3wcaw"))
    print(Follow.get_out_user_info("follow", "kuaishou", "3x5wgjhfc7tx8ue"))
    pass
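
    # A hypothetical end-to-end invocation (values mirror the commented-out
    # examples above); uncomment to run the full follow crawl locally:
    # Follow.get_follow_videos(log_type="follow",
    #                          crawler="kuaishou",
    #                          strategy="定向爬虫策略",
    #                          oss_endpoint="out",
    #                          env="dev",
    #                          machine="local")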