kuaishou_follow_pc.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/2/24
import os
import random
import shutil
import sys
import time
import requests
import json
sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.users import Users
from common.db import MysqlHelper
from common.publish import Publish
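
# Kuaishou "follow" (定向/targeted) crawler for the PC web site: the user list and
# download rules come from Feishu sheets, video metadata from Kuaishou's GraphQL API;
# matching videos are downloaded, published via Publish, and recorded in MySQL and Feishu.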
class Follow:
    platform = "快手"
    tag = "快手爬虫,定向爬虫策略"

    @classmethod
    def get_rule(cls, log_type, crawler, index):
        try:
            while True:
                rule_sheet = Feishu.get_values_batch(log_type, crawler, "3iqG4z")
                if rule_sheet is None:
                    Common.logger(log_type, crawler).warning("rule_sheet is None! 10秒后重新获取")
                    time.sleep(10)
                    continue
                if index == 1:
                    rule_dict = {
                        "play_cnt": f"{rule_sheet[1][1]}{rule_sheet[1][2]}",
                        "video_width": f"{rule_sheet[2][1]}{rule_sheet[2][2]}",
                        "video_height": f"{rule_sheet[3][1]}{rule_sheet[3][2]}",
                        "like_cnt": f"{rule_sheet[4][1]}{rule_sheet[4][2]}",
                        "duration": f"{rule_sheet[5][1]}{rule_sheet[5][2]}",
                        "download_cnt": f"{rule_sheet[6][1]}{rule_sheet[6][2]}",
                        "publish_time": f"{rule_sheet[7][1]}{rule_sheet[7][2]}",
                    }
                    # for k, v in rule_dict.items():
                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
                    return rule_dict
                elif index == 2:
                    rule_dict = {
                        "play_cnt": f"{rule_sheet[9][1]}{rule_sheet[9][2]}",
                        "video_width": f"{rule_sheet[10][1]}{rule_sheet[10][2]}",
                        "video_height": f"{rule_sheet[11][1]}{rule_sheet[11][2]}",
                        "like_cnt": f"{rule_sheet[12][1]}{rule_sheet[12][2]}",
                        "duration": f"{rule_sheet[13][1]}{rule_sheet[13][2]}",
                        "download_cnt": f"{rule_sheet[14][1]}{rule_sheet[14][2]}",
                        "publish_time": f"{rule_sheet[15][1]}{rule_sheet[15][2]}",
                    }
                    # for k, v in rule_dict.items():
                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
                    return rule_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
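
    # Each rule value is an operator+number string read from the sheet (e.g. ">=1000",
    # assuming that sheet format), so f"{stat}{rule}" builds an expression such as
    # "1500>=1000" that eval() reduces to a bool. Sheet content is trusted input here.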
    @classmethod
    def download_rule(cls, video_dict, rule_dict):
        return eval(f"{video_dict['play_cnt']}{rule_dict['play_cnt']}") \
            and eval(f"{video_dict['video_width']}{rule_dict['video_width']}") \
            and eval(f"{video_dict['video_height']}{rule_dict['video_height']}") \
            and eval(f"{video_dict['like_cnt']}{rule_dict['like_cnt']}") \
            and eval(f"{video_dict['duration']}{rule_dict['duration']}") \
            and eval(f"{video_dict['publish_time']}{rule_dict['publish_time']}")

    # Filter-word list
    @classmethod
    def filter_words(cls, log_type, crawler):
        try:
            while True:
                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, 'HIKVvs')
                if filter_words_sheet is None:
                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
                    time.sleep(10)  # the log promises a 10-second retry, so actually wait
                    continue
                filter_words_list = []
                for x in filter_words_sheet:
                    for y in x:
                        if y is not None:
                            filter_words_list.append(y)
                return filter_words_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')

    # Fallback ("universal") titles
    @classmethod
    def random_title(cls, log_type, crawler):
        try:
            while True:
                random_title_sheet = Feishu.get_values_batch(log_type, crawler, '0DiyXe')
                if random_title_sheet is None:
                    Common.logger(log_type, crawler).warning(f"random_title_sheet:{random_title_sheet} 10秒钟后重试")
                    time.sleep(10)
                    continue
                random_title_list = []
                for x in random_title_sheet:
                    for y in x:
                        if y is not None:
                            random_title_list.append(y)
                return random.choice(random_title_list)
        except Exception as e:
            Common.logger(log_type, crawler).error(f'random_title:{e}\n')

    # Fetch the off-platform (Kuaishou) user profile
    @classmethod
    def get_out_user_info(cls, log_type, crawler, out_uid):
        try:
            url = "https://www.kuaishou.com/graphql"
            payload = json.dumps({
                "operationName": "visionProfile",
                "variables": {
                    "userId": out_uid
                },
                "query": "query visionProfile($userId: String) {\n visionProfile(userId: $userId) {\n result\n hostName\n userProfile {\n ownerCount {\n fan\n photo\n follow\n photo_public\n __typename\n }\n profile {\n gender\n user_name\n user_id\n headurl\n user_text\n user_profile_bg_url\n __typename\n }\n isFollowing\n __typename\n }\n __typename\n }\n}\n"
            })
            headers = {
                'Cookie': 'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId=1921947321; kpn=KUAISHOU_VISION; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABE4wGjnJauApJelOpl9Xqo8TVDAyra7Pvo0rZtVgMSZxgVuw4Z6P2UtHv_CHOk2Ne2el1hdE_McCptWs8tRdtYlhXFlVOu8rQX7CwexzOBudJAfB3lDN8LPc4o4qHNwqFxy5J5j_WzdllbqMmaDUK9yUxX6XA-JFezzq9jvBwtGv7_hzB7pFrUcH39z0EYOQaZo5lDl-pE09Gw7wr8NvlZRoSdWlbobCW6oJxuQLJTUr9oj_uIiBhkeb1psaIIc3VwfYQ1UfvobrXAP_WpnRabE_3UZUBOygFMAE; kuaishou.server.web_ph=2b981e2051d7130c977fd31df97fe6f5ad54',
                'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
                'content-type': 'application/json',
                # 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
                # 'Cache-Control': 'no-cache',
                # 'Connection': 'keep-alive',
                # 'Origin': 'https://www.kuaishou.com',
                # 'Pragma': 'no-cache',
                # 'Sec-Fetch-Dest': 'empty',
                # 'Sec-Fetch-Mode': 'cors',
                # 'Sec-Fetch-Site': 'same-origin',
                # 'accept': '*/*',
                # 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
                # 'sec-ch-ua-mobile': '?0',
                # 'sec-ch-ua-platform': '"macOS"'
            }
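            # NOTE: the Cookie above is a hard-coded session value and will likely stop
            # working once it expires; it then needs to be refreshed by hand.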
            response = requests.post(url=url, headers=headers, data=payload)
            if response.status_code != 200:
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.text}\n")
                return
            elif 'data' not in response.json():
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()}\n")
                return
            elif 'visionProfile' not in response.json()['data']:
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()['data']}\n")
                return
            elif 'userProfile' not in response.json()['data']['visionProfile']:
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()['data']['visionProfile']}\n")
                return
            else:
                userProfile = response.json()['data']['visionProfile']['userProfile']
                out_user_dict = {}
                if 'ownerCount' not in userProfile \
                        or 'fan' not in userProfile['ownerCount'] \
                        or 'follow' not in userProfile['ownerCount']:
                    out_user_dict['out_fans'] = 0
                    out_user_dict['out_follow'] = 0
                else:
                    out_fans_str = str(userProfile['ownerCount']['fan'])
                    out_follow_str = str(userProfile['ownerCount']['follow'])
                    # Counts like "1.2万" use 万 (10,000) as the unit; plain counts may contain commas
                    if "万" in out_fans_str:
                        out_user_dict['out_fans'] = int(float(out_fans_str.split("万")[0]) * 10000)
                    else:
                        out_user_dict['out_fans'] = int(out_fans_str.replace(",", ""))
                    if "万" in out_follow_str:
                        out_user_dict['out_follow'] = int(float(out_follow_str.split("万")[0]) * 10000)
                    else:
                        out_user_dict['out_follow'] = int(out_follow_str.replace(",", ""))
                if 'profile' not in userProfile or 'headurl' not in userProfile['profile']:
                    out_user_dict['out_avatar_url'] = ''
                else:
                    out_user_dict['out_avatar_url'] = userProfile['profile']['headurl']
                return out_user_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_out_user_info:{e}\n")

    # Build the user-info list
    @classmethod
    def get_user_list(cls, log_type, crawler, sheetid, env, machine):
        try:
            while True:
                user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
                if user_sheet is None:
                    Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet} 10秒钟后重试")
                    time.sleep(10)
                    continue
                our_user_list = []
                # for i in range(1, len(user_sheet)):
                for i in range(1, 2):  # debug leftover: only the first data row is processed
                    out_uid = user_sheet[i][2]
                    user_name = user_sheet[i][3]
                    our_uid = user_sheet[i][6]
                    our_user_link = user_sheet[i][7]
                    if out_uid is None or user_name is None:
                        Common.logger(log_type, crawler).info("空行\n")
                    else:
                        Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息\n")
                        if our_uid is None:
                            out_user_info = cls.get_out_user_info(log_type, crawler, out_uid)
                            out_user_dict = {
                                "out_uid": out_uid,
                                "user_name": user_name,
                                "out_avatar_url": out_user_info["out_avatar_url"],
                                "out_create_time": '',
                                "out_tag": '',
                                "out_play_cnt": 0,
                                "out_fans": out_user_info["out_fans"],
                                "out_follow": out_user_info["out_follow"],
                                "out_friend": 0,
                                "out_like": 0,
                                "platform": cls.platform,
                                "tag": cls.tag,
                            }
                            our_user_dict = Users.create_user(log_type=log_type, crawler=crawler,
                                                              out_user_dict=out_user_dict, env=env, machine=machine)
                            our_uid = our_user_dict['our_uid']
                            our_user_link = our_user_dict['our_user_link']
                            Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
                                                 [[our_uid, our_user_link]])
                            Common.logger(log_type, crawler).info(f'站内用户信息写入飞书成功!\n')
                            our_user_list.append(our_user_dict)
                        else:
                            our_user_dict = {
                                'out_uid': out_uid,
                                'user_name': user_name,
                                'our_uid': our_uid,
                                'our_user_link': our_user_link,
                            }
                            our_user_list.append(our_user_dict)
                return our_user_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'get_user_list:{e}\n')

    # Clean up the video title
    @classmethod
    def video_title(cls, log_type, crawler, title):
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]
        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]
        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]
        video_title = title3.strip().replace("\n", "") \
            .replace("/", "").replace("快手", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace("#", "").replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "")[:40]
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return cls.random_title(log_type, crawler)
        else:
            return video_title
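
    # Page through one user's profile feed via the GraphQL cursor ("pcursor").
    # Every video is checked against two independent rule sets from the Feishu sheet,
    # each with its own download quota (download_cnt).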
    @classmethod
    def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine):
        download_cnt_1, download_cnt_2 = 0, 0
        pcursor = ""
        while True:
            rule_dict_1 = cls.get_rule(log_type, crawler, 1)
            rule_dict_2 = cls.get_rule(log_type, crawler, 2)
            if rule_dict_1 is None or rule_dict_2 is None:
                Common.logger(log_type, crawler).warning(f"rule_dict is None, 10秒后重试")
                time.sleep(10)
            else:
                break
        # Strip the comparison operator from the download_cnt rule (e.g. "<=30" -> 30)
        # to get each rule set's download quota
        download_limit_1 = int(rule_dict_1['download_cnt'].replace("=", "").replace("<", "").replace(">", ""))
        download_limit_2 = int(rule_dict_2['download_cnt'].replace("=", "").replace("<", "").replace(">", ""))
        while True:
            if download_cnt_1 >= download_limit_1 and download_cnt_2 >= download_limit_2:
                Common.logger(log_type, crawler).info(f"规则1已下载{download_cnt_1}条视频,规则2已下载{download_cnt_2}条视频\n")
                return
            url = "https://www.kuaishou.com/graphql"
            payload = json.dumps({
                "operationName": "visionProfilePhotoList",
                "variables": {
                    "userId": out_uid,
                    "pcursor": pcursor,
                    "page": "profile"
                },
                "query": "fragment photoContent on PhotoEntity {\n id\n duration\n caption\n originCaption\n likeCount\n viewCount\n realLikeCount\n coverUrl\n photoUrl\n photoH265Url\n manifest\n manifestH265\n videoResource\n coverUrls {\n url\n __typename\n }\n timestamp\n expTag\n animatedCoverUrl\n distance\n videoRatio\n liked\n stereoType\n profileUserTopPhoto\n musicBlocked\n __typename\n}\n\nfragment feedContent on Feed {\n type\n author {\n id\n name\n headerUrl\n following\n headerUrls {\n url\n __typename\n }\n __typename\n }\n photo {\n ...photoContent\n __typename\n }\n canAddComment\n llsid\n status\n currentPcursor\n tags {\n type\n name\n __typename\n }\n __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n result\n llsid\n webPageArea\n feeds {\n ...feedContent\n __typename\n }\n hostName\n pcursor\n __typename\n }\n}\n"
            })
            headers = {
                'Cookie': 'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId=1268646616; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABOLgYYcIJ5ilxU46Jc-HLWThY8sppX3V0htC_KhSGOzAjP2hAOdegzfkZGAxS5rf6rCBS487FkxfYzLkV__I6b1lK16rDjvv94Kkoo4z7mgf8y8rFgWoqrp81JAWTtx00y-wrc1XXPf9RAVQoET70wWaeNG2r5bxtZEiNwpK_zPi0ZdUo0BW13dFKfVssAy2xKYh0UlJ8VSd_vBvyMKSxVBoSf061Kc3w5Nem7YdpVBmH39ceIiBpiGioLzbZqlHiSbwkH_LhUhNXz3o7LITj098KUytk2CgFMAE; kuaishou.server.web_ph=f1033957981996a7d50e849a9ded4cf4adff; kpn=KUAISHOU_VISION',
                'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
                'content-type': 'application/json',
                # 'accept': '*/*',
                # 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
                # 'Cache-Control': 'no-cache',
                # 'Connection': 'keep-alive',
                # 'Origin': 'https://www.kuaishou.com',
                # 'Pragma': 'no-cache',
                # 'Sec-Fetch-Dest': 'empty',
                # 'Sec-Fetch-Mode': 'cors',
                # 'Sec-Fetch-Site': 'same-origin',
                # 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
                # 'sec-ch-ua-mobile': '?0',
                # 'sec-ch-ua-platform': '"macOS"'
            }
            response = requests.post(url=url, headers=headers, data=payload)
            if response.status_code != 200:
                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.text}\n")
                return
            elif 'data' not in response.json():
                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
                return
            elif 'visionProfilePhotoList' not in response.json()['data']:
                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
                return
            elif 'feeds' not in response.json()['data']['visionProfilePhotoList']:
                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']['visionProfilePhotoList']}\n")
                return
            elif len(response.json()['data']['visionProfilePhotoList']['feeds']) == 0:
                Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
                return
            else:
                feeds = response.json()['data']['visionProfilePhotoList']['feeds']
                pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
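                # Walk the feed entries; everything we need hangs off each entry's 'photo' node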
                for i in range(len(feeds)):
                    if 'photo' not in feeds[i]:
                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]}\n")
                        break
                    # video_title
                    if 'caption' not in feeds[i]['photo']:
                        video_title = cls.random_title(log_type, crawler)
                    elif feeds[i]['photo']['caption'].strip() == "":
                        video_title = cls.random_title(log_type, crawler)
                    else:
                        video_title = cls.video_title(log_type, crawler, feeds[i]['photo']['caption'])
                    if 'videoResource' not in feeds[i]['photo'] \
                            and 'manifest' not in feeds[i]['photo'] \
                            and 'manifestH265' not in feeds[i]['photo']:
                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
                        break
                    videoResource = feeds[i]['photo'].get('videoResource', {})
                    if 'h264' not in videoResource and 'hevc' not in videoResource:
                        Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
                        break
                    # video_id
                    if 'h264' in videoResource and 'videoId' in videoResource['h264']:
                        video_id = videoResource['h264']['videoId']
                    elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
                        video_id = videoResource['hevc']['videoId']
                    else:
                        video_id = ""
                    # play_cnt
                    if 'viewCount' not in feeds[i]['photo']:
                        play_cnt = 0
                    else:
                        play_cnt = int(feeds[i]['photo']['viewCount'])
                    # like_cnt
                    if 'realLikeCount' not in feeds[i]['photo']:
                        like_cnt = 0
                    else:
                        like_cnt = feeds[i]['photo']['realLikeCount']
                    # publish_time
                    if 'timestamp' not in feeds[i]['photo']:
                        publish_time_stamp = 0
                        publish_time_str = ''
                        publish_time = 0
                    else:
                        # timestamp is in milliseconds; publish_time is the age in days
                        publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                        publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))
                    # duration
                    if 'duration' not in feeds[i]['photo']:
                        duration = 0
                    else:
                        duration = int(int(feeds[i]['photo']['duration']) / 100)
                    # video_width / video_height / video_url
                    mapping = {}
                    for item in ['width', 'height', 'url']:
                        # Prefer the h264 stream; fall back to hevc, then to ''
                        try:
                            val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
                        except (KeyError, IndexError, TypeError):
                            try:
                                val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
                            except (KeyError, IndexError, TypeError):
                                val = ''
                        mapping[item] = val
                    video_width = int(mapping['width']) if mapping['width'] != '' else 0
                    video_height = int(mapping['height']) if mapping['height'] != '' else 0
                    video_url = mapping['url']
                    # cover_url
                    if 'coverUrl' not in feeds[i]['photo']:
                        cover_url = ""
                    else:
                        cover_url = feeds[i]['photo']['coverUrl']
                    # user_name / avatar_url
                    try:
                        user_name = feeds[i]['author']['name']
                        avatar_url = feeds[i]['author']['headerUrl']
                    except (KeyError, TypeError):
                        user_name = ''
                        avatar_url = ''
                    video_dict = {'video_title': video_title,
                                  'video_id': video_id,
                                  'play_cnt': play_cnt,
                                  'comment_cnt': 0,
                                  'like_cnt': like_cnt,
                                  'share_cnt': 0,
                                  'video_width': video_width,
                                  'video_height': video_height,
                                  'duration': duration,
                                  'publish_time': publish_time,
                                  'publish_time_stamp': publish_time_stamp,
                                  'publish_time_str': publish_time_str,
                                  'user_name': user_name,
                                  'user_id': out_uid,
                                  'avatar_url': avatar_url,
                                  'cover_url': cover_url,
                                  'video_url': video_url,
                                  'session': f"kuaishou{int(time.time())}"}
                    rule_1 = cls.download_rule(video_dict, rule_dict_1)
                    Common.logger(log_type, crawler).info(f"video_title:{video_title}")
                    Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
                    Common.logger(log_type, crawler).info(f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
                    Common.logger(log_type, crawler).info(f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
                    Common.logger(log_type, crawler).info(f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
                    Common.logger(log_type, crawler).info(f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
                    Common.logger(log_type, crawler).info(f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
                    Common.logger(log_type, crawler).info(f"publish_time:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
                    Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")
                    rule_2 = cls.download_rule(video_dict, rule_dict_2)
                    Common.logger(log_type, crawler).info(f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
                    Common.logger(log_type, crawler).info(f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
                    Common.logger(log_type, crawler).info(f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
                    Common.logger(log_type, crawler).info(f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
                    Common.logger(log_type, crawler).info(f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
                    Common.logger(log_type, crawler).info(f"publish_time:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
                    Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
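                    # Download under the first rule set the video satisfies, as long as
                    # that rule set's quota has not been reached yet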
                    if video_title == "" or video_url == "":
                        Common.logger(log_type, crawler).info("无效视频\n")
                        break
                    elif rule_1 is True:
                        if download_cnt_1 < download_limit_1:
                            cls.download_publish(log_type=log_type,
                                                 crawler=crawler,
                                                 strategy=strategy,
                                                 video_dict=video_dict,
                                                 rule_dict=rule_dict_1,
                                                 our_uid=our_uid,
                                                 oss_endpoint=oss_endpoint,
                                                 env=env,
                                                 machine=machine)
                            download_cnt_1 += 1
                    elif rule_2 is True:
                        if download_cnt_2 < download_limit_2:
                            cls.download_publish(log_type=log_type,
                                                 crawler=crawler,
                                                 strategy=strategy,
                                                 video_dict=video_dict,
                                                 rule_dict=rule_dict_2,
                                                 our_uid=our_uid,
                                                 oss_endpoint=oss_endpoint,
                                                 env=env,
                                                 machine=machine)
                            download_cnt_2 += 1
            if pcursor == "no_more":
                Common.logger(log_type, crawler).info("已经到底了,没有更多内容了\n")
                return

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env, machine):
        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
        return len(repeat_video)
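
    # Per-video pipeline: skip duplicates (MySQL + Feishu sheet) and filtered titles,
    # then download cover + video, upload/publish, write the record to MySQL, and
    # append a row to the Feishu log sheet.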
    @classmethod
    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
        if cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
            Common.logger(log_type, crawler).info('视频已下载\n')
        elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, crawler, "3cd128") for x in y]:
            Common.logger(log_type, crawler).info('视频已下载\n')
        elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type, crawler)):
            Common.logger(log_type, crawler).info('标题已中过滤词\n')
        else:
            # Download the cover image
            Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
            # Download the video
            Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
            # Save the video metadata to a txt file
            Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
            # Upload the video
            Common.logger(log_type, crawler).info("开始上传视频...")
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy=strategy,
                                                      our_uid=our_uid,
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)
            if env == 'dev':
                our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
            else:
                our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
            Common.logger(log_type, crawler).info("视频上传完成")
            if our_video_id is None:
                Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id} 删除该视频文件夹")
                # Upload failed: remove the downloaded video folder
                shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
                return
            # Persist the video record to the database
            insert_sql = f""" insert into crawler_video(video_id,
                            user_id,
                            out_user_id,
                            platform,
                            strategy,
                            out_video_id,
                            video_title,
                            cover_url,
                            video_url,
                            duration,
                            publish_time,
                            play_cnt,
                            crawler_rule,
                            width,
                            height)
                            values({our_video_id},
                            {our_uid},
                            "{video_dict['user_id']}",
                            "{cls.platform}",
                            "定向爬虫策略",
                            "{video_dict['video_id']}",
                            "{video_dict['video_title']}",
                            "{video_dict['cover_url']}",
                            "{video_dict['video_url']}",
                            {int(video_dict['duration'])},
                            "{video_dict['publish_time_str']}",
                            {int(video_dict['play_cnt'])},
                            '{json.dumps(rule_dict)}',
                            {int(video_dict['video_width'])},
                            {int(video_dict['video_height'])}) """
            Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
            MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
            Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
            # Append the video record to the Feishu sheet
            Feishu.insert_columns(log_type, 'kuaishou', "fYdA8F", "ROWS", 1, 2)
            upload_time = int(time.time())
            values = [[our_uid,
                       time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                       "定向榜",
                       str(video_dict['video_id']),
                       video_dict['video_title'],
                       our_video_link,
                       video_dict['play_cnt'],
                       video_dict['comment_cnt'],
                       video_dict['like_cnt'],
                       video_dict['share_cnt'],
                       video_dict['duration'],
                       f"{video_dict['video_width']}*{video_dict['video_height']}",
                       video_dict['publish_time_str'],
                       video_dict['user_name'],
                       video_dict['user_id'],
                       video_dict['avatar_url'],
                       video_dict['cover_url'],
                       video_dict['video_url']]]
            time.sleep(1)
            Feishu.update_values(log_type, 'kuaishou', "fYdA8F", "E2:Z2", values)
            Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
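
    # Entry point for the follow strategy: load the user list from Feishu, then
    # crawl each user's profile feed in turn.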
    @classmethod
    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
        user_list = cls.get_user_list(log_type=log_type, crawler=crawler, sheetid="bTSzxW", env=env, machine=machine)
        for user in user_list:
            out_uid = user["out_uid"]
            user_name = user["user_name"]
            our_uid = user["our_uid"]
            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
            cls.get_videoList(log_type=log_type,
                              crawler=crawler,
                              strategy=strategy,
                              our_uid=our_uid,
                              out_uid=out_uid,
                              oss_endpoint=oss_endpoint,
                              env=env,
                              machine=machine)
            # These class attributes appear to be leftovers; get_videoList keeps its
            # own local cursor and download counters
            cls.pcursor = ""
            cls.download_cnt = 0
            time.sleep(3)


if __name__ == "__main__":
    # print(Follow.filter_words("follow", "kuaishou"))
    # print(Follow.random_title("follow", "kuaishou"))
    # Follow.get_user_list("follow", "kuaishou", "2OLxLr", "dev", "local")
    Follow.get_videoList(log_type="follow",
                         crawler="kuaishou",
                         strategy="定向爬虫策略",
                         our_uid="6282431",
                         out_uid="3xws7ydsnmp5mgq",
                         oss_endpoint="out",
                         env="dev",
                         machine="local")
    # Follow.get_rule("follow", "kuaishou", 1)
    # Follow.get_rule("follow", "kuaishou", 2)
    pass