kuaishou_follow.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/2/24
import os
import random
import shutil
import sys
import time
import requests
import json

sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.users import Users
from common.db import MysqlHelper
from common.publish import Publish


class Follow:
    platform = "快手"
    tag = "快手爬虫,定向爬虫策略"

    @classmethod
    def get_rule(cls, log_type, crawler, index):
        try:
            while True:
                rule_sheet = Feishu.get_values_batch(log_type, crawler, "3iqG4z")
                if rule_sheet is None:
                    Common.logger(log_type, crawler).warning("rule_sheet is None! 10秒后重新获取")
                    time.sleep(10)
                    continue
                # Rule 1 occupies sheet rows 2-8 and rule 2 rows 10-16; column 1 holds
                # the comparator and column 2 the threshold, so each pair concatenates
                # into a string such as ">=100".
                offset = 0 if index == 1 else 8
                rule_dict = {
                    "play_cnt": f"{rule_sheet[1 + offset][1]}{rule_sheet[1 + offset][2]}",
                    "video_width": f"{rule_sheet[2 + offset][1]}{rule_sheet[2 + offset][2]}",
                    "video_height": f"{rule_sheet[3 + offset][1]}{rule_sheet[3 + offset][2]}",
                    "like_cnt": f"{rule_sheet[4 + offset][1]}{rule_sheet[4 + offset][2]}",
                    "duration": f"{rule_sheet[5 + offset][1]}{rule_sheet[5 + offset][2]}",
                    "download_cnt": f"{rule_sheet[6 + offset][1]}{rule_sheet[6 + offset][2]}",
                    "publish_time": f"{rule_sheet[7 + offset][1]}{rule_sheet[7 + offset][2]}",
                }
                return rule_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
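
    # Illustrative shape of the returned dict, assuming the sheet layout described
    # above (the thresholds here are made up; real values come from the Feishu sheet):
    #   {"play_cnt": ">=5000", "video_width": ">=0", "video_height": ">=0",
    #    "like_cnt": ">=0", "duration": ">=40", "download_cnt": "<=10", "publish_time": "<=7"}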

    @classmethod
    def download_rule(cls, video_dict, rule_dict):
        # Each rule value is a comparator string such as ">=100"; concatenating it with
        # the video's actual value yields an expression like "1000>=100" that eval()
        # evaluates. download_cnt is not checked here; get_videoList enforces it.
        return all(
            eval(f"{video_dict[key]}{rule_dict[key]}")
            for key in ("play_cnt", "video_width", "video_height",
                        "like_cnt", "duration", "publish_time")
        )
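
    # A worked example of the check above, with made-up numbers:
    #   video_dict = {"play_cnt": 1000, "video_width": 720, "video_height": 1280,
    #                 "like_cnt": 50, "duration": 60, "publish_time": 3}
    #   rule_dict  = {"play_cnt": ">=100", "video_width": ">=0", "video_height": ">=0",
    #                 "like_cnt": ">=0", "duration": ">=40", "publish_time": "<=30"}
    #   download_rule(video_dict, rule_dict)
    #   -> eval("1000>=100") and eval("720>=0") and ... -> True
    # Note that eval() trusts the sheet contents, so the rules must stay simple
    # comparator strings.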

    # Filter-word list
    @classmethod
    def filter_words(cls, log_type, crawler):
        try:
            while True:
                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, 'HIKVvs')
                if filter_words_sheet is None:
                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
                    # the log promises a 10-second retry; without this sleep the loop spins hot
                    time.sleep(10)
                    continue
                filter_words_list = []
                for x in filter_words_sheet:
                    for y in x:
                        if y is not None:
                            filter_words_list.append(y)
                return filter_words_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')

    # Fallback titles
    @classmethod
    def random_title(cls, log_type, crawler):
        try:
            while True:
                random_title_sheet = Feishu.get_values_batch(log_type, crawler, '0DiyXe')
                if random_title_sheet is None:
                    Common.logger(log_type, crawler).warning(f"random_title_sheet:{random_title_sheet} 10秒钟后重试")
                    time.sleep(10)
                    continue
                random_title_list = []
                for x in random_title_sheet:
                    for y in x:
                        if y is not None:
                            random_title_list.append(y)
                return random.choice(random_title_list)
        except Exception as e:
            Common.logger(log_type, crawler).error(f'random_title:{e}\n')

    # Fetch the out-of-site (Kuaishou) user's profile info
    @classmethod
    def get_out_user_info(cls, log_type, crawler, out_uid):
        try:
            url = "https://www.kuaishou.com/graphql"
            payload = json.dumps({
                "operationName": "visionProfile",
                "variables": {
                    "userId": str(out_uid)
                },
                "query": "query visionProfile($userId: String) {\n visionProfile(userId: $userId) {\n result\n hostName\n userProfile {\n ownerCount {\n fan\n photo\n follow\n photo_public\n __typename\n }\n profile {\n gender\n user_name\n user_id\n headurl\n user_text\n user_profile_bg_url\n __typename\n }\n isFollowing\n __typename\n }\n __typename\n }\n}\n"
            })
            headers = {
                # 'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId={"".join(str(random.choice(range(1, 10))) for _ in range(10))}; kpn=KUAISHOU_VISION; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABE4wGjnJauApJelOpl9Xqo8TVDAyra7Pvo0rZtVgMSZxgVuw4Z6P2UtHv_CHOk2Ne2el1hdE_McCptWs8tRdtYlhXFlVOu8rQX7CwexzOBudJAfB3lDN8LPc4o4qHNwqFxy5J5j_WzdllbqMmaDUK9yUxX6XA-JFezzq9jvBwtGv7_hzB7pFrUcH39z0EYOQaZo5lDl-pE09Gw7wr8NvlZRoSdWlbobCW6oJxuQLJTUr9oj_uIiBhkeb1psaIIc3VwfYQ1UfvobrXAP_WpnRabE_3UZUBOygFMAE; kuaishou.server.web_ph=2b981e2051d7130c977fd31df97fe6f5ad54',
                'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId=3352428474; kpn=KUAISHOU_VISION; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABaRXtfRHlzKlQVj0Nm_M1G2wrIN1p6g3UTwfqfez6rkLVj6mPNt3RBAsLkyemMpvTLerPw0h41Q0lowqcImvIv5dlSGDEpQoj-VTAmOR2Suzm8vCRakG7XziAWyI0PXJKhvdXms-9Giy_4TnoniB49Oo3m7qXjXVBCzybcWS5BO90OLkhD30GYmGEnBBvkBI2oErJy3mNbafQdBQ6SxSUHhoS-1Rj5-IBBNoxoIePYcxZFs4oIiCvaT7sRn-zrF7X2ClPhfNh6lgClmH8MUjXszUfY_TPLCgFMAE; kuaishou.server.web_ph=1b62b98fc28bc23a42cd85240e1fd6025983',
                'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
                'content-type': 'application/json',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
                'Cache-Control': 'no-cache',
                'Connection': 'keep-alive',
                'Origin': 'https://www.kuaishou.com',
                'Pragma': 'no-cache',
                'Sec-Fetch-Dest': 'empty',
                'Sec-Fetch-Mode': 'cors',
                'Sec-Fetch-Site': 'same-origin',
                'accept': '*/*',
                'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"macOS"'
            }
            response = requests.post(url=url, headers=headers, data=payload)
            Common.logger(log_type, crawler).info(f"get_out_user_info_response:{response.text}")
            if response.status_code != 200:
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.text}\n")
                return
            elif 'data' not in response.json():
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()}\n")
                return
            elif 'visionProfile' not in response.json()['data']:
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()['data']}\n")
                return
            elif 'userProfile' not in response.json()['data']['visionProfile']:
                # log the visionProfile object itself; indexing its missing 'userProfile' key would raise KeyError
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()['data']['visionProfile']}\n")
                return
            else:
                userProfile = response.json()['data']['visionProfile']['userProfile']
                Common.logger(log_type, crawler).info(f"userProfile:{userProfile}")
                try:
                    out_fans_str = str(userProfile['ownerCount']['fan'])
                except Exception:
                    out_fans_str = "0"
                try:
                    out_follow_str = str(userProfile['ownerCount']['follow'])
                except Exception:
                    out_follow_str = "0"
                try:
                    out_avatar_url = userProfile['profile']['headurl']
                except Exception:
                    out_avatar_url = ""
                Common.logger(log_type, crawler).info(f"out_fans_str:{out_fans_str}")
                Common.logger(log_type, crawler).info(f"out_follow_str:{out_follow_str}")
                Common.logger(log_type, crawler).info(f"out_avatar_url:{out_avatar_url}")
                # counts arrive either as comma-grouped digits ("1,234") or with a
                # 万 (×10,000) suffix ("1.2万"); normalize both to plain ints
                if "万" in out_fans_str:
                    out_fans = int(float(out_fans_str.split("万")[0]) * 10000)
                else:
                    out_fans = int(out_fans_str.replace(",", ""))
                if "万" in out_follow_str:
                    out_follow = int(float(out_follow_str.split("万")[0]) * 10000)
                else:
                    out_follow = int(out_follow_str.replace(",", ""))
                out_user_dict = {
                    "out_fans": out_fans,
                    "out_follow": out_follow,
                    "out_avatar_url": out_avatar_url
                }
                Common.logger(log_type, crawler).info(f"out_user_dict:{out_user_dict}")
                return out_user_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_out_user_info:{e}\n")
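
    # Examples of the count normalization above (illustrative inputs):
    #   "1.2万" -> int(1.2 * 10000) -> 12000
    #   "1,234" -> int("1234")      -> 1234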

    # Fetch the user list from the Feishu sheet
    @classmethod
    def get_user_list(cls, log_type, crawler, sheetid, env, machine):
        try:
            while True:
                user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
                if user_sheet is None:
                    Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet} 10秒钟后重试")
                    time.sleep(10)
                    continue
                our_user_list = []
                for i in range(1, len(user_sheet)):
                    out_uid = user_sheet[i][2]
                    user_name = user_sheet[i][3]
                    our_uid = user_sheet[i][6]
                    our_user_link = user_sheet[i][7]
                    if out_uid is None or user_name is None:
                        Common.logger(log_type, crawler).info("空行\n")
                    else:
                        Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息\n")
                        if our_uid is None:
                            # no in-site account yet: create one and write it back to the sheet
                            out_user_info = cls.get_out_user_info(log_type, crawler, out_uid)
                            out_user_dict = {
                                "out_uid": out_uid,
                                "user_name": user_name,
                                "out_avatar_url": out_user_info["out_avatar_url"],
                                "out_create_time": '',
                                "out_tag": '',
                                "out_play_cnt": 0,
                                "out_fans": out_user_info["out_fans"],
                                "out_follow": out_user_info["out_follow"],
                                "out_friend": 0,
                                "out_like": 0,
                                "platform": cls.platform,
                                "tag": cls.tag,
                            }
                            our_user_dict = Users.create_user(log_type=log_type, crawler=crawler,
                                                              out_user_dict=out_user_dict, env=env, machine=machine)
                            our_uid = our_user_dict['our_uid']
                            our_user_link = our_user_dict['our_user_link']
                            Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
                                                 [[our_uid, our_user_link]])
                            Common.logger(log_type, crawler).info(f'站内用户信息写入飞书成功!\n')
                            our_user_list.append(our_user_dict)
                        else:
                            our_user_dict = {
                                'out_uid': out_uid,
                                'user_name': user_name,
                                'our_uid': our_uid,
                                'our_user_link': our_user_link,
                            }
                            our_user_list.append(our_user_dict)
                return our_user_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'get_user_list:{e}\n')

    # Clean up the video title
    @classmethod
    def video_title(cls, log_type, crawler, title):
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]
        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]
        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]
        video_title = title3.strip().replace("\n", "") \
            .replace("/", "").replace("快手", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace("#", "").replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "")[:40]
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return cls.random_title(log_type, crawler)
        else:
            return video_title
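
    # Example of the cleanup above (made-up caption): "广场舞 #热门 @某某" keeps the
    # text before the first " #" and "@", strips forbidden characters, and truncates
    # to 40 chars -> "广场舞". An all-placeholder result ("..." / "。。。") falls back
    # to random_title().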

    @classmethod
    def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine):
        download_cnt_1, download_cnt_2 = 0, 0
        pcursor = ""
        while True:
            rule_dict_1 = cls.get_rule(log_type, crawler, 1)
            rule_dict_2 = cls.get_rule(log_type, crawler, 2)
            if rule_dict_1 is None or rule_dict_2 is None:
                Common.logger(log_type, crawler).warning(f"rule_dict is None, 10秒后重试")
                time.sleep(10)
            else:
                break
        # the download_cnt rule is a comparator string such as "<=10"; strip the
        # comparator characters to recover the numeric limit
        download_limit_1 = int(rule_dict_1['download_cnt'].replace("=", "").replace("<", "").replace(">", ""))
        download_limit_2 = int(rule_dict_2['download_cnt'].replace("=", "").replace("<", "").replace(">", ""))
        while True:
            if download_cnt_1 >= download_limit_1 and download_cnt_2 >= download_limit_2:
                Common.logger(log_type, crawler).info(f"规则1已下载{download_cnt_1}条视频,规则2已下载{download_cnt_2}条视频\n")
                return
            url = "https://www.kuaishou.com/graphql"
            payload = json.dumps({
                "operationName": "visionProfilePhotoList",
                "variables": {
                    "userId": out_uid,
                    "pcursor": pcursor,
                    "page": "profile"
                },
                "query": "fragment photoContent on PhotoEntity {\n id\n duration\n caption\n originCaption\n likeCount\n viewCount\n realLikeCount\n coverUrl\n photoUrl\n photoH265Url\n manifest\n manifestH265\n videoResource\n coverUrls {\n url\n __typename\n }\n timestamp\n expTag\n animatedCoverUrl\n distance\n videoRatio\n liked\n stereoType\n profileUserTopPhoto\n musicBlocked\n __typename\n}\n\nfragment feedContent on Feed {\n type\n author {\n id\n name\n headerUrl\n following\n headerUrls {\n url\n __typename\n }\n __typename\n }\n photo {\n ...photoContent\n __typename\n }\n canAddComment\n llsid\n status\n currentPcursor\n tags {\n type\n name\n __typename\n }\n __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n result\n llsid\n webPageArea\n feeds {\n ...feedContent\n __typename\n }\n hostName\n pcursor\n __typename\n }\n}\n"
            })
            headers = {
                'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId={"".join(str(random.choice(range(1, 10))) for _ in range(10))}; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABOLgYYcIJ5ilxU46Jc-HLWThY8sppX3V0htC_KhSGOzAjP2hAOdegzfkZGAxS5rf6rCBS487FkxfYzLkV__I6b1lK16rDjvv94Kkoo4z7mgf8y8rFgWoqrp81JAWTtx00y-wrc1XXPf9RAVQoET70wWaeNG2r5bxtZEiNwpK_zPi0ZdUo0BW13dFKfVssAy2xKYh0UlJ8VSd_vBvyMKSxVBoSf061Kc3w5Nem7YdpVBmH39ceIiBpiGioLzbZqlHiSbwkH_LhUhNXz3o7LITj098KUytk2CgFMAE; kuaishou.server.web_ph=f1033957981996a7d50e849a9ded4cf4adff; kpn=KUAISHOU_VISION',
                'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
                'content-type': 'application/json',
                # 'accept': '*/*',
                # 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
                # 'Cache-Control': 'no-cache',
                # 'Connection': 'keep-alive',
                # 'Origin': 'https://www.kuaishou.com',
                # 'Pragma': 'no-cache',
                # 'Sec-Fetch-Dest': 'empty',
                # 'Sec-Fetch-Mode': 'cors',
                # 'Sec-Fetch-Site': 'same-origin',
                # 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
                # 'sec-ch-ua-mobile': '?0',
                # 'sec-ch-ua-platform': '"macOS"'
            }
            response = requests.post(url=url, headers=headers, data=payload)
            if response.status_code != 200:
                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.text}\n")
                return
            elif 'data' not in response.json():
                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
                return
            elif 'visionProfilePhotoList' not in response.json()['data']:
                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
                return
            elif 'feeds' not in response.json()['data']['visionProfilePhotoList']:
                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']['visionProfilePhotoList']}\n")
                return
            elif len(response.json()['data']['visionProfilePhotoList']['feeds']) == 0:
                Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
                return
            else:
                feeds = response.json()['data']['visionProfilePhotoList']['feeds']
                pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
                for i in range(len(feeds)):
                    if 'photo' not in feeds[i]:
                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]}\n")
                        break
                    # video_title
                    if 'caption' not in feeds[i]['photo']:
                        video_title = cls.random_title(log_type, crawler)
                    elif feeds[i]['photo']['caption'].strip() == "":
                        video_title = cls.random_title(log_type, crawler)
                    else:
                        video_title = cls.video_title(log_type, crawler, feeds[i]['photo']['caption'])
                    if 'videoResource' not in feeds[i]['photo'] \
                            and 'manifest' not in feeds[i]['photo'] \
                            and 'manifestH265' not in feeds[i]['photo']:
                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
                        break
                    videoResource = feeds[i]['photo']['videoResource']
                    if 'h264' not in videoResource and 'hevc' not in videoResource:
                        Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
                        break
                    # video_id
                    if 'h264' in videoResource and 'videoId' in videoResource['h264']:
                        video_id = videoResource['h264']['videoId']
                    elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
                        video_id = videoResource['hevc']['videoId']
                    else:
                        video_id = ""
                    # play_cnt
                    if 'viewCount' not in feeds[i]['photo']:
                        play_cnt = 0
                    else:
                        play_cnt = int(feeds[i]['photo']['viewCount'])
                    # like_cnt
                    if 'realLikeCount' not in feeds[i]['photo']:
                        like_cnt = 0
                    else:
                        like_cnt = feeds[i]['photo']['realLikeCount']
                    # publish_time
                    if 'timestamp' not in feeds[i]['photo']:
                        publish_time_stamp = 0
                        publish_time_str = ''
                        publish_time = 0
                    else:
                        publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                        # days since publication
                        publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))
                    # duration, milliseconds -> seconds
                    if 'duration' not in feeds[i]['photo']:
                        duration = 0
                    else:
                        duration = int(int(feeds[i]['photo']['duration']) / 1000)
                    # video_width / video_height / video_url: prefer the h264 stream and
                    # fall back to hevc; an empty string means neither stream had the field
                    mapping = {}
                    for item in ['width', 'height', 'url']:
                        try:
                            val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
                        except Exception:
                            # nested try, so a failing hevc fallback cannot escape uncaught
                            try:
                                val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
                            except Exception:
                                val = ''
                        mapping[item] = val
                    video_width = int(mapping['width']) if mapping['width'] != '' else 0
                    video_height = int(mapping['height']) if mapping['height'] != '' else 0
                    video_url = mapping['url']
                    # cover_url
                    if 'coverUrl' not in feeds[i]['photo']:
                        cover_url = ""
                    else:
                        cover_url = feeds[i]['photo']['coverUrl']
                    # user_name / avatar_url
                    try:
                        user_name = feeds[i]['author']['name']
                        avatar_url = feeds[i]['author']['headerUrl']
                    except Exception:
                        user_name = ''
                        avatar_url = ''
                    video_dict = {'video_title': video_title,
                                  'video_id': video_id,
                                  'play_cnt': play_cnt,
                                  'comment_cnt': 0,
                                  'like_cnt': like_cnt,
                                  'share_cnt': 0,
                                  'video_width': video_width,
                                  'video_height': video_height,
                                  'duration': duration,
                                  'publish_time': publish_time,
                                  'publish_time_stamp': publish_time_stamp,
                                  'publish_time_str': publish_time_str,
                                  'user_name': user_name,
                                  'user_id': out_uid,
                                  'avatar_url': avatar_url,
                                  'cover_url': cover_url,
                                  'video_url': video_url,
                                  'session': f"kuaishou{int(time.time())}"}
                    rule_1 = cls.download_rule(video_dict, rule_dict_1)
                    rule_2 = cls.download_rule(video_dict, rule_dict_2)
                    Common.logger(log_type, crawler).info(f"video_title:{video_title}")
                    Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
                    # log each metric against both rule sets, e.g. "play_cnt:1000>=100, True"
                    for rule_name, rule, rule_result in (("rule_1", rule_dict_1, rule_1),
                                                         ("rule_2", rule_dict_2, rule_2)):
                        for key in ("play_cnt", "like_cnt", "video_width",
                                    "video_height", "duration", "publish_time"):
                            Common.logger(log_type, crawler).info(
                                f"{key}:{video_dict[key]}{rule[key]}, "
                                f"{eval(str(video_dict[key]) + str(rule[key]))}")
                        Common.logger(log_type, crawler).info(f"{rule_name}:{rule_result}\n")
                    if video_title == "" or video_url == "":
                        Common.logger(log_type, crawler).info("无效视频\n")
                        break
                    elif rule_1 is True:
                        if download_cnt_1 < download_limit_1:
                            download_finished = cls.download_publish(log_type=log_type,
                                                                     crawler=crawler,
                                                                     strategy=strategy,
                                                                     video_dict=video_dict,
                                                                     rule_dict=rule_dict_1,
                                                                     our_uid=our_uid,
                                                                     oss_endpoint=oss_endpoint,
                                                                     env=env,
                                                                     machine=machine)
                            if download_finished is True:
                                download_cnt_1 += 1
                    elif rule_2 is True:
                        if download_cnt_2 < download_limit_2:
                            download_finished = cls.download_publish(log_type=log_type,
                                                                     crawler=crawler,
                                                                     strategy=strategy,
                                                                     video_dict=video_dict,
                                                                     rule_dict=rule_dict_2,
                                                                     our_uid=our_uid,
                                                                     oss_endpoint=oss_endpoint,
                                                                     env=env,
                                                                     machine=machine)
                            if download_finished is True:
                                download_cnt_2 += 1
                    else:
                        Common.logger(log_type, crawler).info("不满足下载规则\n")
                if pcursor == "no_more":
                    Common.logger(log_type, crawler).info("已经到底了,没有更多内容了\n")
                    return
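
    # Pagination note: each response echoes back a `pcursor` token that the next
    # request sends as-is; the server returns the literal string "no_more" once the
    # profile feed is exhausted, which is the loop's normal exit.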

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env, machine):
        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
        return len(repeat_video)
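
    # repeat_video() returns the number of matching rows, so any non-zero value means
    # the video was crawled before, e.g. (with a hypothetical video id):
    #   Follow.repeat_video("follow", "kuaishou", "abc123", "dev", "local") -> 0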

    @classmethod
    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
        download_finished = False
        if cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
            Common.logger(log_type, crawler).info('视频已下载\n')
        elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, crawler, "3cd128") for x in y]:
            Common.logger(log_type, crawler).info('视频已下载\n')
        elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type, crawler)):
            Common.logger(log_type, crawler).info('标题已中过滤词\n')
        else:
            # download the cover
            Common.download_method(log_type=log_type, crawler=crawler, text='cover',
                                   title=video_dict['video_title'], url=video_dict['cover_url'])
            # download the video
            Common.download_method(log_type=log_type, crawler=crawler, text='video',
                                   title=video_dict['video_title'], url=video_dict['video_url'])
            # save the video info to a txt file
            Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
            # upload the video
            Common.logger(log_type, crawler).info("开始上传视频...")
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy=strategy,
                                                      our_uid=our_uid,
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)
            if env == 'dev':
                our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
            else:
                our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
            Common.logger(log_type, crawler).info("视频上传完成")
            if our_video_id is None:
                Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id} 删除该视频文件夹")
                # remove the video folder
                shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
                return download_finished
            # save the video info to the database
            insert_sql = f""" insert into crawler_video(video_id,
                                                        user_id,
                                                        out_user_id,
                                                        platform,
                                                        strategy,
                                                        out_video_id,
                                                        video_title,
                                                        cover_url,
                                                        video_url,
                                                        duration,
                                                        publish_time,
                                                        play_cnt,
                                                        crawler_rule,
                                                        width,
                                                        height)
                                                 values({our_video_id},
                                                        {our_uid},
                                                        "{video_dict['user_id']}",
                                                        "{cls.platform}",
                                                        "定向爬虫策略",
                                                        "{video_dict['video_id']}",
                                                        "{video_dict['video_title']}",
                                                        "{video_dict['cover_url']}",
                                                        "{video_dict['video_url']}",
                                                        {int(video_dict['duration'])},
                                                        "{video_dict['publish_time_str']}",
                                                        {int(video_dict['play_cnt'])},
                                                        '{json.dumps(rule_dict)}',
                                                        {int(video_dict['video_width'])},
                                                        {int(video_dict['video_height'])}) """
            Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
            MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
            Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
            # write the video to the Feishu sheet
            Feishu.insert_columns(log_type, 'kuaishou', "fYdA8F", "ROWS", 1, 2)
            upload_time = int(time.time())
            values = [[our_uid,
                       time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                       "定向榜",
                       str(video_dict['video_id']),
                       video_dict['video_title'],
                       our_video_link,
                       video_dict['play_cnt'],
                       video_dict['comment_cnt'],
                       video_dict['like_cnt'],
                       video_dict['share_cnt'],
                       video_dict['duration'],
                       f"{video_dict['video_width']}*{video_dict['video_height']}",
                       video_dict['publish_time_str'],
                       video_dict['user_name'],
                       video_dict['user_id'],
                       video_dict['avatar_url'],
                       video_dict['cover_url'],
                       video_dict['video_url']]]
            time.sleep(1)
            Feishu.update_values(log_type, 'kuaishou', "fYdA8F", "E2:Z2", values)
            Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
            download_finished = True
        return download_finished

    @classmethod
    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
        user_list = cls.get_user_list(log_type=log_type, crawler=crawler, sheetid="bTSzxW", env=env, machine=machine)
        for user in user_list:
            out_uid = user["out_uid"]
            user_name = user["user_name"]
            our_uid = user["our_uid"]
            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
            cls.get_videoList(log_type=log_type,
                              crawler=crawler,
                              strategy=strategy,
                              our_uid=our_uid,
                              out_uid=out_uid,
                              oss_endpoint=oss_endpoint,
                              env=env,
                              machine=machine)
            time.sleep(3)


if __name__ == "__main__":
    # print(Follow.filter_words("follow", "kuaishou"))
    # print(Follow.random_title("follow", "kuaishou"))
    # Follow.get_user_list("follow", "kuaishou", "2OLxLr", "dev", "local")
    # Follow.get_videoList(log_type="follow",
    #                      crawler="kuaishou",
    #                      strategy="定向爬虫策略",
    #                      our_uid="6282431",
    #                      out_uid="3xws7ydsnmp5mgq",
    #                      oss_endpoint="out",
    #                      env="dev",
    #                      machine="local")
    # Follow.get_rule("follow", "kuaishou", 1)
    # Follow.get_rule("follow", "kuaishou", 2)
    print(Follow.get_out_user_info("follow", "kuaishou", "3xgh4ja9be3wcaw"))
    print(Follow.get_out_user_info("follow", "kuaishou", "3x5wgjhfc7tx8ue"))