# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/2/24
import os
import random
import shutil
import sys
import time
import json

import requests

sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.users import Users
from common.db import MysqlHelper
from common.publish import Publish
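
# Kuaishou "follow" crawler (定向爬虫策略): reads target users and download rules
# from Feishu sheets, pages through each user's profile feed via the Kuaishou web
# GraphQL API, checks every video against two rule sets, and downloads/publishes
# the ones that qualify.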

class Follow:
    platform = "快手"
    tag = "快手爬虫,定向爬虫策略"

    @classmethod
    def get_rule(cls, log_type, crawler, index):
        try:
            while True:
                rule_sheet = Feishu.get_values_batch(log_type, crawler, "3iqG4z")
                if rule_sheet is None:
                    Common.logger(log_type, crawler).warning("rule_sheet is None! 10秒后重新获取")
                    time.sleep(10)
                    continue
                if index == 1:
                    rule_dict = {
                        "play_cnt": f"{rule_sheet[1][1]}{rule_sheet[1][2]}",
                        "video_width": f"{rule_sheet[2][1]}{rule_sheet[2][2]}",
                        "video_height": f"{rule_sheet[3][1]}{rule_sheet[3][2]}",
                        "like_cnt": f"{rule_sheet[4][1]}{rule_sheet[4][2]}",
                        "duration": f"{rule_sheet[5][1]}{rule_sheet[5][2]}",
                        "download_cnt": f"{rule_sheet[6][1]}{rule_sheet[6][2]}",
                        "publish_time": f"{rule_sheet[7][1]}{rule_sheet[7][2]}",
                    }
                    # for k, v in rule_dict.items():
                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
                    return rule_dict
                elif index == 2:
                    rule_dict = {
                        "play_cnt": f"{rule_sheet[9][1]}{rule_sheet[9][2]}",
                        "video_width": f"{rule_sheet[10][1]}{rule_sheet[10][2]}",
                        "video_height": f"{rule_sheet[11][1]}{rule_sheet[11][2]}",
                        "like_cnt": f"{rule_sheet[12][1]}{rule_sheet[12][2]}",
                        "duration": f"{rule_sheet[13][1]}{rule_sheet[13][2]}",
                        "download_cnt": f"{rule_sheet[14][1]}{rule_sheet[14][2]}",
                        "publish_time": f"{rule_sheet[15][1]}{rule_sheet[15][2]}",
                    }
                    # for k, v in rule_dict.items():
                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
                    return rule_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
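
    # Shape of the returned dict, as a sketch (the thresholds are hypothetical;
    # the real values come from the "3iqG4z" sheet, operator cell + number cell):
    # {"play_cnt": ">=1000", "video_width": ">=720", "video_height": ">=720",
    #  "like_cnt": ">=0", "duration": ">=40", "download_cnt": "<=20",
    #  "publish_time": "<=30"}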

    @classmethod
    def download_rule(cls, video_dict, rule_dict):
        if eval(f"{video_dict['play_cnt']}{rule_dict['play_cnt']}") is True \
                and eval(f"{video_dict['video_width']}{rule_dict['video_width']}") is True \
                and eval(f"{video_dict['video_height']}{rule_dict['video_height']}") is True \
                and eval(f"{video_dict['like_cnt']}{rule_dict['like_cnt']}") is True \
                and eval(f"{video_dict['duration']}{rule_dict['duration']}") is True \
                and eval(f"{video_dict['publish_time']}{rule_dict['publish_time']}") is True:
            return True
        else:
            return False
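
    # How the eval-based check works, with hypothetical values: video_dict["play_cnt"] = 5000
    # and rule_dict["play_cnt"] = ">=1000" concatenate to the expression string
    # "5000>=1000", which eval() evaluates to True; every field must pass.
    # Note that download_cnt is not checked here; get_videoList enforces it separately.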

    # Filter-word list
    @classmethod
    def filter_words(cls, log_type, crawler):
        try:
            while True:
                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, 'HIKVvs')
                if filter_words_sheet is None:
                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
                    time.sleep(10)
                    continue
                filter_words_list = []
                for x in filter_words_sheet:
                    for y in x:
                        if y is not None:
                            filter_words_list.append(y)
                return filter_words_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')

    # Fallback titles
    @classmethod
    def random_title(cls, log_type, crawler):
        try:
            while True:
                random_title_sheet = Feishu.get_values_batch(log_type, crawler, '0DiyXe')
                if random_title_sheet is None:
                    Common.logger(log_type, crawler).warning(f"random_title_sheet:{random_title_sheet} 10秒钟后重试")
                    time.sleep(10)
                    continue
                random_title_list = []
                for x in random_title_sheet:
                    for y in x:
                        if y is not None:
                            random_title_list.append(y)
                return random.choice(random_title_list)
        except Exception as e:
            Common.logger(log_type, crawler).error(f'random_title:{e}\n')

    # Fetch profile info for the off-platform (Kuaishou) user
    @classmethod
    def get_out_user_info(cls, log_type, crawler, out_uid):
        try:
            url = "https://www.kuaishou.com/graphql"
            payload = json.dumps({
                "operationName": "visionProfile",
                "variables": {
                    "userId": str(out_uid)
                },
                "query": "query visionProfile($userId: String) {\n visionProfile(userId: $userId) {\n result\n hostName\n userProfile {\n ownerCount {\n fan\n photo\n follow\n photo_public\n __typename\n }\n profile {\n gender\n user_name\n user_id\n headurl\n user_text\n user_profile_bg_url\n __typename\n }\n isFollowing\n __typename\n }\n __typename\n }\n}\n"
            })
            headers = {
                # 'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId={"".join(str(random.choice(range(1, 10))) for _ in range(10))}; kpn=KUAISHOU_VISION; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABE4wGjnJauApJelOpl9Xqo8TVDAyra7Pvo0rZtVgMSZxgVuw4Z6P2UtHv_CHOk2Ne2el1hdE_McCptWs8tRdtYlhXFlVOu8rQX7CwexzOBudJAfB3lDN8LPc4o4qHNwqFxy5J5j_WzdllbqMmaDUK9yUxX6XA-JFezzq9jvBwtGv7_hzB7pFrUcH39z0EYOQaZo5lDl-pE09Gw7wr8NvlZRoSdWlbobCW6oJxuQLJTUr9oj_uIiBhkeb1psaIIc3VwfYQ1UfvobrXAP_WpnRabE_3UZUBOygFMAE; kuaishou.server.web_ph=2b981e2051d7130c977fd31df97fe6f5ad54',
                'Cookie': 'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId=3352428474; kpn=KUAISHOU_VISION; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABaRXtfRHlzKlQVj0Nm_M1G2wrIN1p6g3UTwfqfez6rkLVj6mPNt3RBAsLkyemMpvTLerPw0h41Q0lowqcImvIv5dlSGDEpQoj-VTAmOR2Suzm8vCRakG7XziAWyI0PXJKhvdXms-9Giy_4TnoniB49Oo3m7qXjXVBCzybcWS5BO90OLkhD30GYmGEnBBvkBI2oErJy3mNbafQdBQ6SxSUHhoS-1Rj5-IBBNoxoIePYcxZFs4oIiCvaT7sRn-zrF7X2ClPhfNh6lgClmH8MUjXszUfY_TPLCgFMAE; kuaishou.server.web_ph=1b62b98fc28bc23a42cd85240e1fd6025983',
                'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
                'content-type': 'application/json',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
                'Cache-Control': 'no-cache',
                'Connection': 'keep-alive',
                'Origin': 'https://www.kuaishou.com',
                'Pragma': 'no-cache',
                'Sec-Fetch-Dest': 'empty',
                'Sec-Fetch-Mode': 'cors',
                'Sec-Fetch-Site': 'same-origin',
                'accept': '*/*',
                'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"macOS"'
            }
            response = requests.post(url=url, headers=headers, data=payload)
            Common.logger(log_type, crawler).info(f"get_out_user_info_response:{response.text}")
            if response.status_code != 200:
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.text}\n")
                return
            elif 'data' not in response.json():
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()}\n")
                return
            elif 'visionProfile' not in response.json()['data']:
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()['data']}\n")
                return
            elif 'userProfile' not in response.json()['data']['visionProfile']:
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()['data']['visionProfile']}\n")
                return
            else:
                userProfile = response.json()['data']['visionProfile']['userProfile']
                Common.logger(log_type, crawler).info(f"userProfile:{userProfile}")
                try:
                    out_fans_str = str(userProfile['ownerCount']['fan'])
                except Exception:
                    out_fans_str = "0"
                try:
                    out_follow_str = str(userProfile['ownerCount']['follow'])
                except Exception:
                    out_follow_str = "0"
                try:
                    out_avatar_url = userProfile['profile']['headurl']
                except Exception:
                    out_avatar_url = ""
                Common.logger(log_type, crawler).info(f"out_fans_str:{out_fans_str}")
                Common.logger(log_type, crawler).info(f"out_follow_str:{out_follow_str}")
                Common.logger(log_type, crawler).info(f"out_avatar_url:{out_avatar_url}")
                if "万" in out_fans_str:
                    out_fans = int(float(out_fans_str.split("万")[0]) * 10000)
                else:
                    out_fans = int(out_fans_str.replace(",", ""))
                if "万" in out_follow_str:
                    out_follow = int(float(out_follow_str.split("万")[0]) * 10000)
                else:
                    out_follow = int(out_follow_str.replace(",", ""))
                out_user_dict = {
                    "out_fans": out_fans,
                    "out_follow": out_follow,
                    "out_avatar_url": out_avatar_url
                }
                Common.logger(log_type, crawler).info(f"out_user_dict:{out_user_dict}")
                return out_user_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_out_user_info:{e}\n")
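
    # Follower-count parsing, worked through with hypothetical values: the API
    # returns display strings, so "1.2万" -> int(float("1.2") * 10000) = 12000,
    # while "1,234" -> int("1234") = 1234.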

    # Fetch the user list from the Feishu sheet
    @classmethod
    def get_user_list(cls, log_type, crawler, sheetid, env, machine):
        try:
            while True:
                user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
                if user_sheet is None:
                    Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet} 10秒钟后重试")
                    time.sleep(10)
                    continue
                our_user_list = []
                for i in range(1, len(user_sheet)):
                    # for i in range(1, 2):
                    out_uid = user_sheet[i][2]
                    user_name = user_sheet[i][3]
                    our_uid = user_sheet[i][6]
                    our_user_link = user_sheet[i][7]
                    if out_uid is None or user_name is None:
                        Common.logger(log_type, crawler).info("空行\n")
                    else:
                        Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息\n")
                        if our_uid is None:
                            out_user_info = cls.get_out_user_info(log_type, crawler, out_uid)
                            out_user_dict = {
                                "out_uid": out_uid,
                                "user_name": user_name,
                                "out_avatar_url": out_user_info["out_avatar_url"],
                                "out_create_time": '',
                                "out_tag": '',
                                "out_play_cnt": 0,
                                "out_fans": out_user_info["out_fans"],
                                "out_follow": out_user_info["out_follow"],
                                "out_friend": 0,
                                "out_like": 0,
                                "platform": cls.platform,
                                "tag": cls.tag,
                            }
                            our_user_dict = Users.create_user(log_type=log_type, crawler=crawler,
                                                              out_user_dict=out_user_dict, env=env, machine=machine)
                            our_uid = our_user_dict['our_uid']
                            our_user_link = our_user_dict['our_user_link']
                            Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
                                                 [[our_uid, our_user_link]])
                            Common.logger(log_type, crawler).info('站内用户信息写入飞书成功!\n')
                            our_user_list.append(our_user_dict)
                        else:
                            our_user_dict = {
                                'out_uid': out_uid,
                                'user_name': user_name,
                                'our_uid': our_uid,
                                'our_user_link': our_user_link,
                            }
                            our_user_list.append(our_user_dict)
                return our_user_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'get_user_list:{e}\n')

    # Clean up the video title
    @classmethod
    def video_title(cls, log_type, crawler, title):
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]
        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]
        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]
        video_title = title3.strip().replace("\n", "") \
            .replace("/", "").replace("快手", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace("#", "").replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "")[:40]
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return cls.random_title(log_type, crawler)
        else:
            return video_title
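
    # Title-cleaning sketch with a hypothetical caption "风景真美 #快手 @某人":
    # splitting on " #" and "@" keeps the first non-empty segment ("风景真美"),
    # the replacements above strip the listed punctuation, and the result is
    # capped at 40 characters; empty or ellipsis-only results fall back to
    # random_title().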

    @classmethod
    def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine):
        try:
            download_cnt_1, download_cnt_2 = 0, 0
            pcursor = ""
            while True:
                rule_dict_1 = cls.get_rule(log_type, crawler, 1)
                rule_dict_2 = cls.get_rule(log_type, crawler, 2)
                if rule_dict_1 is None or rule_dict_2 is None:
                    Common.logger(log_type, crawler).warning("rule_dict is None, 10秒后重试")
                    time.sleep(10)
                else:
                    break
            # download_cnt rules are strings like "<=20"; strip the operator
            # characters to recover the numeric limit
            download_limit_1 = int(rule_dict_1['download_cnt'].replace("=", "").replace("<", "").replace(">", ""))
            download_limit_2 = int(rule_dict_2['download_cnt'].replace("=", "").replace("<", "").replace(">", ""))
            while True:
                if download_cnt_1 >= download_limit_1 and download_cnt_2 >= download_limit_2:
                    Common.logger(log_type, crawler).info(f"规则1已下载{download_cnt_1}条视频,规则2已下载{download_cnt_2}条视频\n")
                    return
                url = "https://www.kuaishou.com/graphql"
                payload = json.dumps({
                    "operationName": "visionProfilePhotoList",
                    "variables": {
                        "userId": out_uid,
                        "pcursor": pcursor,
                        "page": "profile"
                    },
                    "query": "fragment photoContent on PhotoEntity {\n id\n duration\n caption\n originCaption\n likeCount\n viewCount\n realLikeCount\n coverUrl\n photoUrl\n photoH265Url\n manifest\n manifestH265\n videoResource\n coverUrls {\n url\n __typename\n }\n timestamp\n expTag\n animatedCoverUrl\n distance\n videoRatio\n liked\n stereoType\n profileUserTopPhoto\n musicBlocked\n __typename\n}\n\nfragment feedContent on Feed {\n type\n author {\n id\n name\n headerUrl\n following\n headerUrls {\n url\n __typename\n }\n __typename\n }\n photo {\n ...photoContent\n __typename\n }\n canAddComment\n llsid\n status\n currentPcursor\n tags {\n type\n name\n __typename\n }\n __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n result\n llsid\n webPageArea\n feeds {\n ...feedContent\n __typename\n }\n hostName\n pcursor\n __typename\n }\n}\n"
                })
                headers = {
                    'Cookie': f'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId={"".join(str(random.choice(range(1, 10))) for _ in range(10))}; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABOLgYYcIJ5ilxU46Jc-HLWThY8sppX3V0htC_KhSGOzAjP2hAOdegzfkZGAxS5rf6rCBS487FkxfYzLkV__I6b1lK16rDjvv94Kkoo4z7mgf8y8rFgWoqrp81JAWTtx00y-wrc1XXPf9RAVQoET70wWaeNG2r5bxtZEiNwpK_zPi0ZdUo0BW13dFKfVssAy2xKYh0UlJ8VSd_vBvyMKSxVBoSf061Kc3w5Nem7YdpVBmH39ceIiBpiGioLzbZqlHiSbwkH_LhUhNXz3o7LITj098KUytk2CgFMAE; kuaishou.server.web_ph=f1033957981996a7d50e849a9ded4cf4adff; kpn=KUAISHOU_VISION',
                    'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
                    'content-type': 'application/json',
                    # 'accept': '*/*',
                    # 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
                    # 'Cache-Control': 'no-cache',
                    # 'Connection': 'keep-alive',
                    # 'Origin': 'https://www.kuaishou.com',
                    # 'Pragma': 'no-cache',
                    # 'Sec-Fetch-Dest': 'empty',
                    # 'Sec-Fetch-Mode': 'cors',
                    # 'Sec-Fetch-Site': 'same-origin',
                    # 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
                    # 'sec-ch-ua-mobile': '?0',
                    # 'sec-ch-ua-platform': '"macOS"'
                }
                response = requests.post(url=url, headers=headers, data=payload)
                if response.status_code != 200:
                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.text}\n")
                    return
                elif 'data' not in response.json():
                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
                    return
                elif 'visionProfilePhotoList' not in response.json()['data']:
                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
                    return
                elif 'feeds' not in response.json()['data']['visionProfilePhotoList']:
                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']['visionProfilePhotoList']}\n")
                    return
                elif len(response.json()['data']['visionProfilePhotoList']['feeds']) == 0:
                    Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
                    return
                else:
                    feeds = response.json()['data']['visionProfilePhotoList']['feeds']
                    pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
                    Common.logger(log_type, crawler).info(f"feeds0: {feeds}\n")
                    for i in range(len(feeds)):
                        if 'photo' not in feeds[i]:
                            Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]}\n")
                            break
                        # video_title
                        if 'caption' not in feeds[i]['photo']:
                            video_title = cls.random_title(log_type, crawler)
                        elif feeds[i]['photo']['caption'].strip() == "":
                            video_title = cls.random_title(log_type, crawler)
                        else:
                            video_title = cls.video_title(log_type, crawler, feeds[i]['photo']['caption'])
                        if 'videoResource' not in feeds[i]['photo'] \
                                and 'manifest' not in feeds[i]['photo'] \
                                and 'manifestH265' not in feeds[i]['photo']:
                            Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
                            break
                        videoResource = feeds[i]['photo']['videoResource']
                        if 'h264' not in videoResource and 'hevc' not in videoResource:
                            Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
                            break
                        # video_id
                        if 'h264' in videoResource and 'videoId' in videoResource['h264']:
                            video_id = videoResource['h264']['videoId']
                        elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
                            video_id = videoResource['hevc']['videoId']
                        else:
                            video_id = ""
                        # play_cnt
                        if 'viewCount' not in feeds[i]['photo']:
                            play_cnt = 0
                        else:
                            play_cnt = int(feeds[i]['photo']['viewCount'])
                        # like_cnt
                        if 'realLikeCount' not in feeds[i]['photo']:
                            like_cnt = 0
                        else:
                            like_cnt = feeds[i]['photo']['realLikeCount']
                        # publish_time
                        if 'timestamp' not in feeds[i]['photo']:
                            publish_time_stamp = 0
                            publish_time_str = ''
                            publish_time = 0
                        else:
                            publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
                            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                            publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))
                        # duration
                        if 'duration' not in feeds[i]['photo']:
                            duration = 0
                        else:
                            duration = int(int(feeds[i]['photo']['duration']) / 1000)
                        # video_width / video_height / video_url: prefer the h264
                        # stream, fall back to hevc, else leave empty
                        mapping = {}
                        for item in ['width', 'height', 'url']:
                            try:
                                val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
                            except Exception:
                                try:
                                    val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
                                except Exception:
                                    val = ''
                            mapping[item] = val
                        video_width = int(mapping['width']) if mapping['width'] != '' else 0
                        video_height = int(mapping['height']) if mapping['height'] != '' else 0
                        video_url = mapping['url']
                        # cover_url
                        if 'coverUrl' not in feeds[i]['photo']:
                            cover_url = ""
                        else:
                            cover_url = feeds[i]['photo']['coverUrl']
                        # user_name / avatar_url
                        try:
                            user_name = feeds[i]['author']['name']
                            avatar_url = feeds[i]['author']['headerUrl']
                        except Exception:
                            user_name = ''
                            avatar_url = ''
                        video_dict = {'video_title': video_title,
                                      'video_id': video_id,
                                      'play_cnt': play_cnt,
                                      'comment_cnt': 0,
                                      'like_cnt': like_cnt,
                                      'share_cnt': 0,
                                      'video_width': video_width,
                                      'video_height': video_height,
                                      'duration': duration,
                                      'publish_time': publish_time,
                                      'publish_time_stamp': publish_time_stamp,
                                      'publish_time_str': publish_time_str,
                                      'user_name': user_name,
                                      'user_id': out_uid,
                                      'avatar_url': avatar_url,
                                      'cover_url': cover_url,
                                      'video_url': video_url,
                                      'session': f"kuaishou{int(time.time())}"}
                        rule_1 = cls.download_rule(video_dict, rule_dict_1)
                        Common.logger(log_type, crawler).info(f"video_title:{video_title}")
                        Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
                        Common.logger(log_type, crawler).info(f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
                        Common.logger(log_type, crawler).info(f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
                        Common.logger(log_type, crawler).info(f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
                        Common.logger(log_type, crawler).info(f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
                        Common.logger(log_type, crawler).info(f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
                        Common.logger(log_type, crawler).info(f"publish_time:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
                        Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")
                        rule_2 = cls.download_rule(video_dict, rule_dict_2)
                        Common.logger(log_type, crawler).info(f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
                        Common.logger(log_type, crawler).info(f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
                        Common.logger(log_type, crawler).info(f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
                        Common.logger(log_type, crawler).info(f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
                        Common.logger(log_type, crawler).info(f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
                        Common.logger(log_type, crawler).info(f"publish_time:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
                        Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
                        if video_title == "" or video_url == "":
                            Common.logger(log_type, crawler).info("无效视频\n")
                            break
                        elif rule_1 is True:
                            if download_cnt_1 < download_limit_1:
                                download_finished = cls.download_publish(log_type=log_type,
                                                                         crawler=crawler,
                                                                         strategy=strategy,
                                                                         video_dict=video_dict,
                                                                         rule_dict=rule_dict_1,
                                                                         our_uid=our_uid,
                                                                         oss_endpoint=oss_endpoint,
                                                                         env=env,
                                                                         machine=machine)
                                if download_finished is True:
                                    download_cnt_1 += 1
                        elif rule_2 is True:
                            if download_cnt_2 < download_limit_2:
                                download_finished = cls.download_publish(log_type=log_type,
                                                                         crawler=crawler,
                                                                         strategy=strategy,
                                                                         video_dict=video_dict,
                                                                         rule_dict=rule_dict_2,
                                                                         our_uid=our_uid,
                                                                         oss_endpoint=oss_endpoint,
                                                                         env=env,
                                                                         machine=machine)
                                if download_finished is True:
                                    download_cnt_2 += 1
                        else:
                            Common.logger(log_type, crawler).info("不满足下载规则\n")
                            Common.logger(log_type, crawler).info(f"feeds: {feeds}\n")
                    if pcursor == "no_more":
                        Common.logger(log_type, crawler).info("已经到底了,没有更多内容了\n")
                        return
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_videoList:{e}\n")
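
    # Pagination sketch: each visionProfilePhotoList response returns a pcursor
    # token that is fed back into the next request; the loop stops when the API
    # reports pcursor == "no_more", the feed runs empty, or both download limits
    # are reached.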

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env, machine):
        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
        return len(repeat_video)
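
    # Dedup sketch (the id below is hypothetical): cls.repeat_video(log_type,
    # crawler, "5214381135350233", env, machine) returns the number of
    # crawler_video rows already holding that out_video_id, so any non-zero
    # result means the video was crawled before and download_publish skips it.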

    @classmethod
    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
        try:
            download_finished = False
            if cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
                Common.logger(log_type, crawler).info('视频已下载\n')
            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, crawler, "3cd128") for x in y]:
                Common.logger(log_type, crawler).info('视频已下载\n')
            elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type, crawler)):
                Common.logger(log_type, crawler).info('标题已中过滤词\n')
            else:
                # Download the cover image
                Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
                # Download the video
                Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
                # Save the video info to a txt file
                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
                # Upload the video
                Common.logger(log_type, crawler).info("开始上传视频...")
                our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                          crawler=crawler,
                                                          strategy=strategy,
                                                          our_uid=our_uid,
                                                          env=env,
                                                          oss_endpoint=oss_endpoint)
                if env == 'dev':
                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                else:
                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                Common.logger(log_type, crawler).info("视频上传完成")
                if our_video_id is None:
                    Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id} 删除该视频文件夹")
                    # Delete the video folder
                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
                    return download_finished
                # Save the video info to the database
                insert_sql = f""" insert into crawler_video(video_id,
                                user_id,
                                out_user_id,
                                platform,
                                strategy,
                                out_video_id,
                                video_title,
                                cover_url,
                                video_url,
                                duration,
                                publish_time,
                                play_cnt,
                                crawler_rule,
                                width,
                                height)
                                values({our_video_id},
                                {our_uid},
                                "{video_dict['user_id']}",
                                "{cls.platform}",
                                "定向爬虫策略",
                                "{video_dict['video_id']}",
                                "{video_dict['video_title']}",
                                "{video_dict['cover_url']}",
                                "{video_dict['video_url']}",
                                {int(video_dict['duration'])},
                                "{video_dict['publish_time_str']}",
                                {int(video_dict['play_cnt'])},
                                '{json.dumps(rule_dict)}',
                                {int(video_dict['video_width'])},
                                {int(video_dict['video_height'])}) """
                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
                Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
                # Write the video row to the Feishu sheet
                Feishu.insert_columns(log_type, 'kuaishou', "fYdA8F", "ROWS", 1, 2)
                upload_time = int(time.time())
                values = [[our_uid,
                           time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                           "定向榜",
                           str(video_dict['video_id']),
                           video_dict['video_title'],
                           our_video_link,
                           video_dict['play_cnt'],
                           video_dict['comment_cnt'],
                           video_dict['like_cnt'],
                           video_dict['share_cnt'],
                           video_dict['duration'],
                           f"{video_dict['video_width']}*{video_dict['video_height']}",
                           video_dict['publish_time_str'],
                           video_dict['user_name'],
                           video_dict['user_id'],
                           video_dict['avatar_url'],
                           video_dict['cover_url'],
                           video_dict['video_url']]]
                time.sleep(1)
                Feishu.update_values(log_type, 'kuaishou', "fYdA8F", "E2:Z2", values)
                Common.logger(log_type, crawler).info("视频已保存至云文档\n")
                download_finished = True
            return download_finished
        except Exception as e:
            Common.logger(log_type, crawler).error(f"download_publish:{e}\n")

    @classmethod
    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
        user_list = cls.get_user_list(log_type=log_type, crawler=crawler, sheetid="bTSzxW", env=env, machine=machine)
        for user in user_list:
            out_uid = user["out_uid"]
            user_name = user["user_name"]
            our_uid = user["our_uid"]
            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
            cls.get_videoList(log_type=log_type,
                              crawler=crawler,
                              strategy=strategy,
                              our_uid=our_uid,
                              out_uid=out_uid,
                              oss_endpoint=oss_endpoint,
                              env=env,
                              machine=machine)
            time.sleep(3)

if __name__ == "__main__":
    # print(Follow.filter_words("follow", "kuaishou"))
    # print(Follow.random_title("follow", "kuaishou"))
    # Follow.get_user_list("follow", "kuaishou", "2OLxLr", "dev", "local")
    # Follow.get_videoList(log_type="follow",
    #                      crawler="kuaishou",
    #                      strategy="定向爬虫策略",
    #                      our_uid="6282431",
    #                      out_uid="3xws7ydsnmp5mgq",
    #                      oss_endpoint="out",
    #                      env="dev",
    #                      machine="local")
    # Follow.get_rule("follow", "kuaishou", 1)
    # Follow.get_rule("follow", "kuaishou", 2)
    print(Follow.get_out_user_info("follow", "kuaishou", "3xgh4ja9be3wcaw"))
    print(Follow.get_out_user_info("follow", "kuaishou", "3x5wgjhfc7tx8ue"))
    pass