kuaishou_author.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/5/24
import os
import shutil
import sys
import time
from hashlib import md5
import requests
import json
import urllib3
from requests.adapters import HTTPAdapter
sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.getuser import getUser
from common.db import MysqlHelper
from common.publish import Publish
from common.public import random_title, get_config_from_mysql
from common.public import get_user_from_mysql
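

# Kuaishou author-page crawler: reads the target account list, pulls each author's profile
# and video feed through the www.kuaishou.com GraphQL endpoint, filters the videos against
# the download rules, and downloads/publishes the ones that qualify.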
class KuaishouauthorScheduling:
    platform = "快手"
    # cls.tag is referenced in get_user_list(); the value below is an assumed placeholder,
    # adjust it to the project's real tag if it differs
    tag = "快手爬虫,定向爬虫策略"

    # Fetch the off-site (Kuaishou) user info
    @classmethod
    def get_out_user_info(cls, log_type, crawler, out_uid):
        try:
            url = "https://www.kuaishou.com/graphql"
            payload = json.dumps({
                "operationName": "visionProfile",
                "variables": {
                    "userId": out_uid
                },
                "query": "query visionProfile($userId: String) {\n visionProfile(userId: $userId) {\n result\n hostName\n userProfile {\n ownerCount {\n fan\n photo\n follow\n photo_public\n __typename\n }\n profile {\n gender\n user_name\n user_id\n headurl\n user_text\n user_profile_bg_url\n __typename\n }\n isFollowing\n __typename\n }\n __typename\n }\n}\n"
            })
            # s = string.ascii_lowercase
            # r = random.choice(s)
            headers = {
                'Accept': '*/*',
                'Content-Type': 'application/json',
                'Origin': 'https://www.kuaishou.com',
                'Cookie': f'kpf=PC_WEB; clientid=3; did={cls.get_did(log_type, crawler)}; kpn=KUAISHOU_VISION',
                'Content-Length': '552',
                'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
                'Host': 'www.kuaishou.com',
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6.1 Safari/605.1.15',
                'Referer': 'https://www.kuaishou.com/profile/{}'.format(out_uid),
                'Accept-Encoding': 'gzip, deflate, br',
                'Connection': 'keep-alive'
            }
            urllib3.disable_warnings()
            s = requests.session()
            # max_retries=3: retry each request up to 3 times
            s.mount('http://', HTTPAdapter(max_retries=3))
            s.mount('https://', HTTPAdapter(max_retries=3))
            response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False,
                              timeout=5)
            response.close()
            # Common.logger(log_type, crawler).info(f"get_out_user_info_response:{response.text}")
            if response.status_code != 200:
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.text}\n")
                return
            elif 'data' not in response.json():
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()}\n")
                return
            elif 'visionProfile' not in response.json()['data']:
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()['data']}\n")
                return
            elif 'userProfile' not in response.json()['data']['visionProfile']:
                # Log the visionProfile node itself; 'userProfile' is missing here, so indexing it would raise KeyError
                Common.logger(log_type, crawler).warning(
                    f"get_out_user_info_response:{response.json()['data']['visionProfile']}\n")
                return
            else:
                userProfile = response.json()['data']['visionProfile']['userProfile']
                # Common.logger(log_type, crawler).info(f"userProfile:{userProfile}")
                try:
                    out_fans_str = str(userProfile['ownerCount']['fan'])
                except Exception:
                    out_fans_str = "0"
                try:
                    out_follow_str = str(userProfile['ownerCount']['follow'])
                except Exception:
                    out_follow_str = "0"
                try:
                    out_avatar_url = userProfile['profile']['headurl']
                except Exception:
                    out_avatar_url = ""
                Common.logger(log_type, crawler).info(f"out_fans_str:{out_fans_str}")
                Common.logger(log_type, crawler).info(f"out_follow_str:{out_follow_str}")
                Common.logger(log_type, crawler).info(f"out_avatar_url:{out_avatar_url}")
                # Counts such as "1.2万" are converted to plain integers
                if "万" in out_fans_str:
                    out_fans = int(float(out_fans_str.split("万")[0]) * 10000)
                else:
                    out_fans = int(out_fans_str.replace(",", ""))
                if "万" in out_follow_str:
                    out_follow = int(float(out_follow_str.split("万")[0]) * 10000)
                else:
                    out_follow = int(out_follow_str.replace(",", ""))
                out_user_dict = {
                    "out_fans": out_fans,
                    "out_follow": out_follow,
                    "out_avatar_url": out_avatar_url
                }
                Common.logger(log_type, crawler).info(f"out_user_dict:{out_user_dict}")
                return out_user_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_out_user_info:{e}\n")

    # Fetch the user info list
    @classmethod
    def get_user_list(cls, log_type, crawler, sheetid, env, machine):
        try:
            while True:
                user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
                if user_sheet is None:
                    Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet} 10秒钟后重试")
                    time.sleep(10)  # wait before retrying, as the log message states
                    continue
                our_user_list = []
                for i in range(1, len(user_sheet)):
                # for i in range(1, 2):
                    out_uid = user_sheet[i][2]
                    user_name = user_sheet[i][3]
                    our_uid = user_sheet[i][6]
                    our_user_link = user_sheet[i][7]
                    if out_uid is None or user_name is None:
                        Common.logger(log_type, crawler).info("空行\n")
                    else:
                        Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息\n")
                        if our_uid is None:
                            out_user_info = cls.get_out_user_info(log_type, crawler, out_uid)
                            out_user_dict = {
                                "out_uid": out_uid,
                                "user_name": user_name,
                                "out_avatar_url": out_user_info["out_avatar_url"],
                                "out_create_time": '',
                                "out_tag": '',
                                "out_play_cnt": 0,
                                "out_fans": out_user_info["out_fans"],
                                "out_follow": out_user_info["out_follow"],
                                "out_friend": 0,
                                "out_like": 0,
                                "platform": cls.platform,
                                "tag": cls.tag,
                            }
                            our_user_dict = getUser.create_user(log_type=log_type, crawler=crawler,
                                                                out_user_dict=out_user_dict, env=env, machine=machine)
                            our_uid = our_user_dict['our_uid']
                            our_user_link = our_user_dict['our_user_link']
                            Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
                                                 [[our_uid, our_user_link]])
                            Common.logger(log_type, crawler).info(f'站内用户信息写入飞书成功!\n')
                            our_user_list.append(our_user_dict)
                        else:
                            our_user_dict = {
                                'out_uid': out_uid,
                                'user_name': user_name,
                                'our_uid': our_uid,
                                'our_user_link': our_user_link,
                            }
                            our_user_list.append(our_user_dict)
                return our_user_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'get_user_list:{e}\n')

    # Clean up the video title, e.g. "晚霞真美 #风景 @某某" -> "晚霞真美"
    @classmethod
    def video_title(cls, log_type, crawler, env, title):
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]
        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]
        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]
        video_title = title3.strip().replace("\n", "") \
            .replace("/", "").replace("快手", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace("#", "").replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return random_title(log_type, crawler, env, text='title')
        else:
            return video_title

    @classmethod
    def get_did(cls, log_type, crawler):
        while True:
            did_sheet = Feishu.get_values_batch(log_type, crawler, "G7acT6")
            if did_sheet is None:
                Common.logger(log_type, crawler).warning(f"did_sheet:{did_sheet}")
                time.sleep(2)
                continue
            return did_sheet[0][1]
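
    # NOTE: get_videoList() below calls cls.get_rule() and cls.download_rule(), which do not
    # appear in this file. The two methods below are a minimal sketch of what the call sites
    # seem to expect — rule values read as comparison strings such as ">=1000" — and are an
    # assumption, not the project's original implementation; replace them with the real helpers.
    @classmethod
    def get_rule(cls, log_type, crawler, rule_id):
        # Placeholder rule set (hypothetical values); the real method presumably loads the
        # config for rule_id from Feishu or MySQL.
        return {
            "play_cnt": ">=0",
            "like_cnt": ">=0",
            "video_width": ">=0",
            "video_height": ">=0",
            "duration": ">=0",
            "publish_time": ">=0",
            "download_cnt": "<=10",
        }

    @classmethod
    def download_rule(cls, video_dict, rule_dict):
        # Evaluate each rule as "<video value><comparison string>", e.g. "1500>=1000"
        for key in ["play_cnt", "like_cnt", "video_width", "video_height", "duration", "publish_time"]:
            if not eval(str(video_dict[key]) + str(rule_dict[key])):
                return False
        return True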

    # Fetch an author's video feed and download the videos that satisfy the rules
    @classmethod
    def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine, pcursor=""):
        download_cnt_1, download_cnt_2 = 0, 0
        rule_dict_1 = cls.get_rule(log_type, crawler, 1)
        rule_dict_2 = cls.get_rule(log_type, crawler, 2)
        if rule_dict_1 is None or rule_dict_2 is None:
            Common.logger(log_type, crawler).warning(f"rule_dict is None")
            return
        url = "https://www.kuaishou.com/graphql"
        payload = json.dumps({
            "operationName": "visionProfilePhotoList",
            "variables": {
                "userId": out_uid,
                "pcursor": "",
                "page": "profile"
            },
            "query": "fragment photoContent on PhotoEntity {\n id\n duration\n caption\n originCaption\n likeCount\n viewCount\n commentCount\n realLikeCount\n coverUrl\n photoUrl\n photoH265Url\n manifest\n manifestH265\n videoResource\n coverUrls {\n url\n __typename\n }\n timestamp\n expTag\n animatedCoverUrl\n distance\n videoRatio\n liked\n stereoType\n profileUserTopPhoto\n musicBlocked\n __typename\n}\n\nfragment feedContent on Feed {\n type\n author {\n id\n name\n headerUrl\n following\n headerUrls {\n url\n __typename\n }\n __typename\n }\n photo {\n ...photoContent\n __typename\n }\n canAddComment\n llsid\n status\n currentPcursor\n tags {\n type\n name\n __typename\n }\n __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n result\n llsid\n webPageArea\n feeds {\n ...feedContent\n __typename\n }\n hostName\n pcursor\n __typename\n }\n}\n"
        })
        headers = {
            'Accept': '*/*',
            'Content-Type': 'application/json',
            'Origin': 'https://www.kuaishou.com',
            'Cookie': f'kpf=PC_WEB; clientid=3; did={cls.get_did(log_type, crawler)}; kpn=KUAISHOU_VISION',
            'Content-Length': '1260',
            'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
            'Host': 'www.kuaishou.com',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6.1 Safari/605.1.15',
            'Referer': 'https://www.kuaishou.com/profile/{}'.format(out_uid),
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive'
        }
        response = requests.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(),
                                 verify=False, timeout=10)
        try:
            feeds = response.json()['data']['visionProfilePhotoList']['feeds']
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_videoList:{e},response:{response.text}")
            return
        if not feeds:
            Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
            return
        pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
        # Common.logger(log_type, crawler).info(f"feeds0: {feeds}\n")
        for i in range(len(feeds)):
            try:
                # video_title
                if 'caption' not in feeds[i]['photo']:
                    video_title = random_title(log_type, crawler, env, text='title')
                elif feeds[i]['photo']['caption'].strip() == "":
                    video_title = random_title(log_type, crawler, env, text='title')
                else:
                    video_title = cls.video_title(log_type, crawler, env, feeds[i]['photo']['caption'])
                if 'videoResource' not in feeds[i]['photo'] \
                        and 'manifest' not in feeds[i]['photo'] \
                        and 'manifestH265' not in feeds[i]['photo']:
                    Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
                    break
                videoResource = feeds[i]['photo']['videoResource']
                if 'h264' not in videoResource and 'hevc' not in videoResource:
                    Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
                    break
                # video_id
                if 'h264' in videoResource and 'videoId' in videoResource['h264']:
                    video_id = videoResource['h264']['videoId']
                elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
                    video_id = videoResource['hevc']['videoId']
                else:
                    video_id = ""
                # play_cnt
                if 'viewCount' not in feeds[i]['photo']:
                    play_cnt = 0
                else:
                    play_cnt = int(feeds[i]['photo']['viewCount'])
                # like_cnt
                if 'realLikeCount' not in feeds[i]['photo']:
                    like_cnt = 0
                else:
                    like_cnt = feeds[i]['photo']['realLikeCount']
                # publish_time
                if 'timestamp' not in feeds[i]['photo']:
                    publish_time_stamp = 0
                    publish_time_str = ''
                    publish_time = 0
                else:
                    publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                    publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))
                # duration
                if 'duration' not in feeds[i]['photo']:
                    duration = 0
                else:
                    duration = int(int(feeds[i]['photo']['duration']) / 1000)
                # video_width / video_height / video_url
                mapping = {}
                for item in ['width', 'height']:
                    try:
                        val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
                    except Exception:
                        val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
                    mapping[item] = val
                video_width = int(mapping['width']) if mapping['width'] else 0
                video_height = int(mapping['height']) if mapping['height'] else 0
                # cover_url
                if 'coverUrl' not in feeds[i]['photo']:
                    cover_url = ""
                else:
                    cover_url = feeds[i]['photo']['coverUrl']
                # user_name / avatar_url
                user_name = feeds[i]['author']['name']
                avatar_url = feeds[i]['author']['headerUrl']
                video_url = feeds[i]['photo']['photoUrl']
                video_dict = {'video_title': video_title,
                              'video_id': video_id,
                              'play_cnt': play_cnt,
                              'comment_cnt': 0,
                              'like_cnt': like_cnt,
                              'share_cnt': 0,
                              'video_width': video_width,
                              'video_height': video_height,
                              'duration': duration,
                              'publish_time': publish_time,
                              'publish_time_stamp': publish_time_stamp,
                              'publish_time_str': publish_time_str,
                              'user_name': user_name,
                              'user_id': out_uid,
                              'avatar_url': avatar_url,
                              'cover_url': cover_url,
                              'video_url': video_url,
                              'session': f"kuaishou{int(time.time())}"}
                rule_1 = cls.download_rule(video_dict, rule_dict_1)
                Common.logger(log_type, crawler).info(f"video_title:{video_title}")
                Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
                Common.logger(log_type, crawler).info(
                    f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
                Common.logger(log_type, crawler).info(
                    f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
                Common.logger(log_type, crawler).info(
                    f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
                Common.logger(log_type, crawler).info(
                    f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
                Common.logger(log_type, crawler).info(
                    f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
                Common.logger(log_type, crawler).info(
                    f"publish_time:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
                Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")
                rule_2 = cls.download_rule(video_dict, rule_dict_2)
                Common.logger(log_type, crawler).info(
                    f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
                Common.logger(log_type, crawler).info(
                    f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
                Common.logger(log_type, crawler).info(
                    f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
                Common.logger(log_type, crawler).info(
                    f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
                Common.logger(log_type, crawler).info(
                    f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
                Common.logger(log_type, crawler).info(
                    f"publish_time:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
                Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
                if video_title == "" or video_url == "":
                    Common.logger(log_type, crawler).info("无效视频\n")
                    continue
                elif rule_1 is True:
                    # download_cnt values look like "<=30"; strip the comparison characters to get the limit
                    if download_cnt_1 < int(rule_dict_1['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
                        cls.download_publish(log_type=log_type,
                                             crawler=crawler,
                                             strategy=strategy,
                                             video_dict=video_dict,
                                             rule_dict=rule_dict_1,
                                             our_uid=our_uid,
                                             oss_endpoint=oss_endpoint,
                                             env=env,
                                             machine=machine)
                        # if download_finished is True:
                        #     download_cnt_1 += 1
                elif rule_2 is True:
                    if download_cnt_2 < int(rule_dict_2['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
                        cls.download_publish(log_type=log_type,
                                             crawler=crawler,
                                             strategy=strategy,
                                             video_dict=video_dict,
                                             rule_dict=rule_dict_2,
                                             our_uid=our_uid,
                                             oss_endpoint=oss_endpoint,
                                             env=env,
                                             machine=machine)
                        # if download_finished is True:
                        #     download_cnt_2 += 1
                else:
                    Common.logger(log_type, crawler).info("不满足下载规则\n")
                # Common.logger(log_type, crawler).info(f"feeds: {feeds}\n")
            except Exception as e:
                Common.logger(log_type, crawler).warning(f"抓取单条视频异常:{e}\n")
        # if pcursor == "no_more":
        #     Common.logger(log_type, crawler).info(f"作者,{out_uid},已经到底了,没有更多内容了\n")
        #     return
        # cls.get_videoList(log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine,
        #                   pcursor=pcursor)
        # time.sleep(random.randint(1, 3))

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, video_title, publish_time, env, machine):
        # Match either the same out_video_id, or the same title + publish time, on this platform
        sql = f""" select * from crawler_video where (platform="{cls.platform}" and out_video_id="{video_id}") or (platform="{cls.platform}" and video_title="{video_title}" and publish_time="{publish_time}") """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
        return len(repeat_video)

    @classmethod
    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
        filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
        for filter_word in filter_words:
            if filter_word in video_dict['video_title']:
                Common.logger(log_type, crawler).info('标题已中过滤词:{}\n', video_dict['video_title'])
                return
        download_finished = False
        if cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'],
                            video_dict['publish_time_str'], env, machine) != 0:
            Common.logger(log_type, crawler).info('视频已下载\n')
        else:
            # Download the video
            Common.download_method(log_type=log_type, crawler=crawler, text='video',
                                   title=video_dict['video_title'], url=video_dict['video_url'])
            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
            try:
                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                    # Remove the video folder
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                    return
            except FileNotFoundError:
                # Remove the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).info("未发现视频文件,删除成功\n")
                return
            # Download the cover image
            Common.download_method(log_type=log_type, crawler=crawler, text='cover',
                                   title=video_dict['video_title'], url=video_dict['cover_url'])
            # Save the video info to a txt file
            Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
            # Upload the video
            Common.logger(log_type, crawler).info("开始上传视频...")
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy=strategy,
                                                      our_uid=our_uid,
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)
            if env == 'dev':
                our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
            else:
                our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
            Common.logger(log_type, crawler).info("视频上传完成")
            if our_video_id is None:
                try:
                    Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id} 删除该视频文件夹")
                    # Remove the video folder
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    return download_finished
                except FileNotFoundError:
                    return download_finished
            # Save the video info to the database
            insert_sql = f""" insert into crawler_video(video_id,
                                                        user_id,
                                                        out_user_id,
                                                        platform,
                                                        strategy,
                                                        out_video_id,
                                                        video_title,
                                                        cover_url,
                                                        video_url,
                                                        duration,
                                                        publish_time,
                                                        play_cnt,
                                                        crawler_rule,
                                                        width,
                                                        height)
                                                        values({our_video_id},
                                                        {our_uid},
                                                        "{video_dict['user_id']}",
                                                        "{cls.platform}",
                                                        "定向爬虫策略",
                                                        "{video_dict['video_id']}",
                                                        "{video_dict['video_title']}",
                                                        "{video_dict['cover_url']}",
                                                        "{video_dict['video_url']}",
                                                        {int(video_dict['duration'])},
                                                        "{video_dict['publish_time_str']}",
                                                        {int(video_dict['play_cnt'])},
                                                        '{json.dumps(rule_dict)}',
                                                        {int(video_dict['video_width'])},
                                                        {int(video_dict['video_height'])}) """
            Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
            MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
            Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
            # Write the video record to the Feishu sheet
            Feishu.insert_columns(log_type, 'kuaishou', "fYdA8F", "ROWS", 1, 2)
            upload_time = int(time.time())
            values = [[our_video_id,
                       time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                       "定向榜",
                       str(video_dict['video_id']),
                       video_dict['video_title'],
                       our_video_link,
                       video_dict['play_cnt'],
                       video_dict['comment_cnt'],
                       video_dict['like_cnt'],
                       video_dict['share_cnt'],
                       video_dict['duration'],
                       f"{video_dict['video_width']}*{video_dict['video_height']}",
                       video_dict['publish_time_str'],
                       video_dict['user_name'],
                       video_dict['user_id'],
                       video_dict['avatar_url'],
                       video_dict['cover_url'],
                       video_dict['video_url']]]
            time.sleep(1)
            Feishu.update_values(log_type, 'kuaishou', "fYdA8F", "E2:Z2", values)
            Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
            download_finished = True
        return download_finished

    @classmethod
    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
        user_list = get_user_from_mysql(log_type, crawler, crawler, env)
        for user in user_list:
            try:
                spider_link = user["link"]
                out_uid = spider_link.split('/')[-1]
                user_name = user["nick_name"]
                our_uid = user["uid"]
                Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
                cls.get_videoList(log_type=log_type,
                                  crawler=crawler,
                                  strategy=strategy,
                                  our_uid=our_uid,
                                  out_uid=out_uid,
                                  oss_endpoint=oss_endpoint,
                                  env=env,
                                  machine=machine)
            except Exception as e:
                Common.logger(log_type, crawler).warning(f"抓取用户{user}时异常:{e}\n")


if __name__ == "__main__":
    print(KuaishouauthorScheduling.get_did("follow", "kuaishou"))
    pass
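    # A hypothetical end-to-end invocation (argument values are assumptions, not taken from
    # this file); uncomment and adjust to run the full author crawl instead of the get_did() check:
    # KuaishouauthorScheduling.get_follow_videos(log_type="follow", crawler="kuaishou",
    #                                            strategy="定向爬虫策略", oss_endpoint="out",
    #                                            env="dev", machine="local")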