kuaishou_follow_scheduling.py

# -*- coding: utf-8 -*-
# @Author: lierqiang
# @Time: 2023/2/24
import json
import os
import random
import shutil
import sys
import time
from hashlib import md5

import requests
import urllib3
from requests.adapters import HTTPAdapter

sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.getuser import getUser
# from common.db import MysqlHelper
from common.scheduling_db import MysqlHelper
from common.publish import Publish
from common.public import random_title, get_config_from_mysql
from common.public import get_user_from_mysql
from common.userAgent import get_random_user_agent


class KuaiShouFollowScheduling:
    platform = "快手"
    tag = "快手爬虫,定向爬虫策略"

    @classmethod
    def get_rule(cls, log_type, crawler, index):
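        # Reads download-rule rows from Feishu sheet "3iqG4z": index 1 uses
        # sheet rows 2-8, index 2 uses rows 10-16. Each rule value concatenates
        # the comparator cell with the threshold cell of the same row.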
        try:
            rule_sheet = Feishu.get_values_batch(log_type, crawler, "3iqG4z")
            if index == 1:
                rule_dict = {
                    "play_cnt": f"{rule_sheet[1][1]}{rule_sheet[1][2]}",
                    "video_width": f"{rule_sheet[2][1]}{rule_sheet[2][2]}",
                    "video_height": f"{rule_sheet[3][1]}{rule_sheet[3][2]}",
                    "like_cnt": f"{rule_sheet[4][1]}{rule_sheet[4][2]}",
                    "duration": f"{rule_sheet[5][1]}{rule_sheet[5][2]}",
                    "download_cnt": f"{rule_sheet[6][1]}{rule_sheet[6][2]}",
                    "publish_time": f"{rule_sheet[7][1]}{rule_sheet[7][2]}",
                }
                # for k, v in rule_dict.items():
                #     Common.logger(log_type, crawler).info(f"{k}:{v}")
                return rule_dict
            elif index == 2:
                rule_dict = {
                    "play_cnt": f"{rule_sheet[9][1]}{rule_sheet[9][2]}",
                    "video_width": f"{rule_sheet[10][1]}{rule_sheet[10][2]}",
                    "video_height": f"{rule_sheet[11][1]}{rule_sheet[11][2]}",
                    "like_cnt": f"{rule_sheet[12][1]}{rule_sheet[12][2]}",
                    "duration": f"{rule_sheet[13][1]}{rule_sheet[13][2]}",
                    "download_cnt": f"{rule_sheet[14][1]}{rule_sheet[14][2]}",
                    "publish_time": f"{rule_sheet[15][1]}{rule_sheet[15][2]}",
                }
                # for k, v in rule_dict.items():
                #     Common.logger(log_type, crawler).info(f"{k}:{v}")
                return rule_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")

    @classmethod
    def download_rule(cls, video_dict, rule_dict):
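        # Expects rule_dict entries shaped like {'min': <int>, ...} (the
        # scheduling task's rule_dict), not the comparator strings built by
        # get_rule above. Note the key mismatch: the rules use 'width'/'height'
        # while video_dict uses 'video_width'/'video_height'.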
        if video_dict['like_cnt'] >= rule_dict['like_cnt']['min'] \
                and video_dict['publish_time'] >= rule_dict['publish_time']['min'] \
                and video_dict['duration'] >= rule_dict['duration']['min'] \
                and (video_dict['video_width'] >= rule_dict['width']['min']
                     or video_dict['video_height'] >= rule_dict['height']['min']):
            return True
        return False

    # Filter-word lexicon
    @classmethod
    def filter_words(cls, log_type, crawler):
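        # Flattens Feishu sheet 'HIKVvs' into a single list of filter words,
        # skipping empty cells; retries every 10s until the sheet loads.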
        try:
            while True:
                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, 'HIKVvs')
                if filter_words_sheet is None:
                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
                    time.sleep(10)  # the log promises a 10s retry; sleep to avoid a busy loop
                    continue
                filter_words_list = []
                for x in filter_words_sheet:
                    for y in x:
                        if y is not None:
                            filter_words_list.append(y)
                return filter_words_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')

    # Fetch off-site (Kuaishou) user info
    @classmethod
    def get_out_user_info(cls, log_type, crawler, out_uid):
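        # Queries Kuaishou's web GraphQL endpoint (visionProfile) via the
        # tunnel proxy and returns {'out_fans', 'out_follow', 'out_avatar_url'},
        # normalising counts such as "1.2万" into plain integers.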
        try:
            url = "https://www.kuaishou.com/graphql"
            payload = json.dumps({
                "operationName": "visionProfile",
                "variables": {
                    "userId": out_uid
                },
                "query": "query visionProfile($userId: String) {\n visionProfile(userId: $userId) {\n result\n hostName\n userProfile {\n ownerCount {\n fan\n photo\n follow\n photo_public\n __typename\n }\n profile {\n gender\n user_name\n user_id\n headurl\n user_text\n user_profile_bg_url\n __typename\n }\n isFollowing\n __typename\n }\n __typename\n }\n}\n"
            })
            headers = {
                'Accept': '*/*',
                'Content-Type': 'application/json',
                'Origin': 'https://www.kuaishou.com',
                'Cookie': 'did=web_b219a1e87b0d4fe35eba87d2087bebb9; clientid=3; kpf=PC_WEB; kpn=KUAISHOU_VISION',
                'Content-Length': '552',
                'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
                'Host': 'www.kuaishou.com',
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6.1 Safari/605.1.15',
                'Referer': 'https://www.kuaishou.com/profile/{}'.format(out_uid),
                'Accept-Encoding': 'gzip, deflate, br',
                'Connection': 'keep-alive'
            }
            urllib3.disable_warnings()
            s = requests.session()
            # max_retries=3: retry each request up to 3 times
            s.mount('http://', HTTPAdapter(max_retries=3))
            s.mount('https://', HTTPAdapter(max_retries=3))
            response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False,
                              timeout=5)
            response.close()
            # Common.logger(log_type, crawler).info(f"get_out_user_info_response:{response.text}")
            if response.status_code != 200:
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.text}\n")
                return
            elif 'data' not in response.json():
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()}\n")
                return
            elif 'visionProfile' not in response.json()['data']:
                Common.logger(log_type, crawler).warning(f"get_out_user_info_response:{response.json()['data']}\n")
                return
            elif 'userProfile' not in response.json()['data']['visionProfile']:
                Common.logger(log_type, crawler).warning(
                    f"get_out_user_info_response:{response.json()['data']['visionProfile']}\n")
                return
            else:
                userProfile = response.json()['data']['visionProfile']['userProfile']
                # Common.logger(log_type, crawler).info(f"userProfile:{userProfile}")
                try:
                    out_fans_str = str(userProfile['ownerCount']['fan'])
                except Exception:
                    out_fans_str = "0"
                try:
                    out_follow_str = str(userProfile['ownerCount']['follow'])
                except Exception:
                    out_follow_str = "0"
                try:
                    out_avatar_url = userProfile['profile']['headurl']
                except Exception:
                    out_avatar_url = ""
                Common.logger(log_type, crawler).info(f"out_fans_str:{out_fans_str}")
                Common.logger(log_type, crawler).info(f"out_follow_str:{out_follow_str}")
                Common.logger(log_type, crawler).info(f"out_avatar_url:{out_avatar_url}")
                if "万" in out_fans_str:
                    out_fans = int(float(out_fans_str.split("万")[0]) * 10000)
                else:
                    out_fans = int(out_fans_str.replace(",", ""))
                if "万" in out_follow_str:
                    out_follow = int(float(out_follow_str.split("万")[0]) * 10000)
                else:
                    out_follow = int(out_follow_str.replace(",", ""))
                out_user_dict = {
                    "out_fans": out_fans,
                    "out_follow": out_follow,
                    "out_avatar_url": out_avatar_url
                }
                Common.logger(log_type, crawler).info(f"out_user_dict:{out_user_dict}")
                return out_user_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_out_user_info:{e}\n")

    # Fetch the user-info list
    @classmethod
    def get_user_list(cls, log_type, crawler, sheetid, env):
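        # Sheet columns (0-based): 2 = out_uid, 3 = user_name, 6 = our_uid,
        # 7 = our_user_link. Rows missing an on-site uid get one created via
        # getUser.create_user, and columns G:H are written back to Feishu.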
        try:
            while True:
                user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
                if user_sheet is None:
                    Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet} 10秒钟后重试")
                    time.sleep(10)  # match the "retry in 10s" log; avoid a busy loop
                    continue
                our_user_list = []
                for i in range(1, len(user_sheet)):
                    # for i in range(1, 2):
                    out_uid = user_sheet[i][2]
                    user_name = user_sheet[i][3]
                    our_uid = user_sheet[i][6]
                    our_user_link = user_sheet[i][7]
                    if out_uid is None or user_name is None:
                        Common.logger(log_type, crawler).info("空行\n")
                    else:
                        Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息\n")
                        if our_uid is None:
                            out_user_info = cls.get_out_user_info(log_type, crawler, out_uid)
                            out_user_dict = {
                                "out_uid": out_uid,
                                "user_name": user_name,
                                "out_avatar_url": out_user_info["out_avatar_url"],
                                "out_create_time": '',
                                "out_tag": '',
                                "out_play_cnt": 0,
                                "out_fans": out_user_info["out_fans"],
                                "out_follow": out_user_info["out_follow"],
                                "out_friend": 0,
                                "out_like": 0,
                                "platform": cls.platform,
                                "tag": cls.tag,
                            }
                            our_user_dict = getUser.create_user(log_type=log_type, crawler=crawler,
                                                                out_user_dict=out_user_dict, env=env)
                            our_uid = our_user_dict['our_uid']
                            our_user_link = our_user_dict['our_user_link']
                            Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
                                                 [[our_uid, our_user_link]])
                            Common.logger(log_type, crawler).info(f'站内用户信息写入飞书成功!\n')
                            our_user_list.append(our_user_dict)
                        else:
                            our_user_dict = {
                                'out_uid': out_uid,
                                'user_name': user_name,
                                'our_uid': our_uid,
                                'our_user_link': our_user_link,
                            }
                            our_user_list.append(our_user_dict)
                return our_user_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'get_user_list:{e}\n')

    # Clean up the video title
    @classmethod
    def video_title(cls, log_type, crawler, env, title):
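        # Drops hashtag/@-mention tails, strips forbidden characters, caps the
        # title at 40 chars, and falls back to random_title when nothing
        # usable is left.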
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]

        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]

        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]

        video_title = title3.strip().replace("\n", "") \
            .replace("/", "").replace("快手", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace("#", "").replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return random_title(log_type, crawler, env, text='title')
        else:
            return video_title

    @classmethod
    def get_videoList(cls, log_type, crawler, strategy, task, our_uid, out_uid, oss_endpoint, env, pcursor=""):
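        # Fetches one page of the author's profile feed via the
        # visionProfilePhotoList GraphQL query, builds a video_dict per feed
        # item, and hands qualifying videos to download_publish. Pagination is
        # effectively disabled: the request always sends pcursor="" and the
        # recursive follow-up call is commented out at the bottom.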
        rule_dict_1 = task['rule_dict']
        url = "https://www.kuaishou.com/graphql"
        payload = json.dumps({
            "operationName": "visionProfilePhotoList",
            "variables": {
                "userId": out_uid,
                "pcursor": "",
                "page": "profile"
            },
            "query": "fragment photoContent on PhotoEntity {\n id\n duration\n caption\n originCaption\n likeCount\n viewCount\n commentCount\n realLikeCount\n coverUrl\n photoUrl\n photoH265Url\n manifest\n manifestH265\n videoResource\n coverUrls {\n url\n __typename\n }\n timestamp\n expTag\n animatedCoverUrl\n distance\n videoRatio\n liked\n stereoType\n profileUserTopPhoto\n musicBlocked\n __typename\n}\n\nfragment feedContent on Feed {\n type\n author {\n id\n name\n headerUrl\n following\n headerUrls {\n url\n __typename\n }\n __typename\n }\n photo {\n ...photoContent\n __typename\n }\n canAddComment\n llsid\n status\n currentPcursor\n tags {\n type\n name\n __typename\n }\n __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n result\n llsid\n webPageArea\n feeds {\n ...feedContent\n __typename\n }\n hostName\n pcursor\n __typename\n }\n}\n"
        })
        headers = {
            'Accept': '*/*',
            'Content-Type': 'application/json',
            'Origin': 'https://www.kuaishou.com',
            'Cookie': 'kpf=PC_WEB; clientid=3; did=web_b219a1e87b0d4fe35eba87d2087bebb9; kpn=KUAISHOU_VISION',
            'Content-Length': '1260',
            'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
            'Host': 'www.kuaishou.com',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6.1 Safari/605.1.15',
            'Referer': 'https://www.kuaishou.com/profile/{}'.format(out_uid),
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive'
        }
        response = None
        try:
            response = requests.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(),
                                     verify=False, timeout=10)
            feeds = response.json()['data']['visionProfilePhotoList']['feeds']
        except Exception as e:
            # response stays None if the request itself failed
            response_text = response.text if response is not None else ""
            Common.logger(log_type, crawler).error(f"get_videoList:{e},response:{response_text}")
            return
        if not feeds:
            Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
            return
        pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
        for i in range(len(feeds)):
            # video_title
            if 'caption' not in feeds[i]['photo']:
                video_title = random_title(log_type, crawler, env, text='title')
            elif feeds[i]['photo']['caption'].strip() == "":
                video_title = random_title(log_type, crawler, env, text='title')
            else:
                video_title = cls.video_title(log_type, crawler, env, feeds[i]['photo']['caption'])

            if 'videoResource' not in feeds[i]['photo'] \
                    and 'manifest' not in feeds[i]['photo'] \
                    and 'manifestH265' not in feeds[i]['photo']:
                Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
                break
            videoResource = feeds[i]['photo'].get('videoResource', {})  # may be absent even when a manifest exists
            if 'h264' not in videoResource and 'hevc' not in videoResource:
                Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
                break

            # video_id
            if 'h264' in videoResource and 'videoId' in videoResource['h264']:
                video_id = videoResource['h264']['videoId']
            elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
                video_id = videoResource['hevc']['videoId']
            else:
                video_id = ""

            # play_cnt
            if 'viewCount' not in feeds[i]['photo']:
                play_cnt = 0
            else:
                play_cnt = int(feeds[i]['photo']['viewCount'])

            # like_cnt
            if 'realLikeCount' not in feeds[i]['photo']:
                like_cnt = 0
            else:
                like_cnt = feeds[i]['photo']['realLikeCount']

            # publish_time
            if 'timestamp' not in feeds[i]['photo']:
                publish_time_stamp = 0
                publish_time_str = ''
                publish_time = 0
            else:
                publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))

            # duration
            if 'duration' not in feeds[i]['photo']:
                duration = 0
            else:
                duration = int(int(feeds[i]['photo']['duration']) / 1000)

            # video_width / video_height
            mapping = {}
            for item in ['width', 'height']:
                try:
                    val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
                except Exception:
                    val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
                mapping[item] = val
            video_width = int(mapping['width']) if mapping['width'] else 0
            video_height = int(mapping['height']) if mapping['height'] else 0

            # cover_url
            if 'coverUrl' not in feeds[i]['photo']:
                cover_url = ""
            else:
                cover_url = feeds[i]['photo']['coverUrl']

            # user_name / avatar_url / video_url
            user_name = feeds[i]['author']['name']
            avatar_url = feeds[i]['author']['headerUrl']
            video_url = feeds[i]['photo']['photoUrl']

            video_dict = {'video_title': video_title,
                          'video_id': video_id,
                          'play_cnt': play_cnt,
                          'comment_cnt': 0,
                          'like_cnt': like_cnt,
                          'share_cnt': 0,
                          'video_width': video_width,
                          'video_height': video_height,
                          'duration': duration,
                          'publish_time': publish_time,
                          'publish_time_stamp': publish_time_stamp,
                          'publish_time_str': publish_time_str,
                          'user_name': user_name,
                          'user_id': out_uid,
                          'avatar_url': avatar_url,
                          'cover_url': cover_url,
                          'video_url': video_url,
                          'session': f"kuaishou{int(time.time())}"}
            for k, v in video_dict.items():
                Common.logger(log_type, crawler).info(f"{k}:{v}")
            rule_1 = cls.download_rule(video_dict, rule_dict_1)
            if rule_1 is True:
                cls.download_publish(log_type=log_type,
                                     crawler=crawler,
                                     strategy=strategy,
                                     video_dict=video_dict,
                                     rule_dict=rule_dict_1,
                                     our_uid=our_uid,
                                     oss_endpoint=oss_endpoint,
                                     env=env,
                                     )
            else:
                Common.logger(log_type, crawler).info("不满足下载规则\n")
        # if pcursor == "no_more":
        #     Common.logger(log_type, crawler).info(f"作者,{out_uid},已经到底了,没有更多内容了\n")
        #     return
        # cls.get_videoList(log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env,
        #                   pcursor=pcursor)
        # time.sleep(random.randint(1, 3))

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, video_title, publish_time, env):
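        # Dedupe check: a video counts as already crawled when its
        # out_video_id matches, or when the same platform/title/publish_time
        # triple already exists in crawler_video.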
        sql = f""" select * from crawler_video where (platform="{cls.platform}" and out_video_id="{video_id}") or (platform="{cls.platform}" and video_title="{video_title}" and publish_time="{publish_time}") """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    @classmethod
    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env):
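        # Pipeline: filter-word check -> dedupe -> download video + cover ->
        # save metadata txt -> upload via Publish -> insert into crawler_video
        # -> append a result row to Feishu sheet "fYdA8F".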
        try:
            filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
            for filter_word in filter_words:
                if filter_word in video_dict['video_title']:
                    Common.logger(log_type, crawler).info(f"标题已中过滤词:{video_dict['video_title']}\n")
                    return
            if cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'],
                                video_dict['publish_time_str'], env) != 0:
                Common.logger(log_type, crawler).info('视频已下载\n')
            else:
                # Download the video
                Common.download_method(log_type=log_type, crawler=crawler, text='video',
                                       title=video_dict['video_title'], url=video_dict['video_url'])
                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                    # Delete the video folder
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                    return
                # ffmpeg_dict = Common.ffmpeg(log_type, crawler,
                #                             f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
                # if ffmpeg_dict is None or ffmpeg_dict['size'] == 0:
                #     Common.logger(log_type, crawler).warning(f"下载的视频无效,已删除\n")
                #     # Delete the video folder
                #     shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
                #     return download_finished
                # Download the cover image
                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
                                       title=video_dict['video_title'], url=video_dict['cover_url'])
                # Save the video info to a txt file
                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
                # Upload the video
                Common.logger(log_type, crawler).info("开始上传视频...")
                our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                          crawler=crawler,
                                                          strategy=strategy,
                                                          our_uid=our_uid,
                                                          env=env,
                                                          oss_endpoint=oss_endpoint)
                if env == 'dev':
                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                else:
                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                Common.logger(log_type, crawler).info("视频上传完成")
                if our_video_id is None:
                    Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id} 删除该视频文件夹")
                    # Delete the video folder
                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
                    return
                # Save the video info to the database
                insert_sql = f""" insert into crawler_video(video_id,
                                                            user_id,
                                                            out_user_id,
                                                            platform,
                                                            strategy,
                                                            out_video_id,
                                                            video_title,
                                                            cover_url,
                                                            video_url,
                                                            duration,
                                                            publish_time,
                                                            play_cnt,
                                                            crawler_rule,
                                                            width,
                                                            height)
                                                            values({our_video_id},
                                                            {our_uid},
                                                            "{video_dict['user_id']}",
                                                            "{cls.platform}",
                                                            "定向爬虫策略",
                                                            "{video_dict['video_id']}",
                                                            "{video_dict['video_title']}",
                                                            "{video_dict['cover_url']}",
                                                            "{video_dict['video_url']}",
                                                            {int(video_dict['duration'])},
                                                            "{video_dict['publish_time_str']}",
                                                            {int(video_dict['play_cnt'])},
                                                            '{json.dumps(rule_dict)}',
                                                            {int(video_dict['video_width'])},
                                                            {int(video_dict['video_height'])}) """
                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
                MysqlHelper.update_values(log_type, crawler, insert_sql, env)
                Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
                # Write the video row to Feishu
                Feishu.insert_columns(log_type, 'kuaishou', "fYdA8F", "ROWS", 1, 2)
                upload_time = int(time.time())
                values = [[our_video_id,
                           time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                           "定向榜",
                           str(video_dict['video_id']),
                           video_dict['video_title'],
                           our_video_link,
                           video_dict['play_cnt'],
                           video_dict['comment_cnt'],
                           video_dict['like_cnt'],
                           video_dict['share_cnt'],
                           video_dict['duration'],
                           f"{video_dict['video_width']}*{video_dict['video_height']}",
                           video_dict['publish_time_str'],
                           video_dict['user_name'],
                           video_dict['user_id'],
                           video_dict['avatar_url'],
                           video_dict['cover_url'],
                           video_dict['video_url']]]
                time.sleep(1)
                Feishu.update_values(log_type, 'kuaishou', "fYdA8F", "E2:Z2", values)
                Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
                return
        except Exception as e:
            Common.logger(log_type, crawler).error(f"download_publish:{e}\n")

    @classmethod
    def get_follow_videos(cls, log_type, crawler, task, oss_endpoint, env):
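        # Entry point: iterates the users stored in MySQL and crawls each
        # profile; a failure on one user is logged and skipped so the rest
        # still run.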
        user_list = get_user_from_mysql(log_type, crawler, crawler, env)
        strategy = '定向抓取策略'
        for user in user_list:
            try:
                spider_link = user["link"]
                out_uid = spider_link.split('/')[-1]
                user_name = user["nick_name"]
                our_uid = user["uid"]
                Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
                cls.get_videoList(log_type=log_type,
                                  crawler=crawler,
                                  strategy=strategy,
                                  task=task,
                                  our_uid=our_uid,
                                  out_uid=out_uid,
                                  oss_endpoint=oss_endpoint,
                                  env=env)
            except Exception as e:
                Common.logger(log_type, crawler).error(f"get_follow_videos:{e}\n")
                continue


if __name__ == "__main__":
    KuaiShouFollowScheduling.get_follow_videos(
        log_type="follow",
        crawler="kuaishou",
        task="",
        oss_endpoint="out",
        env="dev",
    )
    # print(KuaiShouFollow.get_out_user_info("follow", "kuaishou", "3xnk3wbm3vfiha6"))
    # print(Follow.get_out_user_info("follow", "kuaishou", "3x5wgjhfc7tx8ue"))
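    # Note: get_videoList reads task['rule_dict'], so task="" above will fail
    # inside the per-user try/except. A hypothetical minimal task payload
    # (field names taken from download_rule) would look like:
    # task = {"rule_dict": {"like_cnt": {"min": 0},
    #                       "publish_time": {"min": 0},
    #                       "duration": {"min": 0},
    #                       "width": {"min": 0},
    #                       "height": {"min": 0}}}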