recommend_kuaishou.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/2/24
import os
import random
import shutil
import sys
import time
from hashlib import md5
import requests
import json
import urllib3
from requests.adapters import HTTPAdapter
sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.getuser import getUser
from common.db import MysqlHelper
from common.publish import Publish
from common.public import get_user_from_mysql
from common.userAgent import get_random_user_agent


class KuaiShouRecommend:
    platform = "快手"
    tag = "快手爬虫,推荐爬虫策略"
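
    # Read the download-rule thresholds (video width/height, like count, duration,
    # publish time) from the Feishu sheet "NQ6CZN".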
    @classmethod
    def get_rule(cls, log_type, crawler):
        try:
            rule_sheet = Feishu.get_values_batch(log_type, crawler, "NQ6CZN")
            rule_dict = {
                # "play_cnt": int(rule_sheet[0][2]),
                # "comment_cnt": int(rule_sheet[1][2]),
                "video_width": int(rule_sheet[0][2]),
                "video_height": int(rule_sheet[1][2]),
                "like_cnt": int(rule_sheet[2][2]),
                "duration": int(rule_sheet[3][2]),
                "publish_time": int(rule_sheet[4][2]),
            }
            return rule_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
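
    # Compare a video against the rule thresholds: each check evals the video field
    # concatenated with the corresponding rule value, so the rule value is assumed to
    # carry the comparison operator (e.g. ">=0"); every check must evaluate to True.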
    @classmethod
    def download_rule(cls, video_dict, rule_dict):
        if eval(f"{video_dict['video_width']}{rule_dict['video_width']}") is True \
                and eval(f"{video_dict['video_height']}{rule_dict['video_height']}") is True \
                and eval(f"{video_dict['like_cnt']}{rule_dict['like_cnt']}") is True \
                and eval(f"{video_dict['duration']}{rule_dict['duration']}") is True \
                and eval(f"{video_dict['publish_time']}{rule_dict['publish_time']}") is True:
            return True
        else:
            return False

    # Filter-word list: titles containing any of these words are skipped
    @classmethod
    def filter_words(cls, log_type, crawler):
        try:
            while True:
                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, 'HIKVvs')
                if filter_words_sheet is None:
                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
                    time.sleep(10)  # wait before retrying, as the warning message states
                    continue
                filter_words_list = []
                for x in filter_words_sheet:
                    for y in x:
                        if y is None:
                            pass
                        else:
                            filter_words_list.append(y)
                return filter_words_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')

    # Fallback titles: pick a random title when the original caption is unusable
    @classmethod
    def random_title(cls, log_type, crawler):
        try:
            while True:
                random_title_sheet = Feishu.get_values_batch(log_type, crawler, '0DiyXe')
                if random_title_sheet is None:
                    Common.logger(log_type, crawler).warning(f"random_title_sheet:{random_title_sheet} 10秒钟后重试")
                    time.sleep(10)  # wait before retrying, as the warning message states
                    continue
                random_title_list = []
                for x in random_title_sheet:
                    for y in x:
                        if y is None:
                            pass
                        else:
                            random_title_list.append(y)
                return random.choice(random_title_list)
        except Exception as e:
            Common.logger(log_type, crawler).error(f'random_title:{e}\n')

    # Get the user info list from the Feishu user sheet; rows without a site uid are
    # registered via getUser.create_user and written back to columns G:H
    @classmethod
    def get_user_list(cls, log_type, crawler, sheetid, env, machine):
        try:
            while True:
                user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
                if user_sheet is None:
                    Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet} 10秒钟后重试")
                    time.sleep(10)  # wait before retrying, as the warning message states
                    continue
                our_user_list = []
                for i in range(1, len(user_sheet)):
                    # for i in range(1, 2):
                    out_uid = user_sheet[i][2]
                    user_name = user_sheet[i][3]
                    our_uid = user_sheet[i][6]
                    our_user_link = user_sheet[i][7]
                    if out_uid is None or user_name is None:
                        Common.logger(log_type, crawler).info("空行\n")
                    else:
                        Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息\n")
                        if our_uid is None:
                            # get_out_user_info is expected to be provided elsewhere in this class/codebase
                            out_user_info = cls.get_out_user_info(log_type, crawler, out_uid)
                            out_user_dict = {
                                "out_uid": out_uid,
                                "user_name": user_name,
                                "out_avatar_url": out_user_info["out_avatar_url"],
                                "out_create_time": '',
                                "out_tag": '',
                                "out_play_cnt": 0,
                                "out_fans": out_user_info["out_fans"],
                                "out_follow": out_user_info["out_follow"],
                                "out_friend": 0,
                                "out_like": 0,
                                "platform": cls.platform,
                                "tag": cls.tag,
                            }
                            our_user_dict = getUser.create_user(log_type=log_type, crawler=crawler,
                                                                out_user_dict=out_user_dict, env=env, machine=machine)
                            our_uid = our_user_dict['our_uid']
                            our_user_link = our_user_dict['our_user_link']
                            Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
                                                 [[our_uid, our_user_link]])
                            Common.logger(log_type, crawler).info(f'站内用户信息写入飞书成功!\n')
                            our_user_list.append(our_user_dict)
                        else:
                            our_user_dict = {
                                'out_uid': out_uid,
                                'user_name': user_name,
                                'our_uid': our_uid,
                                'our_user_link': our_user_link,
                            }
                            our_user_list.append(our_user_dict)
                return our_user_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'get_user_list:{e}\n')

    # Clean up the video title: drop hashtag/@ fragments, strip forbidden characters,
    # truncate to 40 chars, and fall back to a random title when nothing usable is left
    @classmethod
    def video_title(cls, log_type, crawler, title):
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]

        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]

        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]

        video_title = title3.strip().replace("\n", "") \
            .replace("/", "").replace("快手", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace("#", "").replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "")[:40]
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return cls.random_title(log_type, crawler)
        else:
            return video_title
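
    # Fetch the "visionNewRecoFeed" recommendation feed from the Kuaishou GraphQL
    # endpoint (up to 100 pages), build a video_dict for each feed item, and hand
    # valid items to download_publish().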
    @classmethod
    def get_videoList(cls, log_type, crawler, strategy, our_uid, oss_endpoint, env, machine):
        # rule_dict_1 = cls.get_rule(log_type, crawler)
        # if rule_dict_1 is None:
        #     Common.logger(log_type, crawler).warning(f"rule_dict is None")
        #     return
        for i in range(100):
            url = "https://www.kuaishou.com/graphql"
            payload = json.dumps({
                "operationName": "visionNewRecoFeed",
                "variables": {
                    "dailyFirstPage": False
                },
                "query": "fragment photoContent on PhotoEntity {\n id\n duration\n caption\n originCaption\n likeCount\n viewCount\n realLikeCount\n coverUrl\n photoUrl\n photoH265Url\n manifest\n manifestH265\n videoResource\n coverUrls {\n url\n __typename\n }\n timestamp\n expTag\n animatedCoverUrl\n distance\n videoRatio\n liked\n stereoType\n profileUserTopPhoto\n musicBlocked\n __typename\n}\n\nfragment feedContent on Feed {\n type\n author {\n id\n name\n headerUrl\n following\n headerUrls {\n url\n __typename\n }\n __typename\n }\n photo {\n ...photoContent\n __typename\n }\n canAddComment\n llsid\n status\n currentPcursor\n tags {\n type\n name\n __typename\n }\n __typename\n}\n\nfragment photoResult on PhotoResult {\n result\n llsid\n expTag\n serverExpTag\n pcursor\n feeds {\n ...feedContent\n __typename\n }\n webPageArea\n __typename\n}\n\nquery visionNewRecoFeed($semKeyword: String, $semCrowd: String, $utmSource: String, $utmMedium: String, $utmCampaign: String, $dailyFirstPage: Boolean) {\n visionNewRecoFeed(semKeyword: $semKeyword, semCrowd: $semCrowd, utmSource: $utmSource, utmMedium: $utmMedium, utmCampaign: $utmCampaign, dailyFirstPage: $dailyFirstPage) {\n ...photoResult\n __typename\n }\n}\n"
            })
            headers = {
                'Accept-Language': 'zh-CN,zh;q=0.9',
                'Connection': 'keep-alive',
                'Cookie': 'kpf=PC_WEB; clientid=3; did=web_aba004b1780f4d7174d0a2ff42da1fe7; kpn=KUAISHOU_VISION;',
                'Origin': 'https://www.kuaishou.com',
                'Referer': 'https://www.kuaishou.com/new-reco',
                'Sec-Fetch-Dest': 'empty',
                'Sec-Fetch-Mode': 'cors',
                'Sec-Fetch-Site': 'same-origin',
                'User-Agent': get_random_user_agent('pc'),
                'accept': '*/*',
                'content-type': 'application/json',
                'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"macOS"'
            }
            try:
                urllib3.disable_warnings()
                s = requests.session()
                # max_retries=3: retry each request up to 3 times
                s.mount('http://', HTTPAdapter(max_retries=3))
                s.mount('https://', HTTPAdapter(max_retries=3))
                response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False,
                                  timeout=10)
                response.close()
            except Exception as e:
                Common.logger(log_type, crawler).error(f"get_videoList:{e}\n")
                return
            # Common.logger(log_type, crawler).info(f"get_videoList:{response.text}\n")
            if response.status_code != 200:
                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.text}\n")
                return
            elif 'data' not in response.json():
                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
                return
            elif 'visionNewRecoFeed' not in response.json()['data']:
                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
                return
            elif 'feeds' not in response.json()['data']['visionNewRecoFeed']:
                Common.logger(log_type, crawler).warning(
                    f"get_videoList_response:{response.json()['data']['visionNewRecoFeed']}\n")
                return
            elif len(response.json()['data']['visionNewRecoFeed']['feeds']) == 0:
                Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
                return
            else:
                feeds = response.json()['data']['visionNewRecoFeed']['feeds']
                # pcursor = response.json()['data']['visionNewRecoFeed']['pcursor']
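                # Turn each feed entry into a video_dict; fields missing from the
                # response fall back to 0 / empty values.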
                for i in range(len(feeds)):
                    if 'photo' not in feeds[i]:
                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]}\n")
                        break

                    # video_title
                    if 'caption' not in feeds[i]['photo']:
                        video_title = cls.random_title(log_type, crawler)
                    elif feeds[i]['photo']['caption'].strip() == "":
                        video_title = cls.random_title(log_type, crawler)
                    else:
                        video_title = cls.video_title(log_type, crawler, feeds[i]['photo']['caption'])

                    if 'videoResource' not in feeds[i]['photo'] \
                            and 'manifest' not in feeds[i]['photo'] \
                            and 'manifestH265' not in feeds[i]['photo']:
                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
                        break
                    videoResource = feeds[i]['photo']['videoResource']

                    if 'h264' not in videoResource and 'hevc' not in videoResource:
                        Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
                        break

                    # video_id
                    if 'h264' in videoResource and 'videoId' in videoResource['h264']:
                        video_id = videoResource['h264']['videoId']
                    elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
                        video_id = videoResource['hevc']['videoId']
                    else:
                        video_id = ""

                    # play_cnt
                    if 'viewCount' not in feeds[i]['photo']:
                        play_cnt = 0
                    else:
                        play_cnt = int(feeds[i]['photo']['viewCount'])

                    # like_cnt
                    if 'realLikeCount' not in feeds[i]['photo']:
                        like_cnt = 0
                    else:
                        like_cnt = feeds[i]['photo']['realLikeCount']

                    # publish_time
                    if 'timestamp' not in feeds[i]['photo']:
                        publish_time_stamp = 0
                        publish_time_str = ''
                        publish_time = 0
                    else:
                        publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                        publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))

                    # duration
                    if 'duration' not in feeds[i]['photo']:
                        duration = 0
                    else:
                        duration = int(int(feeds[i]['photo']['duration']) / 1000)

                    # video_width / video_height: prefer the h264 stream info, fall back to hevc, else empty
                    mapping = {}
                    for item in ['width', 'height']:
                        try:
                            val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
                        except Exception:
                            try:
                                val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
                            except Exception:
                                val = ''
                        mapping[item] = val
                    video_width = int(mapping['width']) if mapping['width'] != '' else 0
                    video_height = int(mapping['height']) if mapping['height'] != '' else 0

                    # cover_url
                    if 'coverUrl' not in feeds[i]['photo']:
                        cover_url = ""
                    else:
                        cover_url = feeds[i]['photo']['coverUrl']

                    # user_name / avatar_url
                    try:
                        user_name = feeds[i]['author']['name']
                        avatar_url = feeds[i]['author']['headerUrl']
                        user_id = feeds[i]['author']['id']
                    except Exception:
                        user_name = ''
                        avatar_url = ''
                        user_id = ''

                    video_url = feeds[i]['photo']['photoUrl']

                    video_dict = {'video_title': video_title,
                                  'video_id': video_id,
                                  'play_cnt': play_cnt,
                                  'comment_cnt': 0,
                                  'like_cnt': like_cnt,
                                  'share_cnt': 0,
                                  'video_width': video_width,
                                  'video_height': video_height,
                                  'duration': duration,
                                  'publish_time': publish_time,
                                  'publish_time_stamp': publish_time_stamp,
                                  'publish_time_str': publish_time_str,
                                  'user_name': user_name,
                                  'user_id': user_id,
                                  'avatar_url': avatar_url,
                                  'cover_url': cover_url,
                                  'video_url': video_url,
                                  'session': f"kuaishou{int(time.time())}"}

                    # rule_1 = cls.download_rule(video_dict, rule_dict_1)
                    # Common.logger(log_type, crawler).info(f"video_title:{video_title}")
                    # Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
                    #
                    # Common.logger(log_type, crawler).info(
                    #     f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
                    # Common.logger(log_type, crawler).info(
                    #     f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
                    # Common.logger(log_type, crawler).info(
                    #     f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
                    # Common.logger(log_type, crawler).info(
                    #     f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
                    # Common.logger(log_type, crawler).info(
                    #     f"publish_time:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
                    # Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")

                    if video_title == "" or video_url == "":
                        Common.logger(log_type, crawler).info("无效视频\n")
                        continue
                    cls.download_publish(log_type=log_type,
                                         crawler=crawler,
                                         strategy=strategy,
                                         video_dict=video_dict,
                                         rule_dict={},
                                         our_uid=our_uid,
                                         oss_endpoint=oss_endpoint,
                                         env=env,
                                         machine=machine)
                    # elif rule_1 is True:
                    #     cls.download_publish(log_type=log_type,
                    #                          crawler=crawler,
                    #                          strategy=strategy,
                    #                          video_dict=video_dict,
                    #                          rule_dict=rule_dict_1,
                    #                          our_uid=our_uid,
                    #                          oss_endpoint=oss_endpoint,
                    #                          env=env,
                    #                          machine=machine)
                    # else:
                    #     Common.logger(log_type, crawler).info("不满足下载规则\n")

                # if pcursor == "no_more":
                #     Common.logger(log_type, crawler).info(f"作者,{out_uid},已经到底了,没有更多内容了\n")
                #     return
                # cls.get_videoList(log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine,
                #                   pcursor=pcursor)
                # time.sleep(random.randint(1, 3))
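
    # Deduplication check: count existing crawler_video rows with the same
    # out_video_id on this platform, or the same title + publish time.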
    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, video_title, publish_time, env, machine):
        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}" or (platform="{cls.platform}" and video_title="{video_title}" and publish_time="{publish_time}") """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
        return len(repeat_video)
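
    # Download the video and cover, publish it via Publish.upload_and_publish, then
    # record the result in the crawler_video table and the Feishu sheet "Aps2BI".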
    @classmethod
    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
        try:
            download_finished = False
            if cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'],
                                video_dict['publish_time_str'], env, machine) != 0:
                Common.logger(log_type, crawler).info('视频已下载\n')
            elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type, crawler) if word):
                Common.logger(log_type, crawler).info('标题已中过滤词\n')
            else:
                # Download the video
                Common.download_method(log_type=log_type, crawler=crawler, text='video',
                                       title=video_dict['video_title'], url=video_dict['video_url'])
                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                    # Remove the video folder (named by the md5 of the title)
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                    return
                # Download the cover image
                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
                                       title=video_dict['video_title'], url=video_dict['cover_url'])
                # Save the video metadata to a local txt file
                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)

                # Upload and publish the video
                Common.logger(log_type, crawler).info("开始上传视频...")
                our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                          crawler=crawler,
                                                          strategy=strategy,
                                                          our_uid=our_uid,
                                                          env=env,
                                                          oss_endpoint=oss_endpoint)
                if env == 'dev':
                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                else:
                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                Common.logger(log_type, crawler).info("视频上传完成")

                if our_video_id is None:
                    Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id} 删除该视频文件夹")
                    # Remove the video folder (named by the md5 of the title)
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    return download_finished

                # Save the video record to MySQL
                insert_sql = f""" insert into crawler_video(video_id,
                                                            user_id,
                                                            out_user_id,
                                                            platform,
                                                            strategy,
                                                            out_video_id,
                                                            video_title,
                                                            cover_url,
                                                            video_url,
                                                            duration,
                                                            publish_time,
                                                            play_cnt,
                                                            crawler_rule,
                                                            width,
                                                            height)
                                                            values({our_video_id},
                                                            {our_uid},
                                                            "{video_dict['user_id']}",
                                                            "{cls.platform}",
                                                            "{strategy}",
                                                            "{video_dict['video_id']}",
                                                            "{video_dict['video_title']}",
                                                            "{video_dict['cover_url']}",
                                                            "{video_dict['video_url']}",
                                                            {int(video_dict['duration'])},
                                                            "{video_dict['publish_time_str']}",
                                                            {int(video_dict['play_cnt'])},
                                                            '{json.dumps(rule_dict)}',
                                                            {int(video_dict['video_width'])},
                                                            {int(video_dict['video_height'])}) """
                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
                Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')

                # Write the video record to the Feishu sheet
                Feishu.insert_columns(log_type, 'kuaishou', "Aps2BI", "ROWS", 1, 2)
                upload_time = int(time.time())
                values = [[our_video_id,
                           time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                           strategy,
                           str(video_dict['video_id']),
                           video_dict['video_title'],
                           our_video_link,
                           video_dict['play_cnt'],
                           video_dict['comment_cnt'],
                           video_dict['like_cnt'],
                           video_dict['share_cnt'],
                           video_dict['duration'],
                           f"{video_dict['video_width']}*{video_dict['video_height']}",
                           video_dict['publish_time_str'],
                           video_dict['user_name'],
                           video_dict['user_id'],
                           video_dict['avatar_url'],
                           video_dict['cover_url'],
                           video_dict['video_url']]]
                time.sleep(1)
                Feishu.update_values(log_type, 'kuaishou', "Aps2BI", "E2:Z2", values)
                Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
                download_finished = True
            return download_finished
        except Exception as e:
            Common.logger(log_type, crawler).error(f"download_publish:{e}\n")
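

# Example local run: the positional arguments map to get_videoList's parameters
# (log_type, crawler, strategy, our_uid, oss_endpoint, env, machine).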
if __name__ == "__main__":
    KuaiShouRecommend.get_videoList('recommend', 'kuaishou', '推荐抓取策略', 55440319, 'outer', 'prod', 'aliyun')