kuaishou_recommend_shceduling.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/2/24
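"""
Kuaishou recommend-feed crawler (scheduling version).

Summary of what the code below does: pulls the recommend feed from the Kuaishou
web GraphQL endpoint (https://www.kuaishou.com/graphql), filters each returned
video against the task's rule dict, downloads the ones that pass, publishes them
through Publish.upload_and_publish, and records results in MySQL and Feishu.
"""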
import os
import random
import shutil
import sys
import time
import string
from hashlib import md5
import requests
import json
import urllib3
from requests.adapters import HTTPAdapter
sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.getuser import getUser
# from common.db import MysqlHelper
from common.scheduling_db import MysqlHelper
from common.publish import Publish
from common.public import get_user_from_mysql, random_title, get_config_from_mysql
from common.userAgent import get_random_user_agent


class KuaiShouRecommendScheduling:
    platform = "快手"
    tag = "快手爬虫,推荐爬虫策略"

    @classmethod
    def get_rule(cls, log_type, crawler):
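        # Assumed layout of the "NQ6CZN" Feishu sheet (inferred from the lookups below):
        # one rule per row with the operator in column 1 and the value in column 2,
        # rows ordered as play_cnt, video_width, video_height, duration, publish_time, like_cnt.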
        try:
            rule_sheet = Feishu.get_values_batch(log_type, crawler, "NQ6CZN")
            rule_dict = {
                "play_cnt": f"{rule_sheet[0][1]}{rule_sheet[0][2]}",
                "video_width": f"{rule_sheet[1][1]}{rule_sheet[1][2]}",
                "video_height": f"{rule_sheet[2][1]}{rule_sheet[2][2]}",
                "like_cnt": f"{rule_sheet[5][1]}{rule_sheet[5][2]}",
                "duration": f"{rule_sheet[3][1]}{rule_sheet[3][2]}",
                "publish_time": f"{rule_sheet[4][1]}{rule_sheet[4][2]}",
            }
            return rule_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")

    @classmethod
    def download_rule(cls, video_dict, rule_dict):
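        # Assumed shape of rule_dict (inferred from the lookups below):
        # {'like_cnt': {'min': ...}, 'publish_time': {'min': ...}, 'duration': {'min': ...},
        #  'width': {'min': ...}, 'height': {'min': ...}}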
        if video_dict['like_cnt'] >= rule_dict['like_cnt']['min']:
            if video_dict['publish_time'] >= rule_dict['publish_time']['min']:
                if video_dict['duration'] >= rule_dict['duration']['min']:
                    if video_dict['video_width'] >= rule_dict['width']['min'] \
                            or video_dict['video_height'] >= rule_dict['height']['min']:
                        return True
                    else:
                        return False
                else:
                    return False
            else:
                return False
        else:
            return False

    # Filter-word list
    @classmethod
    def filter_words(cls, log_type, crawler):
        try:
            while True:
                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, 'HIKVvs')
                if filter_words_sheet is None:
                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
                    time.sleep(10)  # wait before retrying, as the log message promises
                    continue
                filter_words_list = []
                for x in filter_words_sheet:
                    for y in x:
                        if y is None:
                            pass
                        else:
                            filter_words_list.append(y)
                return filter_words_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')

    # Fetch the user info list
    @classmethod
    def get_user_list(cls, log_type, crawler, sheetid, env):
        try:
            while True:
                user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
                if user_sheet is None:
                    Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet} 10秒钟后重试")
                    time.sleep(10)  # wait before retrying, as the log message promises
                    continue
                our_user_list = []
                for i in range(1, len(user_sheet)):
                    # for i in range(1, 2):
                    out_uid = user_sheet[i][2]
                    user_name = user_sheet[i][3]
                    our_uid = user_sheet[i][6]
                    our_user_link = user_sheet[i][7]
                    if out_uid is None or user_name is None:
                        Common.logger(log_type, crawler).info("空行\n")
                    else:
                        Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息\n")
                        if our_uid is None:
                            out_user_info = cls.get_out_user_info(log_type, crawler, out_uid)
                            out_user_dict = {
                                "out_uid": out_uid,
                                "user_name": user_name,
                                "out_avatar_url": out_user_info["out_avatar_url"],
                                "out_create_time": '',
                                "out_tag": '',
                                "out_play_cnt": 0,
                                "out_fans": out_user_info["out_fans"],
                                "out_follow": out_user_info["out_follow"],
                                "out_friend": 0,
                                "out_like": 0,
                                "platform": cls.platform,
                                "tag": cls.tag,
                            }
                            our_user_dict = getUser.create_user(log_type=log_type, crawler=crawler,
                                                                out_user_dict=out_user_dict, env=env)
                            our_uid = our_user_dict['our_uid']
                            our_user_link = our_user_dict['our_user_link']
                            Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
                                                 [[our_uid, our_user_link]])
                            Common.logger(log_type, crawler).info(f'站内用户信息写入飞书成功!\n')
                            our_user_list.append(our_user_dict)
                        else:
                            our_user_dict = {
                                'out_uid': out_uid,
                                'user_name': user_name,
                                'our_uid': our_uid,
                                'our_user_link': our_user_link,
                            }
                            our_user_list.append(our_user_dict)
                return our_user_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'get_user_list:{e}\n')

    # Clean up the video title
    @classmethod
    def video_title(cls, log_type, crawler, env, title):
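        # Drops trailing hashtag segments and @-mentions, strips forbidden characters
        # (including the word "快手"), truncates to 40 characters, and falls back to
        # random_title when nothing usable is left.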
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]

        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]

        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]

        video_title = title3.strip().replace("\n", "") \
            .replace("/", "").replace("快手", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace("#", "").replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return random_title(log_type, crawler, env, text='title')
        else:
            return video_title

    @classmethod
    def get_videoList(cls, log_type, crawler, strategy, task, our_uid, oss_endpoint, env):
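        # Flow (summarised from the code below): POST the visionNewRecoFeed GraphQL
        # query up to 100 times, validate the response layer by layer, build a
        # video_dict for each feed item, then download/publish the items that pass
        # download_rule. rule_dict_1 comes from task['rule_dict'].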
        rule_dict_1 = task['rule_dict']
        for i in range(100):
            url = "https://www.kuaishou.com/graphql"
            payload = json.dumps({
                "operationName": "visionNewRecoFeed",
                "variables": {
                    "dailyFirstPage": False
                },
                "query": "fragment photoContent on PhotoEntity {\n id\n duration\n caption\n originCaption\n likeCount\n viewCount\n realLikeCount\n coverUrl\n photoUrl\n photoH265Url\n manifest\n manifestH265\n videoResource\n coverUrls {\n url\n __typename\n }\n timestamp\n expTag\n animatedCoverUrl\n distance\n videoRatio\n liked\n stereoType\n profileUserTopPhoto\n musicBlocked\n __typename\n}\n\nfragment feedContent on Feed {\n type\n author {\n id\n name\n headerUrl\n following\n headerUrls {\n url\n __typename\n }\n __typename\n }\n photo {\n ...photoContent\n __typename\n }\n canAddComment\n llsid\n status\n currentPcursor\n tags {\n type\n name\n __typename\n }\n __typename\n}\n\nfragment photoResult on PhotoResult {\n result\n llsid\n expTag\n serverExpTag\n pcursor\n feeds {\n ...feedContent\n __typename\n }\n webPageArea\n __typename\n}\n\nquery visionNewRecoFeed($semKeyword: String, $semCrowd: String, $utmSource: String, $utmMedium: String, $utmCampaign: String, $dailyFirstPage: Boolean) {\n visionNewRecoFeed(semKeyword: $semKeyword, semCrowd: $semCrowd, utmSource: $utmSource, utmMedium: $utmMedium, utmCampaign: $utmCampaign, dailyFirstPage: $dailyFirstPage) {\n ...photoResult\n __typename\n }\n}\n"
            })
            s = string.ascii_lowercase
            r = random.choice(s)
            headers = {
                'Accept-Language': 'zh-CN,zh;q=0.9',
                'Connection': 'keep-alive',
                'Cookie': 'kpf=PC_WEB; clientid=3; did=web_aba004b1780f4d7174d0a2ff42da1f{r}7; kpn=KUAISHOU_VISION;'.format(
                    r=r),
                'Origin': 'https://www.kuaishou.com',
                'Referer': 'https://www.kuaishou.com/new-reco',
                'Sec-Fetch-Dest': 'empty',
                'Sec-Fetch-Mode': 'cors',
                'Sec-Fetch-Site': 'same-origin',
                'User-Agent': get_random_user_agent('pc'),
                'accept': '*/*',
                'content-type': 'application/json',
                'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"macOS"'
            }
            try:
                urllib3.disable_warnings()
                s = requests.session()
                # max_retries=3: retry each request up to 3 times
                s.mount('http://', HTTPAdapter(max_retries=3))
                s.mount('https://', HTTPAdapter(max_retries=3))
                response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False,
                                  timeout=10)
                response.close()
            except Exception as e:
                Common.logger(log_type, crawler).error(f"get_videoList:{e}\n")
                continue
            # Common.logger(log_type, crawler).info(f"get_videoList:{response.text}\n")
            if response.status_code != 200:
                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.text}\n")
                continue
            elif 'data' not in response.json():
                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
                continue
            elif 'visionNewRecoFeed' not in response.json()['data']:
                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
                continue
            elif 'feeds' not in response.json()['data']['visionNewRecoFeed']:
                Common.logger(log_type, crawler).warning(
                    f"get_videoList_response:{response.json()['data']['visionNewRecoFeed']}\n")
                continue
            elif len(response.json()['data']['visionNewRecoFeed']['feeds']) == 0:
                Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
                continue
            else:
                feeds = response.json()['data']['visionNewRecoFeed']['feeds']
                for i in range(len(feeds)):
                    if 'photo' not in feeds[i]:
                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]}\n")
                        continue
                    # video_title
                    if 'caption' not in feeds[i]['photo']:
                        video_title = random_title(log_type, crawler, env, text='title')
                    elif feeds[i]['photo']['caption'].strip() == "":
                        video_title = random_title(log_type, crawler, env, text='title')
                    else:
                        video_title = cls.video_title(log_type, crawler, env, feeds[i]['photo']['caption'])
                    if 'videoResource' not in feeds[i]['photo'] \
                            and 'manifest' not in feeds[i]['photo'] \
                            and 'manifestH265' not in feeds[i]['photo']:
                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
                        continue
                    videoResource = feeds[i]['photo']['videoResource']
                    if 'h264' not in videoResource and 'hevc' not in videoResource:
                        Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
                        continue
                    # video_id
                    if 'h264' in videoResource and 'videoId' in videoResource['h264']:
                        video_id = videoResource['h264']['videoId']
                    elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
                        video_id = videoResource['hevc']['videoId']
                    else:
                        video_id = ""
                    # play_cnt
                    if 'viewCount' not in feeds[i]['photo']:
                        play_cnt = 0
                    else:
                        play_cnt = int(feeds[i]['photo']['viewCount'])
                    # like_cnt
                    if 'realLikeCount' not in feeds[i]['photo']:
                        like_cnt = 0
                    else:
                        like_cnt = feeds[i]['photo']['realLikeCount']
                    # publish_time
                    if 'timestamp' not in feeds[i]['photo']:
                        publish_time_stamp = 0
                        publish_time_str = ''
                        publish_time = 0
                    else:
                        publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                        publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))
                    # duration
                    if 'duration' not in feeds[i]['photo']:
                        duration = 0
                    else:
                        duration = int(int(feeds[i]['photo']['duration']) / 1000)
                    # video_width / video_height / video_url
                    mapping = {}
                    for item in ['width', 'height']:
                        # Prefer the h264 representation; fall back to hevc, then to ''.
                        # A second except clause cannot catch an error raised inside the
                        # first handler, so the fallback has to be a nested try.
                        try:
                            val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
                        except Exception:
                            try:
                                val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
                            except Exception:
                                val = ''
                        mapping[item] = val
                    video_width = int(mapping['width']) if mapping['width'] != '' else 0
                    video_height = int(mapping['height']) if mapping['height'] != '' else 0
                    # cover_url
                    if 'coverUrl' not in feeds[i]['photo']:
                        cover_url = ""
                    else:
                        cover_url = feeds[i]['photo']['coverUrl']
                    # user_name / avatar_url
                    try:
                        user_name = feeds[i]['author']['name']
                        avatar_url = feeds[i]['author']['headerUrl']
                        user_id = feeds[i]['author']['id']
                    except Exception:
                        user_name = ''
                        avatar_url = ''
                        user_id = ''
                    video_url = feeds[i]['photo']['photoUrl']
                    video_dict = {'video_title': video_title,
                                  'video_id': video_id,
                                  'play_cnt': play_cnt,
                                  'comment_cnt': 0,
                                  'like_cnt': like_cnt,
                                  'share_cnt': 0,
                                  'video_width': video_width,
                                  'video_height': video_height,
                                  'duration': duration,
                                  'publish_time': publish_time,
                                  'publish_time_stamp': publish_time_stamp,
                                  'publish_time_str': publish_time_str,
                                  'user_name': user_name,
                                  'user_id': user_id,
                                  'avatar_url': avatar_url,
                                  'cover_url': cover_url,
                                  'video_url': video_url,
                                  'session': f"kuaishou{int(time.time())}"}
                    rule_1 = cls.download_rule(video_dict, rule_dict_1)
                    if rule_1 is True:
                        cls.download_publish(log_type=log_type,
                                             crawler=crawler,
                                             strategy=strategy,
                                             video_dict=video_dict,
                                             rule_dict=rule_dict_1,
                                             our_uid=our_uid,
                                             oss_endpoint=oss_endpoint,
                                             env=env,
                                             )
                    else:
                        Common.logger(log_type, crawler).info("不满足下载规则\n")

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, video_title, publish_time, env):
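        # Returns the number of crawler_video rows that already match this video,
        # either by out_video_id or by (video_title, publish_time); a non-zero count
        # means the video was downloaded before.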
        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}" or (platform="{cls.platform}" and video_title="{video_title}" and publish_time="{publish_time}") """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    @classmethod
    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env):
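        # Flow (summarised from the code below): drop titles that hit a filter word,
        # skip videos already recorded in crawler_video (repeat_video), download the
        # video and cover, upload via Publish.upload_and_publish, insert a
        # crawler_video row, then append the record to the Feishu sheet.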
        try:
            filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
            for filter_word in filter_words:
                if filter_word in video_dict['video_title']:
                    Common.logger(log_type, crawler).info(f"标题已中过滤词:{video_dict['video_title']}\n")
                    return
            download_finished = False
            if cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'],
                                video_dict['publish_time_str'], env) != 0:
                Common.logger(log_type, crawler).info('视频已下载\n')
            else:
                # Download the video
                Common.download_method(log_type=log_type, crawler=crawler, text='video',
                                       title=video_dict['video_title'], url=video_dict['video_url'])
                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                    # Delete the video folder (named by the md5 of the title)
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                    return
                # Download the cover
                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
                                       title=video_dict['video_title'], url=video_dict['cover_url'])
                # Save the video info to a txt file
                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
                # Upload the video
                Common.logger(log_type, crawler).info("开始上传视频...")
                our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                          crawler=crawler,
                                                          strategy=strategy,
                                                          our_uid=our_uid,
                                                          env=env,
                                                          oss_endpoint=oss_endpoint)
                if env == 'dev':
                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                else:
                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                Common.logger(log_type, crawler).info("视频上传完成")
                if our_video_id is None:
                    Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id} 删除该视频文件夹")
                    # Delete the video folder; the folder name is md5(title), matching the download path above
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    return download_finished
                # Save the video info to the database
                insert_sql = f""" insert into crawler_video(video_id,
                                                            user_id,
                                                            out_user_id,
                                                            platform,
                                                            strategy,
                                                            out_video_id,
                                                            video_title,
                                                            cover_url,
                                                            video_url,
                                                            duration,
                                                            publish_time,
                                                            play_cnt,
                                                            crawler_rule,
                                                            width,
                                                            height)
                                                            values({our_video_id},
                                                            {our_uid},
                                                            "{video_dict['user_id']}",
                                                            "{cls.platform}",
                                                            "{strategy}",
                                                            "{video_dict['video_id']}",
                                                            "{video_dict['video_title']}",
                                                            "{video_dict['cover_url']}",
                                                            "{video_dict['video_url']}",
                                                            {int(video_dict['duration'])},
                                                            "{video_dict['publish_time_str']}",
                                                            {int(video_dict['play_cnt'])},
                                                            '{json.dumps(rule_dict)}',
                                                            {int(video_dict['video_width'])},
                                                            {int(video_dict['video_height'])}) """
                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
                MysqlHelper.update_values(log_type, crawler, insert_sql, env)
                Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
                # Write the video record to Feishu
                Feishu.insert_columns(log_type, 'kuaishou', "Aps2BI", "ROWS", 1, 2)
                upload_time = int(time.time())
                values = [[our_video_id,
                           time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                           strategy,
                           str(video_dict['video_id']),
                           video_dict['video_title'],
                           our_video_link,
                           video_dict['play_cnt'],
                           video_dict['comment_cnt'],
                           video_dict['like_cnt'],
                           video_dict['share_cnt'],
                           video_dict['duration'],
                           f"{video_dict['video_width']}*{video_dict['video_height']}",
                           video_dict['publish_time_str'],
                           video_dict['user_name'],
                           video_dict['user_id'],
                           video_dict['avatar_url'],
                           video_dict['cover_url'],
                           video_dict['video_url']]]
                time.sleep(1)
                Feishu.update_values(log_type, 'kuaishou', "Aps2BI", "E2:Z2", values)
                Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
                download_finished = True
            return download_finished
        except Exception as e:
            Common.logger(log_type, crawler).error(f"download_publish:{e}\n")

    @classmethod
    def get_recommend_videos(cls, log_type, crawler, task, oss_endpoint, env):
        user_list = get_user_from_mysql(log_type, crawler, crawler, env)
        strategy = '推荐抓取策略'
        for user in user_list:
            spider_link = user["link"]
            out_uid = spider_link
            user_name = user["nick_name"]
            our_uid = user["uid"]
            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
            cls.get_videoList(log_type=log_type,
                              crawler=crawler,
                              strategy=strategy,
                              task=task,
                              our_uid=our_uid,
                              oss_endpoint=oss_endpoint,
                              env=env)


if __name__ == "__main__":
    # NOTE: get_videoList reads task['rule_dict'], so a real run needs task to be a
    # dict carrying the rule config; the empty string here is only a placeholder.
    task = ''
    KuaiShouRecommendScheduling.get_recommend_videos('recommend', 'kuaishou', task, 'outer', 'prod')