kuaishou_author.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/5/24
import os
import shutil
import sys
import time
from hashlib import md5
import requests
import json
import urllib3
from requests.adapters import HTTPAdapter
sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.getuser import getUser
from common.scheduling_db import MysqlHelper
from common.publish import Publish
from common.public import random_title, get_config_from_mysql
from common.public import get_user_from_mysql
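

# NOTE: sys.path.append(os.getcwd()) above is what lets the project-level "common" package
# imports resolve when this script is launched from the repository root.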
class KuaishouauthorScheduling:
    platform = "快手"  # "Kuaishou"; kept as-is because this value is matched against the crawler tables

    # Clean up the video title
    @classmethod
    def video_title(cls, log_type, crawler, env, title):
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]

        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]

        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]

        video_title = title3.strip().replace("\n", "") \
            .replace("/", "").replace("快手", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace("#", "").replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return random_title(log_type, crawler, env, text='title')
        else:
            return video_title
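
    # Illustration (not from the source, just tracing the code above): a caption such as
    # "风景真美 #旅行 @小明" is cut at the first " #" / "@" marker and stripped of the listed
    # characters, leaving "风景真美"; empty or placeholder results fall back to random_title().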
    @classmethod
    def get_cookie(cls, log_type, crawler, env):
        select_sql = f""" select * from crawler_config where source="{crawler}" """
        configs = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
        for config in configs:
            if "cookie" in config["config"]:
                cookie_dict = {
                    "cookie_id": config["id"],
                    "title": config["title"].strip(),
                    # config["config"] is stored as a dict literal, so eval() turns it back into a dict
                    "cookie": dict(eval(config["config"]))["cookie"].strip(),
                    "update_time": time.strftime("%Y-%m-%d %H:%M:%S",
                                                 time.localtime(int(config["update_time"] / 1000))),
                    "operator": config["operator"].strip()
                }
                for k, v in cookie_dict.items():
                    print(f"{k}:{type(v)}, {v}")
                return cookie_dict
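
    # get_videoList: query Kuaishou's public GraphQL endpoint (operation visionProfilePhotoList)
    # for one author's profile feed, build a video_dict per item, and hand items that satisfy
    # either rule set (rule_dict_1 / rule_dict_2) to download_publish.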
    @classmethod
    def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine):
        download_cnt_1, download_cnt_2 = 0, 0
        rule_dict_1 = cls.get_rule(log_type, crawler, 1)
        rule_dict_2 = cls.get_rule(log_type, crawler, 2)
        if rule_dict_1 is None or rule_dict_2 is None:
            Common.logger(log_type, crawler).warning("rule_dict is None")
            return
        url = "https://www.kuaishou.com/graphql"
        payload = json.dumps({
            "operationName": "visionProfilePhotoList",
            "variables": {
                "userId": out_uid,
                "pcursor": "",
                "page": "profile"
            },
            "query": "fragment photoContent on PhotoEntity {\n id\n duration\n caption\n originCaption\n likeCount\n viewCount\n commentCount\n realLikeCount\n coverUrl\n photoUrl\n photoH265Url\n manifest\n manifestH265\n videoResource\n coverUrls {\n url\n __typename\n }\n timestamp\n expTag\n animatedCoverUrl\n distance\n videoRatio\n liked\n stereoType\n profileUserTopPhoto\n musicBlocked\n __typename\n}\n\nfragment feedContent on Feed {\n type\n author {\n id\n name\n headerUrl\n following\n headerUrls {\n url\n __typename\n }\n __typename\n }\n photo {\n ...photoContent\n __typename\n }\n canAddComment\n llsid\n status\n currentPcursor\n tags {\n type\n name\n __typename\n }\n __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n result\n llsid\n webPageArea\n feeds {\n ...feedContent\n __typename\n }\n hostName\n pcursor\n __typename\n }\n}\n"
        })
        headers = {
            'Accept': '*/*',
            'Content-Type': 'application/json',
            'Origin': 'https://www.kuaishou.com',
            'Cookie': f'kpf=PC_WEB; clientid=3; did={cls.get_(log_type, crawler)}; kpn=KUAISHOU_VISION',
            'Content-Length': '1260',
            'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
            'Host': 'www.kuaishou.com',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6.1 Safari/605.1.15',
            'Referer': 'https://www.kuaishou.com/profile/{}'.format(out_uid),
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive'
        }
        response = requests.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(),
                                 verify=False, timeout=10)
        try:
            feeds = response.json()['data']['visionProfilePhotoList']['feeds']
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_videoList:{e},response:{response.text}")
            return
        if not feeds:
            Common.logger(log_type, crawler).info("No more videos ~\n")
            return
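        # Walk the returned feed items one by one; any per-item exception below is logged
        # and the loop moves on to the next item.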
        # pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
        # Common.logger(log_type, crawler).info(f"feeds0: {feeds}\n")
        for i in range(len(feeds)):
            try:
                # video_title
                if 'caption' not in feeds[i]['photo']:
                    video_title = random_title(log_type, crawler, env, text='title')
                elif feeds[i]['photo']['caption'].strip() == "":
                    video_title = random_title(log_type, crawler, env, text='title')
                else:
                    video_title = cls.video_title(log_type, crawler, env, feeds[i]['photo']['caption'])

                if 'videoResource' not in feeds[i]['photo'] \
                        and 'manifest' not in feeds[i]['photo'] \
                        and 'manifestH265' not in feeds[i]['photo']:
                    Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
                    break
                videoResource = feeds[i]['photo']['videoResource']
                if 'h264' not in videoResource and 'hevc' not in videoResource:
                    Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
                    break

                # video_id
                if 'h264' in videoResource and 'videoId' in videoResource['h264']:
                    video_id = videoResource['h264']['videoId']
                elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
                    video_id = videoResource['hevc']['videoId']
                else:
                    video_id = ""

                # play_cnt
                if 'viewCount' not in feeds[i]['photo']:
                    play_cnt = 0
                else:
                    play_cnt = int(feeds[i]['photo']['viewCount'])

                # like_cnt
                if 'realLikeCount' not in feeds[i]['photo']:
                    like_cnt = 0
                else:
                    like_cnt = feeds[i]['photo']['realLikeCount']

                # publish_time
                if 'timestamp' not in feeds[i]['photo']:
                    publish_time_stamp = 0
                    publish_time_str = ''
                    publish_time = 0
                else:
                    publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                    # days since publish
                    publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))

                # duration (ms -> s)
                if 'duration' not in feeds[i]['photo']:
                    duration = 0
                else:
                    duration = int(int(feeds[i]['photo']['duration']) / 1000)

                # video_width / video_height: prefer the h264 representation, fall back to hevc
                mapping = {}
                for item in ['width', 'height']:
                    try:
                        val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
                    except Exception:
                        val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
                    mapping[item] = val
                video_width = int(mapping['width']) if mapping['width'] else 0
                video_height = int(mapping['height']) if mapping['height'] else 0

                # cover_url
                if 'coverUrl' not in feeds[i]['photo']:
                    cover_url = ""
                else:
                    cover_url = feeds[i]['photo']['coverUrl']

                # user_name / avatar_url
                user_name = feeds[i]['author']['name']
                avatar_url = feeds[i]['author']['headerUrl']
                video_url = feeds[i]['photo']['photoUrl']

                video_dict = {'video_title': video_title,
                              'video_id': video_id,
                              'play_cnt': play_cnt,
                              'comment_cnt': 0,
                              'like_cnt': like_cnt,
                              'share_cnt': 0,
                              'video_width': video_width,
                              'video_height': video_height,
                              'duration': duration,
                              'publish_time': publish_time,
                              'publish_time_stamp': publish_time_stamp,
                              'publish_time_str': publish_time_str,
                              'user_name': user_name,
                              'user_id': out_uid,
                              'avatar_url': avatar_url,
                              'cover_url': cover_url,
                              'video_url': video_url,
                              'session': f"kuaishou{int(time.time())}"}
                rule_1 = cls.download_rule(video_dict, rule_dict_1)
                Common.logger(log_type, crawler).info(f"video_title:{video_title}")
                Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
                Common.logger(log_type, crawler).info(
                    f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
                Common.logger(log_type, crawler).info(
                    f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
                Common.logger(log_type, crawler).info(
                    f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
                Common.logger(log_type, crawler).info(
                    f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
                Common.logger(log_type, crawler).info(
                    f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
                Common.logger(log_type, crawler).info(
                    f"publish_time:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
                Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")

                rule_2 = cls.download_rule(video_dict, rule_dict_2)
                Common.logger(log_type, crawler).info(
                    f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
                Common.logger(log_type, crawler).info(
                    f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
                Common.logger(log_type, crawler).info(
                    f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
                Common.logger(log_type, crawler).info(
                    f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
                Common.logger(log_type, crawler).info(
                    f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
                Common.logger(log_type, crawler).info(
                    f"publish_time:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
                Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
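                # rule_1 takes priority over rule_2; each branch keeps its own download counter
                # (download_cnt_1 / download_cnt_2), although the increments are currently commented out.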
                if video_title == "" or video_url == "":
                    Common.logger(log_type, crawler).info("Invalid video\n")
                    continue
                elif rule_1 is True:
                    # strip the comparison operator from e.g. ">=3" and keep the numeric limit
                    if download_cnt_1 < int(rule_dict_1['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
                        cls.download_publish(log_type=log_type,
                                             crawler=crawler,
                                             strategy=strategy,
                                             video_dict=video_dict,
                                             rule_dict=rule_dict_1,
                                             our_uid=our_uid,
                                             oss_endpoint=oss_endpoint,
                                             env=env,
                                             machine=machine)
                        # if download_finished is True:
                        #     download_cnt_1 += 1
                elif rule_2 is True:
                    if download_cnt_2 < int(rule_dict_2['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
                        cls.download_publish(log_type=log_type,
                                             crawler=crawler,
                                             strategy=strategy,
                                             video_dict=video_dict,
                                             rule_dict=rule_dict_2,
                                             our_uid=our_uid,
                                             oss_endpoint=oss_endpoint,
                                             env=env,
                                             machine=machine)
                        # if download_finished is True:
                        #     download_cnt_2 += 1
                else:
                    Common.logger(log_type, crawler).info("Does not meet the download rules\n")
                # Common.logger(log_type, crawler).info(f"feeds: {feeds}\n")
            except Exception as e:
                Common.logger(log_type, crawler).warning(f"Exception while crawling a single video: {e}\n")
        # if pcursor == "no_more":
        #     Common.logger(log_type, crawler).info(f"Author {out_uid} has reached the end, no more content\n")
        #     return
        # cls.get_videoList(log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine,
        #                   pcursor=pcursor)
        # time.sleep(random.randint(1, 3))
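
    # repeat_video: count existing crawler_video rows with the same platform + out_video_id,
    # or the same platform + title + publish_time; a non-zero count means the video was already crawled.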
    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, video_title, publish_time, env, machine):
        sql = f""" select * from crawler_video where (platform="{cls.platform}" and out_video_id="{video_id}") or (platform="{cls.platform}" and video_title="{video_title}" and publish_time="{publish_time}") """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
        return len(repeat_video)
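
    # download_publish: apply the filter-word list, skip duplicates via repeat_video, download the
    # video and cover with Common.download_method, publish through Publish.upload_and_publish, then
    # record the result in MySQL and the Feishu sheet.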
    @classmethod
    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
        filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
        for filter_word in filter_words:
            if filter_word in video_dict['video_title']:
                Common.logger(log_type, crawler).info(f"Title hit a filter word: {video_dict['video_title']}\n")
                return
        download_finished = False
        if cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'],
                            video_dict['publish_time_str'], env, machine) != 0:
            Common.logger(log_type, crawler).info('Video already downloaded\n')
        else:
            # Download the video
            Common.download_method(log_type=log_type, crawler=crawler, text='video',
                                   title=video_dict['video_title'], url=video_dict['video_url'])
            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
            try:
                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                    # Delete the video folder
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    Common.logger(log_type, crawler).info("Video size is 0, folder deleted\n")
                    return
            except FileNotFoundError:
                # Delete the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).info("Video file not found, folder deleted\n")
                return
            # Download the cover
            Common.download_method(log_type=log_type, crawler=crawler, text='cover',
                                   title=video_dict['video_title'], url=video_dict['cover_url'])
            # Save the video info to a txt file
            Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
            # Upload the video
            Common.logger(log_type, crawler).info("Start uploading the video...")
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy=strategy,
                                                      our_uid=our_uid,
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)
            if env == 'dev':
                our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
            else:
                our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
            Common.logger(log_type, crawler).info("Video upload finished")
            if our_video_id is None:
                try:
                    Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id}, deleting the video folder")
                    # Delete the video folder
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    return download_finished
                except FileNotFoundError:
                    return download_finished
            # Save the video info to the database
            insert_sql = f""" insert into crawler_video(video_id,
                                                        user_id,
                                                        out_user_id,
                                                        platform,
                                                        strategy,
                                                        out_video_id,
                                                        video_title,
                                                        cover_url,
                                                        video_url,
                                                        duration,
                                                        publish_time,
                                                        play_cnt,
                                                        crawler_rule,
                                                        width,
                                                        height)
                                                        values({our_video_id},
                                                        {our_uid},
                                                        "{video_dict['user_id']}",
                                                        "{cls.platform}",
                                                        "定向爬虫策略",
                                                        "{video_dict['video_id']}",
                                                        "{video_dict['video_title']}",
                                                        "{video_dict['cover_url']}",
                                                        "{video_dict['video_url']}",
                                                        {int(video_dict['duration'])},
                                                        "{video_dict['publish_time_str']}",
                                                        {int(video_dict['play_cnt'])},
                                                        '{json.dumps(rule_dict)}',
                                                        {int(video_dict['video_width'])},
                                                        {int(video_dict['video_height'])}) """
            Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
            MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
            Common.logger(log_type, crawler).info('Video info inserted into the database!\n')
            # Write the video to the Feishu sheet
            Feishu.insert_columns(log_type, 'kuaishou', "fYdA8F", "ROWS", 1, 2)
            upload_time = int(time.time())
            values = [[our_video_id,
                       time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                       "定向榜",  # "targeted account" board label written to the sheet
                       str(video_dict['video_id']),
                       video_dict['video_title'],
                       our_video_link,
                       video_dict['play_cnt'],
                       video_dict['comment_cnt'],
                       video_dict['like_cnt'],
                       video_dict['share_cnt'],
                       video_dict['duration'],
                       f"{video_dict['video_width']}*{video_dict['video_height']}",
                       video_dict['publish_time_str'],
                       video_dict['user_name'],
                       video_dict['user_id'],
                       video_dict['avatar_url'],
                       video_dict['cover_url'],
                       video_dict['video_url']]]
            time.sleep(1)
            Feishu.update_values(log_type, 'kuaishou', "fYdA8F", "E2:Z2", values)
            Common.logger(log_type, crawler).info("Video saved to the Feishu doc\n")
            download_finished = True
        return download_finished
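
    # get_follow_videos: entry point for the per-author ("定向") strategy; reads the author list
    # from MySQL and crawls each author's profile page in turn.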
    @classmethod
    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
        user_list = get_user_from_mysql(log_type, crawler, crawler, env)
        for user in user_list:
            try:
                spider_link = user["link"]
                out_uid = spider_link.split('/')[-1]
                user_name = user["nick_name"]
                our_uid = user["uid"]
                Common.logger(log_type, crawler).info(f"Start crawling videos from {user_name}'s profile page\n")
                cls.get_videoList(log_type=log_type,
                                  crawler=crawler,
                                  strategy=strategy,
                                  our_uid=our_uid,
                                  out_uid=out_uid,
                                  oss_endpoint=oss_endpoint,
                                  env=env,
                                  machine=machine)
            except Exception as e:
                Common.logger(log_type, crawler).warning(f"Exception while crawling user {user}: {e}\n")


if __name__ == "__main__":
    KuaishouauthorScheduling.get_cookie("author", "kuaishou", "dev")
    pass
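
# A minimal invocation sketch. log_type/crawler/env mirror the get_cookie() call above and
# "定向爬虫策略" is the strategy label used in download_publish; the oss_endpoint and machine
# values here are illustrative assumptions only:
#
#     KuaishouauthorScheduling.get_follow_videos(log_type="author",
#                                                crawler="kuaishou",
#                                                strategy="定向爬虫策略",
#                                                oss_endpoint="out",
#                                                env="dev",
#                                                machine="local")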