# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/2/24
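# Kuaishou recommend-feed crawler (scheduling version): pulls the visionNewRecoFeed GraphQL
# feed page by page, filters each video against rule_dict and the MySQL filter-word list,
# then downloads and publishes the videos that pass.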
import os
import random
import shutil
import sys
import time
import string
from hashlib import md5
import requests
import json
import urllib3
from requests.adapters import HTTPAdapter
sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.scheduling_db import MysqlHelper
from common.publish import Publish
from common.public import random_title, get_config_from_mysql, download_rule
from common.userAgent import get_random_user_agent


class KuaiShouRecommendScheduling:
    platform = "快手"

    # Clean up the video title
    @classmethod
    def video_title(cls, log_type, crawler, env, title):
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]

        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]

        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]

        video_title = title3.strip().replace("\n", "") \
            .replace("/", "").replace("快手", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace("#", "").replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]

        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return random_title(log_type, crawler, env, text='title')
        else:
            return video_title
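
    # Crawl up to 100 pages of the recommend feed; each item is validated, checked against
    # rule_dict and the filter-word list, deduplicated via crawler_video, then downloaded
    # and published.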
    @classmethod
    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
        for page in range(1, 101):
            try:
                Common.logger(log_type, crawler).info(f"正在抓取第{page}页")
                url = "https://www.kuaishou.com/graphql"
                payload = json.dumps({
                    "operationName": "visionNewRecoFeed",
                    "variables": {
                        "dailyFirstPage": False
                    },
                    "query": "fragment photoContent on PhotoEntity {\n id\n duration\n caption\n originCaption\n likeCount\n viewCount\n realLikeCount\n coverUrl\n photoUrl\n photoH265Url\n manifest\n manifestH265\n videoResource\n coverUrls {\n url\n __typename\n }\n timestamp\n expTag\n animatedCoverUrl\n distance\n videoRatio\n liked\n stereoType\n profileUserTopPhoto\n musicBlocked\n __typename\n}\n\nfragment feedContent on Feed {\n type\n author {\n id\n name\n headerUrl\n following\n headerUrls {\n url\n __typename\n }\n __typename\n }\n photo {\n ...photoContent\n __typename\n }\n canAddComment\n llsid\n status\n currentPcursor\n tags {\n type\n name\n __typename\n }\n __typename\n}\n\nfragment photoResult on PhotoResult {\n result\n llsid\n expTag\n serverExpTag\n pcursor\n feeds {\n ...feedContent\n __typename\n }\n webPageArea\n __typename\n}\n\nquery visionNewRecoFeed($semKeyword: String, $semCrowd: String, $utmSource: String, $utmMedium: String, $utmCampaign: String, $dailyFirstPage: Boolean) {\n visionNewRecoFeed(semKeyword: $semKeyword, semCrowd: $semCrowd, utmSource: $utmSource, utmMedium: $utmMedium, utmCampaign: $utmCampaign, dailyFirstPage: $dailyFirstPage) {\n ...photoResult\n __typename\n }\n}\n"
                })
                # Randomize one character of the did cookie so each request looks like a fresh web client
                r = random.choice(string.ascii_lowercase)
                headers = {
                    'Accept-Language': 'zh-CN,zh;q=0.9',
                    'Connection': 'keep-alive',
                    'Cookie': 'kpf=PC_WEB; clientid=3; did=web_7cdc486ebd1aba220455a7781d6ae5b5{r}7; kpn=KUAISHOU_VISION;'.format(r=r),
                    'Origin': 'https://www.kuaishou.com',
                    'Referer': 'https://www.kuaishou.com/new-reco',
                    'Sec-Fetch-Dest': 'empty',
                    'Sec-Fetch-Mode': 'cors',
                    'Sec-Fetch-Site': 'same-origin',
                    'User-Agent': get_random_user_agent('pc'),
                    'accept': '*/*',
                    'content-type': 'application/json',
                    'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
                    'sec-ch-ua-mobile': '?0',
                    'sec-ch-ua-platform': '"macOS"'
                }
                urllib3.disable_warnings()
                s = requests.session()
                # Retry each request up to 3 times
                s.mount('http://', HTTPAdapter(max_retries=3))
                s.mount('https://', HTTPAdapter(max_retries=3))
                response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False, timeout=10)
                # Common.logger(log_type, crawler).info(f"response:{response.text}")
                response.close()
                if response.status_code != 200:
                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.status_code}, {response.text}\n")
                    continue
                elif 'data' not in response.json():
                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
                    continue
                elif 'visionNewRecoFeed' not in response.json()['data']:
                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
                    continue
                elif 'feeds' not in response.json()['data']['visionNewRecoFeed']:
                    Common.logger(log_type, crawler).warning(
                        f"get_videoList_response:{response.json()['data']['visionNewRecoFeed']}\n")
                    continue
                elif len(response.json()['data']['visionNewRecoFeed']['feeds']) == 0:
                    Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
                    continue
                else:
                    feeds = response.json()['data']['visionNewRecoFeed']['feeds']
                    for i in range(len(feeds)):
                        try:
                            # Fall back to a random title when the feed item has no caption
                            video_title = feeds[i].get("photo", {}).get("caption", random_title(log_type, crawler, env, text='title'))
                            video_title = cls.video_title(log_type, crawler, env, video_title)
                            try:
                                video_id = feeds[i].get("photo", {}).get("videoResource", {}).get("h264", {}).get("videoId", "")
                                video_width = feeds[i].get("photo", {}).get("videoResource", {}).get("h264", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("width", 0)
                                video_height = feeds[i].get("photo", {}).get("videoResource", {}).get("h264", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("height", 0)
                            except KeyError:
                                # Indexing the empty-dict default raises KeyError when no H.264 resource exists; fall back to HEVC
                                video_id = feeds[i].get("photo", {}).get("videoResource", {}).get("hevc", {}).get("videoId", "")
                                video_width = feeds[i].get("photo", {}).get("videoResource", {}).get("hevc", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("width", 0)
                                video_height = feeds[i].get("photo", {}).get("videoResource", {}).get("hevc", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("height", 0)
                            publish_time_stamp = int(int(feeds[i].get('photo', {}).get('timestamp', 0)) / 1000)
                            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                            video_dict = {'video_title': video_title,
                                          'video_id': video_id,
                                          'play_cnt': int(feeds[i].get('photo', {}).get('viewCount', 0)),
                                          'like_cnt': int(feeds[i].get('photo', {}).get('realLikeCount', 0)),
                                          'comment_cnt': 0,
                                          'share_cnt': 0,
                                          'video_width': video_width,
                                          'video_height': video_height,
                                          'duration': int(int(feeds[i].get('photo', {}).get('duration', 0)) / 1000),
                                          'publish_time_stamp': publish_time_stamp,
                                          'publish_time_str': publish_time_str,
                                          'user_name': feeds[i].get('author', {}).get('name', ""),
                                          'user_id': feeds[i].get('author', {}).get('id', ""),
                                          'avatar_url': feeds[i].get('author', {}).get('headerUrl', ""),
                                          'cover_url': feeds[i].get('photo', {}).get('coverUrl', ""),
                                          'video_url': feeds[i].get('photo', {}).get('photoUrl', ""),
                                          'session': f"kuaishou-{int(time.time())}"}
                            for k, v in video_dict.items():
                                Common.logger(log_type, crawler).info(f"{k}:{v}")
                            if video_dict["video_id"] == '' or video_dict["cover_url"] == '' or video_dict["video_url"] == '':
                                Common.logger(log_type, crawler).info('无效视频\n')
                            elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                                Common.logger(log_type, crawler).info("不满足抓取规则\n")
                            elif any(str(word) in video_dict["video_title"]
                                     for word in get_config_from_mysql(log_type=log_type,
                                                                       source=crawler,
                                                                       env=env,
                                                                       text="filter",
                                                                       action="")):
                                Common.logger(log_type, crawler).info('已中过滤词\n')
                            elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                                Common.logger(log_type, crawler).info('视频已下载\n')
                            else:
                                cls.download_publish(log_type=log_type,
                                                     crawler=crawler,
                                                     our_uid=our_uid,
                                                     video_dict=video_dict,
                                                     rule_dict=rule_dict,
                                                     env=env)
                        except Exception as e:
                            Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
            except Exception as e:
                Common.logger(log_type, crawler).error(f"抓取第{page}页时异常:{e}\n")
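
    # Dedup check: count existing crawler_video rows for this platform + out_video_id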
    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}" """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
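
    # Download the video and its cover, publish it via Publish.upload_and_publish,
    # then record the result in MySQL (crawler_video) and the Feishu sheet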
    @classmethod
    def download_publish(cls, log_type, crawler, our_uid, video_dict, rule_dict, env):
        # Download the video file
        Common.download_method(log_type=log_type, crawler=crawler, text='video',
                               title=video_dict['video_title'], url=video_dict['video_url'])
        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
        try:
            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                # Remove the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                return
        except FileNotFoundError:
            # Remove the video folder
            shutil.rmtree(f"./{crawler}/videos/{md_title}")
            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
            return
        # Download the cover image
        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
        # Save the video metadata to a txt file
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
        # Upload the video
        Common.logger(log_type, crawler).info("开始上传视频...")
        if env == "dev":
            oss_endpoint = "out"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy="推荐抓取策略",
                                                      our_uid=our_uid,
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            oss_endpoint = "inner"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy="推荐抓取策略",
                                                      our_uid=our_uid,
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        if our_video_id is None:
            try:
                # Publishing failed: remove the local video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                return
            except FileNotFoundError:
                return
        # Save the video metadata to the database
        insert_sql = f""" insert into crawler_video(video_id,
                                                    user_id,
                                                    out_user_id,
                                                    platform,
                                                    strategy,
                                                    out_video_id,
                                                    video_title,
                                                    cover_url,
                                                    video_url,
                                                    duration,
                                                    publish_time,
                                                    play_cnt,
                                                    crawler_rule,
                                                    width,
                                                    height)
                                                    values({our_video_id},
                                                    {our_uid},
                                                    "{video_dict['user_id']}",
                                                    "{cls.platform}",
                                                    "推荐抓取策略",
                                                    "{video_dict['video_id']}",
                                                    "{video_dict['video_title']}",
                                                    "{video_dict['cover_url']}",
                                                    "{video_dict['video_url']}",
                                                    {int(video_dict['duration'])},
                                                    "{video_dict['publish_time_str']}",
                                                    {int(video_dict['play_cnt'])},
                                                    '{json.dumps(rule_dict)}',
                                                    {int(video_dict['video_width'])},
                                                    {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action="")
        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
        # Write the video to the Feishu sheet
        Feishu.insert_columns(log_type, crawler, "Aps2BI", "ROWS", 1, 2)
        upload_time = int(time.time())
        values = [[our_video_id,
                   time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "推荐抓取策略",
                   str(video_dict['video_id']),
                   video_dict['video_title'],
                   our_video_link,
                   video_dict['play_cnt'],
                   video_dict['comment_cnt'],
                   video_dict['like_cnt'],
                   video_dict['share_cnt'],
                   video_dict['duration'],
                   f"{video_dict['video_width']}*{video_dict['video_height']}",
                   video_dict['publish_time_str'],
                   video_dict['user_name'],
                   video_dict['user_id'],
                   video_dict['avatar_url'],
                   video_dict['cover_url'],
                   video_dict['video_url']]]
        time.sleep(0.5)
        Feishu.update_values(log_type, crawler, "Aps2BI", "E2:Z2", values)
        Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")


if __name__ == "__main__":
    print(get_config_from_mysql("recommend", "kuaishou", "prod", "filter"))
    pass
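
    # Usage sketch (illustrative only, not from the original file): the our_uid and rule_dict
    # values below are placeholders/assumptions.
    # KuaiShouRecommendScheduling.get_videoList(log_type="recommend",
    #                                           crawler="kuaishou",
    #                                           our_uid=0,
    #                                           rule_dict={},
    #                                           env="dev")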