@@ -0,0 +1,305 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/24
+import os
+import random
+import shutil
+import sys
+import time
+import string
+from hashlib import md5
+import requests
+import json
+import urllib3
+from requests.adapters import HTTPAdapter
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.scheduling_db import MysqlHelper
+from common.publish import Publish
+from common.public import random_title, get_config_from_mysql, download_rule, get_title_score
+from common.userAgent import get_random_user_agent
+
+
+class KuaiShouRecommendScheduling:
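+    """Kuaishou recommend-feed crawler: fetch feed pages, filter, download and publish videos."""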
+    platform = "快手"
+
+    # Clean up a raw video title: strip hashtag/@ segments and illegal characters
+    @classmethod
+    def video_title(cls, log_type, crawler, env, title):
+        title_split1 = title.split(" #")
+        if title_split1[0] != "":
+            title1 = title_split1[0]
+        else:
+            title1 = title_split1[-1]
+
+        title_split2 = title1.split(" #")
+        if title_split2[0] != "":
+            title2 = title_split2[0]
+        else:
+            title2 = title_split2[-1]
+
+        title_split3 = title2.split("@")
+        if title_split3[0] != "":
+            title3 = title_split3[0]
+        else:
+            title3 = title_split3[-1]
+
+        video_title = title3.strip().replace("\n", "") \
+            .replace("/", "").replace("快手", "").replace(" ", "") \
+            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
+            .replace("#", "").replace(".", "。").replace("\\", "") \
+            .replace(":", "").replace("*", "").replace("?", "") \
+            .replace("?", "").replace('"', "").replace("<", "") \
+            .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
+        # Fall back to a random title when nothing usable is left
+        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
+            return random_title(log_type, crawler, env, text='title')
+        else:
+            return video_title
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
+        for page in range(1, 101):
+            try:
+                Common.logger(log_type, crawler).info(f"Crawling page {page}")
+                url = "https://www.kuaishou.com/graphql"
+                payload = json.dumps({
+                    "operationName": "visionNewRecoFeed",
+                    "variables": {
+                        "dailyFirstPage": False
+                    },
+ "query": "fragment photoContent on PhotoEntity {\n id\n duration\n caption\n originCaption\n likeCount\n viewCount\n realLikeCount\n coverUrl\n photoUrl\n photoH265Url\n manifest\n manifestH265\n videoResource\n coverUrls {\n url\n __typename\n }\n timestamp\n expTag\n animatedCoverUrl\n distance\n videoRatio\n liked\n stereoType\n profileUserTopPhoto\n musicBlocked\n __typename\n}\n\nfragment feedContent on Feed {\n type\n author {\n id\n name\n headerUrl\n following\n headerUrls {\n url\n __typename\n }\n __typename\n }\n photo {\n ...photoContent\n __typename\n }\n canAddComment\n llsid\n status\n currentPcursor\n tags {\n type\n name\n __typename\n }\n __typename\n}\n\nfragment photoResult on PhotoResult {\n result\n llsid\n expTag\n serverExpTag\n pcursor\n feeds {\n ...feedContent\n __typename\n }\n webPageArea\n __typename\n}\n\nquery visionNewRecoFeed($semKeyword: String, $semCrowd: String, $utmSource: String, $utmMedium: String, $utmCampaign: String, $dailyFirstPage: Boolean) {\n visionNewRecoFeed(semKeyword: $semKeyword, semCrowd: $semCrowd, utmSource: $utmSource, utmMedium: $utmMedium, utmCampaign: $utmCampaign, dailyFirstPage: $dailyFirstPage) {\n ...photoResult\n __typename\n }\n}\n"
+                })
+                # A random letter makes the did cookie value vary between requests
+                random_letter = random.choice(string.ascii_lowercase)
+                headers = {
+                    'Accept-Language': 'zh-CN,zh;q=0.9',
+                    'Connection': 'keep-alive',
+                    'Cookie': 'kpf=PC_WEB; clientid=3; did=web_7cdc486ebd1aba220455a7781d6ae5b5{r}7; kpn=KUAISHOU_VISION;'.format(
+                        r=random_letter),
+                    'Origin': 'https://www.kuaishou.com',
+                    'Referer': 'https://www.kuaishou.com/new-reco',
+                    'Sec-Fetch-Dest': 'empty',
+                    'Sec-Fetch-Mode': 'cors',
+                    'Sec-Fetch-Site': 'same-origin',
+                    'User-Agent': get_random_user_agent('pc'),
+                    'accept': '*/*',
+                    'content-type': 'application/json',
+                    'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
+                    'sec-ch-ua-mobile': '?0',
+                    'sec-ch-ua-platform': '"macOS"'
+                }
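+                # POST through a retrying session and the tunnel proxy; verify=False
+                # skips TLS verification, so urllib3's insecure-request warning is muted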
+                urllib3.disable_warnings()
+                s = requests.session()
+                # Retry each request up to 3 times
+                s.mount('http://', HTTPAdapter(max_retries=3))
+                s.mount('https://', HTTPAdapter(max_retries=3))
+                response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False, timeout=10)
+                # Common.logger(log_type, crawler).info(f"response:{response.text}")
+                response.close()
+                if response.status_code != 200:
+                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.status_code}, {response.text}\n")
+                    continue
+                elif 'data' not in response.json():
+                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
+                    continue
+                elif 'visionNewRecoFeed' not in response.json()['data']:
+                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
+                    continue
+                elif 'feeds' not in response.json()['data']['visionNewRecoFeed']:
+                    Common.logger(log_type, crawler).warning(
+                        f"get_videoList_response:{response.json()['data']['visionNewRecoFeed']}\n")
+                    continue
+                elif len(response.json()['data']['visionNewRecoFeed']['feeds']) == 0:
+                    Common.logger(log_type, crawler).info("No more videos ~\n")
+                    continue
+                else:
+                    feeds = response.json()['data']['visionNewRecoFeed']['feeds']
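+                    # Each feed item pairs author info with a "photo" payload holding
+                    # stream URLs, counters and the publish timestamp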
+                    for i in range(len(feeds)):
+                        try:
+                            video_title = feeds[i].get("photo", {}).get("caption") or random_title(log_type, crawler, env, text='title')
+                            video_title = cls.video_title(log_type, crawler, env, video_title)
+                            try:
+                                # Prefer the H.264 stream; fall back to HEVC when it is absent or malformed
+                                h264 = feeds[i]["photo"]["videoResource"]["h264"]
+                                video_id = h264.get("videoId", "")
+                                video_width = h264["adaptationSet"][0]["representation"][0].get("width", 0)
+                                video_height = h264["adaptationSet"][0]["representation"][0].get("height", 0)
+                            except (KeyError, IndexError, TypeError):
+                                hevc = feeds[i].get("photo", {}).get("videoResource", {}).get("hevc", {})
+                                video_id = hevc.get("videoId", "")
+                                video_width = hevc.get("adaptationSet", [{}])[0].get("representation", [{}])[0].get("width", 0)
+                                video_height = hevc.get("adaptationSet", [{}])[0].get("representation", [{}])[0].get("height", 0)
+                            publish_time_stamp = int(int(feeds[i].get('photo', {}).get('timestamp', 0)) / 1000)
+                            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+
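+                            # Normalized record consumed by download_rule() and download_publish()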
+                            video_dict = {'video_title': video_title,
+                                          'video_id': video_id,
+                                          'play_cnt': int(feeds[i].get('photo', {}).get('viewCount', 0)),
+                                          'like_cnt': int(feeds[i].get('photo', {}).get('realLikeCount', 0)),
+                                          'comment_cnt': 0,
+                                          'share_cnt': 0,
+                                          'video_width': video_width,
+                                          'video_height': video_height,
+                                          'duration': int(int(feeds[i].get('photo', {}).get('duration', 0)) / 1000),
+                                          'publish_time_stamp': publish_time_stamp,
+                                          'publish_time_str': publish_time_str,
+                                          'user_name': feeds[i].get('author', {}).get('name', ""),
+                                          'user_id': feeds[i].get('author', {}).get('id', ""),
+                                          'avatar_url': feeds[i].get('author', {}).get('headerUrl', ""),
+                                          'cover_url': feeds[i].get('photo', {}).get('coverUrl', ""),
+                                          'video_url': feeds[i].get('photo', {}).get('photoUrl', ""),
+                                          'session': f"kuaishou-{int(time.time())}"}
+                            for k, v in video_dict.items():
+                                Common.logger(log_type, crawler).info(f"{k}:{v}")
+
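+                            # Skip items that are invalid, break the crawl rules, hit a
+                            # filter word, or have been downloaded before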
+                            if video_dict["video_id"] == '' or video_dict["cover_url"] == '' or video_dict["video_url"] == '':
+                                Common.logger(log_type, crawler).info('Invalid video\n')
+                            elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                                Common.logger(log_type, crawler).info("Does not meet the crawl rules\n")
+                            elif any(str(word) in video_dict["video_title"]
+                                     for word in get_config_from_mysql(log_type=log_type,
+                                                                       source=crawler,
+                                                                       env=env,
+                                                                       text="filter",
+                                                                       action="")):
+                                Common.logger(log_type, crawler).info('Title contains a filter word\n')
+                            elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                                Common.logger(log_type, crawler).info('Video already downloaded\n')
+                            else:
+                                title_score = get_title_score(log_type, crawler, "16QspO", "0usaDk", video_title)
+                                if title_score <= 0.3:
+                                    Common.logger(log_type, crawler).info(f"Title score {title_score} <= 0.3\n")
+                                    continue
+                                cls.download_publish(log_type=log_type,
+                                                     crawler=crawler,
+                                                     our_uid=our_uid,
+                                                     video_dict=video_dict,
+                                                     rule_dict=rule_dict,
+                                                     title_score=title_score,
+                                                     env=env)
+                        except Exception as e:
+                            Common.logger(log_type, crawler).error(f"Exception while crawling a single video: {e}\n")
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"Exception while crawling page {page}: {e}\n")
+
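+    # De-dup check: count rows in crawler_video that already carry this out_video_id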
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}" """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
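+    # Download video and cover, publish via OSS, then record to MySQL and Feishu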
+    @classmethod
+    def download_publish(cls, log_type, crawler, our_uid, video_dict, rule_dict, title_score, env):
+        # Download the video
+        Common.download_method(log_type=log_type, crawler=crawler, text='video',
+                               title=video_dict['video_title'], url=video_dict['video_url'])
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # Remove the video folder
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("Video size is 0, folder removed\n")
+                return
+        except FileNotFoundError:
+            # Remove the video folder
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("Video file missing, folder removed\n")
+            return
+        # Download the cover
+        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
+        # Save video info to txt
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # Upload and publish the video
+        Common.logger(log_type, crawler).info("Start uploading the video...")
+        oss_endpoint = "out" if env == "dev" else "inner"
+        our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                  crawler=crawler,
+                                                  strategy="推荐抓取策略",
+                                                  our_uid=our_uid,
+                                                  env=env,
+                                                  oss_endpoint=oss_endpoint)
+        if env == "dev":
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        else:
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+
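+        # upload_and_publish returns None when publishing failed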
+        if our_video_id is None:
+            try:
+                # Publish failed: remove the local video folder
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
+
+        # Save the video info to the database
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                    user_id,
+                                                    out_user_id,
+                                                    platform,
+                                                    strategy,
+                                                    out_video_id,
+                                                    video_title,
+                                                    cover_url,
+                                                    video_url,
+                                                    duration,
+                                                    publish_time,
+                                                    play_cnt,
+                                                    crawler_rule,
+                                                    width,
+                                                    height)
+                                                    values({our_video_id},
+                                                    {our_uid},
+                                                    "{video_dict['user_id']}",
+                                                    "{cls.platform}",
+                                                    "推荐抓取策略",
+                                                    "{video_dict['video_id']}",
+                                                    "{video_dict['video_title']}",
+                                                    "{video_dict['cover_url']}",
+                                                    "{video_dict['video_url']}",
+                                                    {int(video_dict['duration'])},
+                                                    "{video_dict['publish_time_str']}",
+                                                    {int(video_dict['play_cnt'])},
+                                                    '{json.dumps(rule_dict)}',
+                                                    {int(video_dict['video_width'])},
+                                                    {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action="")
+        Common.logger(log_type, crawler).info('Video info written to the database')
+
+        # Write the video to the Feishu sheet
+        Feishu.insert_columns(log_type, crawler, "Aps2BI", "ROWS", 1, 2)
+        upload_time = int(time.time())
+        values = [[title_score,
+                   our_video_id,
+                   time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                   "推荐抓取策略",
+                   str(video_dict['video_id']),
+                   video_dict['video_title'],
+                   our_video_link,
+                   video_dict['play_cnt'],
+                   video_dict['comment_cnt'],
+                   video_dict['like_cnt'],
+                   video_dict['share_cnt'],
+                   video_dict['duration'],
+                   f"{video_dict['video_width']}*{video_dict['video_height']}",
+                   video_dict['publish_time_str'],
+                   video_dict['user_name'],
+                   video_dict['user_id'],
+                   video_dict['avatar_url'],
+                   video_dict['cover_url'],
+                   video_dict['video_url']]]
+        time.sleep(0.5)
+        Feishu.update_values(log_type, crawler, "Aps2BI", "D2:Z2", values)
+        Common.logger(log_type, crawler).info("Video saved to the Feishu sheet\n")
+
+
+if __name__ == "__main__":
+    print(get_config_from_mysql("recommend", "kuaishou", "prod", "filter"))
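+    # Hypothetical ad-hoc run (our_uid and rule_dict below are placeholders):
+    # KuaiShouRecommendScheduling.get_videoList("recommend", "kuaishou", our_uid=0, rule_dict={}, env="dev")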