# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/5/24
import os
import shutil
import sys
import time
from hashlib import md5
import requests
import json
import urllib3
sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.scheduling_db import MysqlHelper
from common.publish import Publish
from common.public import random_title, get_config_from_mysql
from common.public import get_user_from_mysql

# Suppress the InsecureRequestWarning raised by the verify=False requests below.
urllib3.disable_warnings()
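
# Module flow: get_follow_videos() reads the author list from MySQL,
# get_videoList() fetches each author's profile feed from Kuaishou's GraphQL
# endpoint, and download_publish() downloads, uploads, and records every video
# that passes one of the two download-rule sets.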


class KuaishouauthorScheduling:
    platform = "快手"

    # Clean up a raw video caption into a usable title.
    @classmethod
    def video_title(cls, log_type, crawler, env, title):
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]

        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]

        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]

        video_title = title3.strip().replace("\n", "") \
            .replace("/", "").replace("快手", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace("#", "").replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
        # Fall back to a random title when cleaning leaves nothing usable.
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return random_title(log_type, crawler, env, text='title')
        else:
            return video_title
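
    # Worked example (hypothetical caption): "handmade #craft @studio"
    # -> splitting on " #" keeps "handmade", splitting on "@" leaves it
    # unchanged, and the replace chain strips any leftover "#"/"@" characters
    # before truncating to 40 characters.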

    @classmethod
    def get_cookie(cls, log_type, crawler, env):
        select_sql = f""" select * from crawler_config where source="{crawler}" """
        configs = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
        for config in configs:
            if "cookie" in config["config"]:
                cookie_dict = {
                    "cookie_id": config["id"],
                    "title": config["title"].strip(),
                    # config["config"] is stored as a dict literal; eval() turns it back into a dict.
                    "cookie": dict(eval(config["config"]))["cookie"].strip(),
                    # update_time is stored in milliseconds.
                    "update_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(config["update_time"] / 1000))),
                    "operator": config["operator"].strip()
                }
                for k, v in cookie_dict.items():
                    print(f"{k}:{type(v)}, {v}")
                return cookie_dict
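
    # The crawler_config row is assumed to look roughly like this (hypothetical
    # sample, matching the eval() call above):
    #   config["config"] = '{"cookie": "did=web_xxx; kpn=KUAISHOU_VISION"}'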

    @classmethod
    def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine):
        download_cnt_1, download_cnt_2 = 0, 0
        # get_rule / download_rule are assumed to be defined elsewhere in this
        # class or its common modules; they are referenced but not shown here.
        rule_dict_1 = cls.get_rule(log_type, crawler, 1)
        rule_dict_2 = cls.get_rule(log_type, crawler, 2)
        if rule_dict_1 is None or rule_dict_2 is None:
            Common.logger(log_type, crawler).warning("rule_dict is None")
            return

        url = "https://www.kuaishou.com/graphql"
        payload = json.dumps({
            "operationName": "visionProfilePhotoList",
            "variables": {
                "userId": out_uid,
                "pcursor": "",
                "page": "profile"
            },
            "query": "fragment photoContent on PhotoEntity {\n id\n duration\n caption\n originCaption\n likeCount\n viewCount\n commentCount\n realLikeCount\n coverUrl\n photoUrl\n photoH265Url\n manifest\n manifestH265\n videoResource\n coverUrls {\n url\n __typename\n }\n timestamp\n expTag\n animatedCoverUrl\n distance\n videoRatio\n liked\n stereoType\n profileUserTopPhoto\n musicBlocked\n __typename\n}\n\nfragment feedContent on Feed {\n type\n author {\n id\n name\n headerUrl\n following\n headerUrls {\n url\n __typename\n }\n __typename\n }\n photo {\n ...photoContent\n __typename\n }\n canAddComment\n llsid\n status\n currentPcursor\n tags {\n type\n name\n __typename\n }\n __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n result\n llsid\n webPageArea\n feeds {\n ...feedContent\n __typename\n }\n hostName\n pcursor\n __typename\n }\n}\n"
        })
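
        # visionProfilePhotoList is Kuaishou's public web GraphQL query for an
        # author's profile feed; pcursor is the pagination cursor ("" requests the
        # first page, and it becomes "no_more" once the feed is exhausted, per the
        # commented-out paging code at the end of this method).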
        headers = {
            'Accept': '*/*',
            'Content-Type': 'application/json',
            'Origin': 'https://www.kuaishou.com',
            # NOTE: the helper name is truncated to "cls.get_(...)" in the source;
            # get_did is an assumed name for a helper returning the device id.
            'Cookie': f'kpf=PC_WEB; clientid=3; did={cls.get_did(log_type, crawler)}; kpn=KUAISHOU_VISION',
            'Content-Length': '1260',
            'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
            'Host': 'www.kuaishou.com',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6.1 Safari/605.1.15',
            'Referer': 'https://www.kuaishou.com/profile/{}'.format(out_uid),
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive'
        }
        response = requests.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(),
                                 verify=False, timeout=10)
        try:
            feeds = response.json()['data']['visionProfilePhotoList']['feeds']
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_videoList:{e},response:{response.text}")
            return
        if not feeds:
            Common.logger(log_type, crawler).info("No more videos ~\n")
            return
        # pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
        # Common.logger(log_type, crawler).info(f"feeds0: {feeds}\n")
        for i in range(len(feeds)):
            try:
                # video_title
                if 'caption' not in feeds[i]['photo']:
                    video_title = random_title(log_type, crawler, env, text='title')
                elif feeds[i]['photo']['caption'].strip() == "":
                    video_title = random_title(log_type, crawler, env, text='title')
                else:
                    video_title = cls.video_title(log_type, crawler, env, feeds[i]['photo']['caption'])

                if 'videoResource' not in feeds[i]['photo'] \
                        and 'manifest' not in feeds[i]['photo'] \
                        and 'manifestH265' not in feeds[i]['photo']:
                    Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
                    break
                videoResource = feeds[i]['photo']['videoResource']

                if 'h264' not in videoResource and 'hevc' not in videoResource:
                    Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
                    break

                # video_id
                if 'h264' in videoResource and 'videoId' in videoResource['h264']:
                    video_id = videoResource['h264']['videoId']
                elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
                    video_id = videoResource['hevc']['videoId']
                else:
                    video_id = ""

                # play_cnt
                if 'viewCount' not in feeds[i]['photo']:
                    play_cnt = 0
                else:
                    play_cnt = int(feeds[i]['photo']['viewCount'])

                # like_cnt
                if 'realLikeCount' not in feeds[i]['photo']:
                    like_cnt = 0
                else:
                    like_cnt = feeds[i]['photo']['realLikeCount']

                # publish_time
                if 'timestamp' not in feeds[i]['photo']:
                    publish_time_stamp = 0
                    publish_time_str = ''
                    publish_time = 0
                else:
                    publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                    # Days elapsed since publication.
                    publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))

                # duration (ms -> s)
                if 'duration' not in feeds[i]['photo']:
                    duration = 0
                else:
                    duration = int(int(feeds[i]['photo']['duration']) / 1000)

                # video_width / video_height: prefer the h264 representation,
                # fall back to hevc when the h264 track is missing the field.
                mapping = {}
                for item in ['width', 'height']:
                    try:
                        val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
                    except Exception:
                        val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
                    mapping[item] = val
                video_width = int(mapping['width']) if mapping['width'] else 0
                video_height = int(mapping['height']) if mapping['height'] else 0

                # cover_url
                if 'coverUrl' not in feeds[i]['photo']:
                    cover_url = ""
                else:
                    cover_url = feeds[i]['photo']['coverUrl']

                # user_name / avatar_url
                user_name = feeds[i]['author']['name']
                avatar_url = feeds[i]['author']['headerUrl']
                video_url = feeds[i]['photo']['photoUrl']
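
                # Assemble the normalized record consumed by the rule checks and
                # download_publish(); comment_cnt/share_cnt are not exposed by this
                # endpoint, so they default to 0.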
                video_dict = {'video_title': video_title,
                              'video_id': video_id,
                              'play_cnt': play_cnt,
                              'comment_cnt': 0,
                              'like_cnt': like_cnt,
                              'share_cnt': 0,
                              'video_width': video_width,
                              'video_height': video_height,
                              'duration': duration,
                              'publish_time': publish_time,
                              'publish_time_stamp': publish_time_stamp,
                              'publish_time_str': publish_time_str,
                              'user_name': user_name,
                              'user_id': out_uid,
                              'avatar_url': avatar_url,
                              'cover_url': cover_url,
                              'video_url': video_url,
                              'session': f"kuaishou{int(time.time())}"}
                rule_1 = cls.download_rule(video_dict, rule_dict_1)
                Common.logger(log_type, crawler).info(f"video_title:{video_title}")
                Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
                # Each rule value is a comparison string (e.g. ">=100");
                # concatenating the actual value with it and eval-ing the result
                # yields the boolean outcome of that single check.
                for key in ['play_cnt', 'like_cnt', 'video_width', 'video_height', 'duration', 'publish_time']:
                    Common.logger(log_type, crawler).info(
                        f"{key}:{video_dict[key]}{rule_dict_1[key]}, {eval(str(video_dict[key]) + str(rule_dict_1[key]))}")
                Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")

                rule_2 = cls.download_rule(video_dict, rule_dict_2)
                for key in ['play_cnt', 'like_cnt', 'video_width', 'video_height', 'duration', 'publish_time']:
                    Common.logger(log_type, crawler).info(
                        f"{key}:{video_dict[key]}{rule_dict_2[key]}, {eval(str(video_dict[key]) + str(rule_dict_2[key]))}")
                Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
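
                # Minimal sketch of the eval-based check above, assuming rule
                # values are comparison strings (hypothetical sample data):
                #   eval(str(1000) + ">=100")  # evaluates "1000>=100" -> True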
                if video_title == "" or video_url == "":
                    Common.logger(log_type, crawler).info("Invalid video\n")
                    continue
                elif rule_1 is True:
                    # download_cnt is a string such as "<=30"; strip the comparison
                    # operators to recover the numeric per-run limit.
                    if download_cnt_1 < int(rule_dict_1['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
                        cls.download_publish(log_type=log_type,
                                             crawler=crawler,
                                             strategy=strategy,
                                             video_dict=video_dict,
                                             rule_dict=rule_dict_1,
                                             our_uid=our_uid,
                                             oss_endpoint=oss_endpoint,
                                             env=env,
                                             machine=machine)
                        # if download_finished is True:
                        #     download_cnt_1 += 1
                elif rule_2 is True:
                    if download_cnt_2 < int(rule_dict_2['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
                        cls.download_publish(log_type=log_type,
                                             crawler=crawler,
                                             strategy=strategy,
                                             video_dict=video_dict,
                                             rule_dict=rule_dict_2,
                                             our_uid=our_uid,
                                             oss_endpoint=oss_endpoint,
                                             env=env,
                                             machine=machine)
                        # if download_finished is True:
                        #     download_cnt_2 += 1
                else:
                    Common.logger(log_type, crawler).info("Does not meet the download rules\n")
                # Common.logger(log_type, crawler).info(f"feeds: {feeds}\n")
            except Exception as e:
                Common.logger(log_type, crawler).warning(f"Exception while crawling a single video:{e}\n")
        # if pcursor == "no_more":
        #     Common.logger(log_type, crawler).info(f"Author {out_uid} has no more content\n")
        #     return
        # cls.get_videoList(log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine,
        #                   pcursor=pcursor)
        # time.sleep(random.randint(1, 3))

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, video_title, publish_time, env, machine):
        # A video counts as a duplicate if the same out_video_id was already seen,
        # or if the same title was published at the same time on this platform.
        sql = f""" select * from crawler_video where (platform="{cls.platform}" and out_video_id="{video_id}") or (platform="{cls.platform}" and video_title="{video_title}" and publish_time="{publish_time}") """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
        return len(repeat_video)
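
    # download_publish() pipeline: title filter-word check -> duplicate check ->
    # download video + cover -> upload via Publish -> insert into crawler_video ->
    # append a row to the Feishu sheet.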

    @classmethod
    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
        filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
        for filter_word in filter_words:
            if filter_word in video_dict['video_title']:
                Common.logger(log_type, crawler).info(f"Title contains a filter word:{video_dict['video_title']}\n")
                return
        download_finished = False
        if cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'],
                            video_dict['publish_time_str'], env, machine) != 0:
            Common.logger(log_type, crawler).info('Video already downloaded\n')
        else:
            # Download the video
            Common.download_method(log_type=log_type, crawler=crawler, text='video',
                                   title=video_dict['video_title'], url=video_dict['video_url'])
            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
            try:
                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                    # Remove the video folder
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    Common.logger(log_type, crawler).info("Video size is 0, folder removed\n")
                    return
            except FileNotFoundError:
                # Remove the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).info("Video file not found, folder removed\n")
                return
            # Download the cover
            Common.download_method(log_type=log_type, crawler=crawler, text='cover',
                                   title=video_dict['video_title'], url=video_dict['cover_url'])
            # Save the video info to a txt file
            Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
            # Upload the video
            Common.logger(log_type, crawler).info("Uploading video...")
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy=strategy,
                                                      our_uid=our_uid,
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)
            if env == 'dev':
                our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
            else:
                our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
            Common.logger(log_type, crawler).info("Video upload finished")
            if our_video_id is None:
                try:
                    Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id}, removing the video folder")
                    # Remove the video folder
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    return download_finished
                except FileNotFoundError:
                    return download_finished
            # Save the video info to the database
            insert_sql = f""" insert into crawler_video(video_id,
                            user_id,
                            out_user_id,
                            platform,
                            strategy,
                            out_video_id,
                            video_title,
                            cover_url,
                            video_url,
                            duration,
                            publish_time,
                            play_cnt,
                            crawler_rule,
                            width,
                            height)
                            values({our_video_id},
                            {our_uid},
                            "{video_dict['user_id']}",
                            "{cls.platform}",
                            "定向爬虫策略",
                            "{video_dict['video_id']}",
                            "{video_dict['video_title']}",
                            "{video_dict['cover_url']}",
                            "{video_dict['video_url']}",
                            {int(video_dict['duration'])},
                            "{video_dict['publish_time_str']}",
                            {int(video_dict['play_cnt'])},
                            '{json.dumps(rule_dict)}',
                            {int(video_dict['video_width'])},
                            {int(video_dict['video_height'])}) """
            Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
            MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
            Common.logger(log_type, crawler).info('Video info inserted into the database!\n')
            # Write the video to the Feishu sheet
            Feishu.insert_columns(log_type, 'kuaishou', "fYdA8F", "ROWS", 1, 2)
            upload_time = int(time.time())
            values = [[our_video_id,
                       time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                       "定向榜",
                       str(video_dict['video_id']),
                       video_dict['video_title'],
                       our_video_link,
                       video_dict['play_cnt'],
                       video_dict['comment_cnt'],
                       video_dict['like_cnt'],
                       video_dict['share_cnt'],
                       video_dict['duration'],
                       f"{video_dict['video_width']}*{video_dict['video_height']}",
                       video_dict['publish_time_str'],
                       video_dict['user_name'],
                       video_dict['user_id'],
                       video_dict['avatar_url'],
                       video_dict['cover_url'],
                       video_dict['video_url']]]
            time.sleep(1)
            Feishu.update_values(log_type, 'kuaishou', "fYdA8F", "E2:Z2", values)
            Common.logger(log_type, crawler).info("Video saved to the cloud sheet\n")
            download_finished = True
        return download_finished

    @classmethod
    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
        user_list = get_user_from_mysql(log_type, crawler, crawler, env)
        for user in user_list:
            try:
                spider_link = user["link"]
                # The author's out_uid is the last segment of the profile link.
                out_uid = spider_link.split('/')[-1]
                user_name = user["nick_name"]
                our_uid = user["uid"]
                Common.logger(log_type, crawler).info(f"Crawling profile videos of user {user_name}\n")
                cls.get_videoList(log_type=log_type,
                                  crawler=crawler,
                                  strategy=strategy,
                                  our_uid=our_uid,
                                  out_uid=out_uid,
                                  oss_endpoint=oss_endpoint,
                                  env=env,
                                  machine=machine)
            except Exception as e:
                Common.logger(log_type, crawler).warning(f"Exception while crawling user {user}:{e}\n")


if __name__ == "__main__":
    KuaishouauthorScheduling.get_cookie("author", "kuaishou", "dev")
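
    # Hypothetical full-pipeline invocation (argument values for oss_endpoint and
    # machine are illustrative, not taken from the source):
    # KuaishouauthorScheduling.get_follow_videos(log_type="author",
    #                                            crawler="kuaishou",
    #                                            strategy="定向爬虫策略",
    #                                            oss_endpoint="out",
    #                                            env="dev",
    #                                            machine="local")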