# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2022/3/29
"""
Download rule-compliant videos from the WeChat mini-program "Kuaishou short video".
"""
import json
import time
import requests
import urllib3
from main.common import Common
from main.publish import Publish


class KuaiShou:
    # Photo ids downloaded during the current run, used to count the total.
    download_video_list = []

    @classmethod
    def kuaishou_sensitive_words(cls):
        """Return the blacklist of sensitive words.

        A video whose raw caption contains any of these words is skipped
        and never written to kuaishou_feeds.txt.
        """
        sensitive_words = [
            "集结吧光合创作者",
            "电影解说",
            "快来露两手",
            "分享家常美食教程",
            "光合作者助手",
            "创作者中心",
            "创作者学院",
            "娱乐星熠计划",
            "解说电影",
            "电影剪辑",
            "放映室",
            "老剧",
            "影视剪辑",
            "精彩片段",
            "冬日影娱大作战",
            "春日追剧计划单",
            "影视解说",
            "中视频影视混剪计划",
            "众志成城共抗疫情",
            "我在追好剧",
            "娱乐星灿计划",
            "电影",
            "电视剧",
            "毛泽东",
            "毛主席",
            "周恩来",
            "林彪",
            "习近平",
            "习大大",
            "彭丽媛",
            "怀旧经典影视",
        ]
        return sensitive_words

    @staticmethod
    def kuaishou_download_rule(d_duration, d_width, d_height, d_play_cnt, d_like_cnt, d_share_cnt):
        """Basic rules deciding whether a video should be downloaded.

        All parameters arrive as strings read back from kuaishou_feeds.txt
        and are converted with int()/float() here.

        :param d_duration: duration in seconds (60 s <= duration <= 600 s)
        :param d_width: video width in pixels (width or height >= 720)
        :param d_height: video height in pixels
        :param d_play_cnt: play count (>= 50000)
        :param d_like_cnt: like count (>= 50000)
        :param d_share_cnt: share count (>= 2000)
        :return: True when every rule is satisfied; otherwise False
        """
        if 600 >= int(float(d_duration)) >= 60:
            if int(d_width) >= 720 or int(d_height) >= 720:
                if int(d_play_cnt) >= 50000:
                    if int(d_like_cnt) >= 50000:
                        if int(d_share_cnt) >= 2000:
                            return True
                        else:
                            return False
                    else:
                        return False
                else:
                    return False
            # Resolution too low (both width and height below 720).
            return False
        # Duration outside the 60-600 s window.
        return False

    @classmethod
    def kuaishou_get_recommend(cls):
        """Fetch the video list from the Kuaishou mini-program home-page feed.

        1. De-duplicate against kuaishou_videoid.txt (already downloaded).
        2. De-duplicate against kuaishou_feeds.txt (already queued).
        3. Append new video metadata to kuaishou_feeds.txt.
        """
        url = "https://wxmini-api.uyouqu.com/rest/wd/wechatApp/feed/recommend"
        # NOTE(review): the signature params and every cookie below are
        # hard-coded credentials, presumably captured from one live WeChat
        # session — they will expire and should be externalized; confirm.
        params = {
            "__NS_sig3": "e6f6b281ea31e3d7d1bbb8b91f662576fc25f7c3a7a7a5a5aaaba8b2",
            "__NS_sig3_origin": "3sCt3iAAAAAAAAAAAAAAAwEQBv2b8ewCwkZKaiAAAAAPg0soi"
                                "e7GiOlU vF4zPrG1Nl6xvaoBgFd3MwTzOed9w=="
        }
        cookies = {
            "did": "wxo_05f915ac6b1deca87db36cea1a0fd18fae6c",
            "preMinaVersion": "v3.109.0",
            "sid": "kuaishou.wechat.app",
            "appId": "ks_wechat_small_app_2",
            "clientid": "13",
            "client_key": "f60ac815",
            "kpn": "WECHAT_SMALL_APP",
            "kpf": "OUTSIDE_ANDROID_H5",
            "language": "zh_CN",
            "smallAppVersion": "v3.109.0",
            "session_key": "123005bcc551a92aac29cdb96190251c9f492c29d4ba6c502dc"
                           "0d2f8b8d18df356a2f7a22d6924d1dd34b8554a64af49b1bb1a"
                           "1236cd2f69c25d4ac2a2531ebcd28c179da14b222023f9e111c"
                           "c4d3b064ac7b0915d8c9fdaccb59e4048e96a5c38a32b2ce9f4abf628053001",
            "unionid": "V2:1230b56c8337908c3eecba63142a58daca05535c1f14bf67d3d8"
                       "85cace91a7db335c5572d204762d075f24aa84412e2955711a12bb9"
                       "2bd9c2290489ba7a733708a4a446de83822205ab727650489dda0db"
                       "9d2a226c5ddb66d88a1f1373283a3d3b959611d816660028053001",
            "eUserStableOpenId": "12303325e8710eb802137c70fd1fb65997a4e5e33d82"
                                 "cddd409d335d096e20873e07ee472090133bc7a67e5c"
                                 "749da045d9a31a12da4c4c26181d432b873ec39432f4"
                                 "10196c6c2220323d0e6b562d1b3786aefb352b4e509c"
                                 "d96f3466b7b2e5e74b904a94c40792d928053001",
            "openId": "o5otV45DcV1EUsWw4fAUk_iq0YSA",
            "eOpenUserId": "124074b7726c996283f25044a42e2c7427e929cd6d968c5342"
                           "330e61fc8939e57b0da4ffe21887f3abc8784175f73e1a267d"
                           "671247273806f293f64c9c8c2adc00a21a12bb92bd9c229048"
                           "9ba7a733708a4a446de8382220534aa79c69b74866bb09187e"
                           "eceec880fa1e0fa421b7df8b3289dab603b17c4828053001",
            "kuaishou.wechat.app_st": "ChZrdWFpc2hvdS53ZWNoYXQuYXBwLnN0ErAB8aO"
                                      "EcB6jh4CMSJ-p_4BJFCId0PKNa_5IeFfeV_tj7q"
                                      "CjdXK0y13CSte6-KHbNK9BPo6Rjy3OGny0sh4Zb"
                                      "5AUl3Q_zqVXe2TunW7_F3nlTdJOdZ6iVIhPrHa1"
                                      "CM0Y-cG9gS4FDDzTvejfWaTI0CbjfNN0RZXzYVE"
                                      "AUVT_BNgUVDtYBbEY792gPylMfXxwxKMSzkhaDe"
                                      "eaHkGCWUj62FGCFYQ9Fw2W3d7suCXFsNylqT4aE"
                                      "s8oNwmycUiygfvfKuoXlHkbeSIgOhEFMZ3ArImS"
                                      "vFY_OwLJDHak1iXRO8g5TwzHTvBT3WcoBTAB",
            "passToken": "ChNwYXNzcG9ydC5wYXNzLXRva2VuEpABI42IhPCJHfFngXC3i-vF"
                         "3daRTB-EtnAYyE6HpfWcPoZ6VSRDvKrom_RvltQ2zKk1T3_FJteb"
                         "mv7ZzQLD7IicnTypaGoeflb7KQVrAv50Mp_JL4ObfBu_xTiwI53t"
                         "bTlM6iML0G7DFd16K5z0jZZ1xECKVQQbk_vIqnseUujFIWAsKcDz"
                         "BqqfnQNbUU5DzDUkGhKgKyzmNjRDxLfpDU5SPFhJmG0iIGBZ_Vd-"
                         "7eT8i_Xit9ZPM-zdFpnRZFveFE9iplMg8Z06KAUwAQ",
            "userId": "2845397958"
        }
        json_data = {
            "thirdPartyUserId": 2845397958,
            "photoId": "5250352807040393911",
            "forwardUserId": 2845397958,
            "count": 10,
            "portal": 2,
            "pageType": 2,
            "needLivestream": "true",
            "extraRequestInfo": "{\"scene\":1074,\"fid\":\"2845397958\","
                                "\"sharerUserId\":\"2845397958\",\"curPhotoIndex\":0,"
                                "\"adShow\":true,\"weChatAd\":{},\"page\":0}",
            "pcursor": 0,
            "sourceFrom": 2,
        }
        try:
            # verify=False plus disabled warnings: TLS verification is
            # deliberately skipped for this endpoint.
            urllib3.disable_warnings()
            r = requests.post(url=url, params=params, cookies=cookies, json=json_data, verify=False)
            response = json.loads(r.content.decode("utf8"))
            if "feeds" not in response:
                Common.crawler_log().info("获取快手视频 list 出错:{},休眠 10s".format(response))
                time.sleep(10)
            else:
                feeds = response["feeds"]
                for i in range(len(feeds)):
                    # Each metadata field falls back to the string "0" when
                    # the key is missing; the "0" sentinel is checked again
                    # before the feed line is persisted below.
                    if "photoId" not in feeds[i]:
                        photo_id = "0"
                        Common.crawler_log().info("photo_id:{}".format(photo_id))
                    else:
                        photo_id = feeds[i]["photoId"]
                        Common.crawler_log().info("photo_id:{}".format(photo_id))
                    if "viewCount" not in feeds[i]:
                        video_play_cnt = "0"
                        Common.crawler_log().info("video_play_cnt:0")
                    else:
                        video_play_cnt = feeds[i]["viewCount"]
                        Common.crawler_log().info("video_play_cnt:{}".format(video_play_cnt))
                    if "likeCount" not in feeds[i]:
                        video_like_cnt = "0"
                        Common.crawler_log().info("video_like_cnt:0")
                    else:
                        video_like_cnt = feeds[i]["likeCount"]
                        Common.crawler_log().info("video_like_cnt:{}".format(video_like_cnt))
                    if "headUrl" not in feeds[i]:
                        head_url = "0"
                        Common.crawler_log().info("head_url:不存在")
                    else:
                        head_url = feeds[i]["headUrl"]
                        Common.crawler_log().info("head_url:{}".format(head_url))
                    # NOTE(review): "coverUrls"/"mainMvUrls" are indexed
                    # without a presence check — a feed item missing either
                    # key raises KeyError and aborts this batch via the
                    # outer except; confirm the API always sends them.
                    if len(feeds[i]["coverUrls"]) == 0:
                        cover_url = "0"
                        Common.crawler_log().info("cover_url:不存在")
                    else:
                        cover_url = feeds[i]["coverUrls"][0]["url"]
                        Common.crawler_log().info("cover_url:{}".format(cover_url))
                    if len(feeds[i]["mainMvUrls"]) == 0:
                        video_url = "0"
                        Common.crawler_log().info("video_url:不存在")
                    else:
                        video_url = feeds[i]["mainMvUrls"][0]["url"]
                        Common.crawler_log().info("video_url:{}".format(video_url))
                    if "shareCount" not in feeds[i]:
                        video_share_cnt = "0"
                        Common.crawler_log().info("video_share_cnt:0")
                    else:
                        video_share_cnt = feeds[i]["shareCount"]
                        Common.crawler_log().info("video_share_cnt:{}".format(video_share_cnt))
                    if "width" not in feeds[i] or "height" not in feeds[i]:
                        video_width = "0"
                        video_height = "0"
                        video_resolution = str(video_width) + "*" + str(video_height)
                        Common.crawler_log().info("无分辨率")
                    else:
                        video_width = feeds[i]["width"]
                        video_height = feeds[i]["height"]
                        video_resolution = str(video_width) + "*" + str(video_height)
                        Common.crawler_log().info("video_resolution:{}".format(video_resolution))
                    if "commentCount" not in feeds[i]:
                        video_comment_cnt = "0"
                        Common.crawler_log().info("video_comment_cnt:0")
                    else:
                        video_comment_cnt = feeds[i]["commentCount"]
                        Common.crawler_log().info("video_comment_cnt:{}".format(video_comment_cnt))
                    if "duration" not in feeds[i]:
                        video_duration = "0"
                        Common.crawler_log().info("video_duration:不存在")
                    else:
                        # API duration is in milliseconds; store whole seconds.
                        video_duration = int(int(feeds[i]["duration"])/1000)
                        Common.crawler_log().info("video_duration:{}秒".format(video_duration))
                    if "timestamp" not in feeds[i]:
                        video_send_time = "0"
                        Common.crawler_log().info("video_send_time:不存在")
                    else:
                        # Millisecond epoch timestamp; formatted only for the log.
                        video_send_time = feeds[i]["timestamp"]
                        Common.crawler_log().info("video_send_time:{}".format(
                            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(video_send_time)/1000))))
                    # Sanitize user name: drop newlines, slashes, the word
                    # "快手", spaces and "&NBSP" artifacts.
                    user_name = feeds[i]["userName"].strip().replace("\n", "")\
                        .replace("/", "").replace("快手", "").replace(" ", "")\
                        .replace(" ", "").replace("&NBSP", "").replace("\r", "")
                    Common.crawler_log().info("user_name:{}".format(user_name))
                    user_id = feeds[i]["userId"]
                    Common.crawler_log().info("user_id:{}".format(user_id))
                    # Strip hashtag topics / @mentions and special characters
                    # from the caption to build the title. Each split keeps
                    # the part before the marker, falling back to the last
                    # part when the caption starts with the marker.
                    kuaishou_title = feeds[i]["caption"]
                    title_split1 = kuaishou_title.split(" #")
                    if title_split1[0] != "":
                        title1 = title_split1[0]
                    else:
                        title1 = title_split1[-1]
                    title_split2 = title1.split(" #")
                    if title_split2[0] != "":
                        title2 = title_split2[0]
                    else:
                        title2 = title_split2[-1]
                    title_split3 = title2.split("@")
                    if title_split3[0] != "":
                        title3 = title_split3[0]
                    else:
                        title3 = title_split3[-1]
                    video_title = title3.strip().replace("\n", "")\
                        .replace("/", "").replace("快手", "").replace(" ", "")\
                        .replace(" ", "").replace("&NBSP", "").replace("\r", "")\
                        .replace("#", "").replace(".", "。")
                    Common.crawler_log().info("video_title:{}".format(video_title))
                    # De-duplicate against kuaishou_videoid.txt.
                    # NOTE(review): re-read from disk on every feed item —
                    # could be read once before the loop.
                    photo_ids = Common.read_txt("kuaishou_videoid.txt")
                    if photo_id in [p_id.strip() for p_id in photo_ids]:
                        Common.crawler_log().info("该视频已下载:{}".format(video_title))
                        pass
                    else:
                        Common.crawler_log().info("该视频未下载:{}".format(video_title))
                        # De-duplicate against kuaishou_feeds.txt.
                        contents = Common.read_txt("kuaishou_feeds.txt")
                        # When kuaishou_feeds.txt is empty, save directly
                        # (provided no metadata field is the "0" sentinel).
                        if len(contents) == 0 and head_url != "0" \
                                and cover_url != "0" and video_url != "0" \
                                and video_duration != "0" and photo_id != "0":
                            # Sensitive-word check.
                            # NOTE(review): "word if word in title else False"
                            # is an unusual spelling of "word in kuaishou_title";
                            # behaviour is the same because non-empty strings
                            # are truthy.
                            if any(word if word in kuaishou_title else False
                                   for word in cls.kuaishou_sensitive_words()) is True:
                                Common.crawler_log().info("视频已中敏感词:{}".format(kuaishou_title))
                            else:
                                basic_time = int(time.time())
                                Common.crawler_log().info("添加视频信息至kuaishou_feeds.txt:{}".format(video_title))
                                # Feed line format: 16 " + "-separated fields;
                                # the reader in kuaishou_download_play_video
                                # depends on this exact order.
                                with open("./txt/kuaishou_feeds.txt", "a", encoding="utf8") as f_a:
                                    f_a.write(str(basic_time) + " + " +
                                              str(photo_id) + " + " +
                                              str(video_play_cnt) + " + " +
                                              str(video_title) + " + " +
                                              str(video_duration) + " + " +
                                              str(video_comment_cnt) + " + " +
                                              str(video_like_cnt) + " + " +
                                              str(video_share_cnt) + " + " +
                                              str(video_resolution) + " + " +
                                              str(video_send_time) + " + " +
                                              str(user_name) + " + " +
                                              str(head_url) + " + " +
                                              str(cover_url) + " + " +
                                              str(video_url) + " + " +
                                              str(user_id) + " + " +
                                              str("wxo_b07ba02ad4340205d89b47c76030bb090977") + "\n")
                        else:
                            if photo_id in [content.split(" + ")[1] for content in contents]:
                                Common.crawler_log().info("该视频已在 kuaishou_feeds.txt 中:{}".format(video_title))
                            elif head_url == "0" or cover_url == "0" \
                                    or video_url == "0" or video_duration == "0" or photo_id == "0":
                                Common.crawler_log().info("视频封面/播放地址/播放时长/用户头像不存在")
                            else:
                                # Sensitive-word check (same logic as the
                                # empty-file branch above).
                                if any(word if word in kuaishou_title else False
                                       for word in cls.kuaishou_sensitive_words()) is True:
                                    Common.crawler_log().info("视频已中敏感词:{}".format(kuaishou_title))
                                else:
                                    basic_time = int(time.time())
                                    Common.crawler_log().info("添加视频信息至kuaishou_feeds.txt:{}".format(video_title))
                                    with open("./txt/kuaishou_feeds.txt", "a", encoding="utf8") as f_a:
                                        f_a.write(str(basic_time) + " + " +
                                                  str(photo_id) + " + " +
                                                  str(video_play_cnt) + " + " +
                                                  str(video_title) + " + " +
                                                  str(video_duration) + " + " +
                                                  str(video_comment_cnt) + " + " +
                                                  str(video_like_cnt) + " + " +
                                                  str(video_share_cnt) + " + " +
                                                  str(video_resolution) + " + " +
                                                  str(video_send_time) + " + " +
                                                  str(user_name) + " + " +
                                                  str(head_url) + " + " +
                                                  str(cover_url) + " + " +
                                                  str(video_url) + " + " +
                                                  str(user_id) + " + " +
                                                  str("wxo_b07ba02ad4340205d89b47c76030bb090977") + "\n")
        except Exception as e:
            Common.crawler_log().error("获取视频 list 异常:{}".format(e))

    @classmethod
    def kuaishou_download_play_video(cls, env):
        """Download queued videos that satisfy the download rule.

        Test environment:       env == "dev"
        Production environment: env == "prod"
        """
        videos = Common.read_txt("kuaishou_feeds.txt")
        for video in videos:
            # Parse one " + "-separated feed line written by
            # kuaishou_get_recommend; the indices mirror its write order.
            download_photo_id = video.strip().split(" + ")[1]
            download_video_title = video.strip().split(" + ")[3]
            download_video_duration = video.strip().split(" + ")[4]
            download_video_play_cnt = video.strip().split(" + ")[2]
            download_video_comment_cnt = video.strip().split(" + ")[5]
            download_video_like_cnt = video.strip().split(" + ")[6]
            download_video_share_cnt = video.strip().split(" + ")[7]
            download_video_resolution = video.strip().split(" + ")[8]
            download_video_width = download_video_resolution.split("*")[0]
            download_video_height = download_video_resolution.split("*")[-1]
            download_video_send_time = video.strip().split(" + ")[9]
            download_user_name = video.strip().split(" + ")[10]
            download_head_url = video.strip().split(" + ")[11]
            download_cover_url = video.strip().split(" + ")[12]
            download_video_url = video.strip().split(" + ")[13]
            download_video_session = video.strip().split(" + ")[-1]
            if cls.kuaishou_download_rule(download_video_duration, download_video_width,
                                          download_video_height, download_video_play_cnt,
                                          download_video_like_cnt, download_video_share_cnt) is True:
                Common.crawler_log().info("开始下载快手视频:{}".format(download_video_title))
                # Download the cover image.
                Common.download_method(text="cover", d_name=download_video_title, d_url=download_cover_url)
                # Download the video file.
                Common.download_method(text="video", d_name=download_video_title, d_url=download_video_url)
                # Record the photo id in kuaishou_videoid.txt (download dedup).
                with open("./txt/kuaishou_videoid.txt", "a", encoding="utf8") as fa:
                    fa.write(download_photo_id + "\n")
                # Track the id in memory to count this run's downloads.
                cls.download_video_list.append(download_photo_id)
                # # Also record the id in {today}_kuaishou_videoid.txt
                # with open("./txt/" + str(Common.today) + "_kuaishou_videoid.txt", "a", encoding="utf8") as fc:
                #     fc.write(download_photo_id + "\n")
                # Save the video metadata to "./videos/{title}/info.txt".
                # NOTE(review): field order here is head_url, video_url,
                # cover_url — video/cover are swapped relative to the feeds
                # line; confirm downstream consumers expect this order.
                with open("./videos/" + download_video_title + "/info.txt", "a", encoding="utf8") as f_a:
                    f_a.write(str(download_photo_id) + "\n" +
                              str(download_video_title) + "\n" +
                              str(download_video_duration) + "\n" +
                              str(download_video_play_cnt) + "\n" +
                              str(download_video_comment_cnt) + "\n" +
                              str(download_video_like_cnt) + "\n" +
                              str(download_video_share_cnt) + "\n" +
                              str(download_video_resolution) + "\n" +
                              str(download_video_send_time) + "\n" +
                              str(download_user_name) + "\n" +
                              str(download_head_url) + "\n" +
                              str(download_video_url) + "\n" +
                              str(download_cover_url) + "\n" +
                              str(download_video_session))
                # Upload the video.
                if env == "dev":
                    Common.crawler_log().info("开始上传视频:{}".format(download_video_title))
                    Publish.upload_and_publish("dev", "play")
                elif env == "prod":
                    Common.crawler_log().info("开始上传视频:{}".format(download_video_title))
                    Publish.upload_and_publish("prod", "play")
                # Remove this video's line from kuaishou_feeds.txt.
                # NOTE(review): "in" is a substring match, not equality —
                # an id that is a prefix/substring of another id would also
                # drop that other line; verify photo ids are fixed-length.
                Common.crawler_log().info("删除该视频在kuaishou_feeds.txt中的信息:{}".format(download_video_title))
                with open("./txt/kuaishou_feeds.txt", "r", encoding="utf8") as f_r:
                    lines = f_r.readlines()
                with open("./txt/kuaishou_feeds.txt", "w", encoding="utf-8") as f_w:
                    for line in lines:
                        if download_photo_id in line.split(" + ")[1]:
                            continue
                        f_w.write(line)
            else:
                # Rule not satisfied: drop the line from kuaishou_feeds.txt
                # as well (same rewrite, and same substring-match caveat).
                Common.crawler_log().info("该视频不满足下载规则,删除在kuaishou_feeds.txt中的信息:{}".format(download_video_title))
                with open("./txt/kuaishou_feeds.txt", "r", encoding="utf8") as f_r:
                    lines = f_r.readlines()
                with open("./txt/kuaishou_feeds.txt", "w", encoding="utf-8") as f_w:
                    for line in lines:
                        if download_photo_id in line.split(" + ")[1]:
                            continue
                        f_w.write(line)


if __name__ == "__main__":
    kuaishou = KuaiShou()
    kuaishou.kuaishou_get_recommend()