wangkun 2 years ago
Commit
4598cafa1a

+ 1 - 1
README.MD

@@ -22,7 +22,7 @@ ${nohup_dir}:       nohup log storage path, e.g.: ./youtube/nohup.log
 Aliyun 102 server
 sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" xigua/nohup.log
 Local machine
-sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="out" --env="dev" --machine="local" xigua/nohup.log
+sh ./main/main.sh ./xigua/xigua_main/run_xigua_follow.py --log_type="follow" --crawler="xigua" --strategy="定向爬虫策略" --oss_endpoint="out" --env="prod" --machine="local" xigua/nohup.log
 Kill-process command:
 ps aux | grep run_xigua | grep -v grep | awk '{print $2}' | xargs kill -9
 

+ 3 - 0
kuaishou/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/23

+ 3 - 0
kuaishou/kuaishou_follow/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/23

+ 702 - 0
kuaishou/kuaishou_follow/kuaishou_follow.py

@@ -0,0 +1,702 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/24
+import os
+import random
+import sys
+import time
+import requests
+import urllib3
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.publish import Publish
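+# Explicitly disable system HTTP/HTTPS proxies for the requests calls below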
+proxies = {"http": None, "https": None}
+
+
+class Follow:
+    # Lists of fetched video IDs (len() serves as the per-user / overall counter)
+    get_person_video_count = []
+    get_all_video_count = []
+    # Mini-program: pagination cursor for the follow list
+    follow_pcursor = ""
+    # Mini-program: pagination cursor for a user's profile video list
+    person_pcursor = ""
+    # Video publish time
+    send_time = 0
+    # WeChat mini-program credentials, read from the Feishu config sheet "WFF4jw"
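+    # Each value sits in column D (index 3) of rows 3-14; if the sheet layout
+    # changes, these hard-coded offsets must be updated as well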
+    wechat_sheet = Feishu.get_values_batch("follow", "kuaishou", "WFF4jw")
+    Referer = wechat_sheet[2][3]
+    NS_sig3 = wechat_sheet[3][3]
+    NS_sig3_origin = wechat_sheet[4][3]
+    did = wechat_sheet[5][3]
+    session_key = wechat_sheet[6][3]
+    unionid = wechat_sheet[7][3]
+    eUserStableOpenId = wechat_sheet[8][3]
+    openId = wechat_sheet[9][3]
+    eOpenUserId = wechat_sheet[10][3]
+    kuaishou_wechat_app_st = wechat_sheet[11][3]
+    passToken = wechat_sheet[12][3]
+    userId = wechat_sheet[13][3]
+
+    # Sensitive-word filter
+    @classmethod
+    def sensitive_words(cls):
+        # Sensitive-word list
+        word_list = []
+        # Read every sensitive word from the cloud doc into the list
+        lists = Feishu.get_values_batch("follow", "kuaishou", "HIKVvs")
+        for row in lists:
+            for cell in row:
+                # Skip empty cells
+                if cell is not None:
+                    word_list.append(cell)
+        return word_list
+
+    # Download rule
+    @staticmethod
+    def download_rule(d_duration, d_width, d_height, d_play_cnt, d_like_cnt, d_share_cnt):
+        """
+        下载视频的基本规则
+        :param d_duration: 时长
+        :param d_width: 宽
+        :param d_height: 高
+        :param d_play_cnt: 播放量
+        :param d_like_cnt: 点赞量
+        :param d_share_cnt: 分享量
+        :return: 满足规则,返回 True;反之,返回 False
+        """
+        return (int(float(d_duration)) >= 40
+                and (int(d_width) >= 0 or int(d_height) >= 0)
+                and int(d_play_cnt) >= 5000
+                and (int(d_like_cnt) >= 5000 or int(d_share_cnt) >= 1000))
+
+    # Delete the follow list from the Feishu sheet
+    @classmethod
+    def del_follow_user_from_feishu(cls, log_type):
+        try:
+            while True:
+                follow_sheet = Feishu.get_values_batch(log_type, "kuaishou", "2OLxLr")
+                if len(follow_sheet) == 1:
+                    Common.logger(log_type).info('删除完成\n')
+                    return
+                else:
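+                    # Rows shift up after each delete, so always remove row 2, then re-read the sheet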
+                    Feishu.dimension_range(log_type, "kuaishou", "2OLxLr", 'ROWS', 2, 2)
+                    time.sleep(0.5)
+        except Exception as e:
+            Common.logger(log_type).error('del_follow_user_from_feishu异常:{}', e)
+
+    # Sync the followed-user list from the mini-program to the cloud doc
+    @classmethod
+    def get_follow_users_to_feishu(cls, log_type):
+        try:
+            follow_list = []
+            follow_sheet = Feishu.get_values_batch(log_type, "kuaishou", "2OLxLr")
+            url = "https://wxmini-api.uyouqu.com/rest/wd/wechatApp/relation/fol?"
+            headers = {
+                "content-type": "application/json",
+                "Accept-Encoding": "gzip,compress,br,deflate",
+                "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
+                              ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148'
+                              ' MicroMessenger/8.0.20(0x18001442) NetType/WIFI Language/zh_CN',
+                "Referer": str(cls.Referer),
+            }
+            params = {
+                "__NS_sig3": str(cls.NS_sig3),
+                "__NS_sig3_origin": str(cls.NS_sig3_origin)
+            }
+            cookies = {
+                "did": str(cls.did),
+                "preMinaVersion": "v3.109.0",
+                "sid": "kuaishou.wechat.app",
+                "appId": "ks_wechat_small_app_2",
+                "clientid": "13",
+                "client_key": "f60ac815",
+                "kpn": "WECHAT_SMALL_APP",
+                "kpf": "OUTSIDE_ANDROID_H5",
+                "language": "zh_CN",
+                "smallAppVersion": "v3.114.0",
+                "session_key": str(cls.session_key),
+                "unionid": str(cls.unionid),
+                "eUserStableOpenId": str(cls.eUserStableOpenId),
+                "openId": str(cls.openId),
+                "eOpenUserId": str(cls.eOpenUserId),
+                "kuaishou.wechat.app_st": str(cls.kuaishou_wechat_app_st),
+                "passToken": str(cls.passToken),
+                "userId": str(cls.userId)
+            }
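+            # Request body: 20 users per page; an empty pcursor fetches the first page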
+            json_text = {
+                "count": 20,
+                "pcursor": str(cls.follow_pcursor),
+                "ftype": 1
+            }
+            urllib3.disable_warnings()
+            r = requests.post(url=url, headers=headers, params=params,
+                              cookies=cookies, json=json_text, proxies=proxies, verify=False)
+            if "fols" not in r.json():
+                Common.logger(log_type).warning("从小程序中获取关注用户列表:{}", r.text)
+            else:
+                users = r.json()["fols"]
+                for i in range(len(users)):
+                    uid = users[i]["targetId"]
+                    nick = users[i]["targetName"]
+                    sex = users[i]["targetSex"]
+                    description = users[i]["targetUserText"]
+                    if "followReason" in users[i]:
+                        follow_reason = users[i]["followReason"]
+                    else:
+                        follow_reason = ""
+                    follow_time = users[i]["time"]
+                    is_friend = users[i]["isFriend"]
+                    # print(f"uid:{uid}")
+                    follow_list.append(uid)
+                    # print(f"follow_list:{follow_list}")
+                    # Sync followed users to the cloud doc
+                    if uid not in [cell for row in follow_sheet for cell in row]:
+                        time.sleep(1)
+                        Feishu.insert_columns(log_type, "kuaishou", "2OLxLr", "ROWS", 1, 2)
+                        time.sleep(1)
+                        values = [[uid, nick, sex, description, follow_reason, follow_time, str(is_friend)]]
+                        Feishu.update_values(log_type, "kuaishou", "2OLxLr", "A2:L2", values)
+                    else:
+                        Common.logger(log_type).info("用户:{},在云文档中已存在", nick)
+            cls.follow_pcursor = r.json().get("pcursor", "no_more")
+            # Keep paging until the cursor reports the end
+            if cls.follow_pcursor != "no_more":
+                cls.get_follow_users_to_feishu(log_type)
+            else:
+                Common.logger(log_type).info("从小程序中同步关注用户至云文档完成\n")
+        except Exception as e:
+            Common.logger(log_type).error("从小程序中,关注用户列表同步至云文档异常:{}\n", e)
+
+    # Get the followed-user dict from the cloud doc
+    @classmethod
+    def get_follow_users(cls, log_type):
+        try:
+            follow_sheet = Feishu.get_values_batch(log_type, "kuaishou", "2OLxLr")
+            if len(follow_sheet) == 1:
+                Common.logger(log_type).info("暂无关注用户")
+                return {}
+            else:
+                follow_dict = {}
+                for i in range(1, len(follow_sheet)):
+                    uid = follow_sheet[i][0]
+                    nick = follow_sheet[i][1]
+                    if uid is None or nick is None:
+                        continue
+                    follow_dict[nick] = uid
+                return follow_dict
+        except Exception as e:
+            Common.logger(log_type).error("从云文档获取关注用户列表异常:{}\n", e)
+
+    # Get the unfollowed-user list from the cloud doc
+    @classmethod
+    def get_unfollow_users(cls, log_type):
+        try:
+            unfollow_sheet = Feishu.get_values_batch(log_type, "kuaishou", "WRveYg")
+            if len(unfollow_sheet) == 1:
+                Common.logger(log_type).info("暂无取消关注用户")
+            else:
+                unfollow_list = []
+                nick_list = []
+                for i in range(1, len(unfollow_sheet)):
+                    uid = unfollow_sheet[i][0]
+                    nick = unfollow_sheet[i][1]
+                    nick_list.append(nick)
+                    unfollow_list.append(uid)
+                Common.logger(log_type).info("取消关注用户列表:{}", nick_list)
+                return unfollow_list
+        except Exception as e:
+            Common.logger(log_type).error("从云文档获取取消关注用户列表异常:{}", e)
+
+    # Mini-program: follow / unfollow a user
+    @classmethod
+    def follow_unfollow(cls, log_type, is_follow, uid):
+        try:
+            url = "https://wxmini-api.uyouqu.com/rest/wd/wechatApp/relation/follow?"
+            headers = {
+                "content-type": "application/json",
+                "Accept-Encoding": "gzip,compress,br,deflate",
+                "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
+                              ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148'
+                              ' MicroMessenger/8.0.20(0x18001442) NetType/WIFI Language/zh_CN',
+                "Referer": str(cls.Referer),
+            }
+            params = {
+                "__NS_sig3": str(cls.NS_sig3),
+                "__NS_sig3_origin": str(cls.NS_sig3_origin)
+            }
+            cookies = {
+                "did": str(cls.did),
+                "preMinaVersion": "v3.109.0",
+                "sid": "kuaishou.wechat.app",
+                "appId": "ks_wechat_small_app_2",
+                "clientid": "13",
+                "client_key": "f60ac815",
+                "kpn": "WECHAT_SMALL_APP",
+                "kpf": "OUTSIDE_ANDROID_H5",
+                "language": "zh_CN",
+                "smallAppVersion": "v3.114.0",
+                "session_key": str(cls.session_key),
+                "unionid": str(cls.unionid),
+                "eUserStableOpenId": str(cls.eUserStableOpenId),
+                "openId": str(cls.openId),
+                "eOpenUserId": str(cls.eOpenUserId),
+                "kuaishou.wechat.app_st": str(cls.kuaishou_wechat_app_st),
+                "passToken": str(cls.passToken),
+                "userId": str(cls.userId)
+            }
+
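+            # ftype: 1 = follow, 2 = unfollow (anything else falls back to follow)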
+            if is_follow == "follow":
+                ftype = 1
+            elif is_follow == "unfollow":
+                ftype = 2
+            else:
+                ftype = 1
+
+            json_text = {
+                "touid": uid,
+                "ftype": ftype,
+                "page_ref": 84
+            }
+            r = requests.post(url=url, headers=headers, cookies=cookies, params=params, json=json_text)
+            if is_follow == "follow":
+                if r.json()["result"] != 1:
+                    Common.logger(log_type).warning("{}", r.text)
+                else:
+                    Common.logger(log_type).info("关注:{}, {}", uid, r)
+            else:
+                if r.json()["result"] != 1:
+                    Common.logger(log_type).warning("{}", r.text)
+                else:
+                    Common.logger(log_type).info("取消关注:{}, {}", uid, r)
+        except Exception as e:
+            Common.logger(log_type).error("关注/取消关注异常:{}", e)
+
+    # Fetch the videos on a user's profile page
+    @classmethod
+    def get_user_videos(cls, log_type, uid):
+        try:
+            time.sleep(1)
+            url = "https://wxmini-api.uyouqu.com/rest/wd/wechatApp/feed/profile?"
+            headers = {
+                "content-type": "application/json",
+                "Accept-Encoding": "gzip,compress,br,deflate",
+                "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) '
+                              'AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
+                              'MicroMessenger/8.0.26(0x18001a34) NetType/WIFI Language/zh_CN',
+                "Referer": str(cls.Referer),
+            }
+            params = {
+                "__NS_sig3": str(cls.NS_sig3),
+                "__NS_sig3_origin": str(cls.NS_sig3_origin)
+            }
+            cookies = {
+                "did": str(cls.did),
+                "sid": "kuaishou.wechat.app",
+                "appId": "ks_wechat_small_app_2",
+                "clientid": "13",
+                "client_key": "f60ac815",
+                "kpn": "WECHAT_SMALL_APP",
+                "kpf": "OUTSIDE_IOS_H5",
+                "language": "zh_CN",
+                "smallAppVersion": "v3.131.0",
+                "mod": "iPhone(11<iPhone12%2C1>)",
+                "sys": "iOS%2014.7.1",
+                'wechatVersion': '8.0.26',
+                "brand": "iPhone",
+                "session_key": str(cls.session_key),
+                "unionid": str(cls.unionid),
+                "eUserStableOpenId": str(cls.eUserStableOpenId),
+                "openId": str(cls.openId),
+                "eOpenUserId": str(cls.eOpenUserId),
+                "kuaishou.wechat.app_st": str(cls.kuaishou_wechat_app_st),
+                "passToken": str(cls.passToken),
+                "userId": str(cls.userId)
+            }
+            json_text = {
+                "count": 12,
+                "pcursor": str(cls.person_pcursor),
+                "eid": str(uid)
+            }
+            urllib3.disable_warnings()
+            r = requests.post(url=url, headers=headers, params=params, cookies=cookies,
+                              json=json_text, proxies=proxies, verify=False)
+            # Common.logger(log_type).info("response:{}\n\n", r.text)
+            if "feeds" not in r.json():
+                # Feishu.bot(log_type, "follow:get_videos_from_person:"+r.text)
+                Common.logger(log_type).warning("response:{}", r.text)
+            elif r.json()["feeds"] == 0:
+                Common.logger(log_type).warning("用户主页无视频\n")
+                return
+            else:
+                feeds = r.json()["feeds"]
+                for i in range(len(feeds)):
+                    # Strip hashtags, @-mentions and special characters from the title
+                    kuaishou_title = feeds[i]["caption"]
+                    title_split1 = kuaishou_title.split(" #")
+                    if title_split1[0] != "":
+                        title1 = title_split1[0]
+                    else:
+                        title1 = title_split1[-1]
+
+                    title_split2 = title1.split(" #")
+                    if title_split2[0] != "":
+                        title2 = title_split2[0]
+                    else:
+                        title2 = title_split2[-1]
+
+                    title_split3 = title2.split("@")
+                    if title_split3[0] != "":
+                        title3 = title_split3[0]
+                    else:
+                        title3 = title_split3[-1]
+
+                    video_title = title3.strip().replace("\n", "") \
+                        .replace("/", "").replace("快手", "").replace(" ", "") \
+                        .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
+                        .replace("#", "").replace(".", "。").replace("\\", "") \
+                        .replace(":", "").replace("*", "").replace("?", "") \
+                        .replace("?", "").replace('"', "").replace("<", "") \
+                        .replace(">", "").replace("|", "").replace("@", "")[:40]
+
+                    if "photoId" not in feeds[i]:
+                        video_id = "0"
+                    else:
+                        video_id = feeds[i]["photoId"]
+
+                    if "viewCount" not in feeds[i]:
+                        video_play_cnt = "0"
+                    else:
+                        video_play_cnt = feeds[i]["viewCount"]
+
+                    if "likeCount" not in feeds[i]:
+                        video_like_cnt = "0"
+                    else:
+                        video_like_cnt = feeds[i]["likeCount"]
+
+                    if "shareCount" not in feeds[i]:
+                        video_share_cnt = "0"
+                    else:
+                        video_share_cnt = feeds[i]["shareCount"]
+
+                    if "commentCount" not in feeds[i]:
+                        video_comment_cnt = "0"
+                    else:
+                        video_comment_cnt = feeds[i]["commentCount"]
+
+                    if "duration" not in feeds[i]:
+                        video_duration = "0"
+                    else:
+                        video_duration = int(int(feeds[i]["duration"]) / 1000)
+
+                    if "width" not in feeds[i] or "height" not in feeds[i]:
+                        video_width = "0"
+                        video_height = "0"
+                    else:
+                        video_width = feeds[i]["width"]
+                        video_height = feeds[i]["height"]
+
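+                    # "timestamp" is returned in milliseconds and converted to seconds below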
+                    if "timestamp" not in feeds[i]:
+                        video_send_time = "0"
+                    else:
+                        video_send_time = feeds[i]["timestamp"]
+                    cls.send_time = int(int(video_send_time) / 1000)
+
+                    if "userName" not in feeds[i]:
+                        user_name = "0"
+                    else:
+                        user_name = feeds[i]["userName"].strip().replace("\n", "") \
+                            .replace("/", "").replace("快手", "").replace(" ", "") \
+                            .replace(" ", "").replace("&NBSP", "").replace("\r", "")
+
+                    if "userId" not in feeds[i]:
+                        user_id = "0"
+                    else:
+                        user_id = feeds[i]["userId"]
+
+                    if "headUrl" not in feeds[i]:
+                        head_url = "0"
+                    else:
+                        head_url = feeds[i]["headUrl"]
+
+                    if "webpCoverUrls" in feeds[i]:
+                        cover_url = feeds[i]["webpCoverUrls"][-1]["url"]
+                    elif "coverUrls" not in feeds[i]:
+                        cover_url = "0"
+                    elif len(feeds[i]["coverUrls"]) == 0:
+                        cover_url = "0"
+                    else:
+                        cover_url = feeds[i]["coverUrls"][0]["url"]
+
+                    if "mainMvUrls" not in feeds[i]:
+                        video_url = "0"
+                    elif len(feeds[i]["mainMvUrls"]) == 0:
+                        video_url = "0"
+                    else:
+                        video_url = feeds[i]["mainMvUrls"][0]["url"]
+
+                    Common.logger(log_type).info("video_title:{}".format(video_title))
+                    Common.logger(log_type).info("user_name:{}".format(user_name))
+                    Common.logger(log_type).info("video_play_cnt:{}".format(video_play_cnt))
+                    Common.logger(log_type).info("video_like_cnt:{}".format(video_like_cnt))
+                    Common.logger(log_type).info("video_duration:{}秒".format(video_duration))
+                    Common.logger(log_type).info("video_send_time:{}".format(
+                        time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(int(video_send_time) / 1000))))
+                    Common.logger(log_type).info("video_url:{}".format(video_url))
+
+                    # Skip invalid videos
+                    if video_id == "0" \
+                            or head_url == "0" \
+                            or cover_url == "0" \
+                            or video_url == "0" \
+                            or video_duration == "0" \
+                            or video_send_time == "0" \
+                            or user_name == "0" \
+                            or user_id == "0" \
+                            or video_title == "":
+                        Common.logger(log_type).info("无效视频\n")
+                    # Stop once a video is older than 7 days (604800 seconds)
+                    elif int(time.time()) - int(int(video_send_time) / 1000) > 604800:
+                        Common.logger(log_type).info("发布时间:{},超过7天\n", time.strftime(
+                            "%Y/%m/%d %H:%M:%S", time.localtime(int(video_send_time) / 1000)))
+                        cls.person_pcursor = ""
+                        return
+                    # Download rule
+                    elif cls.download_rule(video_duration, video_width, video_height, video_play_cnt,
+                                           video_like_cnt, video_share_cnt) is False:
+                        Common.logger(log_type).info("不满足下载规则:{}\n".format(kuaishou_title))
+                    # Sensitive-word check
+                    elif any(word in kuaishou_title for word in cls.sensitive_words()):
+                        Common.logger(log_type).info("视频已中敏感词:{}\n".format(kuaishou_title))
+                    # Dedup against the cloud doc: recommend-feed downloaded sheet
+                    elif str(video_id) in [j for m in Feishu.get_values_batch(log_type, "kuaishou", "3cd128") for j in m]:
+                        Common.logger(log_type).info("该视频已下载:{}\n", video_title)
+                    # Dedup against the cloud doc: user-profile downloaded sheet
+                    elif str(video_id) in [j for m in Feishu.get_values_batch(log_type, "kuaishou", "fYdA8F") for j in m]:
+                        Common.logger(log_type).info("该视频已下载:{}\n", video_title)
+                    # Dedup against the cloud doc: user-profile feeds sheet
+                    elif str(video_id) in [j for n in Feishu.get_values_batch(log_type, "kuaishou", "wW5cyb") for j in n]:
+                        Common.logger(log_type).info("该视频已在feeds中:{}\n", video_title)
+                    else:
+                        Feishu.insert_columns("follow", "kuaishou", "wW5cyb", "ROWS", 1, 2)
+                        # 获取当前时间
+                        get_feeds_time = int(time.time())
+                        # 工作表中写入数据
+                        values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(int(get_feeds_time))),
+                                   "用户主页",
+                                   str(video_id),
+                                   video_title,
+                                   video_play_cnt,
+                                   video_comment_cnt,
+                                   video_like_cnt,
+                                   video_share_cnt,
+                                   video_duration,
+                                   str(video_width) + "*" + str(video_height),
+                                   time.strftime(
+                                       "%Y/%m/%d %H:%M:%S", time.localtime(int(video_send_time) / 1000)),
+                                   user_name,
+                                   user_id,
+                                   head_url,
+                                   cover_url,
+                                   video_url]]
+                        # Sleep 1 s so the cloud-doc API is not hit too often
+                        time.sleep(1)
+                        Feishu.update_values(log_type, "kuaishou", "wW5cyb", "A2:T2", values)
+                        Common.logger(log_type).info("添加视频至follow_feeds成功:{}\n", video_title)
+                        cls.get_person_video_count.append(video_id)
+
+                        # # Stop once enough videos have been fetched for this user
+                        # if len(cls.get_person_video_count) >= 1:
+                        #     Common.logger(log_type).info('已抓取{}:{}条视频\n', user_name, len(cls.get_person_video_count))
+                        #     cls.person_pcursor = ""
+                        #     cls.get_person_video_count = []
+                        #     return
+                if r.json()["pcursor"] == 'no_more':
+                    Common.logger(log_type).info('没有更多作品了\n')
+                    return
+                elif len(cls.get_person_video_count) < 1:
+                    Common.logger(log_type).info('休眠 10-20 秒,翻页')
+                    time.sleep(random.randint(10, 20))
+                    # Next page: store the new cursor and recurse
+                    cls.person_pcursor = r.json()["pcursor"]
+                    cls.get_user_videos(log_type, uid)
+
+        except Exception as e:
+            Common.logger(log_type).error("get_videos_from_person异常:{}\n", e)
+
+    # Fetch videos for every user on the follow list
+    @classmethod
+    def get_videos_from_follow(cls, log_type, env):
+        try:
+            user_list = cls.get_follow_users(log_type)
+            if not user_list:
+                Common.logger(log_type).warning('用户ID列表为空\n')
+            else:
+                while True:
+                    for k, v in user_list.items():
+                        Common.logger(log_type).info('正在获取 {} 主页视频\n', k)
+                        cls.person_pcursor = ""
+                        cls.get_user_videos(log_type, str(v))
+                        cls.run_download_publish(log_type, env)
+                        if len(cls.get_all_video_count) >= 100:
+                            Common.logger(log_type).info('今日已抓取{}条视频\n', len(cls.get_all_video_count))
+                            cls.get_all_video_count = []
+                            return
+                        else:
+                            Common.logger(log_type).info('随机休眠 10-30 秒\n')
+                            time.sleep(random.randint(10, 30))
+        except Exception as e:
+            Common.logger(log_type).error('get_videos_from_follow异常:{}\n', e)
+
+    # Download / upload
+    @classmethod
+    def download_publish(cls, log_type, env):
+        try:
+            follow_feeds_sheet = Feishu.get_values_batch(log_type, "kuaishou", "wW5cyb")
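+            # Each call handles at most one row and then returns: deleting a row
+            # shifts the rows below it, so the caller re-reads the sheet each time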
+            for i in range(1, len(follow_feeds_sheet)):
+                time.sleep(1)
+                download_video_id = follow_feeds_sheet[i][2]
+                download_video_title = follow_feeds_sheet[i][3]
+                download_video_play_cnt = follow_feeds_sheet[i][4]
+                download_video_comment_cnt = follow_feeds_sheet[i][5]
+                download_video_like_cnt = follow_feeds_sheet[i][6]
+                download_video_share_cnt = follow_feeds_sheet[i][7]
+                download_video_duration = follow_feeds_sheet[i][8]
+                download_video_resolution = follow_feeds_sheet[i][9]
+                download_video_send_time = follow_feeds_sheet[i][10]
+                download_user_name = follow_feeds_sheet[i][11]
+                download_user_id = follow_feeds_sheet[i][12]
+                download_head_url = follow_feeds_sheet[i][13]
+                download_cover_url = follow_feeds_sheet[i][14]
+                download_video_url = follow_feeds_sheet[i][15]
+
+                Common.logger(log_type).info("正在判断第{}行,视频:{}", i + 1, download_video_title)
+
+                # Skip blank rows and rows with an empty title
+                if download_video_id is None \
+                        or download_video_id == "" \
+                        or download_video_title is None \
+                        or download_video_title == "":
+                    # Delete the row (dimension can be ROWS or COLUMNS)
+                    Feishu.dimension_range(log_type, "kuaishou", "wW5cyb", "ROWS", i + 1, i + 1)
+                    Common.logger(log_type).warning("标题为空或空行,删除成功\n")
+                    return
+
+                # Dedup against the downloaded-videos sheet: recommend feed
+                elif str(download_video_id) in [j for m in Feishu.get_values_batch(
+                        log_type, "kuaishou", "3cd128") for j in m]:
+                    # Delete the row (dimension can be ROWS or COLUMNS)
+                    Feishu.dimension_range(log_type, "kuaishou", "wW5cyb", "ROWS", i + 1, i + 1)
+                    Common.logger(log_type).info("视频已下载:{},删除成功\n", download_video_title)
+                    return
+
+                # Dedup against the downloaded-videos sheet: user profile
+                elif str(download_video_id) in [j for m in Feishu.get_values_batch(
+                        log_type, "kuaishou", "fYdA8F") for j in m]:
+                    # Delete the row (dimension can be ROWS or COLUMNS)
+                    Feishu.dimension_range(log_type, "kuaishou", "wW5cyb", "ROWS", i + 1, i + 1)
+                    Common.logger(log_type).info("视频已下载:{},删除成功\n", download_video_title)
+                    return
+
+                else:
+                    # Download the cover
+                    Common.download_method(log_type=log_type, text="cover",
+                                           d_name=str(download_video_title), d_url=str(download_cover_url))
+                    # Download the video
+                    Common.download_method(log_type=log_type, text="video",
+                                           d_name=str(download_video_title), d_url=str(download_video_url))
+                    # Save video metadata to "./videos/{download_video_title}/info.txt"
+                    with open("./videos/" + download_video_title + "/" + "info.txt",
+                              "a", encoding="UTF-8") as f_a:
+                        f_a.write(str(download_video_id) + "\n" +
+                                  str(download_video_title) + "\n" +
+                                  str(download_video_duration) + "\n" +
+                                  str(download_video_play_cnt) + "\n" +
+                                  str(download_video_comment_cnt) + "\n" +
+                                  str(download_video_like_cnt) + "\n" +
+                                  str(download_video_share_cnt) + "\n" +
+                                  str(download_video_resolution) + "\n" +
+                                  str(int(time.mktime(
+                                      time.strptime(download_video_send_time, "%Y/%m/%d %H:%M:%S")))) + "\n" +
+                                  str(download_user_name) + "\n" +
+                                  str(download_head_url) + "\n" +
+                                  str(download_video_url) + "\n" +
+                                  str(download_cover_url) + "\n" +
+                                  "kuaishou_person")
+                    Common.logger(log_type).info("==========视频信息已保存至info.txt==========")
+
+                    # Upload the video
+                    Common.logger(log_type).info("开始上传视频:{}".format(download_video_title))
+                    our_video_id = Publish.upload_and_publish(log_type, env, "play")
+                    our_video_link = "https://admin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info"
+                    Common.logger(log_type).info("视频上传完成:{}", download_video_title)
+
+                    # Video-ID worksheet: insert a row at the top
+                    time.sleep(1)
+                    Feishu.insert_columns(log_type, "kuaishou", "fYdA8F", "ROWS", 1, 2)
+                    # Video-ID worksheet: write the data into the new first row
+                    upload_time = int(time.time())
+                    values = [[our_video_id,
+                               time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(upload_time)),
+                               "用户主页",
+                               str(download_video_id),
+                               str(download_video_title),
+                               our_video_link,
+                               download_video_play_cnt,
+                               download_video_comment_cnt,
+                               download_video_like_cnt,
+                               download_video_share_cnt,
+                               download_video_duration,
+                               str(download_video_resolution),
+                               str(download_video_send_time),
+                               str(download_user_name),
+                               str(download_user_id),
+                               str(download_head_url),
+                               str(download_cover_url),
+                               str(download_video_url)]]
+                    time.sleep(1)
+                    Feishu.update_values(log_type, "kuaishou", "fYdA8F", "E2:Z2", values)
+                    cls.get_all_video_count.append(download_video_id)
+                    Common.logger(log_type).info("保存视频ID至已下载云文档成功:{}", download_video_title)
+
+                    # Delete the row (dimension can be ROWS or COLUMNS)
+                    Feishu.dimension_range(log_type, "kuaishou", "wW5cyb", "ROWS", i + 1, i + 1)
+                    Common.logger(log_type).info("视频:{},下载/上传成功\n", download_video_title)
+                    return
+        except Exception as e:
+            Feishu.dimension_range(log_type, "kuaishou", "wW5cyb", "ROWS", 2, 2)
+            Common.logger(log_type).error("download_publish异常,删除成功:{}\n", e)
+
+    # Run download / upload until the feeds sheet is drained
+    @classmethod
+    def run_download_publish(cls, log_type, env):
+        try:
+            while True:
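+                # Loop until only the header row is left in the feeds sheet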
+                follow_feeds_sheet = Feishu.get_values_batch(log_type, "kuaishou", "wW5cyb")
+                if len(follow_feeds_sheet) == 1:
+                    Common.logger(log_type).info("下载/上传完成\n")
+                    break
+                else:
+                    cls.download_publish(log_type, env)
+        except Exception as e:
+            Common.logger(log_type).error("run_download_publish异常:{}\n", e)
+
+
+if __name__ == "__main__":
+    Follow.get_user_videos('follow', '240529022')
+
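
A minimal runner sketch for the new crawler. This commit only adds kuaishou/kuaishou_main/__init__.py; the file name run_kuaishou_follow.py, its flags, and their defaults below are assumptions modeled on xigua's run_xigua_follow.py, not part of this commit:

# -*- coding: utf-8 -*-
# Hypothetical kuaishou/kuaishou_main/run_kuaishou_follow.py (assumed; not in this commit)
import argparse
import os
import sys
sys.path.append(os.getcwd())
from kuaishou.kuaishou_follow.kuaishou_follow import Follow

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--log_type", default="follow")  # assumed flag, mirroring xigua's runner
    parser.add_argument("--env", default="dev")          # assumed flag, mirroring xigua's runner
    args = parser.parse_args()
    Follow.get_videos_from_follow(args.log_type, args.env)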

+ 3 - 0
kuaishou/kuaishou_main/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/23

BIN
xigua/.DS_Store


BIN
xigua/xigua_follow/.DS_Store


+ 311 - 302
xigua/xigua_follow/xigua_follow.py

@@ -644,199 +644,205 @@ class Follow:
 
     @classmethod
     def get_videolist(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine):
-        signature = cls.random_signature()
-        while True:
-            url = "https://www.ixigua.com/api/videov2/author/new_video_list?"
-            params = {
-                'to_user_id': str(out_uid),
-                'offset': str(cls.offset),
-                'limit': '30',
-                'maxBehotTime': '0',
-                'order': 'new',
-                'isHome': '0',
-                'msToken': 'G0eRzNkw189a8TLaXjc6nTHVMQwh9XcxVAqTbGKi7iPJdQcLwS3-XRrJ3MZ7QBfqErpxp3EX1WtvWOIcZ3NIgr41hgcd-v64so_RRj3YCRw1UsKW8mIssNLlIMspsg==',
-                'X-Bogus': 'DFSzswVuEkUANjW9ShFTgR/F6qHt',
-                '_signature': signature,
-            }
-            headers = {
-                'authority': 'www.ixigua.com',
-                'accept': 'application/json, text/plain, */*',
-                'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
-                'cache-control': 'no-cache',
-                'cookie': f'MONITOR_WEB_ID=7168304743566296612; __ac_signature={signature}; ixigua-a-s=1; support_webp=true; support_avif=false; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; msToken=G0eRzNkw189a8TLaXjc6nTHVMQwh9XcxVAqTbGKi7iPJdQcLwS3-XRrJ3MZ7QBfqErpxp3EX1WtvWOIcZ3NIgr41hgcd-v64so_RRj3YCRw1UsKW8mIssNLlIMspsg==; tt_scid=o4agqz7u9SKPwfBoPt6S82Cw0q.9KDtqmNe0JHxMqmpxNHQWq1BmrQdgVU6jEoX7ed99; ttwid=1%7CHHtv2QqpSGuSu8r-zXF1QoWsvjmNi1SJrqOrZzg-UCY%7C1676618894%7Cee5ad95378275f282f230a7ffa9947ae7eff40d0829c5a2568672a6dc90a1c96; ixigua-a-s=1',
-                'pragma': 'no-cache',
-                'referer': f'https://www.ixigua.com/home/{out_uid}/video/?preActiveKey=hotsoon&list_entrance=userdetail',
-                'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
-                'sec-ch-ua-mobile': '?0',
-                'sec-ch-ua-platform': '"macOS"',
-                'sec-fetch-dest': 'empty',
-                'sec-fetch-mode': 'cors',
-                'sec-fetch-site': 'same-origin',
-                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
-                'x-secsdk-csrf-token': '00010000000119e3f9454d1dcbb288704cda1960f241e2d19bd21f2fd283520c3615a990ac5a17448bfbb902a249'
-            }
-            urllib3.disable_warnings()
-            response = requests.get(url=url, headers=headers, params=params, proxies=proxies, verify=False)
-            cls.offset += 30
-            if response.status_code != 200:
-                Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.text}\n")
-            elif 'data' not in response.text:
-                Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.text}\n")
-            elif 'videoList' not in response.json()["data"]:
-                Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.json()}\n")
-            else:
-                videoList = response.json()['data']['videoList']
-                for i in range(len(videoList)):
-                    # video_title
-                    if 'title' not in videoList[i]:
-                        video_title = 0
-                    else:
-                        video_title = videoList[i]['title'].strip().replace('手游', '') \
-                            .replace('/', '').replace('\/', '').replace('\n', '')
+        try:
+            signature = cls.random_signature()
+            while True:
+                url = "https://www.ixigua.com/api/videov2/author/new_video_list?"
+                params = {
+                    'to_user_id': str(out_uid),
+                    'offset': str(cls.offset),
+                    'limit': '30',
+                    'maxBehotTime': '0',
+                    'order': 'new',
+                    'isHome': '0',
+                    'msToken': 'G0eRzNkw189a8TLaXjc6nTHVMQwh9XcxVAqTbGKi7iPJdQcLwS3-XRrJ3MZ7QBfqErpxp3EX1WtvWOIcZ3NIgr41hgcd-v64so_RRj3YCRw1UsKW8mIssNLlIMspsg==',
+                    'X-Bogus': 'DFSzswVuEkUANjW9ShFTgR/F6qHt',
+                    '_signature': signature,
+                }
+                headers = {
+                    'authority': 'www.ixigua.com',
+                    'accept': 'application/json, text/plain, */*',
+                    'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
+                    'cache-control': 'no-cache',
+                    'cookie': f'MONITOR_WEB_ID=7168304743566296612; __ac_signature={signature}; ixigua-a-s=1; support_webp=true; support_avif=false; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; msToken=G0eRzNkw189a8TLaXjc6nTHVMQwh9XcxVAqTbGKi7iPJdQcLwS3-XRrJ3MZ7QBfqErpxp3EX1WtvWOIcZ3NIgr41hgcd-v64so_RRj3YCRw1UsKW8mIssNLlIMspsg==; tt_scid=o4agqz7u9SKPwfBoPt6S82Cw0q.9KDtqmNe0JHxMqmpxNHQWq1BmrQdgVU6jEoX7ed99; ttwid=1%7CHHtv2QqpSGuSu8r-zXF1QoWsvjmNi1SJrqOrZzg-UCY%7C1676618894%7Cee5ad95378275f282f230a7ffa9947ae7eff40d0829c5a2568672a6dc90a1c96; ixigua-a-s=1',
+                    'pragma': 'no-cache',
+                    'referer': f'https://www.ixigua.com/home/{out_uid}/video/?preActiveKey=hotsoon&list_entrance=userdetail',
+                    'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+                    'sec-ch-ua-mobile': '?0',
+                    'sec-ch-ua-platform': '"macOS"',
+                    'sec-fetch-dest': 'empty',
+                    'sec-fetch-mode': 'cors',
+                    'sec-fetch-site': 'same-origin',
+                    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
+                    'x-secsdk-csrf-token': '00010000000119e3f9454d1dcbb288704cda1960f241e2d19bd21f2fd283520c3615a990ac5a17448bfbb902a249'
+                }
+                urllib3.disable_warnings()
+                response = requests.get(url=url, headers=headers, params=params, proxies=proxies, verify=False)
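+                # Advance the offset by one page (limit=30) for the next request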
+                cls.offset += 30
+                if response.status_code != 200:
+                    Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.text}\n")
+                    return
+                elif 'data' not in response.text:
+                    Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.text}\n")
+                    return
+                elif 'videoList' not in response.json()["data"]:
+                    Common.logger(log_type, crawler).warning(f"get_videolist_response:{response.json()}\n")
+                    return
+                else:
+                    videoList = response.json()['data']['videoList']
+                    for i in range(len(videoList)):
+                        # video_title
+                        if 'title' not in videoList[i]:
+                            video_title = 0
+                        else:
+                            video_title = videoList[i]['title'].strip().replace('手游', '') \
+                                .replace('/', '').replace('\n', '')
 
-                    # video_id
-                    if 'video_id' not in videoList[i]:
-                        video_id = 0
-                    else:
-                        video_id = videoList[i]['video_id']
+                        # video_id
+                        if 'video_id' not in videoList[i]:
+                            video_id = 0
+                        else:
+                            video_id = videoList[i]['video_id']
 
-                    # gid
-                    if 'gid' not in videoList[i]:
-                        gid = 0
-                    else:
-                        gid = videoList[i]['gid']
+                        # gid
+                        if 'gid' not in videoList[i]:
+                            gid = 0
+                        else:
+                            gid = videoList[i]['gid']
 
-                    # play_cnt
-                    if 'video_detail_info' not in videoList[i]:
-                        play_cnt = 0
-                    elif 'video_watch_count' not in videoList[i]['video_detail_info']:
-                        play_cnt = 0
-                    else:
-                        play_cnt = videoList[i]['video_detail_info']['video_watch_count']
+                        # play_cnt
+                        if 'video_detail_info' not in videoList[i]:
+                            play_cnt = 0
+                        elif 'video_watch_count' not in videoList[i]['video_detail_info']:
+                            play_cnt = 0
+                        else:
+                            play_cnt = videoList[i]['video_detail_info']['video_watch_count']
 
-                    # comment_cnt
-                    if 'comment_count' not in videoList[i]:
-                        comment_cnt = 0
-                    else:
-                        comment_cnt = videoList[i]['comment_count']
+                        # comment_cnt
+                        if 'comment_count' not in videoList[i]:
+                            comment_cnt = 0
+                        else:
+                            comment_cnt = videoList[i]['comment_count']
 
-                    # like_cnt
-                    if 'digg_count' not in videoList[i]:
-                        like_cnt = 0
-                    else:
-                        like_cnt = videoList[i]['digg_count']
+                        # like_cnt
+                        if 'digg_count' not in videoList[i]:
+                            like_cnt = 0
+                        else:
+                            like_cnt = videoList[i]['digg_count']
 
-                    # share_cnt
-                    share_cnt = 0
+                        # share_cnt
+                        share_cnt = 0
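+                        # share count is not read from the response; it is fixed at 0 here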
 
-                    # video_duration
-                    if 'video_duration' not in videoList[i]:
-                        video_duration = 0
-                    else:
-                        video_duration = int(videoList[i]['video_duration'])
-
-                    # send_time
-                    if 'publish_time' not in videoList[i]:
-                        publish_time = 0
-                    else:
-                        publish_time = videoList[i]['publish_time']
+                        # video_duration
+                        if 'video_duration' not in videoList[i]:
+                            video_duration = 0
+                        else:
+                            video_duration = int(videoList[i]['video_duration'])
 
-                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time))
+                        # send_time
+                        if 'publish_time' not in videoList[i]:
+                            publish_time = 0
+                        else:
+                            publish_time = videoList[i]['publish_time']
 
-                    # is_top
-                    if 'is_top' not in videoList[i]:
-                        is_top = 0
-                    else:
-                        is_top = videoList[i]['is_top']
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time))
 
-                    # user_name
-                    if 'user_info' not in videoList[i]:
-                        user_name = 0
-                    elif 'name' not in videoList[i]['user_info']:
-                        user_name = 0
-                    else:
-                        user_name = videoList[i]['user_info']['name']
+                        # is_top
+                        if 'is_top' not in videoList[i]:
+                            is_top = 0
+                        else:
+                            is_top = videoList[i]['is_top']
 
-                    # user_id
-                    if 'user_info' not in videoList[i]:
-                        user_id = 0
-                    elif 'user_id' not in videoList[i]['user_info']:
-                        user_id = 0
-                    else:
-                        user_id = videoList[i]['user_info']['user_id']
+                        # user_name
+                        if 'user_info' not in videoList[i]:
+                            user_name = 0
+                        elif 'name' not in videoList[i]['user_info']:
+                            user_name = 0
+                        else:
+                            user_name = videoList[i]['user_info']['name']
 
-                    # avatar_url
-                    if 'user_info' not in videoList[i]:
-                        avatar_url = 0
-                    elif 'avatar_url' not in videoList[i]['user_info']:
-                        avatar_url = 0
-                    else:
-                        avatar_url = videoList[i]['user_info']['avatar_url']
-
-                    # cover_url
-                    if 'video_detail_info' not in videoList[i]:
-                        cover_url = 0
-                    elif 'detail_video_large_image' not in videoList[i]['video_detail_info']:
-                        cover_url = 0
-                    elif 'url' in videoList[i]['video_detail_info']['detail_video_large_image']:
-                        cover_url = videoList[i]['video_detail_info']['detail_video_large_image']['url']
-                    else:
-                        cover_url = videoList[i]['video_detail_info']['detail_video_large_image']['url_list'][0]['url']
+                        # user_id
+                        if 'user_info' not in videoList[i]:
+                            user_id = 0
+                        elif 'user_id' not in videoList[i]['user_info']:
+                            user_id = 0
+                        else:
+                            user_id = videoList[i]['user_info']['user_id']
 
-                    while True:
-                        rule_dict = cls.get_rule(log_type, crawler)
-                        if rule_dict is None:
-                            Common.logger(log_type, crawler).warning(f"rule_dict:{rule_dict}, 10秒后重试")
-                            time.sleep(10)
+                        # avatar_url
+                        if 'user_info' not in videoList[i]:
+                            avatar_url = 0
+                        elif 'avatar_url' not in videoList[i]['user_info']:
+                            avatar_url = 0
                         else:
-                            break
-
-                    if gid == 0 or video_id == 0 or cover_url == 0:
-                        Common.logger(log_type, crawler).info('无效视频\n')
-                    elif is_top is True and int(time.time()) - int(publish_time) > 3600 * 24 * rule_dict['publish_time']:
-                        Common.logger(log_type, crawler).info(f'置顶视频,且发布时间:{publish_time_str} 超过{rule_dict["publish_time"]}天\n')
-                    elif int(time.time()) - int(publish_time) > 3600 * 24 * rule_dict['publish_time']:
-                        Common.logger(log_type, crawler).info(f'发布时间:{publish_time_str}超过{rule_dict["publish_time"]}天\n')
-                        cls.offset = 0
-                        return
-                    else:
-                        video_url_dict = cls.get_video_url(log_type, crawler, gid)
-                        video_url = video_url_dict["video_url"]
-                        audio_url = video_url_dict["audio_url"]
-                        video_width = video_url_dict["video_width"]
-                        video_height = video_url_dict["video_height"]
-
-                        video_dict = {'video_title': video_title,
-                                      'video_id': video_id,
-                                      'gid': gid,
-                                      'play_cnt': play_cnt,
-                                      'comment_cnt': comment_cnt,
-                                      'like_cnt': like_cnt,
-                                      'share_cnt': share_cnt,
-                                      'video_width': video_width,
-                                      'video_height': video_height,
-                                      'duration': video_duration,
-                                      'publish_time_stamp': publish_time,
-                                      'publish_time_str': publish_time_str,
-                                      'is_top': is_top,
-                                      'user_name': user_name,
-                                      'user_id': user_id,
-                                      'avatar_url': avatar_url,
-                                      'cover_url': cover_url,
-                                      'audio_url': audio_url,
-                                      'video_url': video_url,
-                                      'session': signature}
-                        for k, v in video_dict.items():
-                            Common.logger(log_type, crawler).info(f"{k}:{v}")
-                        cls.download_publish(log_type=log_type,
-                                             crawler=crawler,
-                                             video_dict=video_dict,
-                                             rule_dict=rule_dict,
-                                             strategy=strategy,
-                                             our_uid=our_uid,
-                                             oss_endpoint=oss_endpoint,
-                                             env=env,
-                                             machine=machine)
+                            avatar_url = videoList[i]['user_info']['avatar_url']
+
+                        # cover_url
+                        if 'video_detail_info' not in videoList[i]:
+                            cover_url = 0
+                        elif 'detail_video_large_image' not in videoList[i]['video_detail_info']:
+                            cover_url = 0
+                        elif 'url' in videoList[i]['video_detail_info']['detail_video_large_image']:
+                            cover_url = videoList[i]['video_detail_info']['detail_video_large_image']['url']
+                        else:
+                            cover_url = videoList[i]['video_detail_info']['detail_video_large_image']['url_list'][0]['url']
+
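+                        # Fetch the download rules, retrying every 10 s until they are available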
+                        while True:
+                            rule_dict = cls.get_rule(log_type, crawler)
+                            if rule_dict is None:
+                                Common.logger(log_type, crawler).warning(f"rule_dict:{rule_dict}, 10秒后重试")
+                                time.sleep(10)
+                            else:
+                                break
+
+                        if gid == 0 or video_id == 0 or cover_url == 0:
+                            Common.logger(log_type, crawler).info('无效视频\n')
+                        elif is_top is True and int(time.time()) - int(publish_time) > 3600 * 24 * rule_dict['publish_time']:
+                            Common.logger(log_type, crawler).info(f'置顶视频,且发布时间:{publish_time_str} 超过{rule_dict["publish_time"]}天\n')
+                        elif int(time.time()) - int(publish_time) > 3600 * 24 * rule_dict['publish_time']:
+                            Common.logger(log_type, crawler).info(f'发布时间:{publish_time_str}超过{rule_dict["publish_time"]}天\n')
+                            cls.offset = 0
+                            return
+                        else:
+                            video_url_dict = cls.get_video_url(log_type, crawler, gid)
+                            video_url = video_url_dict["video_url"]
+                            audio_url = video_url_dict["audio_url"]
+                            video_width = video_url_dict["video_width"]
+                            video_height = video_url_dict["video_height"]
+
+                            video_dict = {'video_title': video_title,
+                                          'video_id': video_id,
+                                          'gid': gid,
+                                          'play_cnt': play_cnt,
+                                          'comment_cnt': comment_cnt,
+                                          'like_cnt': like_cnt,
+                                          'share_cnt': share_cnt,
+                                          'video_width': video_width,
+                                          'video_height': video_height,
+                                          'duration': video_duration,
+                                          'publish_time_stamp': publish_time,
+                                          'publish_time_str': publish_time_str,
+                                          'is_top': is_top,
+                                          'user_name': user_name,
+                                          'user_id': user_id,
+                                          'avatar_url': avatar_url,
+                                          'cover_url': cover_url,
+                                          'audio_url': audio_url,
+                                          'video_url': video_url,
+                                          'session': signature}
+                            for k, v in video_dict.items():
+                                Common.logger(log_type, crawler).info(f"{k}:{v}")
+                            cls.download_publish(log_type=log_type,
+                                                 crawler=crawler,
+                                                 video_dict=video_dict,
+                                                 rule_dict=rule_dict,
+                                                 strategy=strategy,
+                                                 our_uid=our_uid,
+                                                 oss_endpoint=oss_endpoint,
+                                                 env=env,
+                                                 machine=machine)
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_videolist:{e}\n")
 
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env, machine):
@@ -847,133 +853,136 @@ class Follow:
     # Download / upload
     @classmethod
     def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
-        # try:
-        if cls.download_rule(video_dict, rule_dict) is False:
-            Common.logger(log_type, crawler).info('不满足抓取规则\n')
-        elif any(word if word in video_dict['video_title'] else False for word in cls.filter_words(log_type, crawler)) is True:
-            Common.logger(log_type, crawler).info('标题已中过滤词:{}\n', video_dict['video_title'])
-        elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
-            Common.logger(log_type, crawler).info('视频已下载\n')
-        # elif str(video_dict['video_id']) in [x for y in Feishu.get_values_batch(log_type, 'xigua', 'e075e9') for x in y]:
-        #     Common.logger(log_type, crawler).info('视频已下载\n')
-        # elif str(video_dict['video_id']) in [x for y in Feishu.get_values_batch(log_type, 'xigua', '3Ul6wZ') for x in y]:
-        #     Common.logger(log_type, crawler).info('视频已下载\n')
-        # elif str(video_dict['video_id']) in [x for y in Feishu.get_values_batch(log_type, 'xigua', 'QOWqMo') for x in y]:
-        #     Common.logger(log_type, crawler).info('视频已下载\n')
-        # elif str(video_dict['video_id']) in [x for y in Feishu.get_values_batch(log_type, 'xigua', 'wjhpDs') for x in y]:
-        #     Common.logger(log_type, crawler).info('视频已存在\n')
-        else:
-            # 下载封面
-            Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
-            # 下载视频
-            Common.download_method(log_type=log_type, crawler=crawler, text='xigua_video', title=video_dict['video_title'], url=video_dict['video_url'])
-            # 下载音频
-            Common.download_method(log_type=log_type, crawler=crawler, text='xigua_audio', title=video_dict['video_title'], url=video_dict['audio_url'])
-            # 保存视频信息至txt
-            Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
-            # 合成音视频
-            Common.video_compose(log_type=log_type, crawler=crawler, video_dir=f"./{crawler}/videos/{video_dict['video_title']}")
-
-            # 上传视频
-            Common.logger(log_type, crawler).info("开始上传视频...")
-            our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                      crawler=crawler,
-                                                      strategy=strategy,
-                                                      our_uid=our_uid,
-                                                      env=env,
-                                                      oss_endpoint=oss_endpoint)
-            if env == 'dev':
-                our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        try:
+            if cls.download_rule(video_dict, rule_dict) is False:
+                Common.logger(log_type, crawler).info('不满足抓取规则\n')
+            elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type, crawler)):
+                Common.logger(log_type, crawler).info(f'标题已中过滤词:{video_dict["video_title"]}\n')
+            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
+                Common.logger(log_type, crawler).info('视频已下载\n')
+            # elif str(video_dict['video_id']) in [x for y in Feishu.get_values_batch(log_type, 'xigua', 'e075e9') for x in y]:
+            #     Common.logger(log_type, crawler).info('视频已下载\n')
+            # elif str(video_dict['video_id']) in [x for y in Feishu.get_values_batch(log_type, 'xigua', '3Ul6wZ') for x in y]:
+            #     Common.logger(log_type, crawler).info('视频已下载\n')
+            # elif str(video_dict['video_id']) in [x for y in Feishu.get_values_batch(log_type, 'xigua', 'QOWqMo') for x in y]:
+            #     Common.logger(log_type, crawler).info('视频已下载\n')
+            # elif str(video_dict['video_id']) in [x for y in Feishu.get_values_batch(log_type, 'xigua', 'wjhpDs') for x in y]:
+            #     Common.logger(log_type, crawler).info('视频已存在\n')
             else:
-                our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-            Common.logger(log_type, crawler).info("视频上传完成")
-
-            if our_video_id is None:
-                # 删除视频文件夹
-                shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}/")
-                return
-
-            # 视频写入飞书
-            Feishu.insert_columns(log_type, 'xigua', "e075e9", "ROWS", 1, 2)
-            upload_time = int(time.time())
-            values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
-                       "定向榜",
-                       video_dict['video_title'],
-                       str(video_dict['video_id']),
-                       our_video_link,
-                       video_dict['gid'],
-                       video_dict['play_cnt'],
-                       video_dict['comment_cnt'],
-                       video_dict['like_cnt'],
-                       video_dict['share_cnt'],
-                       video_dict['duration'],
-                       str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
-                       video_dict['publish_time_str'],
-                       video_dict['user_name'],
-                       video_dict['user_id'],
-                       video_dict['avatar_url'],
-                       video_dict['cover_url'],
-                       video_dict['video_url'],
-                       video_dict['audio_url']]]
-            time.sleep(1)
-            Feishu.update_values(log_type, 'xigua', "e075e9", "F2:Z2", values)
-            Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
-
-            # 视频信息保存数据库
-            insert_sql = f""" insert into crawler_video(video_id,
-                            user_id,
-                            out_user_id,
-                            platform,
-                            strategy,
-                            out_video_id,
-                            video_title,
-                            cover_url,
-                            video_url,
-                            duration,
-                            publish_time,
-                            play_cnt,
-                            crawler_rule,
-                            width,
-                            height)
-                            values({our_video_id},
-                            {our_uid},
-                            "{video_dict['user_id']}",
-                            "{cls.platform}",
-                            "定向爬虫策略",
-                            "{video_dict['video_id']}",
-                            "{video_dict['video_title']}",
-                            "{video_dict['cover_url']}",
-                            "{video_dict['video_url']}",
-                            {int(video_dict['duration'])},
-                            "{video_dict['publish_time_str']}",
-                            {int(video_dict['play_cnt'])},
-                            '{json.dumps(rule_dict)}',
-                            {int(video_dict['video_width'])},
-                            {int(video_dict['video_height'])}) """
-            Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-            MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
-            Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error(f'download_publish异常:{e}\n')
+                # Download the cover image
+                Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
+                # Download the video
+                Common.download_method(log_type=log_type, crawler=crawler, text='xigua_video', title=video_dict['video_title'], url=video_dict['video_url'])
+                # Download the audio track
+                Common.download_method(log_type=log_type, crawler=crawler, text='xigua_audio', title=video_dict['video_title'], url=video_dict['audio_url'])
+                # Save video metadata to a txt file
+                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+                # Merge the audio and video streams
+                Common.video_compose(log_type=log_type, crawler=crawler, video_dir=f"./{crawler}/videos/{video_dict['video_title']}")
+
+                # Upload the video
+                Common.logger(log_type, crawler).info("开始上传视频...")
+                our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                          crawler=crawler,
+                                                          strategy=strategy,
+                                                          our_uid=our_uid,
+                                                          env=env,
+                                                          oss_endpoint=oss_endpoint)
+                if our_video_id is None:
+                    # Upload failed: remove the local video folder and stop here
+                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}/")
+                    return
+
+                if env == 'dev':
+                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                else:
+                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                Common.logger(log_type, crawler).info("视频上传完成")
+
+                # Write the video record to the Feishu sheet
+                Feishu.insert_columns(log_type, 'xigua', "e075e9", "ROWS", 1, 2)
+                upload_time = int(time.time())
+                values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                           "定向榜",
+                           video_dict['video_title'],
+                           str(video_dict['video_id']),
+                           our_video_link,
+                           video_dict['gid'],
+                           video_dict['play_cnt'],
+                           video_dict['comment_cnt'],
+                           video_dict['like_cnt'],
+                           video_dict['share_cnt'],
+                           video_dict['duration'],
+                           str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
+                           video_dict['publish_time_str'],
+                           video_dict['user_name'],
+                           video_dict['user_id'],
+                           video_dict['avatar_url'],
+                           video_dict['cover_url'],
+                           video_dict['video_url'],
+                           video_dict['audio_url']]]
+                time.sleep(1)
+                Feishu.update_values(log_type, 'xigua', "e075e9", "F2:Z2", values)
+                Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+
+                # Save the video info to the database
+                insert_sql = f""" insert into crawler_video(video_id,
+                                user_id,
+                                out_user_id,
+                                platform,
+                                strategy,
+                                out_video_id,
+                                video_title,
+                                cover_url,
+                                video_url,
+                                duration,
+                                publish_time,
+                                play_cnt,
+                                crawler_rule,
+                                width,
+                                height)
+                                values({our_video_id},
+                                {our_uid},
+                                "{video_dict['user_id']}",
+                                "{cls.platform}",
+                                "定向爬虫策略",
+                                "{video_dict['video_id']}",
+                                "{video_dict['video_title']}",
+                                "{video_dict['cover_url']}",
+                                "{video_dict['video_url']}",
+                                {int(video_dict['duration'])},
+                                "{video_dict['publish_time_str']}",
+                                {int(video_dict['play_cnt'])},
+                                '{json.dumps(rule_dict)}',
+                                {int(video_dict['video_width'])},
+                                {int(video_dict['video_height'])}) """
+                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
+                Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'download_publish异常:{e}\n')
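
A side note on the insert_sql block above (unchanged by this commit apart from indentation): it interpolates video_title, URLs and other fields straight into the SQL string, so a title containing a quote character can break the statement. A hedged sketch of a parameterized version, assuming a plain pymysql connection rather than the project's MysqlHelper, whose API may differ (connection details are placeholders):

import json
import pymysql

# placeholder connection details; the real project routes this through MysqlHelper
conn = pymysql.connect(host="127.0.0.1", user="crawler", password="secret",
                       database="piaoquan_crawler", charset="utf8mb4")

def insert_crawler_video(our_video_id, our_uid, video_dict, rule_dict):
    sql = """insert into crawler_video(video_id, user_id, out_user_id, platform,
                 strategy, out_video_id, video_title, cover_url, video_url, duration,
                 publish_time, play_cnt, crawler_rule, width, height)
             values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
    params = (our_video_id, our_uid, video_dict['user_id'], "xigua",  # literal stands in for cls.platform
              "定向爬虫策略", video_dict['video_id'], video_dict['video_title'],
              video_dict['cover_url'], video_dict['video_url'],
              int(video_dict['duration']), video_dict['publish_time_str'],
              int(video_dict['play_cnt']), json.dumps(rule_dict),
              int(video_dict['video_width']), int(video_dict['video_height']))
    with conn.cursor() as cursor:
        # the driver escapes every parameter, so quotes in a title cannot break the SQL
        cursor.execute(sql, params)
    conn.commit()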
 
     @classmethod
     def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
-        user_list = cls.get_user_list(log_type=log_type, crawler=crawler, sheetid="5tlTYB", env=env, machine=machine)
-        for user in user_list:
-            out_uid = user["out_uid"]
-            user_name = user["user_name"]
-            our_uid = user["our_uid"]
-            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
-            cls.get_videolist(log_type=log_type,
-                              crawler=crawler,
-                              strategy=strategy,
-                              our_uid=our_uid,
-                              out_uid=out_uid,
-                              oss_endpoint=oss_endpoint,
-                              env=env,
-                              machine=machine)
-            cls.offset = 0
-            time.sleep(3)
+        try:
+            user_list = cls.get_user_list(log_type=log_type, crawler=crawler, sheetid="5tlTYB", env=env, machine=machine)
+            for user in user_list:
+                out_uid = user["out_uid"]
+                user_name = user["user_name"]
+                our_uid = user["our_uid"]
+                Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
+                cls.get_videolist(log_type=log_type,
+                                  crawler=crawler,
+                                  strategy=strategy,
+                                  our_uid=our_uid,
+                                  out_uid=out_uid,
+                                  oss_endpoint=oss_endpoint,
+                                  env=env,
+                                  machine=machine)
+                cls.offset = 0
+                time.sleep(3)
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_follow_videos:{e}\n")
 
 
 if __name__ == '__main__':

+ 1 - 1
xigua/xigua_main/run_xigua_follow.py

@@ -22,7 +22,7 @@ def main(log_type, crawler, strategy, oss_endpoint, env, machine):
             time.sleep(60)
         except Exception as e:
             Common.logger(log_type, crawler).info(f"西瓜视频异常,触发报警:{e}\n")
-            Feishu.bot(log_type, crawler, e)
+            Feishu.bot(log_type, crawler, f"{e}")
 
 
 if __name__ == "__main__":