wangkun 2 years ago
parent
commit
865c4e1e32
9 changed files with 2007 additions and 0 deletions
  1. README.md (+34, -0)
  2. main/__init__.py (+3, -0)
  3. main/common.py (+160, -0)
  4. main/demo.py (+218, -0)
  5. main/feishu.py (+406, -0)
  6. main/get_signature.py (+121, -0)
  7. main/publish.py (+261, -0)
  8. main/run_xigua_follow.py (+37, -0)
  9. main/xigua_follow.py (+767, -0)

+ 34 - 0
README.md

@@ -0,0 +1,34 @@
+# crawler_xigua
+1. git:https://git.yishihui.com/Server/crawler_xigua
+2. feishu:https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?sheet=e075e9
+
+#### Software architecture
+1. ffmpeg-python==0.2.0
+2. ffmpeg==1.4
+3. loguru==0.6.0
+4. lxml==4.9.1 
+5. oss2==2.15.0 
+6. requests==2.27.1 
+7. selenium==4.2.0 
+8. urllib3==1.26.9 
+9. webdriver-manager==3.8.3
+
+#### Usage
+1. cd ./crawler_xigua
+2. python3 ./main/run_xigua_follow.py
+
+#### Requirements
+1. Crawl window: no time restriction; crawl whenever a targeted account posts an update
+2. Crawl rules:
+   1. Video duration: at least 1 minute
+   2. Video stats (likes + play count): no requirement
+   3. Video resolution: 720 or higher
+   4. On-platform title = original Xigua video title
+   5. On-platform cover image = original Xigua cover image
+3. On-platform hosting:
+   1. No limit on video count (these accounts mostly post only 1-7 videos a week; daily posters are rare)
+   2. Since this is targeted account crawling, videos and accounts must match one-to-one. Open question: can the new crawler keep publishing its content to the old crawler's existing accounts? If not, new virtual accounts will be used instead.
+   3. Accounts come in two kinds: old accounts keep their existing mapping; new accounts get fresh virtual accounts. Principle: targeted crawling, one-to-one.
+4. Note: there are two account types, old and new. For old accounts, only content published after the crawler goes live needs to be crawled.
+5. Filter words: 毛泽东、毛主席、周恩来、林彪、习近平、习大大、彭丽媛、拜登、普京、佩洛西、蔡英文; more to be added
+

+ 3 - 0
main/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/8/23

+ 160 - 0
main/common.py

@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/8/24
+"""
+公共方法,包含:生成log / 删除log / 下载方法 / 读取文件 / 统计下载数
+"""
+from datetime import date, timedelta
+from loguru import logger
+import datetime
+import os
+import time
+import requests
+import urllib3
+
+proxies = {"http": None, "https": None}
+
+
+class Common:
+    # Current time, e.g. <class 'datetime.datetime'>  2022-04-14 20:13:51.244472
+    now = datetime.datetime.now()
+    # Yesterday, e.g. <class 'str'>  2022/04/13
+    yesterday = (date.today() + timedelta(days=-1)).strftime("%Y/%m/%d")
+    # Today, e.g. <class 'datetime.date'>  2022-04-14
+    today = date.today()
+    # Tomorrow, e.g. <class 'str'>  2022/04/15
+    tomorrow = (date.today() + timedelta(days=1)).strftime("%Y/%m/%d")
+
+    # Create a logger with the loguru module
+    @staticmethod
+    def logger(log_type):
+        """
+        Create a logger with the loguru module
+        """
+        # Log directory
+        log_dir = r"./logs/"
+        log_path = os.getcwd() + os.sep + log_dir
+        if not os.path.isdir(log_path):
+            os.makedirs(log_path)
+
+        # Log file name
+        if log_type == "recommend":
+            log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + '-xigua-recommend.log'
+        elif log_type == "follow":
+            log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + '-xigua-follow.log'
+        else:
+            log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + '-xigua.log'
+
+        # Do not print logs to the console
+        logger.remove(handler_id=None)
+
+        # rotation="500 MB": start a new file every 500 MB
+        # rotation="12:00": start a new file every day at 12:00
+        # rotation="1 week": start a new file every week
+        # retention="10 days": delete logs older than 10 days
+        # Initialize the logger
+        logger.add(log_dir + log_name, level="INFO", rotation='00:00')
+
+        return logger
+
+    # Clean up logs, keeping the 7 most recent files
+    @classmethod
+    def del_logs(cls, log_type):
+        """
+        Remove redundant log files
+        :return: keeps the 7 most recent logs
+        """
+        log_dir = "./logs/"
+        all_files = sorted(os.listdir(log_dir))
+        all_logs = []
+        for log in all_files:
+            name = os.path.splitext(log)[-1]
+            if name == ".log":
+                all_logs.append(log)
+
+        if len(all_logs) <= 7:
+            pass
+        else:
+            for file in all_logs[:len(all_logs) - 7]:
+                os.remove(log_dir + file)
+        cls.logger(log_type).info("Logs cleaned up")
+
+    # Delete charles cache files, keeping only the three most recent
+    @classmethod
+    def del_charles_files(cls, log_type):
+        # All files in the target directory
+        all_file = sorted(os.listdir("./chlsfiles/"))
+        for file in all_file[0:-3]:
+            os.remove(r"./chlsfiles/" + file)
+        cls.logger(log_type).info("charles cache files deleted")
+
+    # Download a video, audio track, or cover image
+    @classmethod
+    def download_method(cls, log_type, text, d_name, d_url):
+        """
+        Download a cover: text == "cover"; a video: text == "video"; an audio track: text == "audio"
+        Name of the video to download: d_name
+        Cover image URL, or video/audio playback URL: d_url
+        Saved under: "./videos/{d_name}/"
+        """
+        # First create a folder for everything related to this video
+        video_dir = "./videos/" + str(d_name) + "/"
+        if not os.path.exists(video_dir):
+            os.mkdir(video_dir)
+
+        # Download the video
+        if text == "video":
+            # URL of the video to download
+            video_url = str(d_url)
+            # Video file name
+            video_name = "video1.mp4"
+
+            # Download the video
+            urllib3.disable_warnings()
+            response = requests.get(video_url, stream=True, proxies=proxies, verify=False)
+            try:
+                with open(video_dir + video_name, "wb") as f:
+                    for chunk in response.iter_content(chunk_size=10240):
+                        f.write(chunk)
+                cls.logger(log_type).info("==========video download complete==========")
+            except Exception as e:
+                cls.logger(log_type).error("video download failed: {}", e)
+
+        # Download the audio track
+        elif text == "audio":
+            # URL of the audio to download
+            audio_url = str(d_url)
+            # Audio file name
+            audio_name = "audio1.mp4"
+
+            # Download the audio
+            urllib3.disable_warnings()
+            response = requests.get(audio_url, stream=True, proxies=proxies, verify=False)
+            try:
+                with open(video_dir + audio_name, "wb") as f:
+                    for chunk in response.iter_content(chunk_size=10240):
+                        f.write(chunk)
+                cls.logger(log_type).info("==========audio download complete==========")
+            except Exception as e:
+                cls.logger(log_type).error("audio download failed: {}", e)
+
+        # Download the cover image
+        elif text == "cover":
+            # URL of the cover to download
+            cover_url = str(d_url)
+            # Cover file name
+            cover_name = "image.jpg"
+
+            # Download the cover
+            urllib3.disable_warnings()
+            response = requests.get(cover_url, proxies=proxies, verify=False)
+            try:
+                with open(video_dir + cover_name, "wb") as f:
+                    f.write(response.content)
+                cls.logger(log_type).info("==========cover download complete==========")
+            except Exception as e:
+                cls.logger(log_type).error("cover download failed: {}", e)
+
+
+if __name__ == "__main__":
+    common = Common()
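For orientation, here is a minimal, hypothetical usage sketch for the helpers above; the folder name and URLs are placeholders, not values from this commit:

```python
# -*- coding: utf-8 -*-
# Hypothetical usage sketch for main/common.py; folder name and URLs are placeholders.
from main.common import Common

# Logger bound to the "follow" log file: ./logs/<date>-xigua-follow.log
log = Common.logger("follow")
log.info("crawler starting")

# Download a video and its cover into ./videos/demo_video/
# (download_method uses os.mkdir, so ./videos/ itself must already exist)
Common.download_method("follow", "video", "demo_video", "https://example.com/video.mp4")
Common.download_method("follow", "cover", "demo_video", "https://example.com/cover.jpg")

# Keep only the 7 most recent log files
Common.del_logs("follow")
```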

+ 218 - 0
main/demo.py

The diff for this file was not shown because the file is too large

+ 406 - 0
main/feishu.py

@@ -0,0 +1,406 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/8/24
+import json
+
+import requests
+import urllib3
+
+from main.common import Common
+
+proxies = {"http": None, "https": None}
+
+
+class Feishu:
+    """
+    编辑飞书云文档
+    """
+    # Kanyikan crawler data sheet
+    kanyikan_url = "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?"
+    # Kuaishou crawler data sheet
+    # kuaishou_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnp4SaJt37q6OOOrYzPMjQkg?"
+    kuaishou_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?"
+    # Weishi crawler data sheet
+    weishi_url = "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?"
+    # Xiaoniangao crawler data sheet
+    xiaoniangao_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?"
+    # Data monitoring sheet
+    crawler_monitor = "https://w42nne6hzg.feishu.cn/sheets/shtcnlZWYazInhf7Z60jkbLRJyd?"
+    # Xigua video sheet
+    crawler_xigua = 'https://w42nne6hzg.feishu.cn/sheets/shtcnvOpx2P8vBXiV91Ot1MKIw8?'
+
+    # Phone numbers
+    wangkun = "13426262515"
+    gaonannan = "18501180073"
+    xinxin = "15546206651"
+    huxinxue = "18832292015"
+
+    # Feishu spreadsheet tokens
+    @classmethod
+    def spreadsheettoken(cls, crawler):
+        """
+        :param crawler: which crawler
+        """
+        if crawler == "kanyikan":
+            return "shtcngRPoDYAi24x52j2nDuHMih"
+        elif crawler == "kuaishou":
+            # return "shtcnp4SaJt37q6OOOrYzPMjQkg"
+            return "shtcnICEfaw9llDNQkKgdymM1xf"
+        elif crawler == "weishi":
+            return "shtcn5YSWg91JfVGzj0SFZIRRPh"
+        elif crawler == "xiaoniangao":
+            return "shtcnYxiyQ1wLklo1W5Kdqc9cGh"
+        elif crawler == "monitor":
+            return "shtcnlZWYazInhf7Z60jkbLRJyd"
+        elif crawler == "xigua":
+            return "shtcnvOpx2P8vBXiV91Ot1MKIw8"
+
+    # Get a Feishu API token
+    @classmethod
+    def get_token(cls, log_type):
+        """
+        Get a Feishu API token
+        :return:
+        """
+        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
+        post_data = {"app_id": "cli_a13ad2afa438d00b",  # backend credentials of the publishing app
+                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
+
+        try:
+            urllib3.disable_warnings()
+            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
+            tenant_access_token = response.json()["tenant_access_token"]
+            return tenant_access_token
+        except Exception as e:
+            Common.logger(log_type).error("Failed to get Feishu API token: {}", e)
+
+    # Get spreadsheet metadata
+    @classmethod
+    def get_metainfo(cls, log_type, crawler):
+        """
+        Get spreadsheet metadata
+        :return:
+        """
+        get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                           + cls.spreadsheettoken(crawler) + "/metainfo"
+
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            "extFields": "protectedRange",  # 额外返回的字段,extFields=protectedRange时返回保护行列信息
+            "user_id_type": "open_id"  # 返回的用户id类型,可选open_id,union_id
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
+            response = json.loads(r.content.decode("utf8"))
+            return response
+        except Exception as e:
+            Common.logger(log_type).error("Failed to get spreadsheet metadata: {}", e)
+
+    # Read all data from a worksheet
+    @classmethod
+    def get_values_batch(cls, log_type, crawler, sheetid):
+        """
+        Read all data from a worksheet
+        :param log_type: which log to use
+        :param crawler: which crawler
+        :param sheetid: which sheet
+        :return: all values
+        """
+        get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                               + cls.spreadsheettoken(crawler) + "/values_batch_get"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            # Multiple query ranges, e.g. url?ranges=range1,range2, where each range includes a sheetId plus a cell range
+            "ranges": sheetid,
+
+            # valueRenderOption=ToString returns plain-text values (except numeric types);
+            # valueRenderOption=FormattedValue computes and formats the cell;
+            # valueRenderOption=Formula returns the formula itself when the cell contains one;
+            # valueRenderOption=UnformattedValue computes but does not format the cell
+            "valueRenderOption": "ToString",
+
+            # dateTimeRenderOption=FormattedString computes and formats dates/times by their cell format (numbers excluded) and returns the formatted string.
+            "dateTimeRenderOption": "",
+
+            # Type of user id to return: open_id or union_id
+            "user_id_type": "open_id"
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
+            # print(r.text)
+            response = json.loads(r.content.decode("utf8"))
+            values = response["data"]["valueRanges"][0]["values"]
+            return values
+        except Exception as e:
+            Common.logger(log_type).error("Failed to read worksheet data: {}", e)
+
+    # Insert rows or columns into a worksheet
+    @classmethod
+    def insert_columns(cls, log_type, crawler, sheetid, majordimension, startindex, endindex):
+        """
+        Insert rows or columns into a worksheet
+        :param log_type: log path
+        :param crawler: which crawler's cloud document
+        :param sheetid: which worksheet
+        :param majordimension: rows or columns, ROWS / COLUMNS
+        :param startindex: start position
+        :param endindex: end position
+        """
+        insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                             + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "dimension": {
+                "sheetId": sheetid,
+                "majorDimension": majordimension,  # defaults to ROWS; ROWS or COLUMNS
+                "startIndex": startindex,  # start position
+                "endIndex": endindex  # end position
+            },
+            "inheritStyle": "AFTER"  # BEFORE or AFTER; omit to not inherit style
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("Insert rows/columns: {}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("Failed to insert rows/columns: {}", e)
+
+    # Write data
+    @classmethod
+    def update_values(cls, log_type, crawler, sheetid, ranges, values):
+        """
+        Write data
+        :param log_type: log path
+        :param crawler: which crawler's cloud document
+        :param sheetid: which worksheet
+        :param ranges: cell range
+        :param values: the data to write, as a list
+        """
+        update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                            + cls.spreadsheettoken(crawler) + "/values_batch_update"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "valueRanges": [
+                {
+                    "range": sheetid + "!" + ranges,
+                    "values": values
+                },
+            ],
+        }
+
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("Write data: {}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("Failed to write data: {}", e)
+
+    # Merge cells
+    @classmethod
+    def merge_cells(cls, log_type, crawler, sheetid, ranges):
+        """
+        Merge cells
+        :param log_type: log path
+        :param crawler: which crawler
+        :param sheetid: which worksheet
+        :param ranges: the cell range to merge
+        """
+        merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                          + cls.spreadsheettoken(crawler) + "/merge_cells"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+
+        body = {
+            "range": sheetid + "!" + ranges,
+            "mergeType": "MERGE_ROWS"
+        }
+
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("Merge cells: {}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("Failed to merge cells: {}", e)
+
+    # Read cell data
+    @classmethod
+    def get_range_value(cls, log_type, crawler, sheetid, cell):
+        """
+        Read the content of a cell
+        :param log_type: log path
+        :param crawler: which crawler
+        :param sheetid: which worksheet
+        :param cell: which cell
+        :return: cell content
+        """
+        get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                              + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            # valueRenderOption=ToString returns plain-text values (except numeric types);
+            # valueRenderOption=FormattedValue computes and formats the cell;
+            # valueRenderOption=Formula returns the formula itself when the cell contains one;
+            # valueRenderOption=UnformattedValue computes but does not format the cell.
+            "valueRenderOption": "FormattedValue",
+
+            # dateTimeRenderOption=FormattedString computes and formats dates/times by their cell format (numbers excluded) and returns the formatted string.
+            "dateTimeRenderOption": "",
+
+            # Type of user id to return: open_id or union_id
+            "user_id_type": "open_id"
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
+            # print(r.text)
+            return r.json()["data"]["valueRange"]["values"][0]
+        except Exception as e:
+            Common.logger(log_type).error("Failed to read cell data: {}", e)
+
+    # Delete rows or columns; ROWS or COLUMNS
+    @classmethod
+    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
+        """
+        Delete rows or columns
+        :param log_type: log path
+        :param crawler: which crawler
+        :param sheetid: worksheet
+        :param major_dimension: defaults to ROWS; ROWS or COLUMNS
+        :param startindex: start position
+        :param endindex: end position
+        :return:
+        """
+        dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                              + cls.spreadsheettoken(crawler) + "/dimension_range"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "dimension": {
+                "sheetId": sheetid,
+                "majorDimension": major_dimension,
+                "startIndex": startindex,
+                "endIndex": endindex
+            }
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("Delete rows/columns: {}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("Failed to delete rows/columns: {}", e)
+
+    # Get a user's open_id
+    @classmethod
+    def get_userid(cls, log_type, username):
+        try:
+            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(log_type),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            if username == "wangkun":
+                username = cls.wangkun
+            elif username == "gaonannan":
+                username = cls.gaonannan
+            elif username == "xinxin":
+                username = cls.xinxin
+            elif username == "huxinxue":
+                username = cls.huxinxue
+            data = {"mobiles": [username]}
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
+            open_id = r.json()["data"]["mobile_users"][username][0]["open_id"]
+            Common.logger(log_type).info("{}:{}", username, open_id)
+            # print(f"{username}:{open_id}")
+            return open_id
+        except Exception as e:
+            Common.logger(log_type).error("get_userid exception: {}", e)
+
+    # Feishu bot
+    @classmethod
+    def bot(cls, log_type, content):
+        try:
+            url = "https://open.feishu.cn/open-apis/bot/v2/hook/96989577-50e7-4653-9ec2-308fe3f2c5fe"
+            headers = {
+                'Content-Type': 'application/json'
+            }
+            data = json.dumps({
+                "msg_type": "interactive",
+                "card": {
+                    "config": {
+                        "wide_screen_mode": True,
+                        "enable_forward": True
+                    },
+                    "elements": [{
+                        "tag": "div",
+                        "text": {
+                            "content": "\n<at id=" + str(cls.get_userid(log_type, "wangkun")) + "></at>\n" + content,
+                            "tag": "lark_md"
+                        }
+                    }, {
+                        "actions": [{
+                            "tag": "button",
+                            "text": {
+                                "content": "快手爬虫表",
+                                "tag": "lark_md"
+                            },
+                            "url": "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf",
+                            "type": "default",
+                            "value": {}
+                        },
+                            {
+                                "tag": "button",
+                                "text": {
+                                    "content": "快手Jenkins",
+                                    "tag": "lark_md"
+                                },
+                                "url": "https://jenkins-on.yishihui.com/view/%E7%88%AC%E8%99%AB-Spider/job/%E5%BF%"
+                                       "AB%E6%89%8B%E5%B0%8F%E7%A8%8B%E5%BA%8F-%E8%A7%86%E9%A2%91%E7%88%AC%E5%8F%96/",
+                                "type": "default",
+                                "value": {}
+                            }
+
+                        ],
+                        "tag": "action"
+                    }],
+                    "header": {
+                        "title": {
+                            "content": "📣有新的报警,请注意查处",
+                            "tag": "plain_text"
+                        }
+                    }
+                }
+            })
+            urllib3.disable_warnings()
+            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
+            Common.logger(log_type).info("Bot message triggered: {}, {}", r, r.json()["StatusMessage"])
+        except Exception as e:
+            Common.logger(log_type).error("bot exception: {}", e)
+
+
+if __name__ == "__main__":
+    Feishu.bot("kuaishou", "This is a Kuaishou test message, please ignore")
+    # Feishu.get_userid("kuaishou", "huxinxue")
+    # Feishu.get_department("kuaishou")
+    pass
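A minimal sketch of how these sheet helpers chain together when prepending a row, mirroring the pattern used in main/get_signature.py below; the written values are placeholders:

```python
# -*- coding: utf-8 -*-
# Hypothetical usage sketch for main/feishu.py; the row values are placeholders.
import time
from main.feishu import Feishu

log_type = "follow"

# Read everything currently in sheet 6tZHhs of the xigua spreadsheet
rows = Feishu.get_values_batch(log_type, "xigua", "6tZHhs")
print(len(rows), "rows")

# Prepend a row: insert_columns shifts existing rows down,
# then update_values fills the freed A2:B2 range.
Feishu.insert_columns(log_type, "xigua", "6tZHhs", "ROWS", 1, 2)
time.sleep(0.5)
values = [[time.strftime("%Y/%m/%d %H:%M:%S"), "placeholder-value"]]
Feishu.update_values(log_type, "xigua", "6tZHhs", "A2:B2", values)
```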

+ 121 - 0
main/get_signature.py

@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/9/21
+import json
+import os
+import sys
+import time
+from selenium import webdriver
+from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
+sys.path.append(os.getcwd())
+from main.common import Common
+from main.feishu import Feishu
+proxies = {'http': None, 'https': None}
+
+
+class GetSignature:
+    @classmethod
+    def get_signature(cls, log_type):
+        try:
+            # Enable performance logging so network requests can be inspected
+            ca = DesiredCapabilities.CHROME
+            ca["goog:loggingPrefs"] = {"performance": "ALL"}
+
+            # Run without opening a browser window
+            chrome_options = webdriver.ChromeOptions()
+            chrome_options.add_argument("headless")
+            chrome_options.add_argument(f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
+
+            # Initialize the driver
+            Common.logger(log_type).info('Initializing webdriver')
+            # driver = webdriver.Chrome(desired_capabilities=ca)
+            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
+
+            driver.implicitly_wait(10)
+            Common.logger(log_type).info("Opening the '北京旅游等着瞧' profile page")
+            driver.get('https://www.ixigua.com/home/2176672314697038')
+            time.sleep(5)
+
+            # Scroll the page down by 2000 pixels
+            Common.logger(log_type).info('Scrolling the page')
+            driver.execute_script('window.scrollBy(0, 2000)')
+            Common.logger(log_type).info('Sleeping 5s')
+            time.sleep(5)
+            Common.logger(log_type).info('Refreshing the page')
+            driver.refresh()
+            Common.logger(log_type).info('Sleeping 1s')
+            time.sleep(1)
+            Common.logger(log_type).info('Parsing signature')
+            logs = driver.get_log("performance")
+            # Common.logger(log_type).info('Got logs: {}\n', logs)
+            Common.logger(log_type).info('Quitting the browser')
+            driver.quit()
+            for line in logs:
+                msg = json.loads(line['message'])
+                if 'params' not in msg['message']:
+                    pass
+                elif 'documentURL' not in msg['message']['params']:
+                    pass
+                elif 'www.ixigua.com/home/2176672314697038' not in msg['message']['params']['documentURL']:
+                    pass
+                elif 'url' not in msg['message']['params']['request']:
+                    pass
+                elif '_signature' not in msg['message']['params']['request']['url']:
+                    pass
+                elif 'web/user/settings' not in msg['message']['params']['request']['url']:
+                    pass
+                else:
+                    url = msg['message']['params']['request']['url']
+                    signature = url.split('_signature=')[-1].split('&')[0]
+                    Common.logger(log_type).info('url:{}\n', url)
+                    Common.logger(log_type).info('signature:{}\n', signature)
+
+                    get_signature_time = time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time.time()))
+                    values = [[get_signature_time, signature]]
+                    Feishu.insert_columns(log_type, 'xigua', '6tZHhs', 'ROWS', 1, 2)
+                    time.sleep(0.5)
+                    Feishu.update_values(log_type, 'xigua', '6tZHhs', 'A2:B2', values)
+                    Common.logger(log_type).info('signature written to Feishu: {}\n', signature)
+                    cls.del_signature(log_type)
+
+                # if 'message' in line \
+                #         and 'message' in json.loads(line['message'])\
+                #         and 'params' in json.loads(line['message'])['message']\
+                #         and 'response' in json.loads(line['message'])['message']['params']\
+                #         and 'url' in json.loads(line['message'])['message']['params']['response']\
+                #         and '_signature' in json.loads(line['message'])['message']['params']['response']['url']\
+                #         and 'web/user/settings' in json.loads(line['message'])['message']['params']['response']['url']:
+                #     url = json.loads(line['message'])['message']['params']['response']['url']
+                #     signature = url.split('_signature=')[-1]
+                #     if len(signature) < 50:
+                #         pass
+                #     else:
+                #         get_signature_time = time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time.time()))
+                #         values = [[get_signature_time, signature]]
+                #         Feishu.update_values(log_type, 'xigua', '6tZHhs', 'A2:B2', values)
+                #         Common.logger(log_type).info('signature written to Feishu: {}\n', signature)
+                #         cls.del_signature(log_type)
+                # else:
+                #     Common.logger(log_type).info('No signature captured: {}\n', line)
+
+        except Exception as e:
+            Common.logger(log_type).error('get_signature exception: {}', e)
+
+    @classmethod
+    def del_signature(cls, log_type):
+        try:
+            while True:
+                signature_sht = Feishu.get_values_batch(log_type, 'xigua', '6tZHhs')
+                if len(signature_sht) <= 25:
+                    break
+                else:
+                    Feishu.dimension_range(log_type, 'xigua', '6tZHhs', 'ROWS', 26, 26)
+            Common.logger(log_type).info('signature cleanup complete\n')
+        except Exception as e:
+            Common.logger(log_type).error('del_signature exception: {}', e)
+
+
+if __name__ == '__main__':
+    GetSignature.get_signature('follow')
+    # GetSignature.del_signature('follow')
+    pass
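The `url.split('_signature=')[-1].split('&')[0]` extraction above works; for reference, the same thing can be done with the standard library's URL parser. A small equivalent sketch:

```python
# -*- coding: utf-8 -*-
# Equivalent _signature extraction using urllib.parse instead of str.split.
from urllib.parse import urlparse, parse_qs

def extract_signature(url):
    """Return the _signature query parameter, or None if absent."""
    query = parse_qs(urlparse(url).query)
    values = query.get("_signature")
    return values[0] if values else None

# Example with a made-up URL:
print(extract_signature("https://www.ixigua.com/api/user/settings?aid=1768&_signature=abc123"))
# -> abc123
```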

+ 261 - 0
main/publish.py

@@ -0,0 +1,261 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/8/26
+"""
+上传视频到阿里云 OSS
+上传视频到管理后台
+"""
+import json
+import os
+import random
+import time
+
+import oss2
+import requests
+import urllib3
+from main.common import Common
+
+proxies = {"http": None, "https": None}
+
+
+class Publish:
+    @classmethod
+    def publish_video_dev(cls, log_type, request_data):
+        """
+        loginUid  on-platform uid (random)
+        appType  default: 888888
+        crawlerSrcId   off-platform video ID
+        crawlerSrcCode   channel (custom, e.g. KYK)
+        crawlerSrcPublishTimestamp  original publish time of the video
+        crawlerTaskTimestamp   crawler task creation time (current time is fine)
+        videoPath  OSS path of the video
+        coverImgPath  OSS path of the video cover
+        title  title
+        totalTime  video duration
+        viewStatus  video validity status, default 1
+        versionCode  version, default 1
+        :return:
+        """
+        # Common.logger(log_type).info('publish request data: {}'.format(request_data))
+        result = cls.request_post('https://videotest.yishihui.com/longvideoapi/crawler/video/send', request_data)
+        # Common.logger(log_type).info('publish result: {}'.format(result))
+        if result['code'] != 0:
+            Common.logger(log_type).error('publish failure msg = {}'.format(result['msg']))
+            return
+        video_id = result["data"]["id"]
+        # Common.logger(log_type).info('video_id: {}'.format(video_id))
+        Common.logger(log_type).info('publish success video_id = {}'.format(request_data['crawlerSrcId']))
+        return video_id
+
+    @classmethod
+    def publish_video_prod(cls, log_type, request_data):
+        """
+        loginUid  on-platform uid (random)
+        appType  default: 888888
+        crawlerSrcId   off-platform video ID
+        crawlerSrcCode   channel (custom, e.g. KYK)
+        crawlerSrcPublishTimestamp  original publish time of the video
+        crawlerTaskTimestamp   crawler task creation time (current time is fine)
+        videoPath  OSS path of the video
+        coverImgPath  OSS path of the video cover
+        title  title
+        totalTime  video duration
+        viewStatus  video validity status, default 1
+        versionCode  version, default 1
+        :return:
+        """
+        result = cls.request_post('https://longvideoapi.piaoquantv.com/longvideoapi/crawler/video/send', request_data)
+        # Common.logger(log_type).info('publish result: {}'.format(result))
+        if result['code'] != 0:
+            Common.logger(log_type).error('publish failure msg = {}'.format(result['msg']))
+            return
+        video_id = result["data"]["id"]
+        # Common.logger(log_type).info('video_id: {}'.format(video_id))
+        Common.logger(log_type).info('publish success video_id = {}'.format(request_data['crawlerSrcId']))
+        return video_id
+
+    @classmethod
+    def request_post(cls, request_url, request_data):
+        """
+        POST to an HTTP endpoint
+        :param request_url: endpoint URL
+        :param request_data: request parameters
+        :return: res_data in JSON format
+        """
+        urllib3.disable_warnings()
+        response = requests.post(url=request_url, data=request_data, proxies=proxies, verify=False)
+        if response.status_code == 200:
+            res_data = json.loads(response.text)
+            return res_data
+
+    # The code below demonstrates basic file upload, download, listing and deletion.
+
+    # First initialize AccessKeyId, AccessKeySecret, Endpoint and so on.
+    # Read them from environment variables, or replace placeholders such as "<your AccessKeyId>" with real values.
+    #
+    # Taking the Hangzhou region as an example, the Endpoint can be:
+    #   http://oss-cn-hangzhou.aliyuncs.com
+    #   https://oss-cn-hangzhou.aliyuncs.com
+    # for access over HTTP and HTTPS respectively.
+    access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', 'LTAIP6x1l3DXfSxm')
+    access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', 'KbTaM9ars4OX3PMS6Xm7rtxGr1FLon')
+    bucket_name = os.getenv('OSS_TEST_BUCKET', 'art-pubbucket')
+    # endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-hangzhou-internal.aliyuncs.com')
+    endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-hangzhou.aliyuncs.com')
+
+    # Make sure all the parameters above are filled in correctly
+    for param in (access_key_id, access_key_secret, bucket_name, endpoint):
+        assert '<' not in param, 'Please set parameter: ' + param
+
+    # Create the Bucket object; all Object-related APIs go through it
+    bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
+
+    """
+    处理流程:
+    1. 定时(每天凌晨1点执行一次)循环files文件下的内容 结构:files -> 视频文件夹 -> 视频文件 + 封面图 + 基本信息
+    2. 视频文件和封面上传到oss
+    - 视频文件oss目录  longvideo/crawler_local/video/prod/文件名
+    - 视频封面oss目录  longvideo/crawler_local/image/prod/文件名
+    3. 发布视频
+    - 读取 基本信息 调用发布接口
+    """
+    # env 日期20220225 文件名
+    oss_file_path_video = 'longvideo/crawler_local/video/{}/{}/{}'
+    oss_file_path_image = 'longvideo/crawler_local/image/{}/{}/{}'
+
+    @classmethod
+    def put_file(cls, log_type, oss_file, local_file):
+        cls.bucket.put_object_from_file(oss_file, local_file)
+        Common.logger(log_type).info("put oss file = {}, local file = {} success".format(oss_file, local_file))
+
+    # Remove a local file
+    @classmethod
+    def remove_local_file(cls, log_type, local_file):
+        os.remove(local_file)
+        Common.logger(log_type).info("remove local file = {} success".format(local_file))
+
+    # Remove a local directory
+    @classmethod
+    def remove_local_file_dir(cls, log_type, local_file):
+        os.rmdir(local_file)
+        Common.logger(log_type).info("remove local file dir = {} success".format(local_file))
+
+    local_file_path = './videos'
+    video_file = 'video'
+    image_file = 'image'
+    info_file = 'info'
+    uids_dev_up = [6267140]
+    uids_dev_play = [6267141]
+    uids_prod_up = [20631185, 20631186, 20631187, 20631188, 20631189,
+                    20631190, 20631191, 20631192, 20631193]
+    uids_prod_play = [20631196, 20631197, 20631198, 20631199, 20631200, 20631201]
+
+    @classmethod
+    def upload_and_publish(cls, log_type, env, uid):
+        """
+        上传视频到 oss
+        :param log_type: 选择的 log
+        :param env: 测试环境:dev,正式环境:prod
+        # :param job: 上升榜:up,播放量:play
+        :param uid: 站内 UID
+        """
+        Common.logger(log_type).info("upload_and_publish starting...")
+        today = time.strftime("%Y%m%d", time.localtime())
+        # All video folders under ./videos
+        files = os.listdir(cls.local_file_path)
+        for f in files:
+            try:
+                # A single video folder
+                fi_d = os.path.join(cls.local_file_path, f)
+                # Confirm it is a directory
+                if os.path.isdir(fi_d):
+                    Common.logger(log_type).info('dir = {}'.format(fi_d))
+                    # List the files inside the video folder
+                    dir_files = os.listdir(fi_d)
+                    data = {'appType': '888888',
+                            'crawlerSrcCode': 'XIGUA',
+                            'viewStatus': '1',
+                            'versionCode': '1'}
+                    now_timestamp = int(round(time.time() * 1000))
+                    data['crawlerTaskTimestamp'] = str(now_timestamp)
+                    # global uid
+                    # if env == "dev" and job == "up":
+                    #     uid = str(random.choice(cls.uids_dev_up))
+                    # elif env == "dev" and job == "play":
+                    #     uid = str(random.choice(cls.uids_dev_play))
+                    # elif env == "prod" and job == "up":
+                    #     uid = str(random.choice(cls.uids_prod_up))
+                    # elif env == "prod" and job == "play":
+                    #     uid = str(random.choice(cls.uids_prod_play))
+                    data['loginUid'] = uid
+                    # All files in this video folder
+                    for fi in dir_files:
+                        # Full path of each file in the folder
+                        fi_path = fi_d + '/' + fi
+                        Common.logger(log_type).info('dir fi_path = {}'.format(fi_path))
+                        # Read info.txt and populate data
+                        if cls.info_file in fi:
+                            f = open(fi_path, "r", encoding="UTF-8")
+                            # Accuracy is guaranteed at write time, so values are read without extra validation
+                            for i in range(14):
+                                line = f.readline()
+                                line = line.replace('\n', '')
+                                if line is not None and len(line) != 0 and not line.isspace():
+                                    # Common.logger(log_type).info("line = {}".format(line))
+                                    if i == 0:
+                                        data['crawlerSrcId'] = line
+                                    elif i == 1:
+                                        data['title'] = line
+                                    elif i == 2:
+                                        data['totalTime'] = line
+                                    elif i == 8:
+                                        data['crawlerSrcPublishTimestamp'] = line
+                                else:
+                                    Common.logger(log_type).warning("{} line is None".format(fi_path))
+                            f.close()
+                            # remove info.txt
+                            cls.remove_local_file(log_type, fi_path)
+                    # Re-list the folder contents
+                    dir_files = os.listdir(fi_d)
+                    for fi in dir_files:
+                        fi_path = fi_d + '/' + fi
+                        # Common.logger(log_type).info('dir fi_path = {}'.format(fi_path))
+                        # Upload to OSS
+                        if cls.video_file in fi:
+                            global oss_video_file
+                            if env == "dev":
+                                oss_video_file = cls.oss_file_path_video.format("dev", today, data['crawlerSrcId'])
+                            elif env == "prod":
+                                oss_video_file = cls.oss_file_path_video.format("prod", today, data['crawlerSrcId'])
+                            Common.logger(log_type).info("oss_video_file = {}".format(oss_video_file))
+                            cls.put_file(log_type, oss_video_file, fi_path)
+                            data['videoPath'] = oss_video_file
+                            Common.logger(log_type).info("videoPath = {}".format(oss_video_file))
+                        elif cls.image_file in fi:
+                            global oss_image_file
+                            if env == "dev":
+                                oss_image_file = cls.oss_file_path_image.format("dev", today, data['crawlerSrcId'])
+                            elif env == "prod":
+                                oss_image_file = cls.oss_file_path_image.format("prod", today, data['crawlerSrcId'])
+                            Common.logger(log_type).info("oss_image_file = {}".format(oss_image_file))
+                            cls.put_file(log_type, oss_image_file, fi_path)
+                            data['coverImgPath'] = oss_image_file
+                            Common.logger(log_type).info("coverImgPath = {}".format(oss_image_file))
+                        # Remove everything that was uploaded
+                        cls.remove_local_file(log_type, fi_path)
+
+                    # Publish
+                    if env == "dev":
+                        video_id = cls.publish_video_dev(log_type, data)
+                    elif env == "prod":
+                        video_id = cls.publish_video_prod(log_type, data)
+                    else:
+                        video_id = cls.publish_video_dev(log_type, data)
+                    cls.remove_local_file_dir(log_type, fi_d)
+                    Common.logger(log_type).info('video_id:{}', video_id)
+                    return video_id
+
+                else:
+                    Common.logger(log_type).error('file not a dir = {}'.format(fi_d))
+            except Exception as e:
+                Common.logger(log_type).exception('upload_and_publish error', e)
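From the read loop in upload_and_publish, only four of the fourteen info-file lines are consumed here; the rest are written by the download stage and skipped by this method. A sketch of the implied contract (the example values and path are invented):

```python
# -*- coding: utf-8 -*-
# Sketch of the info-file layout that upload_and_publish relies on.
# Only line indices 0, 1, 2 and 8 are mapped into the request data;
# the example values below are invented for illustration.
info_lines = [
    "7123456789012345678",    # i == 0 -> data['crawlerSrcId']
    "Some video title",       # i == 1 -> data['title']
    "125",                    # i == 2 -> data['totalTime'] (seconds)
    "-", "-", "-", "-", "-",  # i == 3..7, not used by this method
    "1661324763",             # i == 8 -> data['crawlerSrcPublishTimestamp']
    "-", "-", "-", "-", "-",  # i == 9..13, not used by this method
]
# Hypothetical folder name; upload_and_publish scans ./videos/<folder>/
with open("./videos/demo_video/info.txt", "w", encoding="UTF-8") as f:
    f.write("\n".join(info_lines))
```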

+ 37 - 0
main/run_xigua_follow.py

@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/8/25
+import os
+import sys
+import time
+
+
+sys.path.append(os.getcwd())
+from main.common import Common
+from main.xigua_follow import Follow
+from main.get_signature import GetSignature
+
+
+class Main:
+    # Main entry point
+    @classmethod
+    def main(cls, env):
+        while True:
+            GetSignature.get_signature('follow')
+
+            # Fetch the video lists
+            Follow.get_all_person_videos('follow', env)
+
+            # Clean up logs
+            Common.del_logs('follow')
+
+            # Reset pagination
+            Follow.offset = 0
+
+            # Sleep for an hour
+            Common.logger('follow').info('Sleeping for 1 hour')
+            time.sleep(3600)
+
+
+if __name__ == '__main__':
+    Main.main('prod')
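One design note: each step in the loop handles its own exceptions internally, but anything unexpected raised at this level would kill the process. A hedged defensive variant (a sketch, not part of this commit) that keeps the scheduler alive:

```python
# -*- coding: utf-8 -*-
# Defensive variant of the main loop; a sketch, not the committed code.
import time
from main.common import Common
from main.xigua_follow import Follow
from main.get_signature import GetSignature

def run_forever(env):
    while True:
        try:
            GetSignature.get_signature('follow')
            Follow.get_all_person_videos('follow', env)
            Common.del_logs('follow')
            Follow.offset = 0
        except Exception as e:
            # Log and continue instead of letting one bad cycle kill the process
            Common.logger('follow').error('main loop exception: {}', e)
        Common.logger('follow').info('Sleeping for 1 hour')
        time.sleep(3600)
```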

+ 767 - 0
main/xigua_follow.py

@@ -0,0 +1,767 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/8/23
+import base64
+import os
+import random
+import subprocess
+import sys
+import time
+import requests
+import urllib3
+sys.path.append(os.getcwd())
+from main.common import Common
+from main.feishu import Feishu
+from main.publish import Publish
+proxies = {"http": None, "https": None}
+
+
+class Follow:
+    # Pagination offset for the profile video list
+    offset = 0
+
+    # Get user info as a dict. Note: some user_id values are int, others str
+    @classmethod
+    def get_user_info_from_feishu(cls, log_type):
+        try:
+            user_sheet = Feishu.get_values_batch(log_type, 'xigua', '5tlTYB')
+            user_dict = {}
+            for i in range(1, len(user_sheet)):
+                user_name = user_sheet[i][0]
+                user_id = user_sheet[i][1]
+                our_id = user_sheet[i][3]
+                if user_name is None or user_id is None or our_id is None:
+                    pass
+                else:
+                    user_dict[user_name] = str(user_id)+','+str(our_id)
+            return user_dict
+        except Exception as e:
+            Common.logger(log_type).error('get_user_info_from_feishu exception: {}', e)
+
+    # Download rule: duration at least 60s and at least 720 on one side
+    @staticmethod
+    def download_rule(duration, width, height):
+        return int(duration) >= 60 and (int(width) >= 720 or int(height) >= 720)
+
+    # Filter-word list
+    @classmethod
+    def filter_words(cls, log_type):
+        try:
+            filter_words_sheet = Feishu.get_values_batch(log_type, 'xigua', 'KGB4Hc')
+            filter_words_list = []
+            for x in filter_words_sheet:
+                for y in x:
+                    if y is None:
+                        pass
+                    else:
+                        filter_words_list.append(y)
+            return filter_words_list
+        except Exception as e:
+            Common.logger(log_type).error('filter_words exception: {}', e)
+
+    # PC side: Xigua user profile video list. Note: the _signature parameter is only valid for one hour
+    @classmethod
+    def get_follow_feeds_by_pc(cls, log_type, userid):
+        try:
+            url = "https://www.ixigua.com/api/videov2/author/new_video_list?"
+            headers = {
+                'sec-ch-ua': '".Not/A)Brand";v="99", "Google Chrome";v="103", "Chromium";v="103"',
+                'accept': 'application/json, text/plain, */*',
+                'sec-ch-ua-mobile': '?0',
+                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko)'
+                              ' Chrome/103.0.0.0 Safari/537.36',
+                'sec-ch-ua-platform': '"macOS"',
+                'sec-fetch-site': 'same-origin',
+                'sec-fetch-mode': 'cors',
+                'sec-fetch-dest': 'document',
+                'referer': 'https://www.ixigua.com/home/' + str(userid),
+                'accept-encoding': 'gzip, deflate, br',
+                'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
+            }
+            params = {
+                'to_user_id': str(userid),
+                'offset': str(cls.offset),
+                'limit': '30',
+                'maxBehotTime': '0',
+                'order': 'new',
+                'isHome': '0',
+                'msToken': '2ZHINOMBPK-qlCKApv37xVCBKkXyPli8mTYNlTSXvr17eZ0Ea8B__Otimkx6q_enDc9m8Kgzi3Re7wpLIMSSE9dofTYdqQgvB7mHQbx_AMnVnf5lsByU',
+                'X-Bogus': 'DFSzswVuVvTANe2BSBBMCR/F6qyc',
+                '_signature': Feishu.get_values_batch(log_type, 'xigua', '6tZHhs')[1][1],
+            }
+            cookies = {
+                '__ac_signature': '_02B4Z6wo00f017vzS8QAAIDCwz2gwwDpX9-7009AAI4Bc4',
+                'MONITOR_WEB_ID': 'fd4244aa-2003-4e19-a2a4-715c19310a56',
+                'ixigua-a-s': '1',
+                'support_webp': 'true',
+                'support_avif': 'true',
+                '_tea_utm_cache_1300': 'undefined',
+                'ttcid': '16a3b6b9b80b4a87ae258f5f3f101e6310',
+                'msToken': 'G8pL2oH-9Zl1hrLZPyOMSceMaII3ejKda2o-tgO1heYrj7b_fgm9vGlvwyLOA2H8oUShZgAYfxEvIuktT7OuxBuy85N-ousFfqxuAIrfruMEFZUTYp2z',
+                'tt_scid': 'a0zhISPImN-dVMMdbeb1Kzhl1x4oJS5Yr81FzH6qYk3jDtj1d2E5gsywN4rwna8ib398',
+                'ttwid': '1%7CvorN1HQjbSgBViRkEoZYEbqP_sQVoQqaUqGcFA-bzpA%7C1661324763%7Ce040213e1107973ebb0db64f0e77cfb027375f1fb5854bb40588d692d025af1f',
+            }
+
+            urllib3.disable_warnings()
+            response = requests.get(url=url, headers=headers, params=params, cookies=cookies, proxies=proxies, verify=False)
+            # Common.logger(log_type).info('response:{}', response.text)
+            cls.offset += 30
+            if 'data' not in response.text or response.json()['data'] == '' or response.json()['code'] != 200:
+                Common.logger(log_type).info('get_follow_feeds: response:{}', response.text)
+            else:
+                feeds = response.json()['data']['videoList']
+                # print(len(feeds))
+                for i in range(len(feeds)):
+                    # video_title
+                    if 'title' not in feeds[i]:
+                        video_title = 0
+                    else:
+                        video_title = feeds[i]['title'].strip().replace('手游', '')
+
+                    # video_id
+                    if 'video_id' not in feeds[i]:
+                        video_id = 0
+                    else:
+                        video_id = feeds[i]['video_id']
+
+                    # gid
+                    if 'gid' not in feeds[i]:
+                        gid = 0
+                    else:
+                        gid = feeds[i]['gid']
+
+                    # play_cnt
+                    if 'video_detail_info' not in feeds[i]:
+                        play_cnt = 0
+                    elif 'video_watch_count' not in feeds[i]['video_detail_info']:
+                        play_cnt = 0
+                    else:
+                        play_cnt = feeds[i]['video_detail_info']['video_watch_count']
+
+                    # comment_cnt
+                    if 'comment_count' not in feeds[i]:
+                        comment_cnt = 0
+                    else:
+                        comment_cnt = feeds[i]['comment_count']
+
+                    # like_cnt
+                    if 'digg_count' not in feeds[i]:
+                        like_cnt = 0
+                    else:
+                        like_cnt = feeds[i]['digg_count']
+
+                    # share_cnt
+                    share_cnt = 0
+
+                    # video_duration
+                    if 'video_duration' not in feeds[i]:
+                        video_duration = 0
+                    else:
+                        video_duration = feeds[i]['video_duration']
+
+                    # send_time
+                    if 'publish_time' not in feeds[i]:
+                        send_time = 0
+                    else:
+                        send_time = feeds[i]['publish_time']
+
+                    # user_name
+                    if 'user_info' not in feeds[i]:
+                        user_name = 0
+                    elif 'name' not in feeds[i]['user_info']:
+                        user_name = 0
+                    else:
+                        user_name = feeds[i]['user_info']['name']
+
+                    # user_id
+                    if 'user_info' not in feeds[i]:
+                        user_id = 0
+                    elif 'user_id' not in feeds[i]['user_info']:
+                        user_id = 0
+                    else:
+                        user_id = feeds[i]['user_info']['user_id']
+
+                    # head_url
+                    if 'user_info' not in feeds[i]:
+                        head_url = 0
+                    elif 'avatar_url' not in feeds[i]['user_info']:
+                        head_url = 0
+                    else:
+                        head_url = feeds[i]['user_info']['avatar_url']
+
+                    # cover_url
+                    if 'video_detail_info' not in feeds[i]:
+                        cover_url = 0
+                    elif 'detail_video_large_image' not in feeds[i]['video_detail_info']:
+                        cover_url = 0
+                    elif 'url' in feeds[i]['video_detail_info']['detail_video_large_image']:
+                        cover_url = feeds[i]['video_detail_info']['detail_video_large_image']['url']
+                    else:
+                        cover_url = feeds[i]['video_detail_info']['detail_video_large_image']['url_list'][0]['url']
+
+                    video_url_info = cls.get_video_info(log_type, gid)
+
+                    video_width = video_url_info[2]
+                    video_height = video_url_info[-1]
+                    video_url = video_url_info[0]
+                    audio_url = video_url_info[1]
+
+                    Common.logger(log_type).info('video_title:{}', video_title)
+                    Common.logger(log_type).info('video_id:{}', video_id)
+                    Common.logger(log_type).info('play_cnt:{}', play_cnt)
+                    Common.logger(log_type).info('send_time:{}',
+                                                 time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(send_time)))
+
+                    if gid == 0 or video_id == 0:
+                        Common.logger(log_type).info('Invalid video\n')
+                    elif int(time.time()) - int(send_time) > 3600 * 24 * 10:
+                        Common.logger(log_type).info('Published more than 10 days ago: {}\n',
+                                                     time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(send_time)))
+                        cls.offset = 0
+                        return
+                    elif cls.download_rule(video_duration, video_width, video_height) is False:
+                        Common.logger(log_type).info('Does not meet the crawl rules\n')
+                    elif any(word in video_title for word in cls.filter_words(log_type)):
+                        Common.logger(log_type).info('Title hit a filter word: {}\n', video_title)
+                    elif str(video_id) in [x for y in Feishu.get_values_batch(log_type, 'xigua', 'e075e9') for x in y]:
+                        Common.logger(log_type).info('Video already downloaded\n')
+                    elif str(video_id) in [x for y in Feishu.get_values_batch(log_type, 'xigua', 'wjhpDs') for x in y]:
+                        Common.logger(log_type).info('Video already exists\n')
+                    else:
+                        Feishu.insert_columns(log_type, 'xigua', 'wjhpDs', 'ROWS', 1, 2)
+                        get_feeds_time = time.time()
+                        values = [[time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(get_feeds_time)),
+                                   '关注榜',
+                                   video_title,
+                                   str(video_id),
+                                   gid,
+                                   play_cnt,
+                                   comment_cnt,
+                                   like_cnt,
+                                   share_cnt,
+                                   video_duration,
+                                   str(video_width) + '*' + str(video_height),
+                                   time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(send_time)),
+                                   user_name,
+                                   user_id,
+                                   head_url,
+                                   cover_url,
+                                   video_url,
+                                   audio_url]]
+                        time.sleep(1)
+                        Feishu.update_values(log_type, 'xigua', 'wjhpDs', 'A2:Z2', values)
+                        Common.logger(log_type).info('Video info written to Feishu\n')
+                        time.sleep(random.randint(1, 3))
+        except Exception as e:
+            Common.logger(log_type).error('get_follow_feeds_by_pc exception: {}\n', e)
+
+    # Get video details
+    @classmethod
+    def get_video_info(cls, log_type, gid):
+        try:
+            url = 'https://www.ixigua.com/api/mixVideo/information?'
+            headers = {
+                "accept-encoding": "gzip, deflate, br",
+                "accept-language": "zh-CN,zh-Hans;q=0.9",
+                "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
+                              "AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.5 Safari/605.1.15",
+                "referer": "https://www.ixigua.com/7102614741050196520?logTag=0531c88ac04f38ab2c62",
+            }
+            params = {
+                'mixId': gid,
+                'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfC'
+                           'NVVIOBNjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
+                'X-Bogus': 'DFSzswVupYTANCJOSBk0P53WxM-r',
+                '_signature': '_02B4Z6wo0000119LvEwAAIDCuktNZ0y5wkdfS7jAALThuOR8D9yWNZ.EmWHKV0WSn6Px'
+                              'fPsH9-BldyxVje0f49ryXgmn7Tzk-swEHNb15TiGqa6YF.cX0jW8Eds1TtJOIZyfc9s5emH7gdWN94',
+            }
+            cookies = {
+                'ixigua-a-s': '1',
+                'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfCNVVIOB'
+                           'NjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
+                'ttwid': '1%7C_yXQeHWwLZgCsgHClOwTCdYSOt_MjdOkgnPIkpi-Sr8%7C1661241238%7Cf57d0c5ef3f1d7'
+                         '6e049fccdca1ac54887c34d1f8731c8e51a49780ff0ceab9f8',
+                'tt_scid': 'QZ4l8KXDG0YAEaMCSbADdcybdKbUfG4BC6S4OBv9lpRS5VyqYLX2bIR8CTeZeGHR9ee3',
+                'MONITOR_WEB_ID': '0a49204a-7af5-4e96-95f0-f4bafb7450ad',
+                '__ac_nonce': '06304878000964fdad287',
+                '__ac_signature': '_02B4Z6wo00f017Rcr3AAAIDCUVxeW1tOKEu0fKvAAI4cvoYzV-wBhq7B6D8k0no7lb'
+                                  'FlvYoinmtK6UXjRIYPXnahUlFTvmWVtb77jsMkKAXzAEsLE56m36RlvL7ky.M3Xn52r9t1IEb7IR3ke8',
+                'ttcid': 'e56fabf6e85d4adf9e4d91902496a0e882',
+                '_tea_utm_cache_1300': 'undefined',
+                'support_avif': 'false',
+                'support_webp': 'false',
+                'xiguavideopcwebid': '7134967546256016900',
+                'xiguavideopcwebid.sig': 'xxRww5R1VEMJN_dQepHorEu_eAc',
+            }
+            urllib3.disable_warnings()
+            response = requests.get(url=url, headers=headers, params=params, cookies=cookies, proxies=proxies,
+                                    verify=False)
+            if 'data' not in response.json() or response.json()['data'] == '':
+                Common.logger(log_type).warning('get_video_info: response: {}', response)
+            else:
+                video_info = response.json()['data']['gidInformation']['packerData']['video']
+
+                video_url = ''
+                audio_url = ''
+                video_width = ''
+                video_height = ''
+
+                # video_url
+                if 'videoResource' not in video_info:
+                    video_url = 0
+                    audio_url = 0
+                    video_width = 0
+                    video_height = 0
+                elif 'dash' in video_info['videoResource']:
+                    video_url = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1][
+                        'main_url']
+                    audio_url = video_info['videoResource']['dash']['dynamic_video']['dynamic_audio_list'][-1][
+                        'main_url']
+                    video_url = base64.b64decode(video_url).decode('utf8')
+                    audio_url = base64.b64decode(audio_url).decode('utf8')
+                    video_width = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1][
+                        'vwidth']
+                    video_height = video_info['videoResource']['dash']['dynamic_video']['dynamic_video_list'][-1][
+                        'vheight']
+
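+                # 'normal' resource: fall through from video_4 down to video_1 to take the
+                # highest available resolution; presumably a muxed stream, which is why
+                # audio_url simply reuses the video main_url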
+                elif 'normal' in video_info['videoResource']:
+                    video_list = video_info['videoResource']['normal']['video_list']
+                    if 'video_4' in video_list.keys():
+                        # Common.logger(log_type).info('{}', video_list['video_4'])
+                        video_url = video_list['video_4']['main_url']
+                        audio_url = video_list['video_4']['main_url']
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_list['video_4']['vwidth']
+                        video_height = video_list['video_4']['vheight']
+                    elif 'video_3' in video_list.keys():
+                        # Common.logger(log_type).info('{}', video_list['video_3'])
+                        video_url = video_list['video_3']['main_url']
+                        audio_url = video_list['video_3']['main_url']
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_list['video_3']['vwidth']
+                        video_height = video_list['video_3']['vheight']
+                    elif 'video_2' in video_list.keys():
+                        # Common.logger(log_type).info('{}', video_list['video_2'])
+                        video_url = video_list['video_2']['main_url']
+                        audio_url = video_list['video_2']['main_url']
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_list['video_2']['vwidth']
+                        video_height = video_list['video_2']['vheight']
+                    elif 'video_1' in video_list.keys():
+                        # Common.logger(log_type).info('{}', video_list['video_1'])
+                        video_url = video_list['video_1']['main_url']
+                        audio_url = video_list['video_1']['main_url']
+                        video_url = base64.b64decode(video_url).decode('utf8')
+                        audio_url = base64.b64decode(audio_url).decode('utf8')
+                        video_width = video_list['video_1']['vwidth']
+                        video_height = video_list['video_1']['vheight']
+                    else:
+                        video_url = 0
+                        audio_url = 0
+                        video_width = 0
+                        video_height = 0
+
+                return video_url, audio_url, video_width, video_height
+
+        except Exception as e:
+            Common.logger(log_type).error('get_video_info异常:{}', e)
+
+    # App side: Xigua Video user profile feed
+    @classmethod
+    def get_follow_feeds_by_app(cls, log_type, userid):
+        while True:
+            try:
+                url = "https://api5-normal-quic-lq.ixigua.com/video/app/user/videolist_tab/v3/?"
+                headers = {
+                    'Host': 'api5-normal-quic-lq.ixigua.com',
+                    'Cookie': 'passport_csrf_token=9dc29668504aefd8f810d194c1591b27; passport_csrf_token_default=9dc29668504aefd8f810d194c1591b27; d_ticket=8cc008f231ad00a57481e490f82f4bedebe99; n_mh=Zi1ukqZaOfwMQ8RKEEaBFHPd94g9LJFrf_5jskG0uhY; odin_tt=79986f6d46fe14e0f0cf5c6d831005ef2d2ba797151d32eb7678d9ec14a770349dcc7f5cce1746a00dc493838a94db296ef2135712d40b5de1b4ebb170e7e3bf; sessionid=cd61dd6003146ce5b8d19b1eeb29d5b6; sessionid_ss=cd61dd6003146ce5b8d19b1eeb29d5b6; sid_guard=cd61dd6003146ce5b8d19b1eeb29d5b6%7C1661320113%7C5184000%7CSun%2C+23-Oct-2022+05%3A48%3A33+GMT; sid_tt=cd61dd6003146ce5b8d19b1eeb29d5b6; uid_tt=6544aadbdc13b980ab4906f550c70af5; uid_tt_ss=6544aadbdc13b980ab4906f550c70af5; install_id=541373572069224; ttreq=1$27a2ec895a960525ef828e684768bef579920543; msToken=6hA48Lf7RVYOl0Okgng_KQzBwfUpN2M5tB6opL8N6YB3EX0VsNQNhGH4kT-vRxO3Yjac8E4w7Zk4rkFF5JCRTilK',
+                    'x-tt-token': '00cd61dd6003146ce5b8d19b1eeb29d5b603e056899dfc41b69bf336d3ce3bfc61b2822bbd85f84cfdfb3bf876b7bb71ea85363bff7cb21186b571d3418b30838538c78e169a0db8500261060669094c3ed23032496d65f19a0fa66fc54cc4eed2c55-1.0.1',
+                    'request-startime': '683091411.831285',
+                    'x-vc-bdturing-sdk-version': '2.2.8',
+                    'x-ss-cookie': 'install_id=541373572069224; msToken=6hA48Lf7RVYOl0Okgng_KQzBwfUpN2M5tB6opL8N6YB3EX0VsNQNhGH4kT-vRxO3Yjac8E4w7Zk4rkFF5JCRTilK; ttreq=1$27a2ec895a960525ef828e684768bef579920543; d_ticket=8cc008f231ad00a57481e490f82f4bedebe99; n_mh=Zi1ukqZaOfwMQ8RKEEaBFHPd94g9LJFrf_5jskG0uhY; odin_tt=79986f6d46fe14e0f0cf5c6d831005ef2d2ba797151d32eb7678d9ec14a770349dcc7f5cce1746a00dc493838a94db296ef2135712d40b5de1b4ebb170e7e3bf; sessionid=cd61dd6003146ce5b8d19b1eeb29d5b6; sessionid_ss=cd61dd6003146ce5b8d19b1eeb29d5b6; sid_guard=cd61dd6003146ce5b8d19b1eeb29d5b6%7C1661320113%7C5184000%7CSun%2C+23-Oct-2022+05%3A48%3A33+GMT; sid_tt=cd61dd6003146ce5b8d19b1eeb29d5b6; uid_tt=6544aadbdc13b980ab4906f550c70af5; uid_tt_ss=6544aadbdc13b980ab4906f550c70af5; passport_csrf_token=9dc29668504aefd8f810d194c1591b27; passport_csrf_token_default=9dc29668504aefd8f810d194c1591b27',
+                    'tt-request-time': '1661398611831',
+                    'user-agent': 'Video 6.8.8 rv:6.8.8.12 (iPhone; iOS 14.7.1; zh_CN) Cronet',
+                    'sdk-version': '2',
+                    'x-tt-dt': 'AAARLMRFIGV63HLKR2OFYMAN4ECX3S3FF7T6VF3ZUGZVJHJRTAR6TZ6TXKNYXU5US4L72542CDEO4CJAORJUPSELHB52LINBZAWN7DIMVSPRKPKSIJYA2S2ZS7PIYZQBQ3OFWJETR35OAD55FXYP6OY',
+                    'passport-sdk-version': '5.14.3',
+                    'x-bd-kmsv': '1',
+                    'x-ss-dp': '32',
+                    'x-tt-trace-id': '00-d312f8fb0dae06939d00507998be0020-d312f8fb0dae0693-01',
+                    'x-argus': 'OoPWDUi7xa1FAheuXaB4U+12sViNA+0vZEq7RpA1HvKF5CreKftmWWAtl1ndNdJNbk4zPogps8WNxsRJWdgZOzLg5CUTwVWrMQ/ptLgYrFTXbKf4P4CpqSRoJEHca/LVYRXUrTxTsi+AS7u/S3BTCrzm6nwvZB43GyiLGyN1W38poinJoMkPltgUNoSkAilVXCTu3iSWFLUYayOF7MwFRnYFxU4vBu+XmYCtl74XVCCARZD6uYf/cjkIH9wRD+uv0HBNlI70mqjaQOTYtlINi2i61yctngEjgwpV6s+4GLWQQYY6KXq+eu9mEppFDLSI9WY=',
+                    'x-gorgon': '8404e06000002dfc1ace57427120b4f72a226ce677bde6d67b92',
+                    'x-khronos': '1661398611',
+                    'x-ladon': '7bRfCQvXSDeU17k7XA6Y7TSO0rsUmxbxtqt+apKfuSx/juZZ'
+                }
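+                # Assumption: the headers above (and the device fields in params below) were
+                # captured from the iOS app; x-argus / x-gorgon / x-khronos are device
+                # signatures and must be re-captured once they expire.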
+                params = {
+                    'anti_addiction_model': '0',
+                    'version_code': '10.8.8',
+                    'app_name': 'video_article',
+                    'device_id': '3061492313228551',
+                    'channel': 'App%20Store',
+                    'resolution': '828*1792',
+                    'aid': '32',
+                    'ab_feature': 'z1',
+                    'ab_version': '668851,4601580,668854,4594840,4601552,4622288,4641673,668858,4601444,668859,4601563,668856,4601562,668855,4601507,668853,4601558,668852,4601533',
+                    'update_version_code': '108812',
+                    'cdid': '7425DF80-0324-4CEF-AAEC-6596F45F2C7A',
+                    'ac': 'WIFI',
+                    'os_version': '14.7.1',
+                    'user_version': '6.8.8',
+                    'ssmix': 'a',
+                    'ipad_adapter_enable': '0',
+                    'device_platform': 'iphone',
+                    'iid': '541373572069224',
+                    'device_type': 'iPhone%2011',
+                    'ab_client': 'a1,f2,f7,e1',
+                    'cdid_ts': '1661312788',
+                    'offset': str(cls.offset),
+                    'orderby': 'publishtime',
+                    'to_user_id': userid,
+                    'count': '20',
+                    'language': 'zh-Hans-CN',
+                    'loc_mode': '0',
+                    'ab_version_vid_list': '4413540%2C2190089',
+                    'enable_publish_status': '0',
+                    'play_param': 'codec_type%3A7%2Cenable_dash%3A1%2Cresolution%3A828%2A1792%2Cis_order_flow%3A-1%2Cis_hdr%3A1',
+                    'client_extra': '%7B%22last_ad_position%22%3A-1%7D',
+                }
+                urllib3.disable_warnings()
+                response = requests.get(url=url, headers=headers, params=params, proxies=proxies, verify=False)
+                # Each page requests count=20, so advance the offset by 20; stepping by 30 would skip videos
+                cls.offset += 20
+                if 'data' not in response.text or response.json().get('code') != 0 or len(response.json()['data']) == 0:
+                    Common.logger(log_type).warning('get_follow_feeds_by_app: response: {}', response.text)
+                    # Empty or bad page: reset pagination and stop instead of looping forever
+                    cls.offset = 0
+                    return
+                else:
+                    feeds = response.json()['data']
+                    for i in range(len(feeds)):
+
+                        # video_title
+                        if 'title' in feeds[i]:
+                            video_title = feeds[i]['title'].strip().replace('手游', '')
+                        else:
+                            video_title = 0
+
+                        # video_id
+                        if 'video_id' in feeds[i]:
+                            video_id = feeds[i]['video_id']
+                        else:
+                            video_id = 0
+
+                        # gid
+                        if 'gid' in feeds[i]:
+                            gid = feeds[i]['gid']
+                        else:
+                            gid = 0
+
+                        # play_cnt
+                        if 'video_detail_info' not in feeds[i]:
+                            play_cnt = 0
+                        elif 'video_watch_count' not in feeds[i]['video_detail_info']:
+                            play_cnt = 0
+                        else:
+                            play_cnt = feeds[i]['video_detail_info']['video_watch_count']
+
+                        # comment_cnt
+                        if 'comment_count' in feeds[i]:
+                            comment_count = feeds[i]['comment_count']
+                        else:
+                            comment_count = 0
+
+                        # like_cnt
+                        if 'digg_count' in feeds[i]:
+                            like_cnt = feeds[i]['digg_count']
+                        else:
+                            like_cnt = 0
+
+                        # share_cnt
+                        if 'share_count' in feeds[i]:
+                            share_cnt = feeds[i]['share_count']
+                        else:
+                            share_cnt = 0
+
+                        # video_duration
+                        if 'video_duration' in feeds[i]:
+                            video_duration = feeds[i]['video_duration']
+                        else:
+                            video_duration = 0
+
+                        # send_time
+                        if 'publish_time' in feeds[i]:
+                            send_time = feeds[i]['publish_time']
+                        else:
+                            send_time = 0
+
+                        # user_name
+                        if 'user_info' not in feeds[i]:
+                            user_name = 0
+                        elif 'name' not in feeds[i]['user_info']:
+                            user_name = 0
+                        else:
+                            user_name = feeds[i]['user_info']['name']
+
+                        # user_id
+                        if 'user_info' not in feeds[i]:
+                            user_id = 0
+                        elif 'user_id' not in feeds[i]['user_info']:
+                            user_id = 0
+                        else:
+                            user_id = feeds[i]['user_info']['user_id']
+
+                        # head_url
+                        if 'user_info' not in feeds[i]:
+                            head_url = 0
+                        elif 'avatar_url' not in feeds[i]['user_info']:
+                            head_url = 0
+                        else:
+                            head_url = feeds[i]['user_info']['avatar_url']
+
+                        # cover_url
+                        if 'video_detail_info' not in feeds[i]:
+                            cover_url = 0
+                        elif 'detail_video_large_image' not in feeds[i]['video_detail_info']:
+                            cover_url = 0
+                        elif 'url' not in feeds[i]['video_detail_info']['detail_video_large_image']:
+                            cover_url = 0
+                        else:
+                            cover_url = feeds[i]['video_detail_info']['detail_video_large_image']['url']
+
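+                        # get_video_info returns (video_url, audio_url, video_width, video_height)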
+                        url_info = cls.get_video_info(log_type, gid)
+
+                        video_url = url_info[0]
+                        audio_url = url_info[1]
+                        video_width = url_info[2]
+                        video_height = url_info[3]
+
+                        Common.logger(log_type).info('video_title:{}', video_title)
+                        Common.logger(log_type).info('video_id:{}', video_id)
+                        Common.logger(log_type).info('play_cnt:{}', play_cnt)
+                        Common.logger(log_type).info('video_duration:{}', video_duration)
+                        Common.logger(log_type).info('video_width_height:{}', str(video_width) + '*' + str(video_height))
+                        Common.logger(log_type).info('send_time:{}',
+                                                     time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(send_time)))
+
+                        if gid == 0 or video_url == 0 or audio_url == 0:
+                            Common.logger(log_type).info('无效视频:{}\n', video_title)
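+                        # The feed is ordered by publishtime (see params), so once a video
+                        # older than 10 days appears we can stop paging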
+                        elif int(time.time()) - int(send_time) > 3600 * 24 * 10:
+                            Common.logger(log_type).info('发布时间超过10天:{}\n',
+                                                         time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(send_time)))
+                            cls.offset = 0
+                            return
+                        elif cls.download_rule(video_duration, video_width, video_height) is False:
+                            Common.logger(log_type).info('不满足抓取规则\n')
+                        elif any(word in video_title for word in cls.filter_words(log_type)):
+                            Common.logger(log_type).info('标题已中过滤词:{}\n', video_title)
+                        elif str(video_id) in [x for y in Feishu.get_values_batch(log_type, 'xigua', 'e075e9') for x in y]:
+                            Common.logger(log_type).info('视频已下载:{}\n', video_title)
+                        elif str(video_id) in [x for y in Feishu.get_values_batch(log_type, 'xigua', 'wjhpDs') for x in y]:
+                            Common.logger(log_type).info('视频已存在:{}\n', video_title)
+                        else:
+                            Feishu.insert_columns(log_type, 'xigua', 'wjhpDs', 'ROWS', 1, 2)
+                            get_feeds_time = int(time.time())
+                            values = [[time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(get_feeds_time)),
+                                       '关注榜',
+                                       video_title,
+                                       str(video_id),
+                                       gid,
+                                       int(play_cnt),
+                                       int(comment_count),
+                                       int(like_cnt),
+                                       int(share_cnt),
+                                       video_duration,
+                                       str(video_width) + '*' + str(video_height),
+                                       time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(send_time)),
+                                       user_name,
+                                       str(user_id),
+                                       head_url,
+                                       cover_url,
+                                       video_url,
+                                       audio_url]]
+                            time.sleep(1)
+                            Feishu.update_values(log_type, 'xigua', 'wjhpDs', 'A2:Z2', values)
+                            Common.logger(log_type).info('当前视频信息写入飞书成功\n')
+                            time.sleep(random.randint(1, 3))
+
+            except Exception as e:
+                Common.logger(log_type).error('get_follow_feeds_by_app异常:{}\n', e)
+                # Bail out rather than retrying in a tight loop on a persistent error
+                return
+
+    # Fetch the profile videos of every tracked user
+    @classmethod
+    def get_all_person_videos(cls, log_type, env):
+        try:
+            user_list = cls.get_user_info_from_feishu(log_type)
+            if len(user_list) == 0:
+                Common.logger(log_type).warning('用户ID列表为空\n')
+            else:
+                for k, v in user_list.items():
+                    Common.logger(log_type).info('正在获取 {} 主页视频\n', k)
+                    # cls.get_follow_feeds_by_app(log_type, v.split(',')[0])
+                    cls.get_follow_feeds_by_pc(log_type, v.split(',')[0])
+                    time.sleep(1)
+                    cls.run_download_publish(log_type, env, v.split(',')[-1])
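+                    # Random sleep between accounts, presumably to keep request frequency low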
+                    time.sleep(random.randint(5, 10))
+        except Exception as e:
+            Common.logger(log_type).error('get_all_person_videos异常:{}\n', e)
+
+    # Merge the separate audio and video streams
+    @classmethod
+    def video_compose(cls, log_type, video_title):
+        video_path = './videos/' + str(video_title) + '/video1.mp4'
+        audio_path = './videos/' + str(video_title) + '/audio1.mp4'
+        out_path = './videos/' + str(video_title) + '/video.mp4'
+        # Build the command as an argument list so spaces or shell metacharacters
+        # in the title cannot break the command line
+        cmd = ['ffmpeg', '-i', video_path, '-i', audio_path,
+               '-c:v', 'copy', '-c:a', 'aac', '-strict', 'experimental',
+               '-map', '0:v:0', '-map', '1:a:0', out_path]
+        subprocess.call(cmd)
+
+        for file in os.listdir('./videos/' + str(video_title)):
+            if file in ('video1.mp4', 'audio1.mp4'):
+                os.remove('./videos/' + str(video_title) + '/' + file)
+
+        Common.logger(log_type).info('合成成功')
+
+    # Download / upload
+    @classmethod
+    def download_publish(cls, log_type, env, uid):
+        try:
+            feeds_sheet = Feishu.get_values_batch(log_type, 'xigua', 'wjhpDs')
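+            # Row 0 is the header. Each call handles at most one data row and then returns,
+            # because deleting a row shifts the indices; run_download_publish loops until
+            # the sheet is drained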
+            for i in range(1, len(feeds_sheet)):
+                download_video_title = feeds_sheet[i][2]
+                download_video_id = feeds_sheet[i][3]
+                download_video_gid = feeds_sheet[i][4]
+                download_play_cnt = feeds_sheet[i][5]
+                download_comment_cnt = feeds_sheet[i][6]
+                download_like_cnt = feeds_sheet[i][7]
+                download_share_cnt = feeds_sheet[i][8]
+                download_video_duration = feeds_sheet[i][9]
+                download_video_width_height = feeds_sheet[i][10]
+                download_send_time = feeds_sheet[i][11]
+                download_user_name = feeds_sheet[i][12]
+                download_user_id = feeds_sheet[i][13]
+                download_head_url = feeds_sheet[i][14]
+                download_cover_url = feeds_sheet[i][15]
+                download_video_url = feeds_sheet[i][16]
+                download_audio_url = feeds_sheet[i][17]
+
+                Common.logger(log_type).info('正在判断第{}行:{}', i + 1, download_video_title)
+                Common.logger(log_type).info('download_video_id:{}', download_video_id)
+                Common.logger(log_type).info('download_video_duration:{}', download_video_duration)
+                Common.logger(log_type).info('download_send_time:{}', download_send_time)
+
+                # Skip empty rows
+                if download_video_title is None or download_video_id is None:
+                    Feishu.dimension_range(log_type, 'xigua', 'wjhpDs', 'ROWS', i + 1, i + 1)
+                    Common.logger(log_type).info('空行,删除成功\n')
+                    return
+                elif str(download_video_id) in [x for y in Feishu.get_values_batch(log_type, 'xigua', 'e075e9') for x in
+                                                y]:
+                    Feishu.dimension_range(log_type, 'xigua', 'wjhpDs', 'ROWS', i + 1, i + 1)
+                    Common.logger(log_type).info('视频已下载,删除成功\n')
+                    return
+                else:
+                    # Download the cover image
+                    Common.download_method(log_type=log_type, text='cover', d_name=download_video_title,
+                                           d_url=download_cover_url)
+                    # Download the video stream
+                    Common.download_method(log_type=log_type, text='video', d_name=download_video_title,
+                                           d_url=download_video_url)
+                    # Download the audio stream
+                    Common.download_method(log_type=log_type, text='audio', d_name=download_video_title,
+                                           d_url=download_audio_url)
+                    # Save video metadata to "./videos/{download_video_title}/info.txt"
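+                    # Assumption: the publish module parses info.txt line by line, so the
+                    # field order below must not change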
+                    with open("./videos/" + download_video_title + "/" + "info.txt",
+                              "a", encoding="UTF-8") as f_a:
+                        f_a.write(str(download_video_id) + "\n" +
+                                  str(download_video_title) + "\n" +
+                                  str(download_video_duration) + "\n" +
+                                  str(download_play_cnt) + "\n" +
+                                  str(download_comment_cnt) + "\n" +
+                                  str(download_like_cnt) + "\n" +
+                                  str(download_share_cnt) + "\n" +
+                                  str(download_video_width_height) + "\n" +
+                                  str(int(time.mktime(
+                                      time.strptime(download_send_time, "%Y/%m/%d %H:%M:%S")))) + "\n" +
+                                  str(download_user_name) + "\n" +
+                                  str(download_head_url) + "\n" +
+                                  str(download_video_url) + "\n" +
+                                  str(download_cover_url) + "\n" +
+                                  "xigua"+str(int(time.time())))
+                    Common.logger("follow").info("==========视频信息已保存至info.txt==========")
+                    # Merge audio and video
+                    cls.video_compose(log_type, download_video_title)
+
+                    # Upload the video
+                    Common.logger(log_type).info("开始上传视频:{}", download_video_title)
+                    our_video_id = Publish.upload_and_publish(log_type, env, uid)
+                    if env == 'dev':
+                        our_video_link = "https://testadmin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info"
+                    else:
+                        our_video_link = "https://admin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info"
+                    Common.logger(log_type).info("视频上传完成:{}\n", download_video_title)
+
+                    # Video-ID sheet: insert a new first row
+                    Feishu.insert_columns(log_type, 'xigua', "e075e9", "ROWS", 1, 2)
+                    # Video-ID sheet: write the data into the first row
+                    upload_time = int(time.time())
+                    values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(upload_time)),
+                               "关注榜",
+                               download_video_title,
+                               str(download_video_id),
+                               our_video_link,
+                               download_video_gid,
+                               download_play_cnt,
+                               download_comment_cnt,
+                               download_like_cnt,
+                               download_share_cnt,
+                               download_video_duration,
+                               download_video_width_height,
+                               download_send_time,
+                               download_user_name,
+                               download_user_id,
+                               download_head_url,
+                               download_cover_url,
+                               download_video_url,
+                               download_audio_url]]
+                    Common.logger(log_type).info('values:{}\n', values)
+                    time.sleep(1)
+                    Feishu.update_values(log_type, 'xigua', "e075e9", "F2:Z2", values)
+                    Common.logger(log_type).info("视频已保存至云文档:{}", download_video_title)
+
+                    # Delete the processed row (dimension can be ROWS or COLUMNS)
+                    Feishu.dimension_range(log_type, 'xigua', "wjhpDs", "ROWS", i + 1, i + 1)
+                    Common.logger(log_type).info("视频:{},下载/上传成功\n", download_video_title)
+                    return
+        except Exception as e:
+            Common.logger(log_type).error('download_publish异常:{}\n', e)
+
+    # Run download / upload until the pending sheet is empty
+    @classmethod
+    def run_download_publish(cls, log_type, env, uid):
+        try:
+            while True:
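+                # Only the header row is left (length == 1): the download queue is empty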
+                if len(Feishu.get_values_batch(log_type, 'xigua', 'wjhpDs')) == 1:
+                    Common.logger(log_type).info('下载 / 上传 完成\n')
+                    break
+                else:
+                    cls.download_publish(log_type, env, uid)
+                    time.sleep(random.randint(1, 3))
+        except Exception as e:
+            Common.logger(log_type).error('run_download_publish异常:{}\n', e)
+
+
+if __name__ == '__main__':
+    # Follow.get_follow_feeds_by_pc('follow', '6431477489')
+    # Follow.get_follow_feeds_by_app('xigua', '6431477489')
+    # Follow.get_follow_feeds_by_app('follow', '3865480345435996')
+    # Follow.get_user_info_from_feishu('follow')
+    # Follow.filter_words('follow')
+    # Follow.get_all_person_videos('follow', 'dev')
+    Follow.download_publish('follow', 'dev', '6267141')
+
+    pass
