
first push

wangkun 2 years ago
parent
commit
949a3da71c
7 changed files with 779 additions and 1 deletion
  1. README.md (+1, -1)
  2. logs/__init__.py (+3, -0)
  3. main/__init__.py (+3, -0)
  4. main/common.py (+132, -0)
  5. main/demo.py (+13, -0)
  6. main/feishu_lib.py (+421, -0)
  7. main/run_bot.py (+206, -0)

+ 1 - 1
README.md

@@ -17,7 +17,7 @@ urllib3==1.26.9
 2.python3 ./main/run_bot.py 
 
 
-==========2022/7/06===========
+==========2022/8/10===========
 
 每隔一小时,检查一次已下载表
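
The hourly check mentioned above compares the newest row's capture time in each downloaded-videos sheet against a 24-hour threshold (see main/run_bot.py in this commit). A minimal sketch of that comparison, using a placeholder timestamp in the "%Y/%m/%d %H:%M:%S" format the sheets use:

import time

# placeholder capture time, in the format stored in the Feishu sheets
first_download_time = "2022/08/10 12:00:00"
first_ts = int(time.mktime(time.strptime(first_download_time, "%Y/%m/%d %H:%M:%S")))

# alert when the newest download is more than 24 hours old
if int(time.time()) - first_ts > 3600 * 24:
    print("超过24小时没有新视频入库")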
 

+ 3 - 0
logs/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/8/9

+ 3 - 0
main/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/8/9

+ 132 - 0
main/common.py

@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/8/9
+"""
+公共方法,包含:生成log / 删除log / 下载方法 / 读取文件 / 统计下载数
+"""
+from datetime import date, timedelta
+from loguru import logger
+import datetime
+import os
+import time
+import requests
+import urllib3
+proxies = {"http": None, "https": None}
+
+
+class Common:
+    # 统一获取当前时间 <class 'datetime.datetime'>  2022-04-14 20:13:51.244472
+    now = datetime.datetime.now()
+    # 昨天 <class 'str'>  2022-04-13
+    yesterday = (date.today() + timedelta(days=-1)).strftime("%Y/%m/%d")
+    # 今天 <class 'datetime.date'>  2022-04-14
+    today = date.today()
+    # 明天 <class 'str'>  2022-04-15
+    tomorrow = (date.today() + timedelta(days=1)).strftime("%Y/%m/%d")
+
+    # 使用 logger 模块生成日志
+    @staticmethod
+    def logger(log_type):
+        """
+        使用 logger 模块生成日志
+        """
+        # 日志路径
+        log_dir = r"./logs/"
+        log_path = os.getcwd() + os.sep + log_dir
+        if not os.path.isdir(log_path):
+            os.makedirs(log_path)
+
+        # 日志文件名
+        log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + '-bot.log'
+
+        # 日志不打印到控制台
+        logger.remove(handler_id=None)
+
+        # rotation="500 MB",实现每 500MB 存储一个文件
+        # rotation="12:00",实现每天 12:00 创建一个文件
+        # rotation="1 week",每周创建一个文件
+        # retention="10 days",每隔10天之后就会清理旧的日志
+        # 初始化日志
+        logger.add(log_dir + log_name, level="INFO", rotation='00:00')
+
+        return logger
+
+    # 清除日志,保留最近 7 个文件
+    @classmethod
+    def del_logs(cls, log_type):
+        """
+        清除冗余日志文件
+        :return: 保留最近 7 个日志
+        """
+        log_dir = "./logs/"
+        all_files = sorted(os.listdir(log_dir))
+        all_logs = []
+        for log in all_files:
+            name = os.path.splitext(log)[-1]
+            if name == ".log":
+                all_logs.append(log)
+
+        if len(all_logs) > 7:
+            for file in all_logs[:len(all_logs) - 7]:
+                os.remove(log_dir + file)
+        cls.logger(log_type).info("清除冗余日志成功")
+
+    # 封装下载视频或封面的方法
+    @classmethod
+    def download_method(cls, log_type, text, d_name, d_url):
+        """
+        下载封面:text == "cover" ; 下载视频:text == "video"
+        需要下载的视频文件夹名:d_name
+        视频封面,或视频播放地址:d_url
+        下载保存路径:"./videos/{d_name}/"
+        """
+        # 首先创建一个保存该视频相关信息的文件夹
+        video_dir = "./videos/" + d_name + "/"
+        if not os.path.exists(video_dir):
+            os.makedirs(video_dir)
+
+        # 下载视频
+        if text == "video":
+            # 需要下载的视频地址
+            video_url = d_url
+            # 视频名
+            video_name = "video.mp4"
+
+            # 下载视频
+            urllib3.disable_warnings()
+            try:
+                response = requests.get(video_url, stream=True, proxies=proxies, verify=False)
+                with open(video_dir + video_name, "wb") as f:
+                    for chunk in response.iter_content(chunk_size=10240):
+                        f.write(chunk)
+                cls.logger(log_type).info("==========视频下载完成==========")
+            except Exception as e:
+                cls.logger(log_type).error("视频下载失败:{}", e)
+
+        # 下载封面
+        elif text == "cover":
+            # 需要下载的封面地址
+            cover_url = d_url
+            # 封面名
+            cover_name = "image.jpg"
+
+            # 下载封面
+            urllib3.disable_warnings()
+            try:
+                response = requests.get(cover_url, proxies=proxies, verify=False)
+                with open(video_dir + cover_name, "wb") as f:
+                    f.write(response.content)
+                cls.logger(log_type).info("==========封面下载完成==========")
+            except Exception as e:
+                cls.logger(log_type).error("封面下载失败:{}", e)
+
+
+if __name__ == "__main__":
+    common = Common()
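
A minimal usage sketch for Common (not part of the commit): it assumes the script runs from the repository root so the relative ./logs/ and ./videos/ paths resolve, and the download URL is a placeholder.

from main.common import Common

logger = Common.logger("bot")              # writes ./logs/<YYYY-MM-DD>-bot.log
logger.info("demo download starting")

Common.download_method(
    log_type="bot",
    text="cover",                          # "video" or "cover"
    d_name="demo",                         # folder name under ./videos/
    d_url="https://example.com/cover.jpg"  # placeholder URL, not a real cover
)
Common.del_logs("bot")                     # keep only the 7 most recent log files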

+ 13 - 0
main/demo.py

@@ -0,0 +1,13 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/8/10
+
+class Demo:
+    @classmethod
+    def demo1(cls):
+        for i in range(3):
+            print(i)
+
+
+if __name__ == "__main__":
+    Demo.demo1()

+ 421 - 0
main/feishu_lib.py

@@ -0,0 +1,421 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/8/9
+import json
+
+import requests
+import urllib3
+
+from main.common import Common
+
+proxies = {"http": None, "https": None}
+
+
+class Feishu:
+    """
+    编辑飞书云文档
+    """
+    # 看一看爬虫数据表
+    kanyikan_url = "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?"
+    # 快手爬虫数据表
+    # kuaishou_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnp4SaJt37q6OOOrYzPMjQkg?"
+    kuaishou_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?"
+    # 微视爬虫数据表
+    weishi_url = "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?"
+    # 小年糕爬虫数据表
+    xiaoniangao_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?"
+    # 音乐相册
+    music_album = "https://w42nne6hzg.feishu.cn/sheets/shtcnT6zvmfsYe1g0iv4pt7855g?"
+    # 本山祝福数据表
+    crawler_benshanzhufu = "https://w42nne6hzg.feishu.cn/sheets/shtcnGh2rrsPYM4iVNEBO7OqWrb?"
+    # 公众号爬虫表
+    gzh_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnexNXnpDLHhARw0QdiwbYuA?"
+    # 数据监控表
+    crawler_monitor = "https://w42nne6hzg.feishu.cn/sheets/shtcnlZWYazInhf7Z60jkbLRJyd?"
+
+    # 手机号
+    wangkun = "13426262515"
+    gaonannan = "18501180073"
+    xinxin = "15546206651"
+    huxinxue = "18832292015"
+
+    # 飞书路径token
+    @classmethod
+    def spreadsheettoken(cls, crawler):
+        """
+        :param crawler: 哪个爬虫
+        """
+        if crawler == "kanyikan":
+            return "shtcngRPoDYAi24x52j2nDuHMih"
+        elif crawler == "kuaishou":
+            # return "shtcnp4SaJt37q6OOOrYzPMjQkg"
+            return "shtcnICEfaw9llDNQkKgdymM1xf"
+        elif crawler == "weishi":
+            return "shtcn5YSWg91JfVGzj0SFZIRRPh"
+        elif crawler == "xiaoniangao":
+            return "shtcnYxiyQ1wLklo1W5Kdqc9cGh"
+        elif crawler == "monitor":
+            return "shtcnlZWYazInhf7Z60jkbLRJyd"
+        elif crawler == "music_album":
+            return "shtcnT6zvmfsYe1g0iv4pt7855g"
+        elif crawler == "bszf":
+            return "shtcnGh2rrsPYM4iVNEBO7OqWrb"
+        elif crawler == "gzh":
+            return "shtcnexNXnpDLHhARw0QdiwbYuA"
+
+    # 获取飞书api token
+    @classmethod
+    def get_token(cls, log_type):
+        """
+        获取飞书api token
+        :return:
+        """
+        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
+        post_data = {"app_id": "cli_a13ad2afa438d00b",  # 这里账号密码是发布应用的后台账号及密码
+                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
+
+        try:
+            urllib3.disable_warnings()
+            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
+            tenant_access_token = response.json()["tenant_access_token"]
+            return tenant_access_token
+        except Exception as e:
+            Common.logger(log_type).error("获取飞书 api token 异常:{}", e)
+
+    # 获取表格元数据
+    @classmethod
+    def get_metainfo(cls, log_type, crawler):
+        """
+        获取表格元数据
+        :return:
+        """
+        get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                           + cls.spreadsheettoken(crawler) + "/metainfo"
+
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            "extFields": "protectedRange",  # 额外返回的字段,extFields=protectedRange时返回保护行列信息
+            "user_id_type": "open_id"  # 返回的用户id类型,可选open_id,union_id
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
+            response = json.loads(r.content.decode("utf8"))
+            return response
+        except Exception as e:
+            Common.logger(log_type).error("获取表格元数据异常:{}", e)
+
+    # 读取工作表中所有数据
+    @classmethod
+    def get_values_batch(cls, log_type, crawler, sheetid):
+        """
+        读取工作表中所有数据
+        :param log_type: 启用哪个 log
+        :param crawler: 哪个爬虫
+        :param sheetid: 哪张表
+        :return: 所有数据
+        """
+        get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                               + cls.spreadsheettoken(crawler) + "/values_batch_get"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            # 多个查询范围 如 url?ranges=range1,range2 ,其中 range 包含 sheetId 与单元格范围两部分
+            "ranges": sheetid,
+
+            # valueRenderOption=ToString 可返回纯文本的值(数值类型除外);
+            # valueRenderOption=FormattedValue 计算并格式化单元格;
+            # valueRenderOption=Formula单元格中含有公式时返回公式本身;
+            # valueRenderOption=UnformattedValue计算但不对单元格进行格式化
+            "valueRenderOption": "ToString",
+
+            # dateTimeRenderOption=FormattedString 计算并将时间日期按照其格式进行格式化,但不会对数字进行格式化,返回格式化后的字符串。
+            "dateTimeRenderOption": "",
+
+            # 返回的用户id类型,可选open_id,union_id
+            "user_id_type": "open_id"
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
+            # print(r.text)
+            response = json.loads(r.content.decode("utf8"))
+            values = response["data"]["valueRanges"][0]["values"]
+            return values
+        except Exception as e:
+            Common.logger(log_type).error("读取工作表所有数据异常:{}", e)
+
+    # 工作表,插入行或列
+    @classmethod
+    def insert_columns(cls, log_type, crawler, sheetid, majordimension, startindex, endindex):
+        """
+        工作表插入行或列
+        :param log_type: 日志路径
+        :param crawler: 哪个爬虫的云文档
+        :param sheetid:哪张工作表
+        :param majordimension:行或者列, ROWS、COLUMNS
+        :param startindex:开始位置
+        :param endindex:结束位置
+        """
+        insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                             + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "dimension": {
+                "sheetId": sheetid,
+                "majorDimension": majordimension,  # 默认 ROWS ,可选 ROWS、COLUMNS
+                "startIndex": startindex,  # 开始的位置
+                "endIndex": endindex  # 结束的位置
+            },
+            "inheritStyle": "AFTER"  # BEFORE 或 AFTER,不填为不继承 style
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("插入行或列:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("插入行或列异常:{}", e)
+
+    # 写入数据
+    @classmethod
+    def update_values(cls, log_type, crawler, sheetid, ranges, values):
+        """
+        写入数据
+        :param log_type: 日志路径
+        :param crawler: 哪个爬虫的云文档
+        :param sheetid:哪张工作表
+        :param ranges:单元格范围
+        :param values:写入的具体数据,list
+        """
+        update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                            + cls.spreadsheettoken(crawler) + "/values_batch_update"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "valueRanges": [
+                {
+                    "range": sheetid + "!" + ranges,
+                    "values": values
+                },
+            ],
+        }
+
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("写入数据:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("写入数据异常:{}", e)
+
+    # 合并单元格
+    @classmethod
+    def merge_cells(cls, log_type, crawler, sheetid, ranges):
+        """
+        合并单元格
+        :param log_type: 日志路径
+        :param crawler: 哪个爬虫
+        :param sheetid:哪张工作表
+        :param ranges:需要合并的单元格范围
+        """
+        merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                          + cls.spreadsheettoken(crawler) + "/merge_cells"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+
+        body = {
+            "range": sheetid + "!" + ranges,
+            "mergeType": "MERGE_ROWS"
+        }
+
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("合并单元格:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("合并单元格异常:{}", e)
+
+    # 读取单元格数据
+    @classmethod
+    def get_range_value(cls, log_type, crawler, sheetid, cell):
+        """
+        读取单元格内容
+        :param log_type: 日志路径
+        :param crawler: 哪个爬虫
+        :param sheetid: 哪张工作表
+        :param cell: 哪个单元格
+        :return: 单元格内容
+        """
+        get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                              + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            # valueRenderOption=ToString 可返回纯文本的值(数值类型除外);
+            # valueRenderOption=FormattedValue 计算并格式化单元格;
+            # valueRenderOption=Formula 单元格中含有公式时返回公式本身;
+            # valueRenderOption=UnformattedValue 计算但不对单元格进行格式化。
+            "valueRenderOption": "FormattedValue",
+
+            # dateTimeRenderOption=FormattedString 计算并对时间日期按照其格式进行格式化,但不会对数字进行格式化,返回格式化后的字符串。
+            "dateTimeRenderOption": "",
+
+            # 返回的用户id类型,可选open_id,union_id
+            "user_id_type": "open_id"
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
+            # print(r.text)
+            return r.json()["data"]["valueRange"]["values"][0]
+        except Exception as e:
+            Common.logger(log_type).error("读取单元格数据异常:{}", e)
+
+    # 删除行或列,可选 ROWS、COLUMNS
+    @classmethod
+    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
+        """
+        删除行或列
+        :param log_type: 日志路径
+        :param crawler: 哪个爬虫
+        :param sheetid:工作表
+        :param major_dimension:默认 ROWS ,可选 ROWS、COLUMNS
+        :param startindex:开始的位置
+        :param endindex:结束的位置
+        :return:
+        """
+        dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                              + cls.spreadsheettoken(crawler) + "/dimension_range"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "dimension": {
+                "sheetId": sheetid,
+                "majorDimension": major_dimension,
+                "startIndex": startindex,
+                "endIndex": endindex
+            }
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("删除视频数据:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("删除视频数据异常:{}", e)
+
+    # 获取用户 ID
+    @classmethod
+    def get_userid(cls, log_type, username):
+        try:
+            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(log_type),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            if username == "wangkun":
+                username = cls.wangkun
+            elif username == "gaonannan":
+                username = cls.gaonannan
+            elif username == "xinxin":
+                username = cls.xinxin
+            elif username == "huxinxue":
+                username = cls.huxinxue
+            data = {"mobiles": [username]}
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
+            open_id = r.json()["data"]["mobile_users"][username][0]["open_id"]
+            Common.logger(log_type).info("{}:{}", username, open_id)
+            # print(f"{username}:{open_id}")
+            return open_id
+        except Exception as e:
+            Common.logger(log_type).error("get_userid异常:{}", e)
+
+    # 飞书机器人
+    @classmethod
+    def bot(cls, log_type, crawler, text):
+        try:
+            url = "https://open.feishu.cn/open-apis/bot/v2/hook/96989577-50e7-4653-9ec2-308fe3f2c5fe"
+            headers = {
+                'Content-Type': 'application/json'
+            }
+            if crawler == "kanyikan":
+                content = "看一看爬虫表"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=20ce0c"
+            elif crawler == "xiaoniangao":
+                content = "小年糕爬虫表"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=yatRv2"
+            elif crawler == "music_album":
+                content = "音乐相册爬虫表"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnT6zvmfsYe1g0iv4pt7855g?sheet=f5a76e"
+            elif crawler == "bszf":
+                content = "本山祝福爬虫表"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnGh2rrsPYM4iVNEBO7OqWrb?sheet=440018"
+            elif crawler == "kuaishou":
+                content = "快手爬虫表"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnICEfaw9llDNQkKgdymM1xf?sheet=3cd128"
+            elif crawler == "gzh":
+                content = "公众号爬虫表"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnexNXnpDLHhARw0QdiwbYuA?sheet=fCs3BT"
+            else:
+                content = "小年糕爬虫表"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?sheet=yatRv2"
+
+            data = json.dumps({
+                "msg_type": "interactive",
+                "card": {
+                    "config": {
+                        "wide_screen_mode": True,
+                        "enable_forward": True
+                    },
+                    "elements": [{
+                        "tag": "div",
+                        "text": {
+                            "content": "\n<at id=" + str(cls.get_userid(log_type, "wangkun")) + "></at>\n" + text,
+                            "tag": "lark_md"
+                        }
+                    }, {
+                        "actions": [{
+                            "tag": "button",
+                            "text": {
+                                "content": content,
+                                "tag": "lark_md"
+                            },
+                            "url": sheet_url,
+                            "type": "default",
+                            "value": {}
+                        }],
+                        "tag": "action"
+                    }],
+                    "header": {
+                        "title": {
+                            "content": "📣您有新的报警,请注意查收",
+                            "tag": "plain_text"
+                        }
+                    }
+                }
+            })
+            urllib3.disable_warnings()
+            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
+            Common.logger(log_type).info("触发机器人消息:{}, {}", r, r.json()["StatusMessage"])
+        except Exception as e:
+            Common.logger(log_type).error("bot异常:{}", e)
+
+
+if __name__ == "__main__":
+    Feishu.bot("bot", "kuaishou", "我是快手测试内容,请忽略")
+    # Feishu.get_userid("kuaishou", "huxinxue")
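
A minimal usage sketch for Feishu (not part of the commit): it reads the 看一看 downloaded-videos sheet ("20ce0c", the same sheet id used in bot() above). The write calls are shown commented out because they would modify the production sheet, and their values are placeholders.

from main.feishu_lib import Feishu

# read every row of the "kanyikan" downloaded-videos sheet
rows = Feishu.get_values_batch("bot", "kanyikan", "20ce0c")
print(len(rows), "rows, header:", rows[0])

# writing works the same way: insert an empty row 2, then fill A2:C2
# Feishu.insert_columns("bot", "kanyikan", "20ce0c", "ROWS", 1, 2)
# Feishu.update_values("bot", "kanyikan", "20ce0c", "A2:C2",
#                      [["placeholder", "placeholder", "placeholder"]])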

+ 206 - 0
main/run_bot.py

@@ -0,0 +1,206 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/8/9
+import datetime
+import os
+import sys
+import time
+
+sys.path.append(os.getcwd())
+from main.common import Common
+from main.feishu_lib import Feishu
+
+
+class Bot:
+    # 获取各个爬虫表最新一条抓取时间
+    @classmethod
+    def get_first_time(cls, log_type, crawler):
+        try:
+            if crawler == "xiaoniangao":
+                sheet = Feishu.get_values_batch(log_type, "xiaoniangao", "yatRv2")
+                # print(sheet[1])
+                # 已下载表,最新一条视频抓取时间
+                first_download_time = sheet[1][5]
+                first_download_time = int(time.mktime(time.strptime(first_download_time, "%Y/%m/%d %H:%M:%S")))
+            elif crawler == "kanyikan":
+                sheet = Feishu.get_values_batch(log_type, "kanyikan", "20ce0c")
+                # print(sheet[1])
+                # 已下载表,最新一条视频抓取时间
+                first_download_time = sheet[1][5]
+                first_download_time = int(time.mktime(time.strptime(first_download_time, "%Y/%m/%d %H:%M:%S")))
+            elif crawler == "music_album":
+                sheet = Feishu.get_values_batch(log_type, "music_album", "f5a76e")
+                # print(sheet[1])
+                # 已下载表,最新一条视频抓取时间
+                first_download_time = sheet[1][5]
+                first_download_time = int(time.mktime(time.strptime(first_download_time, "%Y/%m/%d %H:%M:%S")))
+            elif crawler == "bszf":
+                sheet = Feishu.get_values_batch(log_type, "bszf", "440018")
+                # print(sheet[1])
+                # 已下载表,最新一条视频抓取时间
+                first_download_time = sheet[1][4]
+                first_download_time = int(time.mktime(time.strptime(first_download_time, "%Y/%m/%d %H:%M:%S")))
+            elif crawler == "kuaishou":
+                sheet = Feishu.get_values_batch(log_type, "kuaishou", "3cd128")
+                # print(sheet[1])
+                # 已下载表,最新一条视频抓取时间
+                first_download_time = sheet[1][5]
+                first_download_time = int(time.mktime(time.strptime(first_download_time, "%Y/%m/%d %H:%M:%S")))
+            elif crawler == "gzh":
+                sheet = Feishu.get_values_batch(log_type, "gzh", "fCs3BT")
+                # print(sheet[1])
+                # 已下载表,最新一条视频抓取时间
+                first_download_time = sheet[1][3]
+                first_download_time = int(time.mktime(time.strptime(first_download_time, "%Y/%m/%d %H:%M:%S")))
+            else:
+                sheet = Feishu.get_values_batch(log_type, "xiaoniangao", "yatRv2")
+                # 已下载表,最新一条视频抓取时间
+                first_download_time = sheet[1][5]
+                first_download_time = int(time.mktime(time.strptime(first_download_time, "%Y/%m/%d %H:%M:%S")))
+
+            return first_download_time
+        except Exception as e:
+            Common.logger(log_type).error("get_first_time异常:{}", e)
+
+    # 获取各个爬虫的 feeds 表
+    @classmethod
+    def get_feeds_sheet(cls, log_type, crawler, sheet):
+        try:
+            if crawler == "kanyikan" and sheet == "recommend":
+                feeds_sheet = Feishu.get_values_batch(log_type, "kanyikan", "SdCHOM")
+            elif crawler == "kanyikan" and sheet == "moment":
+                feeds_sheet = Feishu.get_values_batch(log_type, "kanyikan", "tGqZMX")
+            elif crawler == "xiaoniangao" and sheet == "hour":
+                feeds_sheet = Feishu.get_values_batch(log_type, "xiaoniangao", "ba0da4")
+            elif crawler == "xiaoniangao" and sheet == "person":
+                feeds_sheet = Feishu.get_values_batch(log_type, "xiaoniangao", "k6ldje")
+            elif crawler == "music_album" and sheet == "recommend":
+                feeds_sheet = Feishu.get_values_batch(log_type, "music_album", "69UxPo")
+            elif crawler == "bszf" and sheet == "recommend":
+                feeds_sheet = Feishu.get_values_batch(log_type, "bszf", "CcHgO7")
+            elif crawler == "kuaishou" and sheet == "recommend":
+                feeds_sheet = Feishu.get_values_batch(log_type, "kuaishou", "JK6npf")
+            elif crawler == "kuaishou" and sheet == "follow":
+                feeds_sheet = Feishu.get_values_batch(log_type, "kuaishou", "wW5cyb")
+            elif crawler == "gzh" and sheet == "recommend":
+                feeds_sheet = Feishu.get_values_batch(log_type, "gzh", "zWKFGb")
+            else:
+                feeds_sheet = f"请输入{crawler}和{sheet}"
+
+            return feeds_sheet
+        except Exception as e:
+            Common.logger(log_type).error("get_feeds_sheet异常:{}", e)
+
+    # feeds_sheet表报警:连续 2 小时无数据
+    @classmethod
+    def rebot_feeds_sheet(cls, log_type, crawler, sheet):
+        """
+        每隔一分钟获取一次表数据的数量:
+            1.中途有数据时,退出此次监控
+            2.连续2小时无数据时,触发机器人报警
+        """
+        # kanyikan_recommend_sheet = Feishu.get_values_batch(log_type, "kanyikan", "SdCHOM")
+        # kanyikan_moment_sheet = Feishu.get_values_batch(log_type, "kanyikan", "tGqZMX")
+        # xiaoniangao_hour_sheet = Feishu.get_values_batch(log_type, "xiaoniangao", "ba0da4")
+        # xiaoniangao_person_sheet = Feishu.get_values_batch(log_type, "xiaoniangao", "k6ldje")
+        # music_album_recommend_sheet = Feishu.get_values_batch(log_type, "music_album", "69UxPo")
+        # bszf_recommend_sheet = Feishu.get_values_batch(log_type, "bszf", "CcHgO7")
+        # kuaishou_recommend_sheet = Feishu.get_values_batch(log_type, "kuaishou", "JK6npf")
+        # kuaishou_follow_sheet = Feishu.get_values_batch(log_type, "kuaishou", "wW5cyb")
+        # gzh_recommend_sheet = Feishu.get_values_batch(log_type, "gzh", "zWKFGb")
+
+        for i in range(120):
+            if len(cls.get_feeds_sheet(log_type, crawler, sheet)) > 1:
+                break
+            else:
+                time.sleep(60)
+                if i == 119 and crawler == "kanyikan" and sheet == "recommend":
+                    Feishu.bot(log_type, "kanyikan", "看一看推荐榜表,已经 2 小时无数据了😤")
+                elif i == 119 and crawler == "kanyikan" and sheet == "moment":
+                    Feishu.bot(log_type, "kanyikan", "看一看朋友圈表,已经 2 小时无数据了😤")
+                elif i == 119 and crawler == "xiaoniangao" and sheet == "person":
+                    Feishu.bot(log_type, "xiaoniangao", "小年糕用户主页表,已经 2 小时无数据了😤")
+                elif i == 119 and crawler == "music_album" \
+                        and sheet == "recommend" and datetime.datetime.now().hour < 13:
+                    Feishu.bot(log_type, "music_album", "音乐相册推荐表,已经 2 小时无数据了😤")
+                elif i == 119 and crawler == "bszf" and sheet == "recommend" and datetime.datetime.now().hour < 13:
+                    Feishu.bot(log_type, "bszf", "本山祝福推荐表,已经 2 小时无数据了😤")
+                elif i == 119 and crawler == "kuaishou" and sheet == "recommend":
+                    Feishu.bot(log_type, "kuaishou", "快手推荐表,已经 2 小时无数据了😤")
+                elif i == 119 and crawler == "kuaishou" and sheet == "follow":
+                    Feishu.bot(log_type, "kuaishou", "快手关注表,已经 2 小时无数据了😤")
+                elif i == 119 and crawler == "gzh" and sheet == "recommend":
+                    Feishu.bot(log_type, "gzh", "公众号推荐表,已经 2 小时无数据了😤")
+
+    # 触发机器人报警:超过24小时没有新入库的视频
+    @classmethod
+    def robot_download_sheet(cls, log_type, crawler, duration):
+        """
+        已下载视频表:超过24小时没有新入库的视频
+        """
+        try:
+            if crawler == "kanyikan" and (int(time.time()) - cls.get_first_time(log_type, crawler) > int(duration)):
+                Feishu.bot(log_type, crawler, "看一看已下载表,超过24小时没有新视频入库了😤")
+                Common.logger(log_type).info("看一看已下载表,超过24小时没有新视频入库了😤\n")
+            elif crawler == "xiaoniangao" and (
+                    int(time.time()) - cls.get_first_time(log_type, crawler) > int(duration)):
+                Feishu.bot(log_type, crawler, "小年糕已下载表,超过24小时没有新视频入库了😤")
+                Common.logger(log_type).info("小年糕已下载表,超过24小时没有新视频入库了😤\n")
+            elif crawler == "music_album" and (
+                    int(time.time()) - cls.get_first_time(log_type, crawler) > int(duration)):
+                Feishu.bot(log_type, crawler, "音乐相册已下载表,超过24小时没有新视频入库了😤")
+                Common.logger(log_type).info("音乐相册已下载表,超过24小时没有新视频入库了😤\n")
+            elif crawler == "bszf" and (int(time.time()) - cls.get_first_time(log_type, crawler) > int(duration)):
+                Feishu.bot(log_type, crawler, "本山祝福已下载表,超过24小时没有新视频入库了😤")
+                Common.logger(log_type).info("本山祝福已下载表,超过24小时没有新视频入库了😤\n")
+            elif crawler == "kuaishou" and (int(time.time()) - cls.get_first_time(log_type, crawler) > int(duration)):
+                Feishu.bot(log_type, crawler, "快手已下载表,超过24小时没有新视频入库了😤")
+                Common.logger(log_type).info("快手已下载表,超过24小时没有新视频入库了😤\n")
+            elif crawler == "gzh" and (int(time.time()) - cls.get_first_time(log_type, crawler) > int(duration)):
+                Feishu.bot(log_type, crawler, "公众号已下载表,超过24小时没有新视频入库了😤]️")
+                Common.logger(log_type).info("公众号已下载表,超过24小时没有新视频入库了😤\n")
+        except Exception as e:
+            Common.logger(log_type).error("robot_alarm异常:{}", e)
+
+    # 监控运行入口
+    @classmethod
+    def main(cls):
+        """
+        每隔一小时,检查一次已下载表;
+        已下载表的最新一条数据抓取时间,距当前时间超过 24 小时,则触发机器人报警,发送飞书报警消息
+        """
+        # 已下载表,超过 24 小时无新视频入库报警
+        duration = 3600 * 24
+        while True:
+            if 21 > datetime.datetime.now().hour >= 10:
+
+                Common.logger("bot").info("监控看一看已下载表")
+                Bot.robot_download_sheet("bot", "kanyikan", duration)
+
+                Common.logger("bot").info("监控小年糕已下载表")
+                Bot.robot_download_sheet("bot", "xiaoniangao", duration)
+
+                Common.logger("bot").info("监控音乐相册已下载表")
+                Bot.robot_download_sheet("bot", "music_album", duration)
+
+                Common.logger("bot").info("监控本山祝福已下载表")
+                Bot.robot_download_sheet("bot", "bszf", duration)
+
+                Common.logger("bot").info("监控快手已下载表")
+                Bot.robot_download_sheet("bot", "kuaishou", duration)
+
+                # Common.logger("bot").info("监控公众号已下载表")
+                # Bot.robot_alarm("bot", "gzh", duration)
+
+                Common.del_logs("bot")
+                Common.logger("bot").info("休眠 1 小时")
+                time.sleep(3600)
+            else:
+                Common.logger("bot").info("今日监控完毕\n")
+                time.sleep(3600)
+
+
+if __name__ == "__main__":
+    # Bot.get_feeds_sheet("bot", "gzh", "recommend")
+    # Bot.rebot_feeds_sheet("bot", "gzh", "recommend")
+    Bot.main()
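
A quick manual check (not part of the commit): run a single downloaded-table inspection for one crawler with a custom threshold, here 1 hour instead of the 24 hours used in main().

from main.run_bot import Bot

# alert via Feishu if the newest "xiaoniangao" download is older than 1 hour
Bot.robot_download_sheet("bot", "xiaoniangao", 3600)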