wangkun · 2 years ago
commit 4cffde854e

+ 5 - 0
README.md

@@ -0,0 +1,5 @@
+https://w42nne6hzg.feishu.cn/sheets/shtcnlZWYazInhf7Z60jkbLRJyd?sheet=N7e2yI
+
+Monitoring of downloaded crawler video data
+
+
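For orientation, the monitoring sheet linked above (worksheet N7e2yI) is read through the Feishu helper added in this commit; a minimal sketch, assuming the repository root is on sys.path and the app credentials in main/feishu_lib.py are still valid:

    # read the whole monitoring worksheet (same call used in main/demo.py)
    from main.feishu_lib import Feishu

    rows = Feishu.get_values_batch("xiaoniangao", "monitor", "N7e2yI")
    print(len(rows), rows[0])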

+ 3 - 0
chlsfiles/__init__.txt

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/6/27

+ 3 - 0
logs/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/6/27

+ 3 - 0
main/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/6/27

+ 222 - 0
main/common.py

@@ -0,0 +1,222 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/6/27
+
+"""
+公共方法,包含:生成log / 删除log / 获取session  / 下载方法 / 读取文件 / 统计下载数
+"""
+import json
+from datetime import date, timedelta
+import datetime
+import os
+import time
+import requests
+import urllib3
+from loguru import logger
+
+proxies = {"http": None, "https": None}
+
+
+class Common:
+    # Current time, e.g. <class 'datetime.datetime'>  2022-04-14 20:13:51.244472
+    now = datetime.datetime.now()
+    # Yesterday as a string, e.g. "2022-04-13"
+    yesterday = (date.today() + timedelta(days=-1)).strftime("%Y-%m-%d")
+    # Today as a date, e.g. <class 'datetime.date'>  2022-04-14
+    today = date.today()
+    # Tomorrow as a string, e.g. "2022-04-15"
+    tomorrow = (date.today() + timedelta(days=1)).strftime("%Y-%m-%d")
+
+    # Build a logger with the loguru module
+    @staticmethod
+    def logger(log_type):
+        """
+        Build a logger with the loguru module
+        """
+        # Log directory
+        log_dir = r"./logs/"
+        log_path = os.getcwd() + os.sep + log_dir
+        if not os.path.isdir(log_path):
+            os.makedirs(log_path)
+
+        # Log file name
+        if log_type == "kanyikan":
+            log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + '-monitor-kanyikan.log'
+        elif log_type == "xiaoniangao":
+            log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + '-monitor-xiaoniangao.log'
+        else:
+            log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + '.log'
+
+        # Do not print logs to the console
+        logger.remove(handler_id=None)
+
+        # rotation="500 MB": start a new file every 500 MB
+        # rotation="12:00": start a new file every day at 12:00
+        # rotation="1 week": start a new file every week
+        # retention="10 days": clean up old log files after 10 days
+        # Initialise the log sink
+        logger.add(log_dir + log_name, level="INFO", rotation='00:00')
+
+        return logger
+
+    @classmethod
+    def del_logs(cls, log_type):
+        """
+        Remove redundant log files, keeping only the most recent ones
+        """
+        log_dir = r"./logs/"
+        all_files = sorted(os.listdir(log_dir))
+        all_logs = []
+        for log in all_files:
+            name = os.path.splitext(log)[-1]
+            if name == ".log":
+                all_logs.append(log)
+
+        if len(all_logs) <= 6:
+            pass
+        else:
+            # delete everything except the 7 newest log files
+            for file in all_logs[:len(all_logs) - 7]:
+                os.remove(log_dir + file)
+        cls.logger(log_type).info("清除冗余日志成功")
+
+    # Delete charles capture files, keeping only the two most recent
+    @classmethod
+    def del_charles_files(cls):
+        # All files in the target folder, oldest first
+        all_file = sorted(os.listdir(r"./chlsfiles/"))
+        for file in all_file[0:-2]:
+            os.remove(r"./chlsfiles/" + file)
+        cls.logger("kanyikan").info("删除 charles 缓存文件成功")
+
+    @classmethod
+    def download_method(cls, log_type, text, d_name, d_url):
+        """
+        Download the cover when text == "cover"; download the video when text == "video"
+        d_name: title of the video to download
+        d_url: cover URL, or video playback URL
+        Files are saved under "./videos/{d_name}/"
+        """
+        # Create a folder that holds everything related to this video
+        # (os.mkdir assumes the ./videos/ directory itself already exists)
+        video_dir = "./videos/" + d_name + "/"
+        if not os.path.exists(video_dir):
+            os.mkdir(video_dir)
+
+        # Download the video
+        if text == "video":
+            # Video URL to download
+            video_url = d_url
+            # File name of the video
+            video_name = "video.mp4"
+
+            # Download the video
+            urllib3.disable_warnings()
+            try:
+                response = requests.get(video_url, stream=True, proxies=proxies, verify=False)
+                with open(video_dir + video_name, "wb") as f:
+                    for chunk in response.iter_content(chunk_size=10240):
+                        f.write(chunk)
+                cls.logger(log_type).info("==========视频下载完成==========")
+            except Exception as e:
+                cls.logger(log_type).exception("视频下载失败:{}", e)
+
+        # Download the cover
+        elif text == "cover":
+            # Cover URL to download
+            cover_url = d_url
+            # File name of the cover
+            cover_name = "image.jpg"
+
+            # Download the cover
+            urllib3.disable_warnings()
+            try:
+                response = requests.get(cover_url, proxies=proxies, verify=False)
+                with open(video_dir + cover_name, "wb") as f:
+                    f.write(response.content)
+                cls.logger(log_type).info("==========封面下载完成==========")
+            except Exception as e:
+                cls.logger(log_type).exception("封面下载失败:{}", e)
+
+    @classmethod
+    def get_session(cls):
+        # Directory where charles capture files are saved
+        charles_file_dir = "./chlsfiles/"
+
+        if len(os.listdir(charles_file_dir)) == 1:
+            cls.logger("kanyikan").info("未找到chlsfile文件,等待60s")
+            time.sleep(60)
+        else:
+            try:
+                # All files in the target folder, oldest first
+                all_file = sorted(os.listdir(charles_file_dir))
+
+                # The newest capture file
+                old_file = all_file[-1]
+
+                # Split file name and extension
+                new_file = os.path.splitext(old_file)
+
+                # Rename the file with a .txt extension
+                os.rename(os.path.join(charles_file_dir, old_file),
+                          os.path.join(charles_file_dir, new_file[0] + ".txt"))
+
+                with open(charles_file_dir + new_file[0] + ".txt", encoding='utf-8-sig', errors='ignore') as f:
+                    contents = json.load(f, strict=False)
+                if "search.weixin.qq.com" in [text['host'] for text in contents]:
+                    for text in contents:
+                        if text["host"] == "search.weixin.qq.com" \
+                                and text["path"] == "/cgi-bin/recwxa/recwxagetunreadmessagecnt":
+                            sessions = text["query"].split("session=")[-1].split("&wxaVersion=")[0]
+                            # Strip any trailing query parameter that may follow the session value
+                            for sep in ("&vid", "&offset", "&wxaVersion", "&limit", "&scene", "&count",
+                                        "&channelid", "&subscene", "&clientVersion", "&sharesearchid",
+                                        "&nettype", "&switchprofile", "&switchnewuser"):
+                                if sep in sessions:
+                                    return sessions.split(sep)[0]
+                            return sessions
+                else:
+                    cls.logger("kanyikan").info("未找到 session,10s后重新获取")
+                    time.sleep(10)
+                    return cls.get_session()
+            except Exception as e:
+                cls.logger("kanyikan").exception("获取 session 异常,30s后重试:{}", e)
+                time.sleep(30)
+                return cls.get_session()
+
+
+if __name__ == "__main__":
+    common = Common()
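A minimal usage sketch of the Common helpers above, assuming the repository root is the working directory and is on sys.path; the folder name and cover URL are placeholders:

    import os
    from main.common import Common

    log = Common.logger("kanyikan")          # writes ./logs/<date>-monitor-kanyikan.log
    log.info("monitor started")

    os.makedirs("./videos", exist_ok=True)   # download_method only creates the per-video subfolder
    Common.download_method("kanyikan", "cover", "demo_video", "https://example.com/cover.jpg")

    session = Common.get_session()           # parses the newest charles capture in ./chlsfiles/
    Common.del_logs("kanyikan")              # prune old log files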

+ 224 - 0
main/demo.py

@@ -0,0 +1,224 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/6/27
+import json
+import os
+import time
+from datetime import date
+
+import requests
+
+from main.common import Common
+from main.feishu_lib import Feishu
+
+
+class Demo:
+    @classmethod
+    def feishu(cls):
+        value = Feishu.get_range_value("xiaoniangao", "monitor", "N7e2yI", "B4:B4")
+        print(type(value))
+        print(value)
+
+    @classmethod
+    def today(cls):
+        today = date.today()
+        print(today)
+
+        value = Feishu.get_range_value("xiaoniangao", "monitor", "N7e2yI", "J1:J1")
+        print(value)
+
+        print(Feishu.update_values("xiaoniangao", "monitor", "N7e2yI", "J1:K1", [[str(today)]]))
+
+    @classmethod
+    def lists(cls):
+        list1 = [6445, 52077, 15333]
+        list2 = [[x] for x in list1]
+        print(list2)
+
+    @classmethod
+    def get_sheet(cls):
+        xiaoniangao_sheet = Feishu.get_values_batch("xiaoniangao", "monitor", "N7e2yI")
+        video_id = xiaoniangao_sheet[1][6]
+        user_id = xiaoniangao_sheet[1][9]
+        user_mid = xiaoniangao_sheet[1][10]
+        print(video_id)
+        print(user_id)
+        print(user_mid)
+
+    @classmethod
+    def get_session(cls):
+        # Directory where charles capture files are saved
+        charles_file_dir = "../chlsfiles/"
+
+        if len(os.listdir(charles_file_dir)) == 1:
+            Common.logger("kanyikan").info("未找到chlsfile文件,等待60s")
+            time.sleep(60)
+        else:
+            try:
+                # All files in the target folder, oldest first
+                all_file = sorted(os.listdir(charles_file_dir))
+
+                # The newest capture file
+                old_file = all_file[-1]
+
+                # Split file name and extension
+                new_file = os.path.splitext(old_file)
+
+                # Rename the file with a .txt extension
+                os.rename(os.path.join(charles_file_dir, old_file),
+                          os.path.join(charles_file_dir, new_file[0] + ".txt"))
+
+                with open(charles_file_dir + new_file[0] + ".txt", encoding='utf-8-sig', errors='ignore') as f:
+                    contents = json.load(f, strict=False)
+                if "search.weixin.qq.com" in [text['host'] for text in contents]:
+                    for text in contents:
+                        if text["host"] == "search.weixin.qq.com" \
+                                and text["path"] == "/cgi-bin/recwxa/recwxagetunreadmessagecnt":
+                            sessions = text["query"].split("session=")[-1].split("&wxaVersion=")[0]
+                            # Strip any trailing query parameter that may follow the session value
+                            for sep in ("&vid", "&offset", "&wxaVersion", "&limit", "&scene", "&count",
+                                        "&channelid", "&subscene", "&clientVersion", "&sharesearchid",
+                                        "&nettype", "&switchprofile", "&switchnewuser"):
+                                if sep in sessions:
+                                    return sessions.split(sep)[0]
+                            return sessions
+                else:
+                    Common.logger("kanyikan").info("未找到 session,10s后重新获取")
+                    time.sleep(10)
+                    return cls.get_session()
+            except Exception as e:
+                Common.logger("kanyikan").exception("获取 session 异常,30s后重试:{}", e)
+                time.sleep(30)
+                return cls.get_session()
+
+    @classmethod
+    def get_video_info(cls, video_id):
+        url = "https://search.weixin.qq.com/cgi-bin/recwxa/recwxagetonevideoinfo?"
+        param = {
+            "session": cls.get_session(),
+            "vid": video_id,
+            "wxaVersion": "3.9.2",
+            "channelid": "208201",
+            "scene": "32",
+            "subscene": "1089",
+            "model": "iPhone 11<iPhone12,1>14.7.1",
+            "clientVersion": "8.0.18",
+            "sharesearchid": "447665862521758270",
+            "sharesource": "-1"
+        }
+        r = requests.get(url=url, params=param)
+        # parse the response body once and reuse it
+        data = r.json()["data"]
+        video_id = data['vid']
+        video_title = data['title']
+        play_cnt = data["played_cnt"]
+        shared_cnt = data["shared_cnt"]
+        if "items" not in data["play_info"]:
+            video_url = data["play_info"][-1]["play_url"]
+            # video_url = data["play_info"]
+        else:
+            video_url = data["play_info"]["items"][-1]["play_url"]
+            # video_url = data["play_info"]
+        print(f"video_id:{video_id}")
+        print(f"video_title:{video_title}")
+        print(f"play_cnt:{play_cnt}")
+        print(f"shared_cnt:{shared_cnt}")
+        print(f"video_url:{video_url}")
+
+
+        # response = {
+        #     'data': {
+        #         'collection_info': {
+        #             'collectionid': 'mmsearchrecwxacollection_496906082_1641976100173',
+        #             'episode_count': 0,
+        #             'img': '',
+        #             'latest_episode': 0,
+        #             'size': 0,
+        #             'title': ''
+        #         },
+        #         'comment_cnt': 54,
+        #         'composite_type': 0,
+        #         'composite_video_info': {
+        #             'first_composite_img_url': ''
+        #         },
+        #         'cover_url': 'http://mmbiz.qpic.cn/wx_search/duc2TvpEgSTib1eic8eJ1MrictHrfvedFwfUGYhyjlJicpnrTrygHcSuTg/0',
+        #         'duration': 210,
+        #         'height': 1200,
+        #         'isUGCVideo': True,
+        #         'is_expose_comment': False,
+        #         'is_following': False,
+        #         'is_forbid_comment': False,
+        #         'is_livereserve': False,
+        #         'item_type': 16,
+        #         'liked': False,
+        #         'liked_cnt': 1865,
+        #         'openid': 'oh_m45TTUoolLfj0x0vHl0QdlSw8',
+        #         'owner_head_image': 'http://mmbiz.qpic.cn/wx_search/duc2TvpEgSRgbVcqh8gWWmQicsib3HJ97O6zfzwiazWMyIIzkKJxluhgQ/0',
+        #         'play_icon_cover_url': 'http://mmbiz.qpic.cn/wx_search/Q3auHgzwzM6hiaHcSicmACTMfgzFdsLu77ovZnnx8MSLHhz2vYTxiaUfcrllqxW5t0bfNArKjOJBxw/0',
+        #         'play_info': {
+        #             'cdn_gif_url': '',
+        #             'cdn_source_type': 4,
+        #             'end_play_time_in_ms': 0,
+        #             'video_api': '',
+        #             'watermark_type': 0
+        #         },
+        #         'played_cnt': 347350,
+        #         'shared_cnt': 35463,
+        #         'size': 0,
+        #         'status': 10,
+        #         'title': '太恶心了_现在的外卖,正在毁掉我们的下一代_Merge',
+        #         'upload_time': 1647850532,
+        #         'user_info': {
+        #         'headimg_url': 'http://mmbiz.qpic.cn/wx_search/duc2TvpEgSRgbVcqh8gWWmQicsib3HJ97O6zfzwiazWMyIIzkKJxluhgQ/0',
+        #         'nickname': '代替不了',
+        #         'openid': 'oh_m45TTUoolLfj0x0vHl0QdlSw8'
+        #         },
+        #         'vid': 'ugc_b42rvhr',
+        #         'video_type': 1,
+        #         'width': 720
+        #     },
+        #     'errcode': 0,
+        #     'msg': 'ok',
+        #     'retcode': 0
+        # }
+
+
+if __name__ == "__main__":
+    demo = Demo()
+    # demo.feishu()
+    # demo.today()
+    # demo.lists()
+    # demo.get_sheet()
+    demo.get_video_info("ugc_iqrmf5")

+ 908 - 0
main/feishu_lib.py

@@ -0,0 +1,908 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/6/27
+import json
+import time
+
+import requests
+import urllib3
+from main.common import Common
+
+proxies = {"http": None, "https": None}
+
+
+class Feishu:
+    """
+    Edit Feishu (Lark) spreadsheet documents
+    """
+    # Kanyikan crawler data sheet
+    kanyikan_url = "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?"
+    # Kuaishou crawler data sheet
+    kuaishou_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnp4SaJt37q6OOOrYzPMjQkg?"
+    # Weishi crawler data sheet
+    weishi_url = "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?"
+    # Xiaoniangao crawler data sheet
+    xiaoniangao_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?"
+    # Twitter crawler sheet
+    twitter_url = "https://whtlrai9ej.feishu.cn/sheets/shtcn6BYfYuqegIP13ORB6rI2dh?"
+    # Crawler video monitoring sheet
+    spreadsheettoken_monitor = "https://w42nne6hzg.feishu.cn/sheets/shtcnlZWYazInhf7Z60jkbLRJyd?"
+
+    # Spreadsheet token of each Feishu document
+    @classmethod
+    def spreadsheettoken(cls, spreadsheettoken):
+        """
+        :param spreadsheettoken: which spreadsheet
+        """
+        if spreadsheettoken == "kanyikan":
+            return "shtcngRPoDYAi24x52j2nDuHMih"
+        elif spreadsheettoken == "kuaishou":
+            return "shtcnp4SaJt37q6OOOrYzPMjQkg"
+        elif spreadsheettoken == "weishi":
+            return "shtcn5YSWg91JfVGzj0SFZIRRPh"
+        elif spreadsheettoken == "xiaoniangao":
+            return "shtcnYxiyQ1wLklo1W5Kdqc9cGh"
+        elif spreadsheettoken == "twitter":
+            return "shtcn6BYfYuqegIP13ORB6rI2dh"
+        elif spreadsheettoken == "monitor":
+            return "shtcnlZWYazInhf7Z60jkbLRJyd"
+
+    # Get the Feishu API token
+    @classmethod
+    def get_token(cls, log_type):
+        """
+        Get the Feishu API token
+        :return:
+        """
+        time.sleep(1)
+        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
+        post_data = {"app_id": "cli_a13ad2afa438d00b",  # app_id / app_secret of the published Feishu app
+                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
+
+        try:
+            urllib3.disable_warnings()
+            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
+            tenant_access_token = response.json()["tenant_access_token"]
+            return tenant_access_token
+        except Exception as e:
+            Common.logger(log_type).error("获取飞书 api token 异常:{}", e)
+
+    # Get spreadsheet metadata
+    @classmethod
+    def get_metainfo(cls, log_type, spreadsheettoken):
+        """
+        Get spreadsheet metadata
+        :return:
+        """
+        get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                           + cls.spreadsheettoken(spreadsheettoken) + "/metainfo"
+
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            "extFields": "protectedRange",  # extra fields to return; extFields=protectedRange also returns protected row/column info
+            "user_id_type": "open_id"  # type of user id to return: open_id or union_id
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
+            response = json.loads(r.content.decode("utf8"))
+            return response
+        except Exception as e:
+            Common.logger(log_type).error("获取表格元数据异常:{}", e)
+
+    # Read all data from a worksheet
+    @classmethod
+    def get_values_batch(cls, log_type, spreadsheettoken, sheetid):
+        """
+        Read all data from a worksheet
+        :param spreadsheettoken: which crawler's spreadsheet
+        :param log_type: which log
+        :param sheetid: which worksheet
+        :return: all values
+        """
+        get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                               + cls.spreadsheettoken(spreadsheettoken) + "/values_batch_get"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            # Multiple ranges can be queried, e.g. url?ranges=range1,range2; each range contains a sheetId plus a cell range
+            "ranges": sheetid,
+
+            # valueRenderOption=ToString returns plain-text values (except numeric types);
+            # valueRenderOption=FormattedValue evaluates and formats the cells;
+            # valueRenderOption=Formula returns the formula itself when a cell contains one;
+            # valueRenderOption=UnformattedValue evaluates but does not format the cells
+            "valueRenderOption": "ToString",
+
+            # dateTimeRenderOption=FormattedString evaluates and formats date/time values (numbers are left as-is) and returns the formatted string
+            "dateTimeRenderOption": "",
+
+            # Type of user id to return: open_id or union_id
+            "user_id_type": "open_id"
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
+            response = json.loads(r.content.decode("utf8"))
+            values = response["data"]["valueRanges"][0]["values"]
+            return values
+        except Exception as e:
+            Common.logger(log_type).error("读取工作表所有数据异常:{}", e)
+
+    # Insert rows or columns into a worksheet
+    @classmethod
+    def insert_columns(cls, log_type, spreadsheettoken, sheetid, majordimension, startindex, endindex):
+        """
+        Insert rows or columns into a worksheet
+        :param log_type: which log
+        :param spreadsheettoken: which crawler's spreadsheet
+        :param sheetid: which worksheet
+        :param majordimension: rows or columns
+        :param startindex: start position
+        :param endindex: end position
+        """
+        insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                             + cls.spreadsheettoken(spreadsheettoken) + "/insert_dimension_range"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "dimension": {
+                "sheetId": sheetid,
+                "majorDimension": majordimension,  # ROWS (default) or COLUMNS
+                "startIndex": startindex,  # start position
+                "endIndex": endindex  # end position
+            },
+            "inheritStyle": "AFTER"  # BEFORE or AFTER; omit to not inherit style
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("插入行或列:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("插入行或列异常:{}", e)
+
+    # Write data
+    @classmethod
+    def update_values(cls, log_type, spreadsheettoken, sheetid, ranges, values):
+        """
+        Write data
+        :param log_type: which log
+        :param spreadsheettoken: which crawler's spreadsheet
+        :param sheetid: which worksheet
+        :param ranges: cell range
+        :param values: the data to write, as a list
+        """
+        update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                            + cls.spreadsheettoken(spreadsheettoken) + "/values_batch_update"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "valueRanges": [
+                {
+                    "range": sheetid + "!" + ranges,
+                    "values": values
+                },
+            ],
+        }
+
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("写入数据:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("写入数据异常:{}", e)
+
+    # Merge cells
+    @classmethod
+    def merge_cells(cls, log_type, spreadsheettoken, sheetid, ranges):
+        """
+        Merge cells
+        :param spreadsheettoken: which crawler's spreadsheet
+        :param log_type: which log
+        :param sheetid: which worksheet
+        :param ranges: the cell range to merge
+        """
+        merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                          + cls.spreadsheettoken(spreadsheettoken) + "/merge_cells"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+
+        body = {
+            "range": sheetid + "!" + ranges,
+            "mergeType": "MERGE_ROWS"
+        }
+
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("合并单元格:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("合并单元格异常:{}", e)
+
+    # Read cell data
+    @classmethod
+    def get_range_value(cls, log_type, spreadsheettoken, sheetid, cell):
+        """
+        Read the content of a cell
+        :param spreadsheettoken: which crawler's spreadsheet
+        :param log_type: which log
+        :param sheetid: which worksheet
+        :param cell: which cell
+        :return: cell content
+        """
+        get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                              + cls.spreadsheettoken(spreadsheettoken) + "/values/" + sheetid + "!" + cell
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            # valueRenderOption=ToString returns plain-text values (except numeric types);
+            # valueRenderOption=FormattedValue evaluates and formats the cells;
+            # valueRenderOption=Formula returns the formula itself when a cell contains one;
+            # valueRenderOption=UnformattedValue evaluates but does not format the cells.
+            "valueRenderOption": "FormattedValue",
+
+            # dateTimeRenderOption=FormattedString evaluates and formats date/time values (numbers are left as-is) and returns the formatted string.
+            "dateTimeRenderOption": "",
+
+            # Type of user id to return: open_id or union_id
+            "user_id_type": "open_id"
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
+            return r.json()["data"]["valueRange"]["values"][0]
+        except Exception as e:
+            Common.logger(log_type).error("读取单元格数据异常:{}", e)
+
+    # Delete rows or columns; majorDimension can be ROWS or COLUMNS
+    @classmethod
+    def dimension_range(cls, log_type, spreadsheettoken, sheetid, major_dimension, startindex, endindex):
+        """
+        Delete rows or columns
+        :param spreadsheettoken: which crawler's spreadsheet
+        :param log_type: which log
+        :param sheetid: which worksheet
+        :param major_dimension: ROWS (default) or COLUMNS
+        :param startindex: start position
+        :param endindex: end position
+        :return:
+        """
+        dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                              + cls.spreadsheettoken(spreadsheettoken) + "/dimension_range"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "dimension": {
+                "sheetId": sheetid,
+                "majorDimension": major_dimension,
+                "startIndex": startindex,
+                "endIndex": endindex
+            }
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("删除视频数据:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("删除视频数据异常:{}", e)
+
+    # Find a cell
+    @classmethod
+    def find_cell(cls, log_type, spreadsheettoken, sheetid, find_text):
+        """
+        Find a cell
+        :param spreadsheettoken: which crawler's spreadsheet
+        :param log_type: which log
+        :param sheetid: which worksheet
+        # :param ranges: cell range
+        :param find_text: the text to search for
+        :return: the row index of the matched cell
+        """
+        find_cell_url = "https://open.feishu.cn/open-apis/sheets/v3/spreadsheets/" \
+                        + cls.spreadsheettoken(spreadsheettoken) + "/sheets/" \
+                        + sheetid + "/find"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        rows_count = len(cls.get_values_batch(log_type, spreadsheettoken, "db114c"))
+        body = {
+            "find_condition": {
+                "range": sheetid + "!A1:A" + str(rows_count),
+                "match_case": True,  # whether to ignore case
+                "match_entire_cell": False,  # whether the whole cell must match
+                "search_by_regex": False,  # whether to match by regular expression
+                "include_formulas": False  # whether to search formula contents
+            },
+            "find": find_text  # the text to search for
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=find_cell_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("查找单元格:{}", r.json()["msg"])
+            matched_cell = r.json()["data"]["find_result"]["matched_cells"][0].split("A")[-1]
+            return matched_cell
+        except Exception as e:
+            Common.logger(log_type).error("查找单元格异常:{}", e)
+
+    # Filter rows (filter API)
+    @classmethod
+    def filter_created_at(cls, log_type):
+        filter_created_at_url = "https://open.feishu.cn/open-apis/sheets/v3/spreadsheets/" \
+                                "shtcn8fFzDhCFHpB6vzf51s2xbf/sheets/48cfb0/filter"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "col": "A",
+            "condition": {
+                "filter_type": "number",
+                "compare_type": "less",
+                "expected": [
+                    "6"
+                ]
+            }
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.put(url=filter_created_at_url, headers=headers, json=body, proxies=proxies, verify=False)
+            print(r.json())
+        except Exception as e:
+            Common.logger(log_type).error("查找单元格异常:{}", e)
+
+
+class Bitable:
+    """
+    Bitable (multi-dimensional table) API
+    Document: https://w42nne6hzg.feishu.cn/base/bascnpAYvIA0B1hBtNJlriZceUV?table=tblqMbXrpqFbDLNE&view=vewsMtek0O
+    app_token: bascnpAYvIA0B1hBtNJlriZceUV
+    """
+    app_token = "bascnpAYvIA0B1hBtNJlriZceUV"
+    table_id = "tblqMbXrpqFbDLNE"
+    page_token = ""  # pagination token used when listing records
+
+    # Get the Feishu API token
+    @classmethod
+    def tenant_access_token(cls, log_type):
+        """
+        Get the Feishu API token
+        :return:
+        """
+        time.sleep(1)
+        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
+        post_data = {"app_id": "cli_a13ad2afa438d00b",  # app_id / app_secret of the published Feishu app
+                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
+
+        try:
+            urllib3.disable_warnings()
+            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
+            tenant_access_token = response.json()["tenant_access_token"]
+            return tenant_access_token
+        except Exception as e:
+            Common.logger(log_type).error("获取tenant_access_token异常:{}", e)
+
+    # Get Bitable metadata
+    @classmethod
+    def get_apps(cls, log_type):
+        """
+        Get Bitable metadata
+        Rate limit: 20 QPS
+        https://open.feishu.cn/document/uAjLw4CM/ukTMukTMukTM/reference/bitable-v1/app/get
+        """
+        url = "https://open.feishu.cn/open-apis/bitable/v1/apps/" + cls.app_token
+        headers = {
+            "Authorization": "Bearer " + cls.tenant_access_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, proxies=proxies, verify=False)
+            Common.logger(log_type).info("获取多维表格元数据,code:{},msg:{}", r.json()["code"], r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("获取多维表格元数据异常:{}", e)
+
+    # List data tables
+    @classmethod
+    def get_tables(cls, log_type):
+        """
+        List data tables
+        Rate limit: 20 QPS
+        https://open.feishu.cn/document/uAjLw4CM/ukTMukTMukTM/reference/bitable-v1/app-table/list
+        """
+        url = "https://open.feishu.cn/open-apis/bitable/v1/apps/" + cls.app_token + "/tables"
+        headers = {
+            "Authorization": "Bearer " + cls.tenant_access_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            "page_token": "",
+            "page_size": ""
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, proxies=proxies, verify=False)
+            Common.logger(log_type).info("列出数据表,code:{},msg:{}", r.json()["code"], r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("列出数据表异常:{}", e)
+
+    # List records
+    @classmethod
+    def list_records(cls, log_type, count):
+        """
+        List the existing records of a data table; at most 100 rows per call, with pagination
+        Rate limit: 1000 calls/minute
+        https://open.feishu.cn/document/uAjLw4CM/ukTMukTMukTM/reference/bitable-v1/app-table-record/list
+        """
+        url = "https://open.feishu.cn/open-apis/bitable/v1/apps/" \
+              + cls.app_token + "/tables/" + cls.table_id + "/records"
+        headers = {
+            "Authorization": "Bearer " + cls.tenant_access_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            "view_id": "",  # view id; note:
+            # ignored if filter or sort has a value.
+            # example: "vewqhz51lk"
+            "filter": "",  # filter expression; note:
+            # 1. the expression must not exceed 2000 characters.
+            # 2. filtering on "person" and "linked record" properties (e.g. a user's OpenID) is not supported.
+            # 3. only the character value shown on the page can be filtered.
+            # details: https://open.feishu.cn/document/uAjLw4CM/ukTMukTMukTM/reference/bitable-v1/filter
+            # example expression: "AND(CurrentValue.[身高]>180, CurrentValue.[体重]>150)"
+            "sort": "",  # sort expression; note:
+            # 1. the expression must not exceed 1000 characters.
+            # 2. not supported on tables that use "formula" and "linked record" fields.
+            # example: "["字段1 DESC","字段2 ASC"]
+            # note: quote the field name together with its sort order."
+            "field_names": "[]",  # field names, e.g. "["字段1"]"
+            "text_field_as_array": True,  # return format of multi-line text fields; true returns them as arrays. Note:
+            # 1. if the multi-line text contains hyperlinks, the link URL is returned.
+            # 2. currently returns URL types for Bitable links, Feishu docs, Feishu sheets, and @user structures.
+            # example: true
+            # "user_id_type": "",  # user id type
+            # example: "open_id"
+            # options:
+            # open_id: the user's open id
+            # union_id: the user's union id
+            # user_id: the user's user id
+            # default: open_id
+            "display_formula_ref": "",  # whether formulas and lookup references return the full original result. Example: true
+            "automatic_fields": "",  # whether to return automatically computed fields
+            # such as created_by/created_time/last_modified_by/last_modified_time; true returns them
+            # example: true
+            "page_token": "",  # pagination marker
+            # leave empty on the first request to start from the beginning;
+            # when more pages remain, a new page_token is returned
+            # and can be passed on the next request
+            # example: "recn0hoyXL"
+            "page_size": count  # page size, e.g. 10; maximum 100
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, proxies=proxies, verify=False)
+            cls.page_token = r.json()["data"]["page_token"]
+            items = r.json()["data"]["items"]
+            for item in items:
+                print(item)
+            Common.logger(log_type).info("列出记录,code:{},msg:{}", r.json()["code"], r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("列出记录异常:{}", e)
+
+    # Retrieve a record
+    @classmethod
+    def search_records(cls, log_type, record_id):
+        """
+        Retrieve an existing record by its record_id
+        Rate limit: 20 QPS
+        https://open.feishu.cn/document/uAjLw4CM/ukTMukTMukTM/reference/bitable-v1/app-table-record/get
+        """
+        url = "https://open.feishu.cn/open-apis/bitable/v1/apps/" \
+              + cls.app_token + "/tables/" + cls.table_id + "/records/" + record_id
+        headers = {
+            "Authorization": "Bearer " + cls.tenant_access_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            "text_field_as_array": True,  # return format of multi-line text fields; true returns them as arrays. Example: true
+            # "user_id_type": "",  # user id type
+            # example: "open_id"
+            # options:
+            # open_id: the user's open id
+            # union_id: the user's union id
+            # user_id: the user's user id
+            # default: open_id
+            "display_formula_ref": True,  # whether formulas and lookup references return the full original result. Example: true
+            "automatic_fields": True,  # whether to return automatically computed fields
+            # such as created_by/created_time/last_modified_by/last_modified_time; true returns them. Example: true
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, proxies=proxies, verify=False)
+            Common.logger(log_type).info("检索记录,code:{},msg:{}", r.json()["code"], r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("检索记录异常:{}", e)
+
+    # Create a record
+    @classmethod
+    def create_record(cls, log_type, fields):
+        """
+        Create one record in a data table
+        Rate limit: 10 QPS
+        https://open.feishu.cn/document/uAjLw4CM/ukTMukTMukTM/reference/bitable-v1/app-table-record/create
+        """
+        url = "https://open.feishu.cn/open-apis/bitable/v1/apps/" \
+              + cls.app_token + "/tables/" + cls.table_id + "/records"
+        headers = {
+            "Authorization": "Bearer " + cls.tenant_access_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = fields
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("新增记录,code:{},msg:{}", r.json()["code"], r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("新增记录异常:{}", e)
+
+    # Create multiple records
+    @classmethod
+    def create_records(cls, log_type, records):
+        """
+        Create multiple records in a data table
+        Rate limit: 10 QPS
+        https://open.feishu.cn/document/uAjLw4CM/ukTMukTMukTM/reference/bitable-v1/app-table-record/batch_create
+        """
+        url = "https://open.feishu.cn/open-apis/bitable/v1/apps/" \
+              + cls.app_token + "/tables/" + cls.table_id + "/records/batch_create"
+        headers = {
+            "Authorization": "Bearer " + cls.tenant_access_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "records": records
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("新增多条记录,code:{},msg:{}", r.json()["code"], r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("新增多条记录异常:{}", e)
+
+    # Update a record
+    @classmethod
+    def update_record(cls, log_type, record_id, fields):
+        """
+        Update one record in a data table
+        Rate limit: 10 QPS
+        https://open.feishu.cn/document/uAjLw4CM/ukTMukTMukTM/reference/bitable-v1/app-table-record/update
+        """
+        url = "https://open.feishu.cn/open-apis/bitable/v1/apps/" \
+              + cls.app_token + "/tables/" + cls.table_id + "/records/" + record_id
+        headers = {
+            "Authorization": "Bearer " + cls.tenant_access_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = fields
+        try:
+            urllib3.disable_warnings()
+            r = requests.put(url=url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("更新记录,code:{},msg:{}", r.json()["code"], r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("更新记录异常:{}", e)
+
+    # Update multiple records
+    @classmethod
+    def update_records(cls, log_type, records):
+        """
+        Update multiple records in a data table
+        Rate limit: 10 QPS
+        https://open.feishu.cn/document/uAjLw4CM/ukTMukTMukTM/reference/bitable-v1/app-table-record/batch_update
+        """
+        url = "https://open.feishu.cn/open-apis/bitable/v1/apps/" \
+              + cls.app_token + "/tables/" + cls.table_id + "/records/batch_update"
+        headers = {
+            "Authorization": "Bearer " + cls.tenant_access_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = records
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("更新多条记录,code:{},msg:{}", r.json()["code"], r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("更新多条记录异常:{}", e)
+
+    # Delete a record
+    @classmethod
+    def del_record(cls, log_type, record_id):
+        """
+        Delete one record from a data table
+        Rate limit: 10 QPS
+        https://open.feishu.cn/document/uAjLw4CM/ukTMukTMukTM/reference/bitable-v1/app-table-record/delete
+        """
+        url = "https://open.feishu.cn/open-apis/bitable/v1/apps/" \
+              + cls.app_token + "/tables/" + cls.table_id + "/records/" + record_id
+        headers = {
+            "Authorization": "Bearer " + cls.tenant_access_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.delete(url=url, headers=headers, proxies=proxies, verify=False)
+            Common.logger(log_type).info("删除记录,code:{},msg:{}", r.json()["code"], r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("删除记录异常:{}", e)
+
+    # Delete multiple records
+    @classmethod
+    def del_records(cls, log_type, record_ids):
+        """
+        Delete multiple existing records from a data table
+        Rate limit: 10 QPS
+        https://open.feishu.cn/document/uAjLw4CM/ukTMukTMukTM/reference/bitable-v1/app-table-record/batch_delete
+        """
+        url = "https://open.feishu.cn/open-apis/bitable/v1/apps/" \
+              + cls.app_token + "/tables/" + cls.table_id + "/records/batch_delete"
+        headers = {
+            "Authorization": "Bearer " + cls.tenant_access_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "records": record_ids  # list of record ids to delete, e.g. ["recIcJBbvC","recvmiCORa"]
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("删除多条记录,code:{},msg:{}", r.json()["code"], r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("删除多条记录异常:{}", e)
+
+
+if __name__ == "__main__":
+    # feishu = Feishu()
+    # print(feishu.get_bitable_token())
+
+    'reck6nLiZV'
+    'recHcfJZnG'
+    'recxdSMhzE'
+
+    # Instantiate the Bitable helper
+    bitable = Bitable()
+
+    # # Get Bitable metadata
+    # bitable.get_apps()
+    #
+    # # List data tables
+    # bitable.get_tables()
+    #
+    # # List records
+    # bitable.list_records(3)
+    #
+    # # Retrieve a record
+    # bitable.search_records("recHcfJZnG")
+
+    # # Create one record
+    # create_value = {
+    #     "fields": {
+    #         "uid": "0000000000",
+    #         "key_words": "0000000000",
+    #         "name": "功能开发🥕",
+    #         "screen_name": "功能开发🥕",
+    #         "person_url": {
+    #             "link": "https://bytedance.feishu.cn/drive/home/", "text": "https://bytedance.feishu.cn/drive/home/"
+    #         },
+    #         "description": "功能开发🥕",
+    #         "location": "null",
+    #         "friends_count": 9999999999,
+    #         "followers_count": 9999999999,
+    #         "favourites_count": 9999999999,
+    #         "listed_count": 9999999999,
+    #         "statuses_count": 9999999999,
+    #         "media_count": 9999999999,
+    #         "display_url": {
+    #             "link": "https://bytedance.feishu.cn/drive/home/",
+    #             "text": "https://bytedance.feishu.cn/drive/home/"
+    #         },
+    #         "created_at": 1656053209000,
+    #         "profile_image_url": {
+    #             "link": "https://bytedance.feishu.cn/drive/home/",
+    #             "text": "https://bytedance.feishu.cn/drive/home/"
+    #         },
+    #         "profile_banner_url": {
+    #             "link": "null",
+    #             "text": "null"
+    #         },
+    #         "ext_has_nft_avatar": "False",
+    #         "verified": "False",
+    #         "记录创建时间": 1656053209000,
+    #         # "记录修改时间": ""
+    #     }
+    # }
+    # bitable.create_record(create_value)
+
+    # Create multiple records
+    # create_values = {
+    #     "fields": {
+    #         "uid": "0000000000",
+    #         "key_words": "0000000000",
+    #         "name": "功能开发🥕",
+    #         "screen_name": "功能开发🥕",
+    #         "person_url": {
+    #             "link": "https://bytedance.feishu.cn/drive/home/", "text": "https://bytedance.feishu.cn/drive/home/"
+    #         },
+    #         "description": "功能开发🥕",
+    #         "location": "null",
+    #         "friends_count": 9999999999,
+    #         "followers_count": 9999999999,
+    #         "favourites_count": 9999999999,
+    #         "listed_count": 9999999999,
+    #         "statuses_count": 9999999999,
+    #         "media_count": 9999999999,
+    #         "display_url": {
+    #             "link": "https://bytedance.feishu.cn/drive/home/",
+    #             "text": "https://bytedance.feishu.cn/drive/home/"
+    #         },
+    #         "created_at": 1656053209000,
+    #         "profile_image_url": {
+    #             "link": "https://bytedance.feishu.cn/drive/home/",
+    #             "text": "https://bytedance.feishu.cn/drive/home/"
+    #         },
+    #         "profile_banner_url": {
+    #             "link": "null",
+    #             "text": "null"
+    #         },
+    #         "ext_has_nft_avatar": "False",
+    #         "verified": "False",
+    #         "记录创建时间": 1656053209000,
+    #         # "记录修改时间": ""
+    #     }
+    # }
+    # values_list = [create_values, create_values]
+    # bitable.create_records(values_list)
+
+    # # Update one record
+    # use_record_id = "recxdSMhzE"
+    # use_fields = {
+    #     "fields": {
+    #         "uid": "1111111111",
+    #         "key_words": "1111111111",
+    #         "name": "功能开发🥕",
+    #         "screen_name": "功能开发🥕",
+    #         "person_url": {
+    #             "link": "https://bytedance.feishu.cn/drive/home/", "text": "https://bytedance.feishu.cn/drive/home/"
+    #         },
+    #         "description": "功能开发🥕",
+    #         "location": "null",
+    #         "friends_count": 9999999999,
+    #         "followers_count": 9999999999,
+    #         "favourites_count": 9999999999,
+    #         "listed_count": 9999999999,
+    #         "statuses_count": 9999999999,
+    #         "media_count": 9999999999,
+    #         "display_url": {
+    #             "link": "https://bytedance.feishu.cn/drive/home/",
+    #             "text": "https://bytedance.feishu.cn/drive/home/"
+    #         },
+    #         "created_at": 1656053209000,
+    #         "profile_image_url": {
+    #             "link": "https://bytedance.feishu.cn/drive/home/",
+    #             "text": "https://bytedance.feishu.cn/drive/home/"
+    #         },
+    #         "profile_banner_url": {
+    #             "link": "null",
+    #             "text": "null"
+    #         },
+    #         "ext_has_nft_avatar": "False",
+    #         "verified": "False",
+    #         "记录创建时间": 1656053209000,
+    #         # "记录修改时间": ""
+    #     }
+    # }
+    # bitable.update_record(use_record_id, use_fields)
+
+    # # Update multiple records
+    # "recxdSMhzE"
+    # "recHcfJZnG"
+    # use_records = {
+    #     "records": [
+    #         {
+    #             "record_id": "recxdSMhzE",
+    #             "fields": {
+    #                 "uid": "3333333333",
+    #                 "key_words": "3333333333",
+    #                 "name": "功能开发🥕",
+    #                 "screen_name": "功能开发🥕",
+    #                 "person_url": {
+    #                     "link": "https://bytedance.feishu.cn/drive/home/",
+    #                     "text": "https://bytedance.feishu.cn/drive/home/"
+    #                 },
+    #                 "description": "功能开发🥕",
+    #                 "location": "null",
+    #                 "friends_count": 9999999999,
+    #                 "followers_count": 9999999999,
+    #                 "favourites_count": 9999999999,
+    #                 "listed_count": 9999999999,
+    #                 "statuses_count": 9999999999,
+    #                 "media_count": 9999999999,
+    #                 "display_url": {
+    #                     "link": "https://bytedance.feishu.cn/drive/home/",
+    #                     "text": "https://bytedance.feishu.cn/drive/home/"
+    #                 },
+    #                 "created_at": 1656053209000,
+    #                 "profile_image_url": {
+    #                     "link": "https://bytedance.feishu.cn/drive/home/",
+    #                     "text": "https://bytedance.feishu.cn/drive/home/"
+    #                 },
+    #                 "profile_banner_url": {
+    #                     "link": "null",
+    #                     "text": "null"
+    #                 },
+    #                 "ext_has_nft_avatar": "False",
+    #                 "verified": "False",
+    #                 "记录创建时间": 1656053209000,
+    #                 # "记录修改时间": ""
+    #             }
+    #         },
+    #         {
+    #             "record_id": "recHcfJZnG",
+    #             "fields": {
+    #                 "uid": "3333333333",
+    #                 "key_words": "3333333333",
+    #                 "name": "功能开发🥕",
+    #                 "screen_name": "功能开发🥕",
+    #                 "person_url": {
+    #                     "link": "https://bytedance.feishu.cn/drive/home/",
+    #                     "text": "https://bytedance.feishu.cn/drive/home/"
+    #                 },
+    #                 "description": "功能开发🥕",
+    #                 "location": "null",
+    #                 "friends_count": 9999999999,
+    #                 "followers_count": 9999999999,
+    #                 "favourites_count": 9999999999,
+    #                 "listed_count": 9999999999,
+    #                 "statuses_count": 9999999999,
+    #                 "media_count": 9999999999,
+    #                 "display_url": {
+    #                     "link": "https://bytedance.feishu.cn/drive/home/",
+    #                     "text": "https://bytedance.feishu.cn/drive/home/"
+    #                 },
+    #                 "created_at": 1656053209000,
+    #                 "profile_image_url": {
+    #                     "link": "https://bytedance.feishu.cn/drive/home/",
+    #                     "text": "https://bytedance.feishu.cn/drive/home/"
+    #                 },
+    #                 "profile_banner_url": {
+    #                     "link": "null",
+    #                     "text": "null"
+    #                 },
+    #                 "ext_has_nft_avatar": "False",
+    #                 "verified": "False",
+    #                 "记录创建时间": 1656053209000,
+    #                 # "记录修改时间": ""
+    #             }
+    #         }
+    #     ]
+    # }
+    # bitable.update_records(use_records)
+
+    # # Delete one record
+    # bitable.del_record("reck6nLiZV")
+
+    # # Delete multiple records
+    # bitable.del_records(['recHcfJZnG', 'recxdSMhzE'])
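A minimal usage sketch of the Feishu sheet helpers above; the sheet key ("monitor"), worksheet id ("XS1f7l"), and ranges mirror main/update_kanyikan.py, and the written date is a placeholder:

    from main.feishu_lib import Feishu

    # read one cell and the whole worksheet of the kanyikan monitoring sheet
    header_date = Feishu.get_range_value("kanyikan", "monitor", "XS1f7l", "M1:M1")
    all_rows = Feishu.get_values_batch("kanyikan", "monitor", "XS1f7l")

    # insert a new column M and write a date into M1
    Feishu.insert_columns("kanyikan", "monitor", "XS1f7l", "COLUMNS", 12, 13)
    Feishu.update_values("kanyikan", "monitor", "XS1f7l", "M1:M1", [["2022-06-27"]])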

+ 31 - 0
main/run_update_kanyikan.py

@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/6/27
+import datetime
+import os
+import sys
+import time
+
+sys.path.append(os.getcwd())
+from main.common import Common
+from main.update_kanyikan import UpdateKanYiKan
+
+
+class Main:
+    @classmethod
+    def main(cls):
+        # while True:
+        #     while True:
+        #         main_time = datetime.datetime.now()
+        start_time = time.time()
+        today = Common.today
+        UpdateKanYiKan.check_data(today)
+        UpdateKanYiKan.update_play_cnt()
+        end_time = time.time()
+        Common.del_logs("kanyikan")
+        Common.logger("kanyikan").info("全部更新完成,共耗时:{}秒\n", int(end_time-start_time))
+
+
+if __name__ == "__main__":
+    main = Main()
+    main.main()

+ 35 - 0
main/run_update_xiaoniangao.py

@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/6/27
+import datetime
+import os
+import sys
+import time
+
+sys.path.append(os.getcwd())
+from main.common import Common
+from main.update_xiaoniangao import UpdateXiaoNianGao
+
+
+class Main:
+    @classmethod
+    def main(cls):
+        # while True:
+        # while True:
+        # main_time = datetime.datetime.now()
+        start_time = time.time()
+        today = Common.today
+        # if main_time.hour == 2:
+        UpdateXiaoNianGao.check_data(today)
+        UpdateXiaoNianGao.update_play_cnt()
+        end_time = time.time()
+        Common.del_logs("xiaoniangao")
+        Common.logger("xiaoniangao").info("全部更新完成,共耗时:{}秒\n", int(end_time-start_time))
+        #     break
+        # else:
+        #     pass
+
+
+if __name__ == "__main__":
+    main = Main()
+    main.main()
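The commented-out scaffolding above hints at a loop that runs the update once a day at 02:00; a hedged sketch of that intent (the 02:00 gate comes from the commented check, the sleep intervals are assumptions):

    import datetime
    import time

    while True:
        if datetime.datetime.now().hour == 2:   # trigger hour taken from the commented-out check
            Main.main()
            time.sleep(3600)                    # assumption: skip past the trigger hour so it runs once per day
        else:
            time.sleep(600)                     # assumption: poll every 10 minutes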

+ 99 - 0
main/update_kanyikan.py

@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/6/27
+import time
+import requests
+import urllib3
+from main.common import Common
+from main.feishu_lib import Feishu
+proxies = {"http": None, "https": None}
+
+
+class UpdateKanYiKan:
+    # 待更新数据列表
+    update_list = []
+
+    # 检查是否有今日的上升榜日期
+    @classmethod
+    def check_data(cls, date):
+        # 判断 M1 单元格的日期是否为今天
+        time.sleep(1)
+        if Feishu.get_range_value("kanyikan", "monitor", "XS1f7l", "M1:M1")[0] != str(date):
+            Common.logger("kanyikan").info("今天日期不存在")
+            # 在 M 列插入新列,并写入今天日期
+            values = [[str(date)]]
+            time.sleep(1)
+            Feishu.insert_columns("kanyikan", "monitor", "XS1f7l", "COLUMNS", 12, 13)
+            time.sleep(1)
+            Feishu.update_values("kanyikan", "monitor", "XS1f7l", "M1:M1", values)
+            Common.logger("kanyikan").info("插入今天日期成功\n")
+        else:
+            Common.logger("kanyikan").info("今天日期已存在\n")
+
+    # 更新看一看播放量
+    @classmethod
+    def update_play_cnt(cls):
+        try:
+            kanyikan_sheet = Feishu.get_values_batch("kanyikan", "monitor", "XS1f7l")
+            for i in range(1, len(kanyikan_sheet)):
+                Common.logger("kanyikan").info("正在更新第{}行", i + 1)
+                video_id = kanyikan_sheet[i][6]
+
+                if video_id is None or video_id == "":
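+                    # 空行写入占位值 "0",保证待更新列表与表格行号一一对应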
+                    Common.logger("kanyikan").info("空行")
+                    cls.update_list.append("0")
+                else:
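+                    # 请求看一看视频详情接口(recwxagetonevideoinfo),取返回结果中的 data.played_cnt 作为最新播放量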
+                    url = "https://search.weixin.qq.com/cgi-bin/recwxa/recwxagetonevideoinfo?"
+                    param = {
+                        "session": Common.get_session(),
+                        "vid": video_id,
+                        "wxaVersion": "3.9.2",
+                        "channelid": "208201",
+                        "scene": "32",
+                        "subscene": "1089",
+                        "model": "iPhone 11<iPhone12,1>14.7.1",
+                        "clientVersion": "8.0.18",
+                        "sharesearchid": "447665862521758270",
+                        "sharesource": "-1"
+                    }
+                    urllib3.disable_warnings()
+                    r = requests.get(url=url, params=param, proxies=proxies, verify=False)
+                    v_play_cnt = r.json()["data"]["played_cnt"]
+
+                    # 更新单个视频
+                    # Feishu.update_values(
+                    #     "kanyikan", "monitor", "XS1f7l", "G" + str(i + 1) + ":" + "G" + str(i + 1), [[v_play_cnt]])
+                    # Common.logger("kanyikan").info("video_id:{}, play_cnt:{}, 更新成功\n", video_id, v_play_cnt)
+
+                    # 待更新数据列表
+                    cls.update_list.append(v_play_cnt)
+
+                    Common.logger("kanyikan").info("video_id:{}, 播放量:{},已添加至待更新数据列表", video_id, v_play_cnt)
+                    Common.logger("kanyikan").info("待更新列表数量:{}\n", len(cls.update_list))
+
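+                # 每攒满 200 条就批量写回 M 列,对应行区间为 M(i-198):M(i+1);
+                # 遍历到最后一行时,把剩余不足 200 条的数据一次性写回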
+                if len(cls.update_list) >= 200:
+                    Common.logger("kanyikan").info("M{}:M{}", str(i-198), str(i+1))
+                    Common.logger("kanyikan").info("{}\n", cls.update_list)
+                    time.sleep(1)
+                    Feishu.update_values("kanyikan", "monitor", "XS1f7l",
+                                         "M"+str(i-198) + ":" + "M"+str(i+1), [[x] for x in cls.update_list])
+                    Common.logger("kanyikan").info("更新{}条播放量成功\n", len(cls.update_list))
+                    cls.update_list = []
+                elif i+1 == len(kanyikan_sheet):
+                    Common.logger("kanyikan").info("M{}:M{}", str(i + 1 - len(cls.update_list) + 1), str(i + 1))
+                    Common.logger("kanyikan").info("{}\n", cls.update_list)
+                    time.sleep(1)
+                    Feishu.update_values("kanyikan", "monitor", "XS1f7l",
+                                         "M"+str(i+1-len(cls.update_list)+1) + ":" + "M" + str(i+1),
+                                         [[x] for x in cls.update_list])
+                    Common.logger("kanyikan").info("更新{}条播放量成功\n", len(cls.update_list))
+                    cls.update_list = []
+                    return
+
+        except Exception as e:
+            Common.logger("kanyikan").error("更新看一看播放量异常:{}", e)
+
+
+if __name__ == "__main__":
+    kanyikan = UpdateKanYiKan()
+    kanyikan.check_data("2022/06/29")

+ 143 - 0
main/update_xiaoniangao.py

@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/6/23
+import time
+import requests
+import urllib3
+from main.common import Common
+from main.feishu_lib import Feishu
+proxies = {"http": None, "https": None}
+
+
+class UpdateXiaoNianGao:
+    # 待更新数据列表
+    update_list = []
+
+    # 检查是否有今日的上升榜日期
+    @classmethod
+    def check_data(cls, date):
+        # 判断 P1 单元格的日期是否为今天
+        time.sleep(1)
+        if Feishu.get_range_value("xiaoniangao", "monitor", "N7e2yI", "P1:P1")[0] != str(date):
+            Common.logger("xiaoniangao").info("今天日期不存在")
+            # 在 P 列插入新列,并写入今天日期
+            values = [[str(date)]]
+            time.sleep(1)
+            Feishu.insert_columns("xiaoniangao", "monitor", "N7e2yI", "COLUMNS", 15, 16)
+            time.sleep(1)
+            Feishu.update_values("xiaoniangao", "monitor", "N7e2yI", "P1:P1", values)
+            Common.logger("xiaoniangao").info("插入今天日期成功\n")
+        else:
+            Common.logger("xiaoniangao").info("今天日期已存在\n")
+
+    # 更新小年糕播放量
+    @classmethod
+    def update_play_cnt(cls):
+        try:
+            xiaoniangao_sheet = Feishu.get_values_batch("xiaoniangao", "monitor", "N7e2yI")
+            for i in range(1, len(xiaoniangao_sheet)):
+                Common.logger("xiaoniangao").info("正在更新第{}行", i+1)
+                video_id = xiaoniangao_sheet[i][6]
+                user_id = xiaoniangao_sheet[i][9]
+                user_mid = xiaoniangao_sheet[i][10]
+
+                if video_id is None or video_id == "" \
+                        or user_id is None or user_id == "" \
+                        or user_mid is None or user_mid == "":
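+                    # 空行写入占位值 "0",保证待更新列表与表格行号一一对应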
+                    Common.logger("xiaoniangao").info("空行")
+                    cls.update_list.append("0")
+                else:
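+                    # 请求小年糕 get_profile_by_id 接口,取返回结果中的 data.play_pv 作为最新播放量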
+                    url = "https://kapi.xiaoniangao.cn/profile/get_profile_by_id"
+                    headers = {
+                        "x-b3-traceid": "96376ab5ed525",
+                        "X-Token-Id": "35f5e036e103500b6b51f11adf1b345d-1145266232",
+                        "uid": "250e6514-fd83-446c-a880-e274c7f17bce",
+                        "content-type": "application/json",
+                        "Accept-Encoding": "gzip,compress,br,deflate",
+                        "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
+                                      ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
+                                      'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
+                        "Referer": "https://servicewechat.com/wxd7911e4c177690e4/623/page-frame.html"
+                    }
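+                    # 注意:X-Token-Id / token / uid 等疑似抓包得到的固定值,若接口返回异常,可能需要重新抓包替换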
+                    data = {
+                        "play_src": "1",
+                        "profile_id": int(user_id),
+                        "profile_mid": int(user_mid),
+                        "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/"
+                              "!400x400r/crop/400x400/interlace/1/format/jpg",
+                        "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail"
+                                "/!80x80r/crop/80x80/interlace/1/format/jpg",
+                        "share_width": 625,
+                        "share_height": 500,
+                        "no_comments": True,
+                        "no_follow": True,
+                        "vid": str(video_id),
+                        "hot_l1_comment": True,
+                        "token": "a7a4cb67d17ceb2e02a6d8bb6854beaa",
+                        "uid": "250e6514-fd83-446c-a880-e274c7f17bce",
+                        "proj": "ma",
+                        "wx_ver": "8.0.23",
+                        "code_ver": "3.67.0",
+                        "log_common_params": {
+                            "e": [{
+                                "data": {
+                                    "page": "dynamicSharePage"
+                                }
+                            }],
+                            "ext": {
+                                "brand": "iPhone",
+                                "device": "iPhone 11",
+                                "os": "iOS 14.7.1",
+                                "weixinver": "8.0.23",
+                                "srcver": "2.24.6",
+                                "net": "wifi",
+                                "scene": "1089"
+                            },
+                            "pj": "1",
+                            "pf": "2",
+                            "session_id": "d0c5c363-d571-4c8a-ab55-91a7ad284ee4"
+                        }
+                    }
+                    urllib3.disable_warnings()
+                    r = requests.post(headers=headers, url=url, json=data, proxies=proxies, verify=False)
+                    video_play_cnt = r.json()["data"]["play_pv"]
+
+                    # 单条更新
+                    # Feishu.update_values(
+                    #     "xiaoniangao", "monitor", "N7e2yI", "J" + str(i+1) + ":" + "J" + str(i+1), [[video_play_cnt]])
+                    # Common.logger("xiaoniangao").info("video_id:{}, play_cnt:{}, 更新成功\n", video_id, video_play_cnt)
+
+                    # 待更新数据列表
+                    cls.update_list.append(video_play_cnt)
+
+                    Common.logger("xiaoniangao").info("video_id:{}, 播放量:{},已添加至待更新数据列表", video_id, video_play_cnt)
+                    Common.logger("xiaoniangao").info("待更新列表数量:{}\n", len(cls.update_list))
+
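+                # 每攒满 200 条就批量写回 P 列,对应行区间为 P(i-198):P(i+1);
+                # 遍历到最后一行时,把剩余不足 200 条的数据一次性写回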
+                if len(cls.update_list) >= 200:
+                    Common.logger("xiaoniangao").info("P{}:P{}", str(i-198), str(i+1))
+                    Common.logger("xiaoniangao").info("{}\n", cls.update_list)
+                    time.sleep(1)
+                    Feishu.update_values(
+                        "xiaoniangao", "monitor", "N7e2yI",
+                        "P"+str(i-198) + ":" + "P"+str(i+1), [[x] for x in cls.update_list])
+                    Common.logger("xiaoniangao").info("更新{}条播放量成功\n", len(cls.update_list))
+                    cls.update_list = []
+                elif i+1 == len(xiaoniangao_sheet):
+                    Common.logger("xiaoniangao").info("P{}:P{}", str(i+1-len(cls.update_list)+1), str(i+1))
+                    Common.logger("xiaoniangao").info("{}\n", cls.update_list)
+                    time.sleep(1)
+                    Feishu.update_values(
+                        "xiaoniangao", "monitor", "N7e2yI",
+                        "P" + str(i+1-len(cls.update_list)+1) + ":" + "P" + str(i+1), [[x] for x in cls.update_list])
+                    Common.logger("xiaoniangao").info("更新{}条播放量成功\n", len(cls.update_list))
+                    cls.update_list = []
+                    return
+
+        except Exception as e:
+            Common.logger("xiaoniangao").error("更新小年糕播放量异常:{}", e)
+
+
+if __name__ == "__main__":
+    xng = UpdateXiaoNianGao()
+    xng.check_data("2022/06/29")
+    xng.update_play_cnt()