
Kuaishou Creator app content analysis API

zhangyong 9 months ago
parent
commit
b601b7d9fc

+ 0 - 0
channel/__init__.py


+ 74 - 0
channel/ks_analyze_photo.py

@@ -0,0 +1,74 @@
+import json
+from hashlib import md5
+
+import requests
+from flask import Flask, request, jsonify
+
+app = Flask(__name__)
+
+
+def process_range_data(data, label_key, value_key, selected_key=None, tips_key=None):
+    processed_data = []
+    for item in data:
+        processed_item = {label_key: item['label'], value_key: item['value']}
+        # not every item carries 'selected' / 'tips'; use .get() to avoid a KeyError
+        if selected_key and item.get('selected') is not None:
+            processed_item[selected_key] = item['selected']
+        if tips_key and item.get('tips') is not None:
+            processed_item[tips_key] = item['tips']
+        processed_data.append(processed_item)
+    return processed_data
+
+
+@app.route('/analyze_photo', methods=['GET'])
+def analyze_photo():
+    photo_id = request.args.get('photo_id')
+    if not photo_id:
+        return jsonify({"error": "photo_id is required"}), 400
+
+    url = 'https://creator-app.kuaishou.com/rest/bamboo/inspiration/n/photo/analysis'
+    headers = {
+        'Accept-Language': 'zh-cn',
+        'Connection': 'keep-alive',
+        'Content-Type': 'application/x-www-form-urlencoded',
+        'Host': 'creator-app.kuaishou.com',
+    }
+    query = {}
+    data = {
+        'photoId': photo_id,
+        'client_key': '214c9979',
+    }
+    # Kuaishou app-style signature: join the sorted key=value pairs, append the salt,
+    # take the md5 hex digest and send it as the `sig` query parameter
+    src = {}
+    src.update(query)
+    src.update(data)
+    src = ''.join(f'{key}={src[key]}' for key in sorted(src.keys()))
+    salt = '08d8eece8e83'
+    sig = md5(f'{src}{salt}'.encode()).hexdigest()
+    query['sig'] = sig
+    response = requests.post(url=url, headers=headers, params=query, data=data)
+
+    if response.status_code != 200:
+        return jsonify({"error": "Failed to analyze photo"}), response.status_code
+
+    body = response.content.decode()
+    json_body = json.loads(body)
+
+    # bail out early when the payload has no data/play section, so a bad response does not raise a KeyError
+    play = (json_body.get('data') or {}).get('play')
+    if not play:
+        return jsonify({"error": "unexpected response from kuaishou"}), 502
+    user_range = play['userRange']
+    age_range = user_range['ageRange']
+    sex_range = user_range['sexRange']
+    province_range = user_range['provinceRange']
+
+    age_range_data = process_range_data(age_range, '年龄范围', '占比', '挑选', '年龄范围占比')
+    sex_range_data = process_range_data(sex_range, '性别', '占比', '挑选')
+    province_range_data = process_range_data(province_range, '地域', '占比', '挑选')
+
+    result = {
+        "年龄范围": [item for item in age_range_data if item],
+        "性别占比": [item for item in sex_range_data if item],
+        "地域占比": [item for item in province_range_data if item]
+    }
+
+    return jsonify(result)
+
+
+if __name__ == '__main__':
+    app.run(host='0.0.0.0', port=5000)
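
A quick way to exercise this endpoint once the service is running (a usage sketch; the host/port come from app.run above, and the photo_id value is a placeholder rather than a real Kuaishou photoId):

import requests

# placeholder photo_id for illustration only
resp = requests.get('http://127.0.0.1:5000/analyze_photo', params={'photo_id': '12345'})
print(resp.status_code)
print(resp.json())  # expected keys: 年龄范围 / 性别占比 / 地域占比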

+ 6 - 0
common/__init__.py

@@ -0,0 +1,6 @@
+from .common import Common
+from .aliyun_oss_uploading import Oss
+from .material import Material
+from .feishu import Feishu
+from .db import MysqlHelper
+from .pq import PQ

+ 96 - 0
common/aliyun_oss_uploading.py

@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+from datetime import datetime
+from typing import Dict, Any, Optional
+
+import oss2
+import requests
+
+OSS_ACCESS_KEY_ID = "LTAIP6x1l3DXfSxm"
+OSS_ACCESS_KEY_SECRET = "KbTaM9ars4OX3PMS6Xm7rtxGr1FLon"
+OSS_BUCKET_ENDPOINT = "oss-cn-hangzhou-internal.aliyuncs.com"  # 内网地址
+# OSS_BUCKET_ENDPOINT = "oss-cn-hangzhou.aliyuncs.com"  # 外网地址
+OSS_BUCKET_NAME = "art-crawler"
+
+
+class Oss:
+    # 抓取视频上传到art-crawler
+    @classmethod
+    def video_sync_upload_oss(cls, src_url: str,
+                        video_id: str,
+                        account_id: str,
+                        OSS_BUCKET_PATH: str,
+                        referer: Optional[str] = None) -> Dict[str, Any]:
+        headers = {
+            'Accept': '*/*',
+            'Accept-Language': 'zh-CN,zh;q=0.9',
+            'Cache-Control': 'no-cache',
+            'Pragma': 'no-cache',
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '
+                          'Chrome/117.0.0.0 Safari/537.36',
+        }
+        if referer:
+            headers.update({'Referer': referer})
+        response = requests.request(url=src_url, method='GET', headers=headers)
+        file_content = response.content
+        content_type = response.headers.get('Content-Type', 'application/octet-stream')
+
+        oss_object_key = f'{OSS_BUCKET_PATH}/{account_id}/{video_id}'
+        auth = oss2.Auth(OSS_ACCESS_KEY_ID, OSS_ACCESS_KEY_SECRET)
+        bucket = oss2.Bucket(auth, OSS_BUCKET_ENDPOINT, OSS_BUCKET_NAME)
+        response = bucket.put_object(oss_object_key, file_content, headers={'Content-Type': content_type})
+
+        if 'Content-Length' in response.headers:
+            return {
+                'status': response.status,
+                'oss_object_key': oss_object_key}
+        raise AssertionError(f'OSS上传失败,请求ID: \n{response.headers["x-oss-request-id"]}')
+
+
+    #  视频发送到art-pubbucket
+    @classmethod
+    def stitching_sync_upload_oss(cls, src_url: str,
+                        video_id: str) -> Dict[str, Any]:
+        oss_object_key = f'agc_oss/agc_video/{video_id}'
+        auth = oss2.Auth(OSS_ACCESS_KEY_ID, OSS_ACCESS_KEY_SECRET)
+        bucket = oss2.Bucket(auth, OSS_BUCKET_ENDPOINT, "art-pubbucket")
+        response = bucket.put_object_from_file(oss_object_key, src_url)
+
+        if 'Content-Length' in response.headers:
+            return {
+                'status': response.status,
+                'oss_object_key': oss_object_key,
+                'save_oss_timestamp': int(datetime.now().timestamp() * 1000),
+            }
+        raise AssertionError(f'OSS上传失败,请求ID: \n{response.headers["x-oss-request-id"]}')
+
+
+    # 将 OSS 上的视频对象下载到本地 video_path,返回下载成功的记录列表
+    @classmethod
+    def get_oss_url(cls, videos, video_path):
+        auth = oss2.Auth(OSS_ACCESS_KEY_ID, OSS_ACCESS_KEY_SECRET)
+        bucket = oss2.Bucket(auth, OSS_BUCKET_ENDPOINT, OSS_BUCKET_NAME)
+        downloaded = []
+        for i in videos:
+            try:
+                # i[2] 为 oss_object_key,按原文件名下载为本地 mp4
+                filename = i[2].split("/")[-1]
+                bucket.get_object_to_file(i[2], f'{video_path}{filename}.mp4')
+                downloaded.append([i[0], i[1], i[2], f'{video_path}{filename}.mp4'])
+            except Exception:
+                continue
+        return downloaded
+
+    @classmethod
+    def download_url(cls, videos, video_path, video):
+        for i in range(3):
+            payload = {}
+            headers = {}
+            response = requests.request("GET", videos, headers=headers, data=payload)
+            if response.status_code == 200:
+                video_url = []
+                # 以二进制写入模式打开文件
+                video = video_path+video+'.mp4'
+                with open(f"{video}", "wb") as file:
+                    # 将响应内容写入文件
+                    file.write(response.content)
+                video_url.append(video)
+                return video_url
+        return ''
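
A minimal usage sketch for the crawler upload helper (the URL and IDs below are placeholders; note the configured endpoint is the Hangzhou internal one, so it only resolves from machines inside that Aliyun region):

from common.aliyun_oss_uploading import Oss

# all argument values here are illustrative placeholders
result = Oss.video_sync_upload_oss(
    src_url='https://example.com/video.mp4',
    video_id='v0001',
    account_id='acct001',
    OSS_BUCKET_PATH='agc_video',
)
print(result['status'], result['oss_object_key'])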

+ 48 - 0
common/common.py

@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# @Time: 2023/12/26
+"""
+公共方法,包含:生成log / 删除log / 下载方法 / 删除 weixinzhishu_chlsfiles / 过滤词库 / 保存视频信息至本地 txt / 翻译 / ffmpeg
+"""
+import os
+import sys
+
+sys.path.append(os.getcwd())
+from datetime import date, timedelta
+from datetime import datetime
+from loguru import logger
+
+proxies = {"http": None, "https": None}
+
+
+class Common:
+    # 统一获取当前时间 <class 'datetime.datetime'>  2022-04-14 20:13:51.244472
+    now = datetime.now()
+    # 昨天 <class 'str'>  2022-04-13
+    yesterday = (date.today() + timedelta(days=-1)).strftime("%Y-%m-%d")
+    # 今天 <class 'datetime.date'>  2022-04-14
+    today = date.today()
+    # 明天 <class 'str'>  2022-04-15
+    tomorrow = (date.today() + timedelta(days=1)).strftime("%Y-%m-%d")
+
+    # 使用 logger 模块生成日志
+    @staticmethod
+    def logger(log_type):
+        """
+        使用 logger 模块生成日志
+        """
+        # 日志路径
+        log_dir = f"./{log_type}/logs/"
+        log_path = os.path.join(os.getcwd(), log_dir)
+        os.makedirs(log_path, exist_ok=True)
+        # 日志文件名
+        log_name = f"{log_type}-{datetime.now().strftime('%Y-%m-%d')}.log"
+
+        # 日志不打印到控制台
+        logger.remove(handler_id=None)
+        # 初始化日志:写入 log_path,按天切割并保留 10 天
+        logger.add(os.path.join(log_path, log_name), level="INFO", rotation="00:00", retention="10 days", enqueue=True)
+
+        return logger
+
+
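
Typical call pattern for the logger helper (log_type only decides the directory and file name; 'agc' below is an arbitrary example value):

from common.common import Common

logger = Common.logger('agc')  # writes to ./agc/logs/agc-YYYY-MM-DD.log; nothing goes to the console
logger.info('任务开始')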

+ 139 - 0
common/db.py

@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+# @Time: 2023/12/26
+"""
+数据库连接及操作
+"""
+import redis
+import pymysql
+from common.common import Common
+# from common import Common
+
+class MysqlHelper:
+    @classmethod
+    def connect_mysql(cls, env, machine):
+        if machine == 'aliyun_hk':
+            # 创建一个 Connection 对象,代表了一个数据库连接
+            connection = pymysql.connect(
+                host="rm-j6cz4c6pt96000xi3.mysql.rds.aliyuncs.com",# 数据库IP地址,内网地址
+                # host="rm-j6cz4c6pt96000xi3lo.mysql.rds.aliyuncs.com",# 数据库IP地址,外网地址
+                port=3306,                      # 端口号
+                user="crawler",                 #  mysql用户名
+                passwd="crawler123456@",        # mysql用户登录密码
+                db="piaoquan-crawler" ,         # 数据库名
+                # 如果数据库里面的文本是utf8编码的,charset指定是utf8
+                charset = "utf8")
+        elif env == 'prod':
+            # 创建一个 Connection 对象,代表了一个数据库连接
+            connection = pymysql.connect(
+                host="rm-bp1159bu17li9hi94.mysql.rds.aliyuncs.com",# 数据库IP地址,内网地址
+                # host="rm-bp1159bu17li9hi94ro.mysql.rds.aliyuncs.com",# 数据库IP地址,外网地址
+                port=3306,                      # 端口号
+                user="crawler",                 #  mysql用户名
+                passwd="crawler123456@",        # mysql用户登录密码
+                db="piaoquan-crawler" ,         # 数据库名
+                # 如果数据库里面的文本是utf8编码的,charset指定是utf8
+                charset = "utf8")
+        else:
+            # 创建一个 Connection 对象,代表了一个数据库连接
+            connection = pymysql.connect(
+                host="rm-bp1k5853td1r25g3n690.mysql.rds.aliyuncs.com",# 数据库IP地址,内网地址
+                # host="rm-bp1k5853td1r25g3ndo.mysql.rds.aliyuncs.com",  # 数据库IP地址,外网地址
+                port=3306,  # 端口号
+                user="crawler",  # mysql用户名
+                passwd="crawler123456@",  # mysql用户登录密码
+                db="piaoquan-crawler",  # 数据库名
+                # 如果数据库里面的文本是utf8编码的,charset指定是utf8
+                charset="utf8")
+
+        return connection
+
+    @classmethod
+    def get_values(cls, sql, env):
+        try:
+            machine = ""
+            # 连接数据库
+            connect = cls.connect_mysql(env, machine)
+            # 返回一个 Cursor对象
+            mysql = connect.cursor()
+
+            # 执行 sql 语句
+            mysql.execute(sql)
+
+            # fetchall方法返回的是一个元组,里面每个元素也是元组,代表一行记录
+            data = mysql.fetchall()
+
+            # 关闭数据库连接
+            connect.close()
+
+            # 返回查询结果,元组
+            return data
+        except Exception as e:
+            print(f"get_values异常:{e}\n")
+            # Common.logger(log_type, crawler).error(f"get_values异常:{e}\n")
+
+    @classmethod
+    def update_values(cls, sql, env, machine):
+        # 连接数据库
+        connect = cls.connect_mysql(env, machine)
+        # 返回一个 Cursor对象
+        mysql = connect.cursor()
+
+        try:
+            # 执行 sql 语句
+            res = mysql.execute(sql)
+            # 注意 一定要commit,否则添加数据不生效
+            connect.commit()
+            return res
+        except Exception as e:
+            print(f"update_values异常,进行回滚操作:{e}\n")
+            # 发生错误时回滚
+            connect.rollback()
+        finally:
+            # 关闭数据库连接
+            connect.close()
+
+class RedisHelper:
+    @classmethod
+    def connect_redis(cls, env, machine):
+        if machine == 'aliyun_hk':
+            redis_pool = redis.ConnectionPool(
+                # host='r-bp154bpw97gptefiqk.redis.rds.aliyuncs.com',  # 内网地址
+                host='r-bp154bpw97gptefiqkpd.redis.rds.aliyuncs.com',  # 外网地址
+                port=6379,
+                db=2,
+                password='Qingqu2019'
+            )
+            redis_conn = redis.Redis(connection_pool=redis_pool)
+        elif env == 'prod':
+            redis_pool = redis.ConnectionPool(
+                host='r-bp1mb0v08fqi4hjffu.redis.rds.aliyuncs.com',  # 内网地址
+                # host='r-bp1mb0v08fqi4hjffupd.redis.rds.aliyuncs.com',  # 外网地址
+                port=6379,
+                db=2,
+                password='Qingqu2019'
+            )
+            redis_conn = redis.Redis(connection_pool=redis_pool)
+        else:
+            redis_pool = redis.ConnectionPool(
+                # host='r-bp154bpw97gptefiqk.redis.rds.aliyuncs.com',  # 内网地址
+                host='r-bp154bpw97gptefiqkpd.redis.rds.aliyuncs.com',  # 外网地址
+                port=6379,
+                db=2,
+                password='Qingqu2019'
+            )
+            redis_conn = redis.Redis(connection_pool=redis_pool)
+        return redis_conn
+
+    @classmethod
+    def redis_push(cls, env, machine, data):
+        redis_conn = cls.connect_redis(env, machine)
+        redis_conn.lpush(machine, data)
+
+    @classmethod
+    def redis_pop(cls, env, machine):
+        redis_conn = cls.connect_redis(env, machine)
+        if redis_conn.llen(machine) == 0:
+            return None
+        else:
+            return redis_conn.rpop(machine)
+
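
A usage sketch for the two helpers (table and queue names below are illustrative; note that `machine` doubles as the Redis list key in redis_push/redis_pop):

from common.db import MysqlHelper, RedisHelper

rows = MysqlHelper.get_values("select 1;", "prod")  # tuple of tuples, or None if the query failed
MysqlHelper.update_values("update agc_video_url set status = 1 where video_id = 'v0001';", "prod", "")
RedisHelper.redis_push("prod", "some_queue", "task-1")  # lpush onto the list named "some_queue"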

+ 390 - 0
common/feishu.py

@@ -0,0 +1,390 @@
+# -*- coding: utf-8 -*-
+"""
+飞书表配置: token 鉴权 / 增删改查 / 机器人报警
+"""
+import json
+import os
+import sys
+import requests
+import urllib3
+
+sys.path.append(os.getcwd())
+from common import Common
+
+proxies = {"http": None, "https": None}
+
+
+class Feishu:
+    """
+    编辑飞书云文档
+    """
+    succinct_url = "https://w42nne6hzg.feishu.cn/sheets/"
+    # 飞书路径token
+    @classmethod
+    def spreadsheettoken(cls, crawler):
+        if crawler == "summary":
+            return "IbVVsKCpbhxhSJtwYOUc8S1jnWb"
+        else:
+            return crawler
+
+
+
+    # 获取飞书api token
+    @classmethod
+    def get_token(cls):
+        """
+        获取飞书api token
+        :return:
+        """
+        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
+        post_data = {"app_id": "cli_a13ad2afa438d00b",  # 这里账号密码是发布应用的后台账号及密码
+                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
+
+        try:
+            urllib3.disable_warnings()
+            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
+            tenant_access_token = response.json()["tenant_access_token"]
+            return tenant_access_token
+        except Exception as e:
+            Common.logger("feishu").error("获取飞书 api token 异常:{}", e)
+
+    # 获取表格元数据
+    @classmethod
+    def get_metainfo(cls, crawler):
+        """
+        获取表格元数据
+        :return:
+        """
+        try:
+            get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                               + cls.spreadsheettoken(crawler) + "/metainfo"
+
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            params = {
+                "extFields": "protectedRange",  # 额外返回的字段,extFields=protectedRange时返回保护行列信息
+                "user_id_type": "open_id"  # 返回的用户id类型,可选open_id,union_id
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
+            response = json.loads(r.content.decode("utf8"))
+            return response
+        except Exception as e:
+            Common.logger("feishu").error("获取表格元数据异常:{}", e)
+
+    # 读取工作表中所有数据
+    @classmethod
+    def get_values_batch(cls, crawler, sheetid):
+        """
+        读取工作表中所有数据
+        :param crawler: 哪个爬虫
+        :param sheetid: 哪张表
+        :return: 所有数据
+        """
+        try:
+            get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                                   + cls.spreadsheettoken(crawler) + "/values_batch_get"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            params = {
+                "ranges": sheetid,
+                "valueRenderOption": "ToString",
+                "dateTimeRenderOption": "",
+                "user_id_type": "open_id"
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
+            response = json.loads(r.content.decode("utf8"))
+            values = response["data"]["valueRanges"][0]["values"]
+            return values
+        except Exception as e:
+            Common.logger("feishu").error("读取工作表所有数据异常:{}", e)
+
+    # 工作表,插入行或列
+    @classmethod
+    def insert_columns(cls,  crawler, sheetid, majordimension, startindex, endindex):
+        """
+        工作表插入行或列
+        :param crawler: 哪个爬虫的云文档
+        :param sheetid:哪张工作表
+        :param majordimension:行或者列, ROWS、COLUMNS
+        :param startindex:开始位置
+        :param endindex:结束位置
+        """
+        try:
+            insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                                 + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            body = {
+                "dimension": {
+                    "sheetId": sheetid,
+                    "majorDimension": majordimension,  # 默认 ROWS ,可选 ROWS、COLUMNS
+                    "startIndex": startindex,  # 开始的位置
+                    "endIndex": endindex  # 结束的位置
+                },
+                "inheritStyle": "AFTER"  # BEFORE 或 AFTER,不填为不继承 style
+            }
+
+            urllib3.disable_warnings()
+            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger("feishu").info("插入行或列:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger("feishu").error("插入行或列异常:{}", e)
+
+    # 写入数据
+    @classmethod
+    def update_values(cls, crawler, sheetid, ranges, values):
+        """
+        写入数据
+        :param crawler: 哪个爬虫的云文档
+        :param sheetid:哪张工作表
+        :param ranges:单元格范围
+        :param values:写入的具体数据,list
+        """
+        try:
+            update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                                + cls.spreadsheettoken(crawler) + "/values_batch_update"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            body = {
+                "valueRanges": [
+                    {
+                        "range": sheetid + "!" + ranges,
+                        "values": values
+                    },
+                ],
+            }
+            urllib3.disable_warnings()
+            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger("feishu").info("写入数据:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger("feishu").error("写入数据异常:{}", e)
+
+    # 合并单元格
+    @classmethod
+    def merge_cells(cls, log_type, crawler, sheetid, ranges):
+        """
+        合并单元格
+        :param log_type: 日志路径
+        :param crawler: 哪个爬虫
+        :param sheetid:哪张工作表
+        :param ranges:需要合并的单元格范围
+        """
+        try:
+            merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                              + cls.spreadsheettoken(crawler) + "/merge_cells"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+
+            body = {
+                "range": sheetid + "!" + ranges,
+                "mergeType": "MERGE_ROWS"
+            }
+            urllib3.disable_warnings()
+            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger("feishu").info("合并单元格:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger("feishu").error("合并单元格异常:{}", e)
+
+    # 读取单元格数据
+    @classmethod
+    def get_range_value(cls, crawler, sheetid, cell):
+        """
+        读取单元格内容
+        :param crawler: 哪个爬虫
+        :param sheetid: 哪张工作表
+        :param cell: 哪个单元格
+        :return: 单元格内容
+        """
+        try:
+            get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                                  + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            params = {
+                "valueRenderOption": "FormattedValue",
+
+                # dateTimeRenderOption=FormattedString 计算并对时间日期按照其格式进行格式化,但不会对数字进行格式化,返回格式化后的字符串。
+                "dateTimeRenderOption": "",
+
+                # 返回的用户id类型,可选open_id,union_id
+                "user_id_type": "open_id"
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
+            # print(r.text)
+            return r.json()["data"]["valueRange"]["values"][0]
+        except Exception as e:
+            Common.logger("feishu").error("读取单元格数据异常:{}", e)
+    # 获取表内容
+    @classmethod
+    def get_sheet_content(cls, crawler, sheet_id):
+        try:
+            sheet = Feishu.get_values_batch(crawler, sheet_id)
+            content_list = []
+            for x in sheet:
+                for y in x:
+                    if y is None:
+                        pass
+                    else:
+                        content_list.append(y)
+            return content_list
+        except Exception as e:
+            Common.logger("feishu").error(f'get_sheet_content:{e}\n')
+
+    # 删除行或列,可选 ROWS、COLUMNS
+    @classmethod
+    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
+        """
+        删除行或列
+        :param log_type: 日志路径
+        :param crawler: 哪个爬虫
+        :param sheetid:工作表
+        :param major_dimension:默认 ROWS ,可选 ROWS、COLUMNS
+        :param startindex:开始的位置
+        :param endindex:结束的位置
+        :return:
+        """
+        try:
+            dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                                  + cls.spreadsheettoken(crawler) + "/dimension_range"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            body = {
+                "dimension": {
+                    "sheetId": sheetid,
+                    "majorDimension": major_dimension,
+                    "startIndex": startindex,
+                    "endIndex": endindex
+                }
+            }
+            urllib3.disable_warnings()
+            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger("feishu").info("删除视频数据:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger("feishu").error("删除视频数据异常:{}", e)
+
+    # 获取用户 ID
+    @classmethod
+    def get_userid(cls, log_type, crawler, username):
+        try:
+            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            name_phone_dict = {
+                "xinxin": "15546206651",
+                "muxinyi": "13699208058",
+                "wangxueke": "13513479926",
+                "yuzhuoyi": "18624010360",
+                "luojunhui": "18801281360",
+                "fanjun": "15200827642",
+                "zhangyong": "17600025055"
+            }
+            username = name_phone_dict.get(username)
+
+            data = {"mobiles": [username]}
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
+            open_id = r.json()["data"]["mobile_users"][username][0]["open_id"]
+
+            return open_id
+        except Exception as e:
+            Common.logger("feishu").error(f"get_userid异常:{e}\n")
+
+    # 飞书机器人
+    @classmethod
+    def bot(cls, log_type, crawler, text, mark, mark_name):
+        try:
+            # url = "https://open.feishu.cn/open-apis/bot/v2/hook/5a6ce4ca-32fa-44fe-bbe4-69ae369bb3cf"
+            # url = "https://open.feishu.cn/open-apis/bot/v2/hook/2b317db6-93ed-43b4-bf01-03c35cfa1d59"
+            url = "https://open.feishu.cn/open-apis/bot/v2/hook/af368a84-545f-4106-9c4c-af64678ad7af"
+            headers = {'Content-Type': 'application/json'}
+            if crawler == "抖音":
+                content = "抖音cookie过期"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/IbVVsKCpbhxhSJtwYOUc8S1jnWb?sheet=n9xlLF"
+                users = "<at id=" + str(cls.get_userid(log_type, crawler, mark)) + f">{mark_name}</at>"
+            elif crawler == "管理后台":
+                content = "管理后台cookie过期"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/IbVVsKCpbhxhSJtwYOUc8S1jnWb?sheet=n9xlLF"
+                users = f"<at id=" + str(cls.get_userid(log_type, crawler, mark)) + f">{mark_name}</at>"
+            elif crawler == "快手":
+                content = "快手cookie过期"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/IbVVsKCpbhxhSJtwYOUc8S1jnWb?sheet=n9xlLF"
+                users = f"<at id=" + str(cls.get_userid(log_type, crawler, mark)) + f">{mark_name}</at>"
+            elif crawler == "AGC视频":
+                content = 'AGC视频生成条数详情'
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/IbVVsKCpbhxhSJtwYOUc8S1jnWb?sheet=n9xlLF"
+                users = f"<at id=" + str(cls.get_userid(log_type, crawler, mark)) + f">{mark_name}</at> \n"
+            elif crawler == "AGC完成通知":
+                content = "今日所有AGC视频完成啦~~~"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/IbVVsKCpbhxhSJtwYOUc8S1jnWb?sheet=n9xlLF"
+                users = f"<at id=" + str(cls.get_userid(log_type, crawler, mark)) + f">{mark_name}</at>"
+            else:
+                content = "今日所有AGC视频完成啦~~~"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/IbVVsKCpbhxhSJtwYOUc8S1jnWb?sheet=n9xlLF"
+                users = f"<at id=" + str(cls.get_userid(log_type, crawler, mark)) + f">{mark_name}</at>"
+
+            data = json.dumps({
+                "msg_type": "interactive",
+                "card": {
+                    "config": {
+                        "wide_screen_mode": True,
+                        "enable_forward": True
+                    },
+                    "elements": [{
+                        "tag": "div",
+                        "text": {
+                            "content": users + text,
+                            "tag": "lark_md"
+                        }
+                    }, {
+                        "actions": [{
+                            "tag": "button",
+                            "text": {
+                                "content": content,
+                                "tag": "lark_md"
+                            },
+                            "url": sheet_url,
+                            "type": "default",
+                            "value": {}
+                        }],
+                        "tag": "action"
+                    }],
+                    "header": {
+                        "title": {
+                            "content": "📣您有新的信息,请注意查收",
+                            "tag": "plain_text"
+                        }
+                    }
+                }
+            })
+            urllib3.disable_warnings()
+            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
+            Common.logger("feishu").info(f'触发机器人消息:{r.status_code}, {text}')
+        except Exception as e:
+            Common.logger("feishu").error(f"bot异常:{e}\n")
+
+
+if __name__ == "__main__":
+    # bot 共需 5 个参数:log_type、crawler、text、mark(name_phone_dict 中的 key)、mark_name(@ 时展示的名字)
+    Feishu.bot('recommend', '抖音', '测试: 抖音cookie失效,请及时更换', 'zhangyong', 'zhangyong')
+

+ 186 - 0
common/pq.py

@@ -0,0 +1,186 @@
+
+import os
+import random
+
+import sys
+import time
+import json
+
+import requests
+from urllib.parse import urlencode
+
+sys.path.append(os.getcwd())
+from common import Common, Feishu
+
+
+class PQ:
+    """
+    获取封面
+    """
+    @classmethod
+    def get_cover(cls, uid):
+        time.sleep(1)
+        url = "https://admin.piaoquantv.com/manager/video/multiCover/listV2"
+
+        payload = json.dumps({
+            "videoId": uid,
+            "range": "2h"
+        })
+        headers = {
+            'accept': 'application/json',
+            'accept-language': 'zh-CN,zh;q=0.9',
+            'cache-control': 'no-cache',
+            'content-type': 'application/json',
+            'cookie': 'SESSION=YjU3MzgwNTMtM2QyYi00YjljLWI3YWUtZTBjNWYwMGQzYWNl',
+            'origin': 'https://admin.piaoquantv.com',
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36'
+        }
+
+        response = requests.request("POST", url, headers=headers, data=payload)
+        data = response.json()
+        content = data["content"]
+        if len(content) == 1:
+            return content[0]["coverUrl"]
+        max_share_count = 0
+        selected_cover_url = ""
+        for item in content:
+            share_count = item.get("shareWeight")
+            if share_count is not None and share_count > max_share_count:
+                max_share_count = share_count
+                selected_cover_url = item["coverUrl"]
+            elif share_count == max_share_count and item["createUser"] == "用户":
+                selected_cover_url = item["coverUrl"]
+        return selected_cover_url
+
+    """
+    获取标题
+    """
+    @classmethod
+    def get_title(cls, uid):
+        url = "https://admin.piaoquantv.com/manager/video/multiTitleV2/listV2"
+
+        payload = json.dumps({
+            "videoId": uid,
+            "range": "4h"
+        })
+        headers = {
+            'accept': 'application/json',
+            'accept-language': 'zh-CN,zh;q=0.9',
+            'cache-control': 'no-cache',
+            'content-type': 'application/json',
+            'cookie': 'SESSION=YjU3MzgwNTMtM2QyYi00YjljLWI3YWUtZTBjNWYwMGQzYWNl',
+            'origin': 'https://admin.piaoquantv.com',
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36'
+        }
+        response = requests.request("POST", url, headers=headers, data=payload)
+        data = response.json()
+        content = data["content"]
+        if len(content) == 1:
+            return content[0]["title"]
+        max_share_count = 0
+        selected_title = ""
+        for item in content:
+            share_count = item.get("shareWeight")
+            if share_count is not None and share_count > max_share_count:
+                max_share_count = share_count
+                selected_title = item["title"]
+            elif share_count == max_share_count and item["createUser"] == "用户":
+                selected_title = item["title"]
+        return selected_title
+
+
+    """
+    新生成视频上传到对应账号下
+    """
+    @classmethod
+    def insert_piaoquantv(cls, oss_object_key, audio_title, pq_ids_list, cover, uid):
+        # 未传标题时从票圈后台取标题;多个标题以 / 分隔时随机取一个
+        if not audio_title:
+            title = cls.get_title(uid)
+        else:
+            new_titles = audio_title.split('/') if '/' in audio_title else [audio_title]
+            title = random.choice(new_titles)
+
+        # 未传封面时从票圈后台取封面,否则使用传入的封面
+        if not cover:
+            cover_url = cls.get_cover(uid)
+        else:
+            cover_url = cover
+        pq_id_list = random.choice(pq_ids_list)
+        url = "https://vlogapi.piaoquantv.com/longvideoapi/crawler/video/send"
+        headers = {
+            'User-Agent': 'PQSpeed/486 CFNetwork/1410.1 Darwin/22.6.0',
+            'cookie': 'JSESSIONID=4DEA2B5173BB9A9E82DB772C0ACDBC9F; JSESSIONID=D02C334150025222A0B824A98B539B78',
+            'referer': 'http://appspeed.piaoquantv.com',
+            'token': '524a8bc871dbb0f4d4717895083172ab37c02d2f',
+            'accept-language': 'zh-CN,zh-Hans;q=0.9',
+            'Content-Type': 'application/x-www-form-urlencoded'
+        }
+        payload = {
+            'coverImgPath': cover_url,
+            'deviceToken': '9ef064f2f7869b3fd67d6141f8a899175dddc91240971172f1f2a662ef891408',
+            'fileExtensions': 'MP4',
+            'loginUid': pq_id_list,
+            'networkType': 'Wi-Fi',
+            'platform': 'iOS',
+            'requestId': 'fb972cbd4f390afcfd3da1869cd7d001',
+            'sessionId': '362290597725ce1fa870d7be4f46dcc2',
+            'subSessionId': '362290597725ce1fa870d7be4f46dcc2',
+            'title': title,
+            'token': '524a8bc871dbb0f4d4717895083172ab37c02d2f',
+            'uid': pq_id_list,
+            'versionCode': '486',
+            'versionName': '3.4.12',
+            'videoFromScene': '1',
+            'videoPath': oss_object_key,
+            'viewStatus': '1'
+        }
+        encoded_payload = urlencode(payload)
+        response = requests.request("POST", url, headers=headers, data=encoded_payload)
+        data = response.json()
+        code = data["code"]
+        if code == 0:
+            new_video_id = data["data"]["id"]
+            return new_video_id, title
+        else:
+            return None, None
+
+    """
+    获取视频链接
+    """
+    @classmethod
+    def get_audio_url(cls, uid):
+        for i in range(3):
+            url = f"https://admin.piaoquantv.com/manager/video/detail/{uid}"
+            payload = {}
+            headers = {
+                'authority': 'admin.piaoquantv.com',
+                'accept': 'application/json, text/plain, */*',
+                'accept-language': 'zh-CN,zh;q=0.9',
+                'cache-control': 'no-cache',
+                'cookie': 'SESSION=YjU3MzgwNTMtM2QyYi00YjljLWI3YWUtZTBjNWYwMGQzYWNl',
+                'pragma': 'no-cache',
+                'referer': f'https://admin.piaoquantv.com/cms/post-detail/{uid}/detail',
+                'sec-ch-ua': '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
+                'sec-ch-ua-mobile': '?0',
+                'sec-ch-ua-platform': '"macOS"',
+                'sec-fetch-dest': 'empty',
+                'sec-fetch-mode': 'cors',
+                'sec-fetch-site': 'same-origin',
+                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
+            }
+
+            response = requests.request("GET", url, headers=headers, data=payload)
+            data = response.json()
+            code = data["code"]
+            if code != 0:
+                continue
+            audio_url = data["content"]["transedVideoPath"]
+            return audio_url
+        return ""
+
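
A usage sketch for the publish helper (all values below are placeholders; insert_piaoquantv returns (None, None) when the upload API answers with a non-zero code):

from common.pq import PQ

new_video_id, title = PQ.insert_piaoquantv(
    oss_object_key='agc_oss/agc_video/xxx',  # placeholder OSS key
    audio_title='标题一/标题二',               # multiple candidate titles separated by /
    pq_ids_list=['12345678'],                # placeholder piaoquan account ids
    cover='',                                # empty -> cover is fetched via get_cover
    uid='87654321',                          # placeholder source video id
)
print(new_video_id, title)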

+ 106 - 0
common/scheduling_db.py

@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+# @Time: 2023/12/26
+"""
+数据库连接及操作
+"""
+import pymysql
+from common.common import Common
+# from common import Common
+
+
+class MysqlHelper:
+    @classmethod
+    def connect_mysql(cls, env, action):
+        if env == 'hk':
+            if action == 'get_author_map':
+                # 创建一个 Connection 对象,代表了一个数据库连接
+                connection = pymysql.connect(
+                    host="rm-bp1159bu17li9hi94ro.mysql.rds.aliyuncs.com",  # 数据库IP地址,内网地址
+                    port=3306,  # 端口号
+                    user="crawler",  # mysql用户名
+                    passwd="crawler123456@",  # mysql用户登录密码
+                    db="piaoquan-crawler",  # 数据库名
+                    # 如果数据库里面的文本是utf8编码的,charset指定是utf8
+                    charset="utf8mb4")
+            else:
+                # 创建一个 Connection 对象,代表了一个数据库连接
+                connection = pymysql.connect(
+                    host="rm-j6cz4c6pt96000xi3.mysql.rds.aliyuncs.com",  # 数据库IP地址,内网地址
+                    # host="rm-j6cz4c6pt96000xi3lo.mysql.rds.aliyuncs.com",# 数据库IP地址,外网地址
+                    port=3306,  # 端口号
+                    user="crawler",  # mysql用户名
+                    passwd="crawler123456@",  # mysql用户登录密码
+                    db="piaoquan-crawler",  # 数据库名
+                    # 如果数据库里面的文本是utf8编码的,charset指定是utf8
+                    charset="utf8mb4")
+        elif env == 'prod':
+            # 创建一个 Connection 对象,代表了一个数据库连接
+            connection = pymysql.connect(
+                host="rm-bp1159bu17li9hi94.mysql.rds.aliyuncs.com",  # 数据库IP地址,内网地址
+                # host="rm-bp1159bu17li9hi94ro.mysql.rds.aliyuncs.com",# 数据库IP地址,外网地址
+                port=3306,  # 端口号
+                user="crawler",  # mysql用户名
+                passwd="crawler123456@",  # mysql用户登录密码
+                db="piaoquan-crawler",  # 数据库名
+                # 如果数据库里面的文本是utf8编码的,charset指定是utf8
+                charset="utf8mb4")
+        else:
+            # 创建一个 Connection 对象,代表了一个数据库连接
+            connection = pymysql.connect(
+                host="rm-bp1k5853td1r25g3n690.mysql.rds.aliyuncs.com",  # 数据库IP地址,内网地址
+                # host="rm-bp1k5853td1r25g3ndo.mysql.rds.aliyuncs.com",  # 数据库IP地址,外网地址
+                port=3306,  # 端口号
+                user="crawler",  # mysql用户名
+                passwd="crawler123456@",  # mysql用户登录密码
+                db="piaoquan-crawler",  # 数据库名
+                # 如果数据库里面的文本是utf8编码的,charset指定是utf8
+                charset="utf8mb4")
+
+        return connection
+
+    @classmethod
+    def get_values(cls, log_type, crawler, sql, env, action=''):
+        try:
+            # 连接数据库
+            connect = cls.connect_mysql(env, action)
+            # 返回一个 Cursor对象
+            mysql = connect.cursor(cursor=pymysql.cursors.DictCursor)
+
+            # 执行 sql 语句
+            mysql.execute(sql)
+
+            # fetchall方法返回的是一个元组,里面每个元素也是元组,代表一行记录
+            data = mysql.fetchall()
+
+            # 关闭数据库连接
+            connect.close()
+
+            # 返回查询结果,元组
+            return data
+        except Exception as e:
+            Common.logger(log_type).error(f"get_values异常:{e}\n")
+
+    @classmethod
+    def update_values(cls, log_type, crawler, sql, env, action=''):
+        # 连接数据库
+        connect = cls.connect_mysql(env, action)
+        # 返回一个 Cursor对象
+        mysql = connect.cursor()
+
+        try:
+            # 执行 sql 语句
+            res = mysql.execute(sql)
+            # 注意 一定要commit,否则添加数据不生效
+            connect.commit()
+            return res
+        except Exception as e:
+            Common.logger(log_type).error(f"update_values异常,进行回滚操作:{e}\n")
+            # 发生错误时回滚
+            connect.rollback()
+        finally:
+            # 关闭数据库连接
+            connect.close()
+
+
+if __name__ == "__main__":
+    pass
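
Unlike common/db.py, this variant opens the cursor with pymysql.cursors.DictCursor, so rows come back as dicts. A usage sketch (the SQL is illustrative):

from common.scheduling_db import MysqlHelper

rows = MysqlHelper.get_values('task', 'crawler', "select 1 as n;", "prod")
# each row is a dict, e.g. {'n': 1}, rather than a plain tuple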

+ 137 - 0
common/sql_help.py

@@ -0,0 +1,137 @@
+import os
+import random
+import sys
+from datetime import datetime, timedelta
+
+sys.path.append(os.getcwd())
+from common import MysqlHelper
+
+
+class sqlHelp():
+
+    @classmethod
+    def get_count_list(cls, name_list):
+        count_list = []
+        current_time = datetime.now()
+        formatted_time = current_time.strftime("%Y-%m-%d")
+        for name in name_list:
+
+            count = f"""SELECT COUNT(*) AS agc_video_deposit FROM ( SELECT audio, account_id FROM agc_video_deposit WHERE time = '{formatted_time}' AND mark = '{name["mark"]}' GROUP BY audio, account_id) AS subquery;"""
+            count = MysqlHelper.get_values(count, "prod")
+            if count is None:
+                count = 0
+            count = str(count).replace('(', '').replace(')', '').replace(',', '')
+            count_list.append(f"{name['mark_name']}生成条数为:{count}条 \n")
+        return count_list
+
+    """
+    获取未使用的视频链接
+    """
+    @classmethod
+    def get_url_list(cls, user_list, mark, limit_count):
+        for i in range(8):
+            user = str(random.choice(user_list))
+            user = user.replace('(', '').replace(')', '').replace(',', '')
+            current_time = datetime.now()
+            one_day_ago = current_time - timedelta(days=1)
+            formatted_current_time = current_time.strftime("%Y-%m-%d")
+            formatted_one_day_ago = one_day_ago.strftime("%Y-%m-%d")
+            url_list = f"""SELECT a.video_id, a.account_id, a.oss_object_key 
+                                                 FROM agc_video_url a 
+                                                 LEFT JOIN agc_video_deposit b 
+                                                 ON a.oss_object_key = b.oss_object_key 
+                                                 AND b.time >= '{formatted_one_day_ago}' 
+                                                 AND b.time <= '{formatted_current_time}' 
+                                                 WHERE b.video_id IS NULL 
+                                                 AND a.account_id = {user} 
+                                                 AND a.status = 1 
+                                                 AND a.mark = '{mark}' 
+                                                 LIMIT {limit_count};"""
+            url_list = MysqlHelper.get_values(url_list, "prod")
+            if url_list:
+                if len(url_list) >= 35:
+                    return url_list, user
+        return None, None
+
+    """
+    获取已入库的用户id
+    """
+    @classmethod
+    def get_user_id(cls, channel_type, mark):
+        account_id = f"""select account_id from agc_video_url where mark = '{mark}' and  oss_object_key LIKE '%{channel_type}%' group by account_id ;"""
+        account_id = MysqlHelper.get_values(account_id, "prod")
+        return account_id
+
+    """
+    获取已入库的用户id
+    """
+    @classmethod
+    def get_link_count(cls, mark, platform):
+        current_time = datetime.now()
+        formatted_time = current_time.strftime("%Y-%m-%d")
+        count = f"""SELECT COUNT(*) AS total_count FROM ( SELECT audio, account_id FROM agc_video_deposit WHERE time = '{formatted_time}' AND platform = '{platform}' and mark = '{mark}' GROUP BY audio, account_id) AS subquery;"""
+        count = MysqlHelper.get_values(count, "prod")
+        if count is None:
+            count = 0
+        count = str(count).replace('(', '').replace(')', '').replace(',', '')
+        return int(count)
+
+    """
+    获取跟随脚本已入库数量
+    """
+    @classmethod
+    def get_link_gs_count(cls, mark):
+        current_time = datetime.now()
+        formatted_time = current_time.strftime("%Y-%m-%d")
+        count = f"""SELECT COUNT(*) AS total_count FROM ( SELECT audio, account_id FROM agc_video_deposit WHERE time = '{formatted_time}' and mark LIKE '%{mark}%' GROUP BY audio, account_id) AS subquery;"""
+        count = MysqlHelper.get_values(count, "prod")
+        if count is None:
+            count = 0
+        count = str(count).replace('(', '').replace(')', '').replace(',', '')
+        return int(count)
+
+    """
+    获取跟随脚本站外已入库数量
+    """
+    @classmethod
+    def get_link_zw_count(cls, mark, platform):
+        current_time = datetime.now()
+        formatted_time = current_time.strftime("%Y-%m-%d")
+        count = f"""SELECT COUNT(*) AS total_count FROM ( SELECT audio, account_id FROM agc_video_deposit WHERE time = '{formatted_time}' and mark = '{mark}' GROUP BY audio, account_id) AS subquery;"""
+        count = MysqlHelper.get_values(count, "prod")
+        if count is None:
+            count = 0
+        count = str(count).replace('(', '').replace(')', '').replace(',', '')
+        return int(count)
+
+    """
+    获取跟随脚本站内已入库数量
+    """
+    @classmethod
+    def get_link_zn_count(cls, mark, platform):
+        current_time = datetime.now()
+        formatted_time = current_time.strftime("%Y-%m-%d")
+        count = f"""SELECT COUNT(*) AS total_count FROM ( SELECT audio, account_id FROM agc_video_deposit WHERE time = '{formatted_time}' AND platform = '{platform}' and mark = '{mark}' GROUP BY audio, account_id) AS subquery;"""
+        count = MysqlHelper.get_values(count, "prod")
+        if count is None:
+            count = 0
+        count = str(count).replace('(', '').replace(')', '').replace(',', '')
+        return int(count)
+
+    """
+    已使用视频链接存表
+    """
+    @classmethod
+    def insert_videoAudio(cls, video_files, uid, platform, mark):
+        current_time = datetime.now()
+        formatted_time = current_time.strftime("%Y-%m-%d")
+        for j in video_files:
+            insert_sql = f"""INSERT INTO agc_video_deposit (audio, video_id, account_id, oss_object_key, time, platform, mark) values ('{uid}', '{j[0]}', '{j[1]}', '{j[2]}', '{formatted_time}', '{platform}', '{mark}')"""
+            MysqlHelper.update_values(
+                sql=insert_sql,
+                env="prod",
+                machine="",
+            )
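
A usage sketch for the counting helpers (the mark / mark_name / platform values are illustrative; both queries count today's rows in agc_video_deposit grouped by audio and account_id):

from common.sql_help import sqlHelp

counts = sqlHelp.get_count_list([{"mark": "dy-agc", "mark_name": "抖音AGC"}])
today_total = sqlHelp.get_link_count("dy-agc", "douyin")
print(counts, today_total)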

+ 64 - 0
common/srt.py

@@ -0,0 +1,64 @@
+import re
+
+import requests
+import json
+
+class SRT:
+
+    # 去掉字幕文本行末尾的标点符号并添加换行符
+    @classmethod
+    def process_srt(cls, srt):
+        lines = srt.strip().split('\n')
+        processed_lines = []
+
+        for line in lines:
+            # 匹配字幕编号行
+            if re.match(r'^\d+$', line):
+                processed_lines.append(line)
+            # 匹配时间戳行
+            elif re.match(r'^\d{2}:\d{2}:\d{2}\.\d{1,3}-->\d{2}:\d{2}:\d{2}\.\d{1,3}$', line):
+                processed_lines.append(line.replace('-->', ' --> '))
+            # 处理字幕文本行
+            else:
+                # 去掉末尾的标点符号
+                line = re.sub(r'[,。!?;、]$', '', line)
+                # 添加换行符
+                processed_lines.append(line + '\n')
+
+        return '\n'.join(processed_lines)
+
+    @classmethod
+    def getSrt(cls, mp3_id):
+        url = "http://api-internal.piaoquantv.com/produce-center/srt/get/content"
+
+        payload = json.dumps({
+            "params": {
+                "resourceChannel": "inner",
+                "videoId": mp3_id
+            }
+        })
+        headers = {
+            'User-Agent': 'Apifox/1.0.0 (https://apifox.com)',
+            'Content-Type': 'application/json',
+            'Accept': '*/*',
+            'Host': 'api-internal.piaoquantv.com',
+            'Connection': 'keep-alive'
+        }
+
+        response = requests.request("POST", url, headers=headers, data=payload)
+        data_list = response.json()
+        code = data_list["code"]
+        if code == 0:
+            srt = data_list["data"]
+            if srt:
+                srt = srt.replace("/n", "\n")
+                # srt = re.sub(r'(\w+)([,。!?])', r'\n\n', srt)
+                new_srt = cls.process_srt(srt)
+                return new_srt
+            else:
+                return None
+        else:
+            return None
+
+
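
A usage sketch for fetching and normalizing subtitles (the video id is a placeholder; getSrt returns None when the SRT service answers with a non-zero code or an empty body):

from common.srt import SRT

srt_text = SRT.getSrt(12345678)  # placeholder videoId
if srt_text:
    print(srt_text[:200])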