zhangyong 6 months ago
parent
commit
f6092598d1

+ 2 - 0
common/__init__.py

@@ -0,0 +1,2 @@
+from .common_log import Common
+from .redis import SyncRedisHelper

+ 6 - 1
common/aliyun_log.py

@@ -25,9 +25,12 @@ class AliyunLogger:
 
     # 写入阿里云日志
     @staticmethod
-    def logging(video_id: str,
+    def logging(
+            video_id: str,
             title: str,
             video_url: str,
+            version: str,
+            type: str,
             data: Optional[str] = None):
         """
         写入阿里云日志
@@ -45,6 +48,8 @@ class AliyunLogger:
                 ("video_id", video_id),
                 ("video_title", title),
                 ("video_url", video_url),
+                ("version", version),
+                ("type", type),
                 ("data", data),
             ]
             # 创建 LogClient 实例

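A minimal call sketch for the widened logging signature; the argument values below are illustrative, not taken from the commit:

    from common.aliyun_log import AliyunLogger

    AliyunLogger.logging(
        video_id="12345",                       # illustrative id
        title="测试视频",
        video_url="http://example.com/v.mp4",
        version="google_ai",                    # assumed label; actual values depend on the caller
        type="top",                             # assumed value; real values come from the ODPS "type" column
        data='{"tags": []}',
    )
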
+ 52 - 0
common/common_log.py

@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# @Time: 2023/12/26
+"""
+公共方法:使用 loguru 按日期生成日志文件
+"""
+import os
+import sys
+
+sys.path.append(os.getcwd())
+from datetime import date, timedelta
+from datetime import datetime
+from loguru import logger
+
+proxies = {"http": None, "https": None}
+
+
+class Common:
+    # 统一获取当前时间 <class 'datetime.datetime'>  2022-04-14 20:13:51.244472
+    now = datetime.now()
+    # 昨天 <class 'str'>  2022-04-13
+    yesterday = (date.today() + timedelta(days=-1)).strftime("%Y-%m-%d")
+    # 今天 <class 'datetime.date'>  2022-04-14
+    today = date.today()
+    # 明天 <class 'str'>  2022-04-15
+    tomorrow = (date.today() + timedelta(days=1)).strftime("%Y-%m-%d")
+
+    # 使用 logger 模块生成日志
+    @staticmethod
+    def logger(log_type):
+        try:
+            """
+            使用 logger 模块生成日志
+            """
+            # 日志路径
+            log_dir = f"./logs/{log_type}/"
+            log_path = os.getcwd() + os.sep + log_dir
+            if not os.path.isdir(log_path):
+                os.makedirs(log_path)
+            # 日志文件名
+            log_name = f"{log_type}-{datetime.now().date().strftime('%Y-%m-%d')}.log"
+
+            # 日志不打印到控制台
+            logger.remove(handler_id=None)
+            # 初始化日志
+            logger.add(os.path.join(log_dir, log_name), level="INFO", rotation="00:00", retention="10 days", enqueue=True)
+
+            return logger
+        except Exception as e:
+            logger.error(f"日志初始化异常:{e}")
+            return None
+
+

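A short usage sketch for the new logger helper; the log-type name is illustrative:

    from common import Common            # re-exported via common/__init__.py

    log = Common.logger("video_ai")      # writes ./logs/video_ai/video_ai-YYYY-MM-DD.log, rotated at midnight
    log.info("任务开始")
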
+ 22 - 0
common/feishu_data.py

@@ -0,0 +1,22 @@
+import time
+
+from common.feishu_utils import Feishu
+
+
+class Material():
+
+    @classmethod
+    def feishu_list(cls):
+        for i in range(3):
+            data = Feishu.get_values_batch( "U3jrs64cxhJ40Dt4VmXcS0TKnkf", "cb7QdW" )
+            for row in data[0:]:
+                mark = row[0]
+                prompt = row[1]
+                if mark and prompt:
+                    return mark, prompt
+                else:
+                    time.sleep(5)
+
+if __name__ == '__main__':
+    mark, prompt = Material.feishu_list()
+    print(mark, prompt)

+ 411 - 0
common/feishu_utils.py

@@ -0,0 +1,411 @@
+# -*- coding: utf-8 -*-
+"""
+飞书表配置: token 鉴权 / 增删改查 / 机器人报警
+"""
+import json
+import os
+import sys
+import requests
+import urllib3
+
+sys.path.append(os.getcwd())
+from common import Common
+
+proxies = {"http": None, "https": None}
+
+
+class Feishu:
+    """
+    编辑飞书云文档
+    """
+    succinct_url = "https://w42nne6hzg.feishu.cn/sheets/"
+    # 飞书路径token
+    @classmethod
+    def spreadsheettoken(cls, crawler):
+        if crawler == "summary":
+            return "KsoMsyP2ghleM9tzBfmcEEXBnXg"
+        else:
+            return crawler
+
+
+
+    # 获取飞书api token
+    @classmethod
+    def get_token(cls):
+        """
+        获取飞书api token
+        :return:
+        """
+        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
+        post_data = {"app_id": "cli_a13ad2afa438d00b",  # 这里账号密码是发布应用的后台账号及密码
+                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
+
+        try:
+            urllib3.disable_warnings()
+            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
+            tenant_access_token = response.json()["tenant_access_token"]
+            return tenant_access_token
+        except Exception as e:
+            Common.logger("feishu").error("获取飞书 api token 异常:{}", e)
+
+    # 获取表格元数据
+    @classmethod
+    def get_metainfo(cls, crawler):
+        """
+        获取表格元数据
+        :return:
+        """
+        try:
+            get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                               + cls.spreadsheettoken(crawler) + "/metainfo"
+
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            params = {
+                "extFields": "protectedRange",  # 额外返回的字段,extFields=protectedRange时返回保护行列信息
+                "user_id_type": "open_id"  # 返回的用户id类型,可选open_id,union_id
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
+            response = json.loads(r.content.decode("utf8"))
+            return response
+        except Exception as e:
+            Common.logger("feishu").error("获取表格元数据异常:{}", e)
+
+    # 读取工作表中所有数据
+    @classmethod
+    def get_values_batch(cls, crawler, sheetid):
+        """
+        读取工作表中所有数据
+        :param crawler: 哪个爬虫
+        :param sheetid: 哪张表
+        :return: 所有数据
+        """
+        try:
+            get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                                   + cls.spreadsheettoken(crawler) + "/values_batch_get"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            params = {
+                "ranges": sheetid,
+                "valueRenderOption": "ToString",
+                "dateTimeRenderOption": "",
+                "user_id_type": "open_id"
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
+            response = json.loads(r.content.decode("utf8"))
+            values = response["data"]["valueRanges"][0]["values"]
+            return values
+        except Exception as e:
+            Common.logger("feishu").error("读取工作表所有数据异常:{}", e)
+
+    # 工作表,插入行或列
+    @classmethod
+    def insert_columns(cls, crawler, sheetid, majordimension, startindex, endindex):
+        """
+        工作表插入行或列
+        :param log_type: 日志路径
+        :param crawler: 哪个爬虫的云文档
+        :param sheetid:哪张工作表
+        :param majordimension:行或者列, ROWS、COLUMNS
+        :param startindex:开始位置
+        :param endindex:结束位置
+        """
+        try:
+            insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                                 + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            body = {
+                "dimension": {
+                    "sheetId": sheetid,
+                    "majorDimension": majordimension,  # 默认 ROWS ,可选 ROWS、COLUMNS
+                    "startIndex": startindex,  # 开始的位置
+                    "endIndex": endindex  # 结束的位置
+                },
+                "inheritStyle": "AFTER"  # BEFORE 或 AFTER,不填为不继承 style
+            }
+
+            urllib3.disable_warnings()
+            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger("feishu").info("插入行或列:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger("feishu").error("插入行或列异常:{}", e)
+
+    # 写入数据
+    @classmethod
+    def update_values(cls, crawler, sheetid, ranges, values):
+        """
+        写入数据
+        :param log_type: 日志路径
+        :param crawler: 哪个爬虫的云文档
+        :param sheetid:哪张工作表
+        :param ranges:单元格范围
+        :param values:写入的具体数据,list
+        """
+        try:
+            update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                                + cls.spreadsheettoken(crawler) + "/values_batch_update"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            body = {
+                "valueRanges": [
+                    {
+                        "range": sheetid + "!" + ranges,
+                        "values": values
+                    },
+                ],
+            }
+            urllib3.disable_warnings()
+            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger("feishu").info("写入数据:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger("feishu").error("写入数据异常:{}", e)
+
+    # 合并单元格
+    @classmethod
+    def merge_cells(cls, crawler, sheetid, ranges):
+        """
+        合并单元格
+        :param log_type: 日志路径
+        :param crawler: 哪个爬虫
+        :param sheetid:哪张工作表
+        :param ranges:需要合并的单元格范围
+        """
+        try:
+            merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                              + cls.spreadsheettoken(crawler) + "/merge_cells"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+
+            body = {
+                "range": sheetid + "!" + ranges,
+                "mergeType": "MERGE_ROWS"
+            }
+            urllib3.disable_warnings()
+            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger("feishu").info("合并单元格:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger("feishu").error("合并单元格异常:{}", e)
+
+    # 读取单元格数据
+    @classmethod
+    def get_range_value(cls, crawler, sheetid, cell):
+        """
+        读取单元格内容
+        :param log_type: 日志路径
+        :param crawler: 哪个爬虫
+        :param sheetid: 哪张工作表
+        :param cell: 哪个单元格
+        :return: 单元格内容
+        """
+        try:
+            get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                                  + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            params = {
+                "valueRenderOption": "FormattedValue",
+
+                # dateTimeRenderOption=FormattedString 计算并对时间日期按照其格式进行格式化,但不会对数字进行格式化,返回格式化后的字符串。
+                "dateTimeRenderOption": "",
+
+                # 返回的用户id类型,可选open_id,union_id
+                "user_id_type": "open_id"
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
+            # print(r.text)
+            return r.json()["data"]["valueRange"]["values"][0]
+        except Exception as e:
+            Common.logger("feishu").error("读取单元格数据异常:{}", e)
+    # 获取表内容
+    @classmethod
+    def get_sheet_content(cls, crawler, sheet_id):
+        try:
+            sheet = Feishu.get_values_batch(crawler, sheet_id)
+            content_list = []
+            for x in sheet:
+                for y in x:
+                    if y is None:
+                        pass
+                    else:
+                        content_list.append(y)
+            return content_list
+        except Exception as e:
+            Common.logger("feishu").error(f'get_sheet_content:{e}\n')
+
+    # 删除行或列,可选 ROWS、COLUMNS
+    @classmethod
+    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
+        """
+        删除行或列
+        :param log_type: 日志路径
+        :param crawler: 哪个爬虫
+        :param sheetid:工作表
+        :param major_dimension:默认 ROWS ,可选 ROWS、COLUMNS
+        :param startindex:开始的位置
+        :param endindex:结束的位置
+        :return:
+        """
+        try:
+            dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                                  + cls.spreadsheettoken(crawler) + "/dimension_range"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            body = {
+                "dimension": {
+                    "sheetId": sheetid,
+                    "majorDimension": major_dimension,
+                    "startIndex": startindex,
+                    "endIndex": endindex
+                }
+            }
+            urllib3.disable_warnings()
+            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger("feishu").info("删除视频数据:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger("feishu").error("删除视频数据异常:{}", e)
+
+    # 获取用户 ID
+    @classmethod
+    def get_userid(cls, username):
+        try:
+            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            name_phone_dict = {
+                "xinxin": "15546206651",
+                "muxinyi": "13699208058",
+                "wangxueke": "13513479926",
+                "yuzhuoyi": "18624010360",
+                "luojunhui": "18801281360",
+                "fanjun": "15200827642",
+                "zhangyong": "17600025055",
+                'liukunyu': "18810931977"
+            }
+            username = name_phone_dict.get(username)
+
+            data = {"mobiles": [username]}
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
+            open_id = r.json()["data"]["mobile_users"][username][0]["open_id"]
+
+            return open_id
+        except Exception as e:
+            Common.logger("feishu").error(f"get_userid异常:{e}\n")
+
+    # 飞书机器人
+    @classmethod
+    def bot(cls, log_type, crawler, text, mark_name):
+        try:
+
+            headers = {'Content-Type': 'application/json'}
+            if crawler == "机器自动改造消息通知":
+                url = "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=bc154d"
+                users = f"<at id=" + str(cls.get_userid(log_type)) + f">{mark_name}</at>"
+            elif crawler == "快手关键词搜索":
+                url = "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=U1gySe"
+                users = "".join([f'<at id="{cls.get_userid(type)}">{name}</at>' for type, name in
+                                 zip(log_type, mark_name)])
+                # users = f"<at id=" + str(cls.get_userid(log_type)) + f">{mark_name}</at>"
+            else:
+                url = "https://open.feishu.cn/open-apis/bot/v2/hook/7928f182-08c1-4c4d-b2f7-82e10c93ca80"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=bc154d"
+                users = f"<at id=" + str(cls.get_userid(log_type)) + f">{mark_name}</at>"
+            data = json.dumps({
+                "msg_type": "interactive",
+                "card": {
+                    "config": {
+                        "wide_screen_mode": True,
+                        "enable_forward": True
+                    },
+                    "elements": [{
+                        "tag": "div",
+                        "text": {
+                            "content": users + text,
+                            "tag": "lark_md"
+                        }
+                    }, {
+                        "actions": [{
+                            "tag": "button",
+                            "text": {
+                                "content": "详情,点击~~~~~",
+                                "tag": "lark_md"
+                            },
+                            "url": sheet_url,
+                            "type": "default",
+                            "value": {}
+                        }],
+                        "tag": "action"
+                    }],
+                    "header": {
+                        "title": {
+                            "content": "📣消息提醒",
+                            "tag": "plain_text"
+                        }
+                    }
+                }
+            })
+            urllib3.disable_warnings()
+            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
+            Common.logger("feishu").info(f'触发机器人消息:{r.status_code}, {text}')
+        except Exception as e:
+            Common.logger("feishu").error(f"bot异常:{e}\n")
+
+    # 飞书机器人-改造计划完成通知
+    @classmethod
+    def finish_bot(cls, text, url, content):
+        try:
+            headers = {'Content-Type': 'application/json'}
+            data = json.dumps({
+                "msg_type": "interactive",
+                "card": {
+                    "config": {
+                        "wide_screen_mode": True,
+                        "enable_forward": True
+                    },
+                    "elements": [{
+                        "tag": "div",
+                        "text": {
+                            "content": text,
+                            "tag": "lark_md"
+                        }
+                    }],
+                    "header": {
+                        "title": {
+                            "content": content,
+                            "tag": "plain_text"
+                        }
+                    }
+                }
+            })
+            urllib3.disable_warnings()
+            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
+            Common.logger("feishu").info(f'触发机器人消息:{r.status_code}, {text}')
+        except Exception as e:
+            Common.logger("feishu").error(f"bot异常:{e}\n")
+
+
+if __name__ == "__main__":
+    Feishu.bot('xinxin', '抖音', '测试: 抖音cookie失效,请及时更换', 'xinxin')
+

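A hedged sketch of the new finish_bot helper; the webhook URL and card texts are placeholders, not values from the commit:

    from common.feishu_utils import Feishu

    Feishu.finish_bot(
        text="今日改造任务已完成",                                          # card body (placeholder)
        url="https://open.feishu.cn/open-apis/bot/v2/hook/<webhook-id>",   # target group webhook (placeholder)
        content="改造计划完成通知",                                         # card title (placeholder)
    )
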
+ 7 - 8
common/odps_data.py

@@ -11,7 +11,7 @@ ODPS_CONFIG = {
 }
 class OdpsDataCount:
     @classmethod
-    def get_data_count(cls, project, table):
+    def get_data_count(cls, project, table, dt):
         odps = ODPS(
             access_id=ODPS_CONFIG['ACCESSID'],
             secret_access_key=ODPS_CONFIG['ACCESSKEY'],
@@ -20,11 +20,10 @@ class OdpsDataCount:
         )
         data_values = []
         try:
-            dt = "2024101015"
-            sql = f'SELECT videoid,title,video_path FROM {project}.{table} WHERE dt = "{dt}" '
+            sql = f'SELECT videoid,title,video_path,type FROM {project}.{table} WHERE dt = "{dt}" '
             with odps.execute_sql(sql).open_reader() as reader:
                 for row in reader:
-                    data_values.append(json.dumps( {"video_id": row[0], "title": row[1], "video_path": row[2]}, ensure_ascii=False ))
+                    data_values.append(json.dumps( {"video_id": row[0], "title": row[1], "video_path": row[2], "type": row[3]}, ensure_ascii=False ))
 
         except Exception as e:
             print(f"An error occurred: {e}")
@@ -32,11 +31,11 @@ class OdpsDataCount:
         return data_values
 
     @classmethod
-    def main(cls):
-        dt = datetime.datetime.now().strftime('%Y%m%d%H')
+    def main(cls, table, dt):
+        # dt = datetime.datetime.now().strftime('%Y%m%d%H')
         project = 'loghubods'
-        table = 'video_need_google_ai_studio'
-        data_count = cls.get_data_count(project=project, table=table)
+        # table = 'content_ai_tag_return_top'
+        data_count = cls.get_data_count(project=project, table=table, dt= dt)
         print(len(data_count))
         return data_count
 

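With the hard-coded partition removed, callers now pass the table and dt partition explicitly; a sketch using values from the job script below:

    from common.odps_data import OdpsDataCount

    # dt uses %Y%m%d for the daily "top" table and %Y%m%d%H for the hourly tables
    rows = OdpsDataCount.main(table='content_ai_tag_return_top', dt='20241014')
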
+ 7 - 9
common/redis.py

@@ -35,26 +35,24 @@ class SyncRedisHelper:
         if self._pool:
             self._pool.disconnect(inuse_connections=True)
 
-def install_video_data():
+def install_video_data(dt, redis_task, table_name):
     """写入redis需要打标签的视频"""
-    data = OdpsDataCount.main()
-
+    data = OdpsDataCount.main(table_name, dt)
     if not data:
         return
-
-    task = f"task:video_ai"
+    # task = f"task:video_ai"
     helper = SyncRedisHelper()
     client = helper.get_client()
-    client.rpush(task, *data)
+    client.rpush(redis_task, *data)
 
 
 
-def get_video_data():
+def get_video_data(redis_task):
     """获取一条需要打标签的视频"""
-    task = f"task:video_ai"
+    # task = redis_task
     helper = SyncRedisHelper()
     client = helper.get_client()
-    ret = client.lpop(task)
+    ret = client.lpop(redis_task)
     return ret
 
 def in_video_data(ret):

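A sketch of the parameterised queue helpers; queue name, table, and dt mirror the bot_video_ai_top job below:

    from common.redis import install_video_data, get_video_data

    install_video_data(dt='20241014',
                       redis_task='task:video_ai_top',
                       table_name='content_ai_tag_return_top')
    raw = get_video_data('task:video_ai_top')   # bytes (JSON string) or None when the queue is empty
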
+ 54 - 6
google_ai/generativeai_video.py

Changes in this file are not shown because the diff is too large.

+ 51 - 0
job_redis_data.py

@@ -0,0 +1,51 @@
+import datetime
+
+import schedule
+
+from common.redis import install_video_data
+
+def bot_video_ai_top():
+    """头部"""
+    dt = datetime.datetime.now().strftime('%Y%m%d')
+    dt = '20241014'
+    redis_task = 'task:video_ai_top'
+    table_name = 'content_ai_tag_return_top'
+    install_video_data(dt, redis_task, table_name)
+
+
+
+def bot_video_ai_recommend():
+    """新推荐"""
+    dt = datetime.datetime.now().strftime('%Y%m%d%H')
+    dt = '2024101514'
+    redis_task = 'task:video_ai_recommend'
+    table_name = 'content_ai_tag_recommend'
+    install_video_data(dt, redis_task, table_name)
+
+
+def bot_video_ai_complex_mode():
+    """复推"""
+    dt = datetime.datetime.now().strftime('%Y%m%d%H')
+    dt = '2024101514'
+    redis_task = 'task:video_ai_complex_mode'
+    table_name = 'content_ai_tag_reflowpool'
+    install_video_data(dt, redis_task, table_name)
+
+
+# def schedule_tasks():
+#     # 每小时第1分钟执行
+#     schedule.every().hour.at(":01").do(video_ai_complex_mode)
+#     schedule.every().hour.at(":01").do(bot_video_ai_recommend)
+#
+#     # 每天 00:10 执行
+#     schedule.every().day.at("00:10").do(bot_video_ai_top)
+
+
+if __name__ == "__main__":
+    # schedule_tasks()  # 调用任务调度函数
+    # while True:
+    #     schedule.run_pending()
+    #     time.sleep( 1 )  # 每秒钟检查一次
+    bot_video_ai_top()
+    bot_video_ai_recommend()
+    bot_video_ai_complex_mode()

+ 28 - 14
job_video_processing.py

@@ -1,19 +1,33 @@
 import time
-
+from concurrent.futures import ThreadPoolExecutor, wait
+from common.redis import install_video_data
 from video_processing.video_processing import VideoProcessing
 
 
-def video_task_start():
-    while True:
-        try:
-            print("开始执行任务")
-            video_processor = VideoProcessing()
-            video_processor.get_video()
-            print("执行完成")
-            time.sleep(5)
-        except Exception as e:
-            print("处理任务时出现异常:", e)
-            time.sleep(5)
-            continue
+max_workers = 10
+
+def video_ai_task_start():
+    with ThreadPoolExecutor( max_workers=max_workers) as executor:
+        while True:
+            try:
+                redis_task_list = ['task:video_ai_top', 'task:video_ai_recommend', 'task:video_ai_complex_mode']
+                # 提交所有任务并等待完成
+                futures = [executor.submit( process_video_ai, redis_task ) for redis_task in redis_task_list]
+                wait( futures )  # 等待所有任务完成
+            except Exception as e:
+                print(f"异常信息{e}")
+                time.sleep(3)
+                continue
+
+def process_video_ai(redis_task):
+    try:
+        print("开始执行任务")
+        video_processor = VideoProcessing()
+        video_processor.get_video(redis_task)
+        print("执行完成")
+        time.sleep(5)
+    except Exception as e:
+        print("处理任务时出现异常:", e)
+        time.sleep(5)
 if __name__ == '__main__':
-    video_task_start()
+    video_ai_task_start()

+ 8 - 11
video_processing/video_processing.py

@@ -18,21 +18,21 @@ class VideoProcessing:
             'Content-Type': 'application/json'
         }
         try:
-            response = requests.request( "POST", url, headers=headers, data=payload )
+            response = requests.request( "POST", url, headers=headers, data=payload )
+            # requests returns a single Response object; "mark" is assumed to be a field in the returned JSON
+            mark = response.json().get('mark', '')
             response = response.json()
             result = response['result']
             cleaned_string = result.replace( "```json", '' ).replace( "```", '' ).strip()
-            return cleaned_string
+            return cleaned_string, mark
         except Exception as e:
             print(f"视频请求异常:{e}")
             return None
 
 
-    def get_video(self):
-        video_data = get_video_data()
+    def get_video(self, redis_task):
+        video_data = get_video_data(redis_task)
         if not video_data:
             print("没有获取到视频内容")
-            time.sleep(1008611)
+            time.sleep(120)
             return
         # 解码为字符串
         data_str = video_data.decode( 'utf-8' )
@@ -42,13 +42,10 @@ class VideoProcessing:
         video_id = data_json['video_id']
         title = data_json['title']
         video_path = data_json['video_path']
+        type = data_json['type']
         print(video_path)
-        data = self.get_ai_data(video_path)
-        # if not data:
-        #     in_video_data(video_data)
-        #     print( "写入失败,需重新分析" )
-        #     return
-        AliyunLogger.logging(str(video_id), title, video_path, data)
+        data, mark = self.get_ai_data(video_path)
+        AliyunLogger.logging(str(video_id), title, video_path, mark, type, data)
         print("写入日志成功")
 
 

Some files were not shown because too many files changed in this diff.