zhangyong, 6 months ago
Parent
Current commit
1d038768cc

+ 3 - 2
analyze_video.py

@@ -48,13 +48,14 @@ async def release_api_key(api_key):
 async def process_video(request: VideoRequest):
     """处理视频请求"""
     video_path = request.video_path
-
+    prompt = request.prompt
+    mark = request.mark
     # Get an available API key
     # api_key = await get_available_api_key()
     api_key = "AIzaSyCor0q5w37Dy6fGxloLlCT7KqyEFU3PWP8"
     try:
         print("来一个请求,使用 API key:", api_key)
-        result, mark = await main(video_path, api_key)
+        result, mark = await main(video_path, api_key, prompt, mark)
         return {
             "code": 0,
             "message": "视频处理成功",

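The endpoint now reads two extra fields off the request body. A minimal sketch of the request model this change implies, assuming a FastAPI service with a Pydantic `VideoRequest`; the diff only shows the fields being read, so the types and Optional defaults are assumptions:

```python
# Sketch of the request model implied by request.prompt / request.mark above.
# Field names come from the diff; types and defaults are assumptions.
from typing import Optional
from pydantic import BaseModel

class VideoRequest(BaseModel):
    video_path: str                # path of the video to analyze
    prompt: Optional[str] = None   # forwarded to main() as the analysis prompt
    mark: Optional[str] = None     # tag forwarded to main() and returned with the result
```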
+ 2 - 0
common/aliyun_log.py

@@ -31,6 +31,7 @@ class AliyunLogger:
             video_url: str,
             version: str,
             type: str,
+            partition: str,
             data: Optional[str] = None):
         """
         Write to Aliyun Log
@@ -50,6 +51,7 @@ class AliyunLogger:
                 ("video_url", video_url),
                 ("version", version),
                 ("type", type),
+                ("partition", partition),
                 ("data", data),
             ]
             # Create a LogClient instance

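For reference, a sketch of how a contents list including the new `partition` field is typically shipped with the aliyun-log-python-sdk; the endpoint, credentials, project, and logstore below are placeholders, not values from this repo:

```python
from aliyun.log import LogClient, LogItem, PutLogsRequest

# Key/value pairs mirroring the contents list built in AliyunLogger.logging;
# all values here are illustrative.
contents = [
    ("video_id", "123"),
    ("title", "demo"),
    ("video_url", "oss://bucket/video.mp4"),
    ("version", "1"),
    ("type", "top"),
    ("partition", "20241015"),  # newly added: the ODPS dt partition of the record
    ("data", "{}"),
]
item = LogItem()
item.set_contents(contents)
client = LogClient("<endpoint>", "<access_key_id>", "<access_key_secret>")
client.put_logs(PutLogsRequest("<project>", "<logstore>", "topic", "", [item]))
```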
+ 1 - 1
common/odps_data.py

@@ -23,7 +23,7 @@ class OdpsDataCount:
             sql = f'SELECT videoid,title,video_path,type FROM {project}.{table} WHERE dt = "{dt}" '
             with odps.execute_sql(sql).open_reader() as reader:
                 for row in reader:
-                    data_values.append(json.dumps( {"video_id": row[0], "title": row[1], "video_path": row[2], "type": row[3]}, ensure_ascii=False ))
+                    data_values.append(json.dumps( {"video_id": row[0], "title": row[1], "video_path": row[2], "type": row[3], "partition": str(dt)}, ensure_ascii=False ))
 
         except Exception as e:
             print(f"An error occurred: {e}")

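With this change each queued record carries the ODPS `dt` partition it was read from, which the consumer later passes through to logging. A sketch of the resulting payload with illustrative values:

```python
import json

dt = "20241015"  # the ODPS dt partition used in the query above
record = json.dumps(
    {
        "video_id": "123",
        "title": "示例标题",
        "video_path": "oss://bucket/video.mp4",
        "type": "top",
        "partition": str(dt),  # new field, consumed in video_processing.py
    },
    ensure_ascii=False,  # keeps non-ASCII titles readable instead of \uXXXX escapes
)
```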
File diff suppressed because it is too large
+ 2 - 5
google_ai/generativeai_video.py


+ 3 - 18
job_redis_data.py

@@ -6,11 +6,10 @@ import schedule
 from common.redis import install_video_data
 
 def bot_video_ai_top():
-    """头部"""
+    """当日头部"""
     dt = datetime.datetime.now().strftime('%Y%m%d')
     print(f"开始执行头部{dt}")
-    # dt = '20241014'
-    redis_task = 'task:video_ai'
+    redis_task = 'task:video_ai_top'
     table_name = 'content_ai_tag_return_top'
     install_video_data(dt, redis_task, table_name)
 
@@ -20,28 +19,14 @@ def bot_video_ai_recommend():
     """新推荐"""
     dt = datetime.datetime.now().strftime('%Y%m%d%H')
     print(f"开始执行新推荐{dt}")
-
-    # dt = '2024101514'
-    redis_task = 'task:video_ai'
+    redis_task = 'task:video_ai_recommend'
     table_name = 'content_ai_tag_recommend'
     install_video_data(dt, redis_task, table_name)
 
 
-def bot_video_ai_complex_mode():
-    """复推"""
-    dt = datetime.datetime.now().strftime('%Y%m%d%H')
-    print(f"开始执行复推{dt}")
-    # dt = '2024101514'
-    redis_task = 'task:video_ai'
-    table_name = 'content_ai_tag_reflowpool'
-    install_video_data(dt, redis_task, table_name)
-
 
 def schedule_tasks():
-    schedule.every().hour.at(":20").do(bot_video_ai_complex_mode)
     schedule.every().hour.at(":22").do(bot_video_ai_recommend)
-
-    # run daily at 00:10
     schedule.every().day.at("01:25").do(bot_video_ai_top)
 
 

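`schedule_tasks()` only registers the two remaining jobs; a runner loop like the sketch below, which is not shown in this diff, is what actually drives the `schedule` library:

```python
import time
import schedule

def run_forever():
    schedule_tasks()  # hourly :22 recommend job, daily 01:25 top job
    while True:
        schedule.run_pending()  # fire any job whose scheduled time has arrived
        time.sleep(1)
```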
+ 3 - 3
job_video_processing.py

@@ -10,11 +10,11 @@ def video_ai_task_start():
     with ThreadPoolExecutor( max_workers=max_workers) as executor:
         while True:
             try:
-                redis_task_list = ['task:video_ai'] * 2
+                redis_task_list = ['task:video_ai_top', 'task:video_ai_recommend'] * 3
                 futures = []
                 for redis_task in redis_task_list:
-                    futures.append( executor.submit( process_video_ai, redis_task ) )
-                    time.sleep( 1 )  # submit one task per second
+                    futures.append(executor.submit( process_video_ai, redis_task))
+                    time.sleep(1)  # submit one task per second
                 wait( futures )  # wait for all tasks to finish
             except Exception as e:
                 print(f"异常信息{e}")

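The rewritten task list fans the thread pool out across both queues: the list multiplication yields six alternating entries, so three workers poll each Redis task key (assuming `max_workers` is at least 6):

```python
redis_task_list = ['task:video_ai_top', 'task:video_ai_recommend'] * 3
print(redis_task_list)
# ['task:video_ai_top', 'task:video_ai_recommend',
#  'task:video_ai_top', 'task:video_ai_recommend',
#  'task:video_ai_top', 'task:video_ai_recommend']
```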
+ 9 - 3
video_processing/video_processing.py

@@ -5,14 +5,19 @@ import requests
 import json
 
 from common.aliyun_log import AliyunLogger
-from common.redis import get_video_data, install_video_data, in_video_data
+from common.redis import get_video_data
+from common.feishu_data import Material
 
 
 class VideoProcessing:
     def get_ai_data(self, video_path):
+        mark, prompt = Material.feishu_list()
+
         url = "http://8.219.186.16:8080/process_video/"
         payload = json.dumps( {
-            "video_path": video_path
+            "video_path": video_path,
+            "prompt": prompt,
+            "mark": mark,
         } )
         headers = {
             'Content-Type': 'application/json'
@@ -45,9 +50,10 @@ class VideoProcessing:
         title = data_json['title']
         video_path = data_json['video_path']
         type = data_json['type']
+        partition = data_json['partition']
         print(video_path)
         data, mark = self.get_ai_data(video_path)
-        AliyunLogger.logging(str(video_id), title, video_path, mark, type, data)
+        AliyunLogger.logging(str(video_id), title, video_path, mark, type, partition, data)
         print("写入日志成功")
 
 

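Putting the pieces together, the consumer now pulls `mark` and `prompt` from Feishu and forwards them with every request. A sketch of the equivalent HTTP call, with placeholders standing in for the `Material.feishu_list()` output and the Redis record:

```python
import json
import requests

mark, prompt = "v1", "Describe this video"  # placeholders for Material.feishu_list()
resp = requests.post(
    "http://8.219.186.16:8080/process_video/",  # endpoint from get_ai_data above
    headers={"Content-Type": "application/json"},
    data=json.dumps({
        "video_path": "oss://bucket/video.mp4",  # placeholder from the Redis record
        "prompt": prompt,
        "mark": mark,
    }),
    timeout=600,  # assumption: video analysis can take a while
)
print(resp.json())  # {"code": 0, "message": "Video processed successfully", ...} on success
```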
Some files were not shown because too many files changed in this diff