@@ -12,6 +12,7 @@ from google.generativeai.types import (File, GenerateContentResponse,
                                         HarmBlockThreshold, HarmCategory)
 from loguru import logger
 
+from common.aliyun_log import AliyunLogger
 from common.common_log import Common
 from common.feishu_data import Material
 from common.redis import SyncRedisHelper
@@ -97,7 +98,7 @@ def download_video(video_link: str) -> Optional[str]:
         return
 
 
-def upload_video(video_path: str) -> Optional[Tuple[File, str]]:
+def upload_video(video_path: str, redis_task) -> Optional[Tuple[File, str]]:
     try:
         file = genai.upload_file(path=video_path)
         while True:
@@ -107,11 +108,13 @@ def upload_video(video_path: str) -> Optional[Tuple[File, str]]:
             else:
                 return file, file.state.name
     except Exception as e:
+        AliyunLogger.logging(str(redis_task['video_id']), redis_task['title'], redis_task['video_path'], '',
+                             redis_task['type'], redis_task['partition'], f"[+] 上传视频失败: {e}")
         logger.error(f'[+] 上传视频失败: {e}')
         return
 
 
-def create_model_cache() -> Optional[genai.GenerativeModel]:
+def create_model_cache(redis_task) -> Optional[genai.GenerativeModel]:
     try:
         model = genai.GenerativeModel(
             model_name='gemini-1.5-flash',
@@ -121,12 +124,14 @@ def create_model_cache() -> Optional[genai.GenerativeModel]:
         logger.info('[+] 创建缓存模型成功')
         return model
     except Exception as e:
+        AliyunLogger.logging(str(redis_task['video_id']), redis_task['title'], redis_task['video_path'], '',
+                             redis_task['type'], redis_task['partition'], f"[+] 视频创建缓存内容,并返回生成模型异常信息: {e}")
         logger.error(f'视频创建缓存内容,并返回生成模型异常信息: {e}')
         Common.logger('ai').info(f'视频创建缓存内容,并返回生成模型异常信息: {e}')
         return
 
 
-def analyze_video(model: genai.GenerativeModel, google_file: File, prompt: str) -> Optional[GenerateContentResponse]:
+def analyze_video(model: genai.GenerativeModel, google_file: File, prompt: str, redis_task) -> Optional[GenerateContentResponse]:
     try:
         session = model.start_chat(history=[])
         content = {
@@ -137,8 +142,11 @@ def analyze_video(model: genai.GenerativeModel, google_file: File, prompt: str)
         }
         return session.send_message(content=content)
     except Exception as e:
+        AliyunLogger.logging(str(redis_task['video_id']), redis_task['title'], redis_task['video_path'], '',
+                             redis_task['type'], redis_task['partition'], f"[+] 视频处理请求失败: {e}")
         logger.error(f'视频处理请求失败: {e}')
         Common.logger('ai').info(f'视频处理请求失败: {e}')
+
         return
 
 
@@ -156,35 +164,40 @@ def run():
         time.sleep(10)
         return
     redis_task = json.loads(redis_task)
-
     mark, prompt = Material.feishu_list()
 
     video_duration = get_video_duration(video_link=redis_task['video_path'])
     if not video_duration:
+        AliyunLogger.logging(str(redis_task['video_id']), redis_task['title'], redis_task['video_path'], "",
+                             redis_task['type'], redis_task['partition'], "[+] 获取视频时长失败, 跳过任务")
         logger.error('[+] 获取视频时长失败, 跳过任务')
         return
     elif video_duration >= 600:
+        AliyunLogger.logging(str(redis_task['video_id']), redis_task['title'], redis_task['video_path'], "",
+                             redis_task['type'], redis_task['partition'], "[+] 视频时长超过10分钟, 跳过任务")
         logger.error('[+] 视频时长超过10分钟, 跳过任务')
         return
 
     video_path = download_video(video_link=redis_task['video_path'])
     if not video_path:
+        AliyunLogger.logging(str(redis_task['video_id']), redis_task['title'], redis_task['video_path'], "",
+                             redis_task['type'], redis_task['partition'], "[+] 视频下载失败, 跳过任务")
         logger.error(f'[+] 视频下载失败, 跳过任务')
         return
 
-    google_file, google_file_state = upload_video(video_path=video_path)
+    google_file, google_file_state = upload_video(video_path=video_path, redis_task=redis_task)
     if not google_file_state:
         return
     elif google_file_state != 'ACTIVE':
         logger.error('[+] 视频上传状态不为 ACTIVE, 跳过任务')
         return
 
-    model = create_model_cache()
+    model = create_model_cache(redis_task=redis_task)
     if isinstance(model, str):
         logger.error('[+] 创建模型失败, 跳过任务')
         return
 
-    response = analyze_video(model=model, google_file=google_file, prompt=prompt)
+    response = analyze_video(model=model, google_file=google_file, prompt=prompt, redis_task=redis_task)
     if isinstance(response, str):
         logger.error('[+] 获取模型响应失败, 跳过任务')
         return
@@ -192,6 +205,7 @@ def run():
     usage_info, text = str(response.usage_metadata).replace('\n', ', '), response.text.strip()
     logger.info(f'[+] 使用情况: {usage_info}')
     logger.info(f'[+] 模型响应结果: {text}')
+    AliyunLogger.logging(str(redis_task['video_id']), redis_task['title'], redis_task['video_path'], mark, redis_task['type'], redis_task['partition'], text)
 
     genai.delete_file(google_file)
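
Every failure branch above repeats the same seven-argument `AliyunLogger.logging(...)` call. As a minimal sketch, assuming the argument order visible in the diff (video_id, title, video_path, mark, type, partition, message), a small wrapper could keep each call site to one line; the helper name `log_task` and its defaults are hypothetical and not part of the existing code:

```python
# Hypothetical helper (not in the repo): wraps the AliyunLogger.logging call
# pattern used throughout this diff. Argument order mirrors the existing call
# sites: video_id, title, video_path, mark, type, partition, message.
from common.aliyun_log import AliyunLogger


def log_task(redis_task: dict, message: str, mark: str = '') -> None:
    """Send one task-level event to Aliyun using fields from the Redis task."""
    AliyunLogger.logging(
        str(redis_task['video_id']),
        redis_task['title'],
        redis_task['video_path'],
        mark,
        redis_task['type'],
        redis_task['partition'],
        message,
    )


# Example, mirroring the download-failure branch in run():
# log_task(redis_task, '[+] 视频下载失败, 跳过任务')
```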