zhangyong 10 months ago
parent
commit
82e9b5820b
8 changed files with 1008 additions and 167 deletions
  1. +146 -141  agc_job.py
  2. +68 -0     agc_job_main.py
  3. +1 -0      common/__init__.py
  4. +1 -1      common/aliyun_oss_uploading.py
  5. +6 -22     common/material.py
  6. +186 -0    common/pq.py
  7. +114 -3    common/sql_help.py
  8. +486 -0    video_agc/agc_video.py

+ 146 - 141
agc_job.py

@@ -1,142 +1,147 @@
-import os
-import concurrent.futures
-import re
+# import os
+# import concurrent.futures
+# import re
+#
+# import schedule
+# import time
+# import threading
+# from common import Material, Common, Feishu
+# from video_agc.agc_video_method import AgcVidoe
+#
+# # 控制读写速度的参数
+# MAX_BPS = 1 * 1024 * 1024  # 120MB/s
+# MAX_WORKERS = os.cpu_count() # 线程池最大工作线程数量
+# READ_WRITE_CHUNK_SIZE = 512 * 1024  # 每次读写的块大小 (1MB)
+# SLEEP_INTERVAL = READ_WRITE_CHUNK_SIZE / MAX_BPS  # 控制每次读写的延迟时间
+#
+# # 全局锁,用于同步读写操作
+# lock = threading.Lock()
+#
+# # 记录今天已经返回的用户名
+# gs_today = []
+# cg_today = []
+# bk_today = []
+#
+# def gs_video_start(user_data):
+#     global gs_today
+#     user_data_mark = user_data["mark"]
+#     video_call = user_data["video_call"]
+#     mark_name = user_data['mark_name']
+#
+#     if user_data_mark is not None and user_data_mark in gs_today:
+#         Common.logger("video").info(f"视频脚本参数中的用户名 {user_data_mark} 今天已经返回过,不再启动线程")
+#         print(f"视频脚本参数中的用户名 {user_data_mark} 今天已经返回过,不再启动线程。")
+#         return  # 如果返回了某个用户名,并且今天已经返回过,则不启动线程
+#     if video_call is not None and video_call in gs_today:
+#         print(f"视频脚本参数中的脚本{user_data_mark} 今天已经返回过,不再启动线程。")
+#         return  # 如果返回了某个用户名,并且今天已经返回过,则不启动线程
+#     else:
+#         print(f"视频脚本参数{user_data}")
+#         mark = AgcVidoe.video_gs_stitching(user_data)
+#         print(f"返回用户名{mark}")
+#         if mark:
+#             Common.logger("video").info(f"返回用户名{mark}")
+#             gs_today.append(mark)
+#             zd_count = user_data["zd_count"]  # 生成总条数
+#             # 总条数
+#             result = re.match(r'([^0-9]+)', user_data_mark).group()
+#             all_count = AgcVidoe.get_link_gs_count(result)
+#             if all_count >= int(zd_count):
+#                 Feishu.bot('recommend', 'AGC完成通知', '今日脚本跟随视频拼接任务完成啦~', user_data_mark.split("-")[0], mark_name)
+#
+# def cg_video_start(user_data):
+#     global cg_today
+#     user_data_mark = user_data["mark"]
+#     if user_data_mark and user_data_mark in cg_today:
+#         Common.logger("video").info(f"视频脚本参数中的用户名 {user_data_mark} 今天已经返回过,不再启动线程。")
+#         print(f"视频脚本参数中的用户名 {user_data_mark} 今天已经返回过,不再启动线程。")
+#         return
+#     print(f"视频脚本参数 {user_data}")
+#     mark = AgcVidoe.video_stitching(user_data)
+#     print(f"返回用户名 {mark}")
+#     if mark:
+#         Common.logger("video").info(f"返回用户名 {mark}")
+#         cg_today.append(user_data_mark)
+#
+# def bk_video_start(user_data):
+#     global bk_today
+#     user_data_mark = user_data["mark"]
+#     # 开始准备执行生成视频脚本
+#     if user_data_mark is not None and user_data_mark in bk_today:
+#         Common.logger("video").info(f"视频脚本参数中的用户名 {user_data_mark} 今天已经返回过,不再启动线程。")
+#         print(f"视频脚本参数中的用户名 {user_data_mark} 今天已经返回过,不再启动线程。")
+#         return
+#     mark = AgcVidoe.video_bk_stitching(user_data)
+#     print(f"返回用户名{mark}")
+#     if mark:
+#         bk_today.append(mark)
+#         Common.logger("video").info(f"返回用户名{mark}")
+#
+# def controlled_io_operation(platform, data):
+#     with lock:
+#         start_time = time.time()
+#         time.sleep(SLEEP_INTERVAL)
+#         end_time = time.time()
+#         elapsed_time = end_time - start_time
+#         if elapsed_time < SLEEP_INTERVAL:
+#             time.sleep(SLEEP_INTERVAL - elapsed_time)
+#     if platform == "gs":
+#         gs_video_start(data)
+#     elif platform == "cg":
+#         cg_video_start(data)
+#     elif platform == "bk":
+#         bk_video_start(data)
+#
+# def video_start(platform):
+#     print("开始执行生成视频脚本.")
+#     if platform == "cg":
+#         data = Material.feishu_list()
+#     elif platform == "gs":
+#         data = Material.feishu_gs_list()
+#     elif platform == "bk":
+#         data = Material.feishu_bk_list()
+#     with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
+#         futures = {executor.submit(controlled_io_operation, platform, user_data): user_data for user_data in data}
+#         for future in concurrent.futures.as_completed(futures):
+#             try:
+#                 future.result()
+#                 print("处理结果: 成功")
+#             except concurrent.futures.TimeoutError:
+#                 print("任务超时,已取消.")
+#             except Exception as e:
+#                 print("处理任务时出现异常:", e)
+#     print("执行生成视频脚本结束.")
+#
+# def gs_usernames_today():
+#     gs_today.clear()
+#     print("gs_usernames_today 已清空")
+# def cg_usernames_today():
+#     cg_today.clear()
+#     print("cg_usernames_today 已清空")
+# def bk_usernames_today():
+#     bk_today.clear()
+#     print("bk_usernames_today 已清空")
+#
+# # 定时任务设置
+# schedule.every().day.at("21:10").do(gs_usernames_today)
+# schedule.every().day.at("04:10").do(cg_usernames_today)
+# schedule.every().day.at("00:10").do(bk_usernames_today)
+#
+# schedule.every(10).minutes.do(video_start, "cg")
+# schedule.every(10).minutes.do(video_start, "gs")
+# # schedule.every(10).minutes.do(video_start, "bk")
+# schedule.every().day.at("00:20").do(video_start, "bk")
+#
+#
+# if __name__ == "__main__":
+#     while True:
+#         try:
+#             schedule.run_pending()
+#         except Exception as e:
+#             print("执行调度任务时出现异常:", e)
+#         time.sleep(1)
+from common import Material
+from video_agc.agc_video import AGC
 
-import schedule
-import time
-import threading
-from common import Material, Common, Feishu
-from video_agc.agc_video_method import AgcVidoe
-
-# 控制读写速度的参数
-MAX_BPS = 1 * 1024 * 1024  # 120MB/s
-MAX_WORKERS = os.cpu_count() # 线程池最大工作线程数量
-READ_WRITE_CHUNK_SIZE = 512 * 1024  # 每次读写的块大小 (1MB)
-SLEEP_INTERVAL = READ_WRITE_CHUNK_SIZE / MAX_BPS  # 控制每次读写的延迟时间
-
-# 全局锁,用于同步读写操作
-lock = threading.Lock()
-
-# 记录今天已经返回的用户名
-gs_today = []
-cg_today = []
-bk_today = []
-
-def gs_video_start(user_data):
-    global gs_today
-    user_data_mark = user_data["mark"]
-    video_call = user_data["video_call"]
-    mark_name = user_data['mark_name']
-
-    if user_data_mark is not None and user_data_mark in gs_today:
-        Common.logger("video").info(f"视频脚本参数中的用户名 {user_data_mark} 今天已经返回过,不再启动线程")
-        print(f"视频脚本参数中的用户名 {user_data_mark} 今天已经返回过,不再启动线程。")
-        return  # 如果返回了某个用户名,并且今天已经返回过,则不启动线程
-    if video_call is not None and video_call in gs_today:
-        print(f"视频脚本参数中的脚本{user_data_mark} 今天已经返回过,不再启动线程。")
-        return  # 如果返回了某个用户名,并且今天已经返回过,则不启动线程
-    else:
-        print(f"视频脚本参数{user_data}")
-        mark = AgcVidoe.video_gs_stitching(user_data)
-        print(f"返回用户名{mark}")
-        if mark:
-            Common.logger("video").info(f"返回用户名{mark}")
-            gs_today.append(mark)
-            zd_count = user_data["zd_count"]  # 生成总条数
-            # 总条数
-            result = re.match(r'([^0-9]+)', user_data_mark).group()
-            all_count = AgcVidoe.get_link_gs_count(result)
-            if all_count >= int(zd_count):
-                Feishu.bot('recommend', 'AGC完成通知', '今日脚本跟随视频拼接任务完成啦~', user_data_mark.split("-")[0], mark_name)
-
-def cg_video_start(user_data):
-    global cg_today
-    user_data_mark = user_data["mark"]
-    if user_data_mark and user_data_mark in cg_today:
-        Common.logger("video").info(f"视频脚本参数中的用户名 {user_data_mark} 今天已经返回过,不再启动线程。")
-        print(f"视频脚本参数中的用户名 {user_data_mark} 今天已经返回过,不再启动线程。")
-        return
-    print(f"视频脚本参数 {user_data}")
-    mark = AgcVidoe.video_stitching(user_data)
-    print(f"返回用户名 {mark}")
-    if mark:
-        Common.logger("video").info(f"返回用户名 {mark}")
-        cg_today.append(user_data_mark)
-
-def bk_video_start(user_data):
-    global bk_today
-    user_data_mark = user_data["mark"]
-    # 开始准备执行生成视频脚本
-    if user_data_mark is not None and user_data_mark in bk_today:
-        Common.logger("video").info(f"视频脚本参数中的用户名 {user_data_mark} 今天已经返回过,不再启动线程。")
-        print(f"视频脚本参数中的用户名 {user_data_mark} 今天已经返回过,不再启动线程。")
-        return
-    mark = AgcVidoe.video_bk_stitching(user_data)
-    print(f"返回用户名{mark}")
-    if mark:
-        bk_today.append(mark)
-        Common.logger("video").info(f"返回用户名{mark}")
-
-def controlled_io_operation(platform, data):
-    with lock:
-        start_time = time.time()
-        time.sleep(SLEEP_INTERVAL)
-        end_time = time.time()
-        elapsed_time = end_time - start_time
-        if elapsed_time < SLEEP_INTERVAL:
-            time.sleep(SLEEP_INTERVAL - elapsed_time)
-    if platform == "gs":
-        gs_video_start(data)
-    elif platform == "cg":
-        cg_video_start(data)
-    elif platform == "bk":
-        bk_video_start(data)
-
-def video_start(platform):
-    print("开始执行生成视频脚本.")
-    if platform == "cg":
-        data = Material.feishu_list()
-    elif platform == "gs":
-        data = Material.feishu_gs_list()
-    elif platform == "bk":
-        data = Material.feishu_bk_list()
-    with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
-        futures = {executor.submit(controlled_io_operation, platform, user_data): user_data for user_data in data}
-        for future in concurrent.futures.as_completed(futures):
-            try:
-                future.result()
-                print("处理结果: 成功")
-            except concurrent.futures.TimeoutError:
-                print("任务超时,已取消.")
-            except Exception as e:
-                print("处理任务时出现异常:", e)
-    print("执行生成视频脚本结束.")
-
-def gs_usernames_today():
-    gs_today.clear()
-    print("gs_usernames_today 已清空")
-def cg_usernames_today():
-    cg_today.clear()
-    print("cg_usernames_today 已清空")
-def bk_usernames_today():
-    bk_today.clear()
-    print("bk_usernames_today 已清空")
-
-# 定时任务设置
-schedule.every().day.at("21:10").do(gs_usernames_today)
-schedule.every().day.at("04:10").do(cg_usernames_today)
-schedule.every().day.at("00:10").do(bk_usernames_today)
-
-schedule.every(10).minutes.do(video_start, "cg")
-schedule.every(10).minutes.do(video_start, "gs")
-# schedule.every(10).minutes.do(video_start, "bk")
-schedule.every().day.at("00:20").do(video_start, "bk")
-
-
-if __name__ == "__main__":
-    while True:
-        try:
-            schedule.run_pending()
-        except Exception as e:
-            print("执行调度任务时出现异常:", e)
-        time.sleep(1)
+data = Material.feishu_list()
+AGC.video(data[4], "常规")

+ 68 - 0
agc_job_main.py

@@ -0,0 +1,68 @@
+import os
+import concurrent.futures
+
+import schedule
+import time
+import threading
+from common import Material
+from video_agc.agc_video import AGC
+
+# 控制读写速度的参数
+MAX_BPS = 1 * 1024 * 1024  # 1MB/s
+MAX_WORKERS = os.cpu_count() * 2  # 线程池最大工作线程数量
+READ_WRITE_CHUNK_SIZE = 512 * 1024  # 每次读写的块大小 (512KB)
+SLEEP_INTERVAL = READ_WRITE_CHUNK_SIZE / MAX_BPS  # 控制每次读写的延迟时间
+
+# 全局锁,用于同步读写操作
+lock = threading.Lock()
+
+
+
+def controlled_io_operation(platform, data):
+    with lock:
+        start_time = time.time()
+        time.sleep(SLEEP_INTERVAL)
+        end_time = time.time()
+        elapsed_time = end_time - start_time
+        if elapsed_time < SLEEP_INTERVAL:
+            time.sleep(SLEEP_INTERVAL - elapsed_time)
+    if platform == "gs":
+        AGC.video(data, "跟随")
+    elif platform == "cg":
+        AGC.video(data, "常规")
+    elif platform == "bk":
+        AGC.video(data, "爆款")
+
+def video_start(platform):
+    print("开始执行生成视频脚本.")
+    if platform == "cg":
+        data = Material.feishu_list()
+    elif platform == "gs":
+        data = Material.feishu_gs_list()
+    elif platform == "bk":
+        data = Material.feishu_bk_list()
+    with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
+        futures = {executor.submit(controlled_io_operation, platform, user_data): user_data for user_data in data}
+        for future in concurrent.futures.as_completed(futures):
+            try:
+                future.result()
+                print("处理结果: 成功")
+            except concurrent.futures.TimeoutError:
+                print("任务超时,已取消.")
+            except Exception as e:
+                print("处理任务时出现异常:", e)
+    print("执行生成视频脚本结束.")
+
+
+schedule.every().day.at("04:10").do(video_start, "cg")
+schedule.every().day.at("21:20").do(video_start, "gs")
+schedule.every().day.at("00:20").do(video_start, "bk")
+
+
+if __name__ == "__main__":
+    while True:
+        try:
+            schedule.run_pending()
+        except Exception as e:
+            print("执行调度任务时出现异常:", e)
+        time.sleep(1)
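
A minimal sketch (illustrative only, not part of this commit) of the pacing implied by the constants above: controlled_io_operation holds the global lock for at least SLEEP_INTERVAL before dispatching each job, so worker start-ups are staggered.

    # Not part of this commit: shows the stagger interval the constants yield.
    MAX_BPS = 1 * 1024 * 1024           # 1 MiB/s
    READ_WRITE_CHUNK_SIZE = 512 * 1024  # 512 KiB
    SLEEP_INTERVAL = READ_WRITE_CHUNK_SIZE / MAX_BPS
    print(SLEEP_INTERVAL)  # 0.5 -> roughly one task released every half second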

+ 1 - 0
common/__init__.py

@@ -3,3 +3,4 @@ from .aliyun_oss_uploading import Oss
 from .material import Material
 from .feishu import Feishu
 from .db import MysqlHelper
+from .pq import PQ

+ 1 - 1
common/aliyun_oss_uploading.py

@@ -81,7 +81,7 @@ class Oss():
         return list
 
     @classmethod
-    def get_bk_url(cls, videos, video_path, video):
+    def download_url(cls, videos, video_path, video):
         for i in range(3):
             payload = {}
             headers = {}

+ 6 - 22
common/material.py

@@ -211,9 +211,9 @@ class Material():
         # 获取音频类型+字幕
         all_data = Feishu.get_values_batch(feishu_id, link)
         for row in all_data[1:]:
+            video = row[0]
             uid = row[1]
             text = row[2]
-            video = row[0]
             if len(row) == 3:
                 cover = None
             else:
@@ -222,37 +222,23 @@ class Material():
                 title = None
             else:
                 title = row[4]
-            number = {"uid": uid, "text": text, "cover": cover, "title": title}
+            number = {"uid": uid, "text": text, "cover": cover, "title": title, }
             if uid:
                 list.append(number)
             if video:
                 video_list.append(video)
-        while True:
-            list1 = random.choice(list)
-            uid1 = list1['uid']
-            srt = list1['text']
-            cover = list1['cover']
-            title = list1['title']
+            else:
+                return list, video_list
+        return list, video_list
 
-            id_list = cls.get_uid(uid1, mark)
-            if len(id_list) < 1:
-                return uid1, srt, video_list, cover, title
 
     # 获取音频类型+字幕+标题
     @classmethod
-    def get_allbk_data(cls, feishu_id, link, mark):
+    def get_allbk_data(cls, feishu_id, link):
         list_data = []
         # 获取音频类型+字幕
         all_data = Feishu.get_values_batch(feishu_id, link)
         for row in all_data[1:]:
-            # excel_base_date = datetime(1899, 12, 30)
-            # excel_date_number = row[0]
-            # date_from_excel = excel_base_date + timedelta(days=excel_date_number)
-            # # 获取当前时间
-            # current_date = datetime.now().date()
-            # date_from_excel_str = date_from_excel.strftime("%Y-%m-%d")
-            # current_date_str = current_date.strftime("%Y-%m-%d")
-            # if date_from_excel_str == current_date_str:
             uid = row[0]
             text = row[1]
             video = row[2]
@@ -269,8 +255,6 @@ class Material():
                 list_data.append(number)
             else:
                 return list_data
-            # else:
-            #     return list_data
         return list_data
 
 

+ 186 - 0
common/pq.py

@@ -0,0 +1,186 @@
+
+import os
+import random
+
+import sys
+import time
+import json
+
+import requests
+from urllib.parse import urlencode
+
+sys.path.append(os.getcwd())
+from common import Common, Feishu
+
+class PQ():
+    """
+    获取封面
+    """
+    @classmethod
+    def get_cover(cls, uid):
+        time.sleep(1)
+        url = "https://admin.piaoquantv.com/manager/video/multiCover/listV2"
+
+        payload = json.dumps({
+            "videoId": uid,
+            "range": "2h"
+        })
+        headers = {
+            'accept': 'application/json',
+            'accept-language': 'zh-CN,zh;q=0.9',
+            'cache-control': 'no-cache',
+            'content-type': 'application/json',
+            'cookie': 'SESSION=YjU3MzgwNTMtM2QyYi00YjljLWI3YWUtZTBjNWYwMGQzYWNl',
+            'origin': 'https://admin.piaoquantv.com',
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36'
+        }
+
+        response = requests.request("POST", url, headers=headers, data=payload)
+        data = response.json()
+        content = data["content"]
+        if len(content) == 1:
+            return content[0]["coverUrl"]
+        max_share_count = 0
+        selected_cover_url = ""
+        for item in content:
+            share_count = item.get("shareWeight")
+            if share_count is not None and share_count > max_share_count:
+                max_share_count = share_count
+                selected_cover_url = item["coverUrl"]
+            elif share_count == max_share_count and item["createUser"] == "用户":
+                selected_cover_url = item["coverUrl"]
+        return selected_cover_url
+
+    """
+    获取标题
+    """
+    @classmethod
+    def get_title(cls, uid):
+        url = "https://admin.piaoquantv.com/manager/video/multiTitleV2/listV2"
+
+        payload = json.dumps({
+            "videoId": uid,
+            "range": "4h"
+        })
+        headers = {
+            'accept': 'application/json',
+            'accept-language': 'zh-CN,zh;q=0.9',
+            'cache-control': 'no-cache',
+            'content-type': 'application/json',
+            'cookie': 'SESSION=YjU3MzgwNTMtM2QyYi00YjljLWI3YWUtZTBjNWYwMGQzYWNl',
+            'origin': 'https://admin.piaoquantv.com',
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36'
+        }
+        response = requests.request("POST", url, headers=headers, data=payload)
+        data = response.json()
+        content = data["content"]
+        if len(content) == 1:
+            return content[0]["title"]
+        max_share_count = 0
+        selected_title = ""
+        for item in content:
+            share_count = item.get("shareWeight")
+            if share_count is not None and share_count > max_share_count:
+                max_share_count = share_count
+                selected_title = item["title"]
+            elif share_count == max_share_count and item["createUser"] == "用户":
+                selected_title = item["title"]
+        return selected_title
+
+
+    """
+    新生成视频上传到对应账号下
+    """
+    @classmethod
+    def insert_piaoquantv(cls, oss_object_key, audio_title, pq_ids_list, cover, uid):
+        if audio_title == '' or None == audio_title:
+            title = cls.get_title(uid)
+        else:
+            if '/' in audio_title:
+                new_titles = audio_title.split('/')
+            else:
+                new_titles = [audio_title]
+            title = random.choice(new_titles)
+
+        cover_url = ''
+        if None == cover or cover == '':
+            cover_url = cls.get_cover(uid)
+        pq_id_list = random.choice(pq_ids_list)
+        url = "https://vlogapi.piaoquantv.com/longvideoapi/crawler/video/send"
+        headers = {
+            'User-Agent': 'PQSpeed/486 CFNetwork/1410.1 Darwin/22.6.0',
+            'cookie': 'JSESSIONID=4DEA2B5173BB9A9E82DB772C0ACDBC9F; JSESSIONID=D02C334150025222A0B824A98B539B78',
+            'referer': 'http://appspeed.piaoquantv.com',
+            'token': '524a8bc871dbb0f4d4717895083172ab37c02d2f',
+            'accept-language': 'zh-CN,zh-Hans;q=0.9',
+            'Content-Type': 'application/x-www-form-urlencoded'
+        }
+        payload = {
+            'coverImgPath': cover_url,
+            'deviceToken': '9ef064f2f7869b3fd67d6141f8a899175dddc91240971172f1f2a662ef891408',
+            'fileExtensions': 'MP4',
+            'loginUid': pq_id_list,
+            'networkType': 'Wi-Fi',
+            'platform': 'iOS',
+            'requestId': 'fb972cbd4f390afcfd3da1869cd7d001',
+            'sessionId': '362290597725ce1fa870d7be4f46dcc2',
+            'subSessionId': '362290597725ce1fa870d7be4f46dcc2',
+            'title': title,
+            'token': '524a8bc871dbb0f4d4717895083172ab37c02d2f',
+            'uid': pq_id_list,
+            'versionCode': '486',
+            'versionName': '3.4.12',
+            'videoFromScene': '1',
+            'videoPath': oss_object_key,
+            'viewStatus': '1'
+        }
+        encoded_payload = urlencode(payload)
+        response = requests.request("POST", url, headers=headers, data=encoded_payload)
+        data = response.json()
+        code = data["code"]
+        if code == 0:
+            new_video_id = data["data"]["id"]
+            return new_video_id
+        else:
+            return None
+
+    """
+    获取视频链接
+    """
+    @classmethod
+    def get_audio_url(cls, uid):
+        for i in range(3):
+            url = f"https://admin.piaoquantv.com/manager/video/detail/{uid}"
+            payload = {}
+            headers = {
+                'authority': 'admin.piaoquantv.com',
+                'accept': 'application/json, text/plain, */*',
+                'accept-language': 'zh-CN,zh;q=0.9',
+                'cache-control': 'no-cache',
+                'cookie': 'SESSION=YjU3MzgwNTMtM2QyYi00YjljLWI3YWUtZTBjNWYwMGQzYWNl',
+                'pragma': 'no-cache',
+                'referer': f'https://admin.piaoquantv.com/cms/post-detail/{uid}/detail',
+                'sec-ch-ua': '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
+                'sec-ch-ua-mobile': '?0',
+                'sec-ch-ua-platform': '"macOS"',
+                'sec-fetch-dest': 'empty',
+                'sec-fetch-mode': 'cors',
+                'sec-fetch-site': 'same-origin',
+                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
+            }
+
+            response = requests.request("GET", url, headers=headers, data=payload)
+            data = response.json()
+            code = data["code"]
+            if code != 0:
+               continue
+            audio_url = data["content"]["transedVideoPath"]
+            return audio_url
+        return ""
+
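
A usage sketch for the new PQ helper (illustrative only, not part of this commit; the video id is a placeholder and the requests need valid admin session cookies/tokens to succeed):

    # Hypothetical ids/values, for illustration only.
    from common import PQ

    video_id = 12345678
    audio_url = PQ.get_audio_url(video_id)  # transcoded media URL, "" on failure
    cover = PQ.get_cover(video_id)          # cover with the highest shareWeight
    title = PQ.get_title(video_id)          # title with the highest shareWeight
    print(audio_url, cover, title)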

+ 114 - 3
common/sql_help.py

@@ -1,15 +1,17 @@
+import datetime
 import os
+import random
 import sys
-import datetime
-
-
+from datetime import timedelta
 
 sys.path.append(os.getcwd())
 from datetime import datetime
 from common import MysqlHelper
+from common import Common
 
 
 class sqlHelp():
+
     @classmethod
     def get_count_list(cls, name_list):
         count_list = []
@@ -24,3 +26,112 @@ class sqlHelp():
             count = str(count).replace('(', '').replace(')', '').replace(',', '')
             count_list.append(f"{name['mark_name']}生成条数为:{count}条 \n")
         return count_list
+
+    """
+    获取未使用的视频链接
+    """
+    @classmethod
+    def get_url_list(cls, user_list, mark, limit_count):
+        for i in range(8):
+            user = str(random.choice(user_list))
+            user = user.replace('(', '').replace(')', '').replace(',', '')
+            current_time = datetime.now()
+            three_days_ago = current_time - timedelta(days=3)
+            formatted_current_time = current_time.strftime("%Y-%m-%d")
+            formatted_three_days_ago = three_days_ago.strftime("%Y-%m-%d")
+            url_list = f"""SELECT a.video_id, a.account_id, a.oss_object_key 
+                                                 FROM agc_video_url a 
+                                                 LEFT JOIN agc_video_deposit b 
+                                                 ON a.oss_object_key = b.oss_object_key 
+                                                 AND b.time >= '{formatted_three_days_ago}' 
+                                                 AND b.time <= '{formatted_current_time}' 
+                                                 WHERE b.video_id IS NULL 
+                                                 AND a.account_id = {user} 
+                                                 AND a.status = 1 
+                                                 AND a.mark = '{mark}' 
+                                                 LIMIT {limit_count};"""
+            url_list = MysqlHelper.get_values(url_list, "prod")
+            if url_list:
+                if len(url_list) >= 35:
+                    return url_list, user
+        return None, None
+
+    """
+    获取已入库的用户id
+    """
+    @classmethod
+    def get_user_id(cls, channel_type, mark):
+        account_id = f"""select account_id from agc_video_url where mark = '{mark}' and  oss_object_key LIKE '%{channel_type}%' group by account_id ;"""
+        account_id = MysqlHelper.get_values(account_id, "prod")
+        return account_id
+
+    """
+    获取当日已入库数量
+    """
+    @classmethod
+    def get_link_count(cls, mark, platform):
+        current_time = datetime.now()
+        formatted_time = current_time.strftime("%Y-%m-%d")
+        count = f"""SELECT COUNT(*) AS total_count FROM ( SELECT audio, account_id FROM agc_video_deposit WHERE time = '{formatted_time}' AND platform = '{platform}' and mark = '{mark}' GROUP BY audio, account_id) AS subquery;"""
+        count = MysqlHelper.get_values(count, "prod")
+        if count == None:
+            count = 0
+        count = str(count).replace('(', '').replace(')', '').replace(',', '')
+        return int(count)
+
+    """
+    获取跟随脚本已入库数量
+    """
+    @classmethod
+    def get_link_gs_count(cls, mark):
+        current_time = datetime.now()
+        formatted_time = current_time.strftime("%Y-%m-%d")
+        count = f"""SELECT COUNT(*) AS total_count FROM ( SELECT audio, account_id FROM agc_video_deposit WHERE time = '{formatted_time}' and mark LIKE '%{mark}%' GROUP BY audio, account_id) AS subquery;"""
+        count = MysqlHelper.get_values(count, "prod")
+        if count == None:
+            count = 0
+        count = str(count).replace('(', '').replace(')', '').replace(',', '')
+        return int(count)
+
+    """
+    获取跟随脚本站外已入库数量
+    """
+    @classmethod
+    def get_link_zw_count(cls, mark, platform):
+        current_time = datetime.now()
+        formatted_time = current_time.strftime("%Y-%m-%d")
+        count = f"""SELECT COUNT(*) AS total_count FROM ( SELECT audio, account_id FROM agc_video_deposit WHERE time = '{formatted_time}' and mark = '{mark}' GROUP BY audio, account_id) AS subquery;"""
+        count = MysqlHelper.get_values(count, "prod")
+        if count == None:
+            count = 0
+        count = str(count).replace('(', '').replace(')', '').replace(',', '')
+        return int(count)
+
+    """
+    获取跟随脚本站内已入库数量
+    """
+    @classmethod
+    def get_link_zn_count(cls, mark, platform):
+        current_time = datetime.now()
+        formatted_time = current_time.strftime("%Y-%m-%d")
+        count = f"""SELECT COUNT(*) AS total_count FROM ( SELECT audio, account_id FROM agc_video_deposit WHERE time = '{formatted_time}' AND platform = '{platform}' and mark = '{mark}' GROUP BY audio, account_id) AS subquery;"""
+        count = MysqlHelper.get_values(count, "prod")
+        if count == None:
+            count = 0
+        count = str(count).replace('(', '').replace(')', '').replace(',', '')
+        return int(count)
+
+    """
+    已使用视频链接存表
+    """
+    @classmethod
+    def insert_videoAudio(cls, video_files, uid, platform, mark):
+        current_time = datetime.now()
+        formatted_time = current_time.strftime("%Y-%m-%d")
+        for j in video_files:
+            insert_sql = f"""INSERT INTO agc_video_deposit (audio, video_id, account_id, oss_object_key, time, platform, mark) values ('{uid}', '{j[0]}', '{j[1]}', '{j[2]}', '{formatted_time}', '{platform}', '{mark}')"""
+            MysqlHelper.update_values(
+                sql=insert_sql,
+                env="prod",
+                machine="",
+            )
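
A usage sketch for the new sqlHelp queries (illustrative only, not part of this commit; the mark and channel values are placeholders, and all calls run against the prod MySQL instance):

    from common.sql_help import sqlHelp

    mark = "示例标记"                                  # placeholder mark
    user_ids = sqlHelp.get_user_id("kuaishou", mark)   # accounts already stored for the channel
    url_list, user = sqlHelp.get_url_list(user_ids, mark, 35)
    if url_list:
        print(f"账号 {user} 有 {len(url_list)} 条未使用链接")
    print(sqlHelp.get_link_count(mark, "常规"))         # today's stored count for the platform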

+ 486 - 0
video_agc/agc_video.py

@@ -0,0 +1,486 @@
+import configparser
+import glob
+import os
+import random
+import re
+import subprocess
+import sys
+import time
+import urllib.parse
+import json
+
+import requests
+from datetime import datetime, timedelta
+from urllib.parse import urlencode
+
+from common.sql_help import sqlHelp
+
+sys.path.append(os.getcwd())
+from common.db import MysqlHelper
+from common.material import Material
+from common import Common, Oss, Feishu, PQ
+from common.srt import SRT
+
+config = configparser.ConfigParser()
+config.read('./config.ini')  # 替换为您的配置文件路径
+
+
+class AGC():
+    """清除文件下所有mp4文件"""
+    @classmethod
+    def clear_mp4_files(cls, folder_path):
+        # 获取文件夹中所有扩展名为 '.mp4' 的文件路径列表
+        mp4_files = glob.glob(os.path.join(folder_path, '*.mp4'))
+        if not mp4_files:
+            return
+        # 遍历并删除所有 .mp4 文件
+        for mp4_file in mp4_files:
+            os.remove(mp4_file)
+        print(f"文件夹 '{folder_path}' 中的所有 .mp4 文件已清空。")
+    """
+    站外视频拼接
+    """
+    @classmethod
+    def zw_concatenate_videos(cls, videos, audio_duration, audio_video, platform, s_path, v_path, mark, v_oss_path):
+
+        video_files = cls.concat_videos_with_subtitles(videos, audio_duration, platform, mark)
+        Common.logger("video").info(f"{mark}的{platform}视频文件:{video_files}")
+
+        if video_files == "":
+            return ""
+        print(f"{mark}的{platform}:开始拼接视频喽~~~")
+        Common.logger("video").info(f"{mark}的{platform}:开始拼接视频喽~~~")
+        if os.path.exists(s_path):
+            # subtitle_cmd = f"subtitles={s_path}:force_style='Fontsize=11,Fontname=Hiragino Sans GB,Outline=0,PrimaryColour=&H000000,SecondaryColour=&H000000'"
+            subtitle_cmd = f"subtitles={s_path}:force_style='Fontsize=12,Fontname=wqy-zenhei,Bold=1,Outline=0,PrimaryColour=&H000000,SecondaryColour=&H000000'"
+        else:
+            start_time = cls.seconds_to_srt_time(0)
+            end_time = cls.seconds_to_srt_time(audio_duration)
+            with open(s_path, 'w') as f:
+                f.write(f"1\n{start_time} --> {end_time}\n分享、转发给群友\n")
+            # subtitle_cmd = "drawtext=text='分享、转发给群友':fontsize=28:fontcolor=black:x=(w-text_w)/2:y=h-text_h-15"
+            subtitle_cmd = f"subtitles={s_path}:force_style='Fontsize=12,Fontname=wqy-zenhei,Bold=1,Outline=0,PrimaryColour=&H000000,SecondaryColour=&H000000'"
+        # 背景色参数
+        background_cmd = "drawbox=y=ih-65:color=yellow@1.0:width=iw:height=0:t=fill"
+
+        VIDEO_COUNTER = 0
+        FF_INPUT = ""
+        FF_SCALE = ""
+        FF_FILTER = ""
+        ffmpeg_cmd = ["ffmpeg"]
+        for videos in video_files:
+            Common.logger("video").info(f"{mark}的{platform}视频:{videos[3]}")
+            # 添加输入文件
+            FF_INPUT += f" -i {videos[3]}"
+            # 为每个视频文件统一长宽,并设置SAR(采样宽高比)
+            FF_SCALE += f"[{VIDEO_COUNTER}:v]scale=320x480,setsar=1[v{VIDEO_COUNTER}];"
+            # 为每个视频文件创建一个输入流,并添加到-filter_complex参数中
+            FF_FILTER += f"[v{VIDEO_COUNTER}][{VIDEO_COUNTER}:a]"
+            # 增加视频计数器
+            VIDEO_COUNTER += 1
+        # 构建最终的FFmpeg命令
+        ffmpeg_cmd.extend(FF_INPUT.split())
+        ffmpeg_cmd.extend(["-filter_complex", f"{FF_SCALE}{FF_FILTER}concat=n={VIDEO_COUNTER}:v=1:a=1[v][a]",
+                           "-map", "[v]", "-map", "[a]", v_path])
+        # 多线程数
+        num_threads = 4
+        # 构建 FFmpeg 命令,生成视频
+        ffmpeg_cmd_oss = [
+            "ffmpeg",
+            "-i", v_path,  # 视频文件列表
+            "-i", audio_video,  # 音频文件
+            "-c:v", "libx264",  # 复制视频流
+            "-c:a", "aac",  # 编码音频流为AAC
+            "-threads", str(num_threads),
+            "-vf", f"{background_cmd},{subtitle_cmd}",  # 添加背景色和字幕
+            "-t", str(int(audio_duration)),  # 保持与音频时长一致
+            "-map", "0:v:0",  # 映射第一个输入的视频流
+            "-map", "1:a:0",  # 映射第二个输入的音频流
+            "-y",  # 覆盖输出文件
+            v_oss_path
+        ]
+        try:
+            subprocess.run(ffmpeg_cmd)
+            if os.path.isfile(v_path):
+                subprocess.run(ffmpeg_cmd_oss)
+            print("视频处理完成!")
+        except subprocess.CalledProcessError as e:
+            print(f"视频处理失败:{e}")
+        print(f"{mark}的{platform}:视频拼接成功啦~~~")
+        Common.logger("video").info(f"{mark}的{platform}:视频拼接成功啦~~~")
+        return video_files
+
+    """视频秒数转换"""
+    @classmethod
+    def seconds_to_srt_time(cls, seconds):
+        hours = int(seconds // 3600)
+        minutes = int((seconds % 3600) // 60)
+        seconds = seconds % 60
+        milliseconds = int((seconds - int(seconds)) * 1000)
+        return f"{hours:02d}:{minutes:02d}:{int(seconds):02d},{milliseconds:03d}"
+
+    """
+    获取视频文件的时长(秒)
+    """
+    @classmethod
+    def get_video_duration(cls, video_file):
+        result = subprocess.run(
+            ["ffprobe", "-v", "error", "-show_entries", "format=duration",
+             "-of", "default=noprint_wrappers=1:nokey=1", video_file],
+            capture_output=True, text=True)
+        return float(result.stdout)
+
+    """计算需要拼接的视频"""
+    @classmethod
+    def concat_videos_with_subtitles(cls, videos, audio_duration, platform, mark):
+        # 计算视频文件列表总时长
+        if platform == "爆款" or platform == "跟随":
+            total_video_duration = sum(cls.get_video_duration(video_file) for video_file in videos)
+        else:
+            total_video_duration = sum(cls.get_video_duration(video_file[3]) for video_file in videos)
+        if platform == "爆款" or platform == "跟随":
+            # 视频时长大于音频时长
+            if total_video_duration > audio_duration:
+                return videos
+            # 计算音频秒数与视频秒数的比率,然后加一得到需要的视频数量
+            video_audio_ratio = audio_duration / total_video_duration
+            videos_needed = int(video_audio_ratio) + 2
+            trimmed_video_list = videos * videos_needed
+            return trimmed_video_list
+        else:
+            # 如果视频总时长小于音频时长,则不做拼接
+            if total_video_duration < audio_duration:
+                Common.logger("video").info(f"{mark}的{platform}渠道时长小于等于目标时长,不做视频拼接")
+                return ""
+            # 如果视频总时长大于音频时长,则截断视频
+            trimmed_video_list = []
+            remaining_duration = audio_duration
+            for video_file in videos:
+                video_duration = cls.get_video_duration(video_file[3])
+                if video_duration <= remaining_duration:
+                    # 如果视频时长小于或等于剩余时长,则将整个视频添加到列表中
+                    trimmed_video_list.append(video_file)
+                    remaining_duration -= video_duration
+                else:
+                    trimmed_video_list.append(video_file)
+                    break
+            return trimmed_video_list
+
+    """
+    text文件没有则创建目录
+    """
+    @classmethod
+    def bk_text_folders(cls, mark):
+        oss_id = cls.random_id()
+        v_text_url = config['PATHS']['VIDEO_PATH'] + mark + "/text/"
+        if not os.path.exists(v_text_url):
+            os.makedirs(v_text_url)
+        # srt 文件地址
+        text_path = v_text_url + mark + f"{str(oss_id)}.text"
+        return text_path
+
+    """
+    站内视频拼接
+    """
+    @classmethod
+    def zn_concatenate_videos(cls, videos, audio_duration, audio_video, platform, s_path, mark, v_oss_path):
+        text_ptah = cls.bk_text_folders(mark)
+        video_files = cls.concat_videos_with_subtitles(videos, audio_duration, platform, mark)
+        with open(text_ptah, 'w') as f:
+            for file in video_files:
+                f.write(f"file '{file}'\n")
+        Common.logger("video").info(f"{mark}的{platform}视频文件:{video_files}")
+        if video_files == "":
+            return ""
+        print(f"{mark}的{platform}:开始拼接视频喽~~~")
+        Common.logger("video").info(f"{mark}的{platform}:开始拼接视频喽~~~")
+        if os.path.exists(s_path):
+            # subtitle_cmd = f"subtitles={s_path}:force_style='Fontsize=11,Fontname=Hiragino Sans GB,Outline=0,PrimaryColour=&H000000,SecondaryColour=&H000000'"
+            subtitle_cmd = f"subtitles={s_path}:force_style='Fontsize=12,Fontname=wqy-zenhei,Bold=1,Outline=0,PrimaryColour=&H000000,SecondaryColour=&H000000'"
+        else:
+            start_time = cls.seconds_to_srt_time(0)
+            end_time = cls.seconds_to_srt_time(audio_duration)
+            with open(s_path, 'w') as f:
+                f.write(f"1\n{start_time} --> {end_time}\n分享、转发给群友\n")
+            # subtitle_cmd = "drawtext=text='分享、转发给群友':fontsize=28:fontcolor=black:x=(w-text_w)/2:y=h-text_h-15"
+            subtitle_cmd = f"subtitles={s_path}:force_style='Fontsize=12,Fontname=wqy-zenhei,Bold=1,Outline=0,PrimaryColour=&H000000,SecondaryColour=&H000000'"
+        # 背景色参数
+        background_cmd = "drawbox=y=ih-65:color=yellow@1.0:width=iw:height=0:t=fill"
+        # 多线程数
+        num_threads = 4
+        # 构建 FFmpeg 命令,生成视频
+        ffmpeg_cmd_oss = [
+            "ffmpeg",
+            "-f", "concat",
+            "-safe", "0",
+            "-i", f"{text_ptah}",  # 视频文件列表
+            "-i", audio_video,  # 音频文件
+            "-c:v", "libx264",
+            "-c:a", "aac",
+            "-threads", str(num_threads),
+            "-vf", f"scale=320x480,{background_cmd},{subtitle_cmd}",  # 添加背景色和字幕
+            "-t", str(int(audio_duration)),  # 保持与音频时长一致
+            "-map", "0:v:0",  # 映射第一个输入的视频流
+            "-map", "1:a:0",  # 映射第二个输入的音频流
+            "-y",  # 覆盖输出文件
+            v_oss_path
+        ]
+        try:
+            subprocess.run(ffmpeg_cmd_oss)
+            print("视频处理完成!")
+            if os.path.isfile(text_ptah):
+                os.remove(text_ptah)
+        except subprocess.CalledProcessError as e:
+            print(f"视频处理失败:{e}")
+        print(f"{mark}:视频拼接成功啦~~~")
+        Common.logger("video").info(f"{mark}:视频拼接成功啦~~~")
+        return v_oss_path
+
+    """
+    获取视频时长
+    """
+    @classmethod
+    def get_audio_duration(cls, video_url):
+        ffprobe_cmd = [
+            "ffprobe",
+            "-i", video_url,
+            "-show_entries", "format=duration",
+            "-v", "quiet",
+            "-of", "csv=p=0"
+        ]
+        output = subprocess.check_output(ffprobe_cmd).decode("utf-8").strip()
+        return float(output)
+    """
+    创建临时字幕
+    """
+    @classmethod
+    def create_subtitle_file(cls, srt, s_path):
+        with open(s_path, 'w') as f:
+            f.write(srt)
+    """
+    随机生成id
+    """
+    @classmethod
+    def random_id(cls):
+        now = datetime.now()
+        rand_num = random.randint(10000, 99999)
+        oss_id = "{}{}".format(now.strftime("%Y%m%d%H%M%S"), rand_num)
+        return oss_id
+    """
+    文件没有则创建目录
+    """
+    @classmethod
+    def create_folders(cls, mark):
+        oss_id = cls.random_id()
+
+        video_path_url = config['PATHS']['VIDEO_PATH'] + mark + "/"
+        # srt 目录
+        s_path_url = config['PATHS']['VIDEO_PATH'] + mark + "/srt/"
+        # oss 目录
+        v_path_url = config['PATHS']['VIDEO_PATH'] + mark + "/oss/"
+
+        if not os.path.exists(video_path_url):
+            os.makedirs(video_path_url)
+        if not os.path.exists(s_path_url):
+            os.makedirs(s_path_url)
+        if not os.path.exists(v_path_url):
+            os.makedirs(v_path_url)
+        # srt 文件地址
+        s_path = s_path_url + mark + f"{str(oss_id)}.srt"
+        # 最终生成视频地址
+        v_path = v_path_url + mark + f"{str(oss_id)}.mp4"
+        v_oss_path = v_path_url + mark + f"{str(oss_id)}oss.mp4"
+        return s_path, v_path, video_path_url, v_oss_path
+
+    """
+    获取未使用的数据
+    """
+    @classmethod
+    def get_unique_uid_data(cls, data, count):
+        unique_data_dict = {item['uid']: item for item in data}
+
+        unique_data = list(unique_data_dict.values())
+
+        if count > len(unique_data):
+            return unique_data
+        return random.sample(unique_data, count)
+
+    """
+    任务处理
+    """
+    @classmethod
+    def video(cls, data, platform):
+        mark_name = data['mark_name']  # 负责人
+        if platform == "爆款":
+            pq_ids = data["pq_id"]
+            pq_ids_list = pq_ids.split(',')  # 账号ID
+            mark = data["mark"]  # 标示
+            feishu_id = data["feishu_id"]  # 飞书文档ID
+            video_call = data["video_call"]  # 脚本sheet
+            list_data = Material.get_allbk_data(feishu_id, video_call)
+            if len(list_data) == 0:
+                Feishu.bot('recommend', 'AGC完成通知', f'爆款任务数为0,不做拼接', mark, mark_name)
+                return mark
+        elif platform == "常规":
+            pq_ids = data["pq_id"]# 账号ID
+            pq_ids_list = pq_ids.split(',')
+            mark = data["mark"]
+            feishu_id = data["feishu_id"]  # 飞书文档ID
+            video_call = data["video_call"]  # 脚本sheet
+            video_count = data["video_count"]
+            if int(video_count) == 0:
+                Feishu.bot('recommend', 'AGC完成通知', f'常规任务数为{video_count},不做拼接', mark, mark_name)
+                return mark
+            data_list = Material.get_all_data(feishu_id, video_call, mark)
+            list_data = cls.get_unique_uid_data(data_list, int(video_count))
+        elif platform == "跟随":
+            pq_ids = data["pq_id"]
+            pq_ids_list = pq_ids.split(',')  # 账号ID
+            mark = data["mark"]  # 标示
+            feishu_id = data["feishu_id"]  # 飞书文档ID
+            video_call = data["video_call"]
+            video_count = data["video_count"]
+            if int(video_count) == 0:
+                Feishu.bot('recommend', 'AGC完成通知', f'跟随任务数为{video_count},不做拼接', mark, mark_name)
+                return mark
+            data_list, videos = Material.get_all_data(feishu_id, video_call, mark)
+            list_data = cls.get_unique_uid_data(data_list, int(video_count))
+        s_path, v_path, video_path_url, v_oss_path = cls.create_folders(mark)
+        count = 0
+        for d_list in list_data:
+            try:
+                uid = d_list['uid']  # 音频id
+                srt = d_list['text']  # srt
+                cover = d_list['cover']
+                audio_title = d_list['title']
+                if srt:
+                    # 创建临时字幕文件
+                    cls.create_subtitle_file(srt, s_path)
+                    Common.logger("bk_video").info(f"S{mark} 文件目录创建成功")
+                else:
+                    srt_new = SRT.getSrt(int(uid))
+                    if srt_new:
+                        current_time = datetime.now()
+                        formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
+                        values = [[mark, str(uid), srt_new, formatted_time]]
+                        random_wait_time = random.uniform(0.5, 2.5)
+                        time.sleep(random_wait_time)
+                        Feishu.insert_columns("IbVVsKCpbhxhSJtwYOUc8S1jnWb", "jd9qD9", "ROWS", 1, 2)
+                        time.sleep(random_wait_time)
+                        Feishu.update_values("IbVVsKCpbhxhSJtwYOUc8S1jnWb", "jd9qD9", "A2:Z2", values)
+                        # 创建临时字幕文件
+                        cls.create_subtitle_file(srt_new, s_path)
+                        Common.logger("video").info(f"S{mark}的{platform}渠道RT 文件目录创建成功")
+                # 获取音频
+                audio_video = PQ.get_audio_url(uid)
+                Common.logger("video").info(f"{mark}的{platform}渠道获音频成功")
+                audio_duration = cls.get_audio_duration(audio_video)
+                Common.logger("video").info(f"{mark}的{platform}渠道获取需要拼接的音频秒数为:{audio_duration}")
+                if platform != "常规":
+                    if platform == "爆款":
+                        videos = d_list['video']
+                        if ',' in videos:
+                            videos = str(videos).split(',')
+                        else:
+                            videos = [str(videos)]
+                    video_id = random.choice(videos)
+
+                    video_url = PQ.get_audio_url(video_id)
+                    download_video = Oss.download_url(video_url, video_path_url, str(video_id))
+                    if download_video:
+                        video_files = cls.zn_concatenate_videos(download_video, audio_duration, audio_video, platform,
+                                                                s_path, mark, v_oss_path)
+
+                        if os.path.isfile(v_oss_path):
+                            Common.logger("video").info(f"{mark}的{platform}渠道新视频生成成功")
+                        else:
+                            Common.logger("video").info(f"{mark}的{platform}渠道新视频生成失败")
+                            continue
+                else:
+                    chnnel_count = int(len(list_data)/2)
+                    if chnnel_count >= count:
+                        channel = "kuaishou"
+                    else:
+                        channel = "douyin"
+                    user_id = sqlHelp.get_user_id(channel, mark)
+                    url_list, user = sqlHelp.get_url_list(user_id, mark, "35")
+                    videos = [list(item) for item in url_list]
+                    videos = Oss.get_oss_url(videos, video_path_url)
+                    video_files = cls.zw_concatenate_videos(videos, audio_duration, audio_video, platform, s_path, v_path,
+                                                         mark, v_oss_path)
+                    if video_files == "":
+                        Common.logger("video").info(f"{mark}的{platform}渠道使用拼接视频为空")
+                        continue
+                    if os.path.isfile(v_oss_path):
+                        Common.logger("video").info(f"{mark}的{platform}渠道新视频生成成功")
+                    else:
+                        Common.logger("video").info(f"{mark}的{platform}渠道新视频生成失败")
+                        continue
+                # 随机生成视频oss_id
+                oss_id = cls.random_id()
+                Common.logger("video").info(f"{mark}的{platform}渠道上传到 OSS 生成视频id为:{oss_id}")
+                oss_object_key = Oss.stitching_sync_upload_oss(v_oss_path, oss_id)
+                status = oss_object_key.get("status")
+                if status == 200:
+                    # 获取 oss 视频地址
+                    oss_object_key = oss_object_key.get("oss_object_key")
+                    Common.logger("video").info(f"{mark}的{platform}渠道拼接视频发送成功,OSS 地址:{oss_object_key}")
+                    time.sleep(10)
+                    if platform != "常规":
+                        # 已使用视频存入数据库
+                        Common.logger("video").info(f"{mark}的{platform}渠道开始已使用视频存入数据库")
+                        sqlHelp.insert_videoAudio(video_files, uid, platform, mark)
+                        Common.logger("video").info(f"{mark}的{platform}渠道完成已使用视频存入数据库")
+                    Common.logger("video").info(f"{mark}的{platform}渠道开始视频添加到对应用户")
+                    new_video_id = PQ.insert_piaoquantv(oss_object_key, audio_title, pq_ids_list, cover, uid)
+                    if new_video_id:
+                        Common.logger("video").info(f"{mark}的{platform}渠道视频添加到对应用户成功")
+                    if os.path.isfile(v_oss_path):
+                        os.remove(v_oss_path)
+                    if os.path.isfile(v_path):
+                        os.remove(v_path)
+                    if os.path.isfile(s_path):
+                        os.remove(s_path)
+                        # 清空所有mp4数据
+                    cls.clear_mp4_files(video_path_url)
+                    count += 1
+                    current_time = datetime.now()
+                    formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
+                    if platform == "常规":
+                        third_chars = [j[2] for j in video_files]
+                        data = ",".join(third_chars)
+                        user_id = user
+                    else:
+                        user_id = video_id
+                        data = ''
+                    values = [[mark, str(uid), user_id, data, audio_title, new_video_id, formatted_time]]
+                    if mark_name == "穆新艺":
+                        sheet = '50b8a1'
+                    elif mark_name == "信欣":
+                        sheet = 'UyVK7y'
+                    elif mark_name == "范军":
+                        sheet = 'uP3zbf'
+                    elif mark_name == "鲁涛":
+                        sheet = 'iDTHt4'
+                    elif mark_name == "余海涛":
+                        sheet = 'R1jIeT'
+                    elif mark_name == "罗情":
+                        sheet = 'iuxfAt'
+                    Feishu.insert_columns("LAn9so7E0hxRYht2UMEcK5wpnMj", sheet, "ROWS", 1, 2)
+                    random_wait_time = random.uniform(0.5, 2.5)
+                    time.sleep(random_wait_time)
+                    Feishu.update_values("LAn9so7E0hxRYht2UMEcK5wpnMj", sheet, "A2:Z2", values)
+            except Exception as e:
+                Common.logger("bk_video").warning(f"{mark}的视频拼接失败:{e}\n")
+                continue
+        if "-" in mark:
+            name = mark.split("-")[0]
+        else:
+            name = mark
+        Feishu.bot('recommend', 'AGC完成通知', f'今日{platform}任务拼接任务完成,共{count}条', name, mark_name)
+
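
For reference, the new AGC entry point is driven the same way the revised agc_job.py drives it; an illustrative one-off run (not part of this commit; the task index and platform label are placeholders) could look like:

    from common import Material
    from video_agc.agc_video import AGC

    tasks = Material.feishu_list()   # task dicts: pq_id, mark, feishu_id, video_call, ...
    if tasks:
        AGC.video(tasks[0], "常规")  # platform is one of "常规" / "跟随" / "爆款"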