
Add script-follow (脚本跟随) code

zhangyong 1 year ago
parent
commit
077b9dc0a3

+ 60 - 0
agc_assign_main.py

@@ -0,0 +1,60 @@
+from common import Material, Common
+from video_agc.agc_video_method import AgcVidoe
+import concurrent.futures
+import schedule
+import time
+
+
+# Track the usernames that have already been returned today
+returned_usernames_today = []
+def video_start(user_data):
+    global returned_usernames_today
+    user_data_mark = user_data["mark"]
+    # Prepare to run the video-generation script
+    if user_data_mark is not None and user_data_mark in returned_usernames_today:
+        Common.logger("video").info(f"视频脚本参数中的用户名 {user_data_mark} 今天已经返回过,不再启动线程。今天已经返回的用户名:{returned_usernames_today}")
+        print(f"视频脚本参数中的用户名 {user_data_mark} 今天已经返回过,不再启动线程。")
+        return  # If this username has already been returned today, do not start a thread
+    else:
+        print(f"视频脚本参数{user_data}")
+        mark = AgcVidoe.video_gs_stitching(user_data)
+        print(f"返回用户名{mark}")
+        if mark:
+            Common.logger("video").info(f"返回用户名{mark}")
+            returned_usernames_today.append(user_data_mark)
+
+# gs_name_list = Material.feishu_gs_list()
+# video_start(gs_name_list[0])
+
+
+def clear_returned_usernames():
+    returned_usernames_today.clear()
+    print("returned_usernames_today 已清空")
+
+# Define the scheduled task
+def video_task():
+    print("开始执行生成视频脚本.")
+    data = Material.feishu_list()
+    # Create a thread pool
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        futures = [executor.submit(video_start, user_data) for user_data in data]
+        # Wait for all tasks to finish
+        for future in concurrent.futures.as_completed(futures):
+            try:
+                # Get each task's result
+                result = future.result()
+                print("处理结果:", result)
+            except Exception as e:
+                print("处理任务时出现异常:", e)
+    print("执行生成视频脚结束")
+
+# 每天0点10清空集合
+schedule.every().day.at("00:05").do(clear_returned_usernames)
+
+#每10分钟执行次脚本
+schedule.every(10).minutes.do(video_task)
+
+
+while True:
+    schedule.run_pending()
+    time.sleep(1)
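As a side note, the daily dedup-and-reset pattern used in agc_assign_main.py can be reduced to a minimal standalone sketch. The job names below are illustrative only, and a set is assumed instead of a list purely for O(1) membership checks:

import schedule
import time

completed_today = set()  # marks that already produced a video today

def run_once_per_day(mark):
    # Skip marks that already finished today; they become eligible again after the nightly reset.
    if mark in completed_today:
        return
    print(f"running follow-script job for {mark}")
    completed_today.add(mark)

def reset_daily_state():
    completed_today.clear()

schedule.every().day.at("00:05").do(reset_daily_state)  # nightly reset
schedule.every(10).minutes.do(lambda: [run_once_per_day(m) for m in ("mark01", "mark02")])

while True:
    schedule.run_pending()
    time.sleep(1)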

+ 1 - 2
agc_main.py

@@ -1,5 +1,4 @@
-from common import Feishu, Material, Common
-from common.sql_help import sqlHelp
+from common import Material, Common
 from video_agc.agc_video_method import AgcVidoe
 import concurrent.futures
 

+ 109 - 11
common/material.py

@@ -35,10 +35,38 @@ class Material():
             feishu_id = row[3]
             video_call = row[4]
             pq_id = row[7]
+
+
             number = {"mark": mark, "feishu_id": feishu_id, "video_call": video_call, "pq_id": pq_id, "mark_name": mark_name}
             list.append(number)
         return list
 
+    # Get every row of the summary sheet
+    @classmethod
+    def feishu_gs_list(cls):
+        summary = Feishu.get_values_batch("summary", "gGiXDp")
+        list = []
+        for row in summary[1:]:
+            mark = row[0]
+            mark_name = row[1]
+            feishu_id = row[3]
+            video_call = row[4]
+            pq_id = row[7]
+            sum_count = row[8]
+            sheet = row[5]
+            zd_count = row[8]
+            platform_list = []
+            if sheet:
+                parts = sheet.split(',')
+                for part in parts:
+                    sub_parts = part.split('--')
+                    platform_list.append(sub_parts)
+
+            number = {"mark": mark, "feishu_id": feishu_id, "video_call": video_call, "pq_id": pq_id,
+                      "mark_name": mark_name, "sum_count": sum_count, "platform_list": platform_list, "zd_count": zd_count}
+            list.append(number)
+        return list
+
     # 获取管理后台cookie
     @classmethod
     def get_houtai_cookie(cls):
@@ -76,6 +104,48 @@ class Material():
                 list.append(number)
         return list
 
+    # Get all users pending crawl from the follow-task summary sheet
+    @classmethod
+    def get_all_gs_user(cls, type):
+        summary = Feishu.get_values_batch("summary", "gGiXDp")
+        list = []
+        for row in summary[1:]:
+            mark = row[0]
+            mark_name = row[1]
+            feishu_id = row[3]
+            sheet = row[5]
+            token = row[6]
+            zn_id = row[4]
+            parts = zn_id.split(',')
+            zn_result = []
+            for part in parts:
+                sub_parts = part.split('--')
+                zn_result.append(sub_parts)
+            zn_link = zn_result[0][0]  # script link
+            douyin = ''
+            kuaishou = ''
+            if sheet:
+                parts = sheet.split(',')
+                result = []
+                for part in parts:
+                    sub_parts = part.split('--')
+                    result.append(sub_parts)
+                douyin = result[0]
+                kuaishou = result[1]
+            if type == "douyin":
+                number = {"mark": mark, "feishu_id": feishu_id, "channel": douyin, "token": token,
+                          "mark_name": mark_name, "sheet": '1'}
+                list.append(number)
+            elif type == "kuaishou":
+                number = {"mark": mark, "feishu_id": feishu_id, "channel": kuaishou, "token": token,
+                          "mark_name": mark_name, "sheet": '1'}
+                list.append(number)
+            elif type == "zhannei":
+                number = {"mark": mark, "feishu_id": feishu_id, "channel": zn_link, "token": token,
+                          "mark_name": mark_name, "sheet": None}
+                list.append(number)
+        return list
+
     # 获取抖音 cookie
     @classmethod
     def get_cookie(cls, feishu_id, token, channel):
@@ -87,17 +157,21 @@ class Material():
     # 获取抖音视频链接 存入数据库
     @classmethod
     def insert_user(cls, feishu_id, channel_id, mark, channel):
+        user_list = []
         # 获取抖音视频链接
         douyin = Feishu.get_values_batch(feishu_id, channel_id)
         # 提取账号昵称和账号主页链接
         for row in douyin[1:]:
             uid = row[1]
-            insert_sql = f"""INSERT INTO agc_channel_data (user_id, channel, mark) values ('{uid}', '{channel}', '{mark}')"""
-            MysqlHelper.update_values(
-                sql=insert_sql,
-                env="prod",
-                machine="",
-            )
+            if uid:
+                insert_sql = f"""INSERT INTO agc_channel_data (user_id, channel, mark) values ('{uid}', '{channel}', '{mark}')"""
+                MysqlHelper.update_values(
+                    sql=insert_sql,
+                    env="prod",
+                    machine="",
+                )
+                user_list.append(uid)
+        return user_list
 
     @classmethod
     def get_uid(cls, uid, mark):
@@ -111,25 +185,49 @@ class Material():
     @classmethod
     def get_all_data(cls, feishu_id, link, mark):
         list = []
-        title_list = []
-        # 获取音频类型+字幕+标题
+        video_list = []
+        # Get audio type + subtitles
         all_data = Feishu.get_values_batch(feishu_id, link)
         for row in all_data[1:]:
             uid = row[1]
             text = row[2]
-            title = row[3]
+            video = row[0]
             number = {"uid": uid, "text": text}
             if uid:
                 list.append(number)
-            title_list.append(title)
+            if video:
+                video_list.append(video)
         while True:
             list1 = random.choice(list)
             uid1 = list1['uid']
             srt = list1['text']
             id_list = cls.get_uid(uid1, mark)
             if len(id_list) < 2:
-                return uid1, srt, title_list
+                return uid1, srt, video_list
 
+    # Get in-site (站内) video IDs
+    @classmethod
+    def get_zn_user(cls, feishu_id, link):
+        video_list = []
+        # Get the in-site video IDs from the sheet
+        all_data = Feishu.get_values_batch(feishu_id, link)
+        for row in all_data[1:]:
+            video = row[0]
+            if video:
+                video_list.append(video)
+        return video_list
+
+    # Get user/material IDs from a sheet
+    @classmethod
+    def get_user_id(cls, feishu_id, channel_id):
+        user_list = []
+        # Read the account sheet from Feishu
+        douyin = Feishu.get_values_batch(feishu_id, channel_id)
+        # Extract the ID from each row
+        for row in douyin[1:]:
+            uid = row[1]
+            user_list.append(uid)
+        return user_list
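A note on the cell format parsed by feishu_gs_list and get_all_gs_user above: several columns pack multiple values as '--'-separated pairs joined by commas. A minimal sketch of that parsing, with made-up sample values:

def parse_pairs(cell):
    # "A--B,C--D" -> [["A", "B"], ["C", "D"]]; an empty cell yields an empty list
    return [part.split('--') for part in cell.split(',')] if cell else []

# Hypothetical sheet value: one "<sheet tab>--<channel label>" pair per platform.
sample_sheet_cell = "tab_dy--douyin,tab_ks--kuaishou"
pairs = parse_pairs(sample_sheet_cell)
print(pairs[0])  # ['tab_dy', 'douyin']   -> becomes data['channel'] for the Douyin crawler
print(pairs[1])  # ['tab_ks', 'kuaishou'] -> becomes data['channel'] for the Kuaishou crawler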
 
 
 

+ 82 - 0
data_assign_main.py

@@ -0,0 +1,82 @@
+from common import Material
+from extract_data.douyin.douyin_author import douyinAuthor
+from extract_data.kuaishou.kuaishou_author import kuaishouAuthor
+from extract_data.zhannei.zhannei_author import ZhanNeiAuthor
+
+import schedule
+import time
+import concurrent.futures
+
+
+def douyin_start(user_data):
+    print(f"执行抖音数据抓取{user_data}")
+    douyinAuthor.get_videoList(user_data)
+
+def kuaishou_start(user_data):
+    print(f"执行快手数据抓取{user_data}")
+    kuaishouAuthor.get_kuaishou_videoList(user_data)
+
+def zhannei_start(user_data):
+    print(f"执行站内数据抓取{user_data}")
+    ZhanNeiAuthor.get_zhannei_videoList(user_data)
+
+
+# data = Material.get_all_gs_user("douyin")
+# douyin_start(data[0])
+
+# Scheduled task: in-site (zhannei) data crawl
+def zhannei_task():
+    data = Material.get_all_gs_user("zhannei")
+    # Create a thread pool; only rows without a sheet entry are in-site tasks
+    valid_data = [user_data for user_data in data if user_data['sheet'] is None]
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        futures = [executor.submit(zhannei_start, user_data) for user_data in valid_data]
+        # Wait for all tasks to finish
+        for future in concurrent.futures.as_completed(futures):
+            # Get each task's result
+            result = future.result()
+            print("处理结果:", result)
+    print("站内数据抓取定时任务执行完成")
+
+
+# Scheduled task: Douyin data crawl
+def douyin_task():
+    data = Material.get_all_gs_user("douyin")
+    # Create a thread pool; only rows with a sheet entry are off-site tasks
+    valid_data = [user_data for user_data in data if user_data['sheet'] is not None]
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        futures = [executor.submit(douyin_start, user_data) for user_data in valid_data]
+        # Wait for all tasks to finish
+        for future in concurrent.futures.as_completed(futures):
+            # Get each task's result
+            result = future.result()
+            print("处理结果:", result)
+    print("抖音数据抓取定时任务执行完成")
+
+
+# Scheduled task: Kuaishou data crawl
+def kuanshou_task():
+    data = Material.get_all_gs_user("kuaishou")
+    # Create a thread pool; only rows with a sheet entry are off-site tasks
+    valid_data = [user_data for user_data in data if user_data['sheet'] is not None]
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        futures = [executor.submit(kuaishou_start, user_data) for user_data in valid_data]
+        # Wait for all tasks to finish
+        for future in concurrent.futures.as_completed(futures):
+            # Get each task's result
+            result = future.result()
+            print("处理结果:", result)
+    print("快手数据抓取定时任务执行完成.")
+
+
+schedule.every().day.at("18:40").do(kuanshou_task)
+schedule.every().day.at("18:30").do(douyin_task)
+schedule.every().day.at("18:00").do(zhannei_task)
+
+
+# Keep running until manually terminated
+while True:
+    schedule.run_pending()
+    time.sleep(1)
+
+
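For orientation, each *_start function above receives one entry from Material.get_all_gs_user(type); a hedged example of the expected shape (all values here are invented for illustration):

# Illustrative only: keys mirror Material.get_all_gs_user("douyin"); the values are made up.
user_data = {
    "mark": "mark01",
    "mark_name": "owner",
    "feishu_id": "shtxxxx",
    "channel": ["tab_dy", "douyin"],  # [Feishu sheet tab, channel label]
    "token": "token_xxxx",
    "sheet": "1",                     # non-None marks an off-site (douyin/kuaishou) task
}
douyin_start(user_data)  # wraps douyinAuthor.get_videoList(user_data)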

+ 2 - 5
extract_data/douyin/douyin_author.py

@@ -68,14 +68,11 @@ class douyinAuthor():
             feishu_id = data['feishu_id']
             channel_id = data['channel'][0]
             channel = data['channel'][1]
-            Material.insert_user(feishu_id, channel_id, mark, channel)
+            user_list = Material.insert_user(feishu_id, channel_id, mark, channel)
             cookie = Material.get_cookie(feishu_id, token, channel)
-            # 获取 用户主页id
-            user_list = cls.get_videoUserId(mark)
             if len(user_list) == 0:
                 return
-            for i in user_list:
-                account_id = i[0].replace('(', '').replace(')', '').replace(',', '')
+            for account_id in user_list:
                 Common.logger("douyin").info(f"用户主页ID:{account_id}")
                 next_cursor = 0
                 count = 0

+ 3 - 6
extract_data/kuaishou/kuaishou_author.py

@@ -64,14 +64,11 @@ class kuaishouAuthor():
             feishu_id = data['feishu_id']
             channel_id = data['channel'][0]
             channel = data['channel'][1]
-            Material.insert_user(feishu_id, channel_id, mark, channel)
+            user_list = Material.insert_user(feishu_id, channel_id, mark, channel)
             cookie = Material.get_cookie(feishu_id, token, channel)
-            # 获取 用户主页id
-            user_list = cls.get_kuaishou_videoUserId(mark)
             if len(user_list) == 0:
                 return
-            for i in user_list:
-                account_id = i[0].replace('(', '').replace(')', '').replace(',', '')
+            for account_id in user_list:
                 Common.logger("kuaishou").info(f"用户主页ID:{account_id}")
                 pcursor = ""
                 count = 0
@@ -149,7 +146,7 @@ class kuaishouAuthor():
                             status = oss_object_key.get("status")
                             # 发送 oss
                             oss_object_key = oss_object_key.get("oss_object_key")
-                            Common.logger("kuaishou").info(f"抖音视频链接oss发送成功,oss地址:{oss_object_key}")
+                            Common.logger("kuaishou").info(f"视频链接oss发送成功,oss地址:{oss_object_key}")
                             if status == 200:
                                 cls.insert_videoUrl(video_id, account_id, oss_object_key, mark)
                                 Common.logger("kuaishou").info(

+ 0 - 0
extract_data/zhannei/__init__.py


+ 113 - 0
extract_data/zhannei/zhannei_author.py

@@ -0,0 +1,113 @@
+# -*- coding: utf-8 -*-
+# @Time: 2024/01/18
+import datetime
+import os
+import random
+import sys
+import time
+from datetime import datetime
+import requests
+import json
+import urllib3
+sys.path.append(os.getcwd())
+from common.aliyun_oss_uploading import Oss
+from common.common import Common
+from common.material import Material
+from common.feishu import Feishu
+from common.db import MysqlHelper
+from requests.adapters import HTTPAdapter
+
+
+class ZhanNeiAuthor():
+    """
+    Store OSS video URLs in the database
+    """
+    @classmethod
+    def insert_videoUrl(cls, video_id, account_id, oss_object_key, mark):
+        current_time = datetime.now()
+        formatted_time = current_time.strftime("%Y-%m-%d %H:%M")
+        insert_sql = f"""INSERT INTO agc_video_url (video_id, account_id, oss_object_key, time, status, mark) values ("{video_id}", "{account_id}", "{oss_object_key}", "{formatted_time}", 1, "{mark}")"""
+        MysqlHelper.update_values(
+            sql=insert_sql,
+            env="prod",
+            machine="",
+        )
+
+    """
+    Get in-site (站内) user homepage IDs
+    """
+    @classmethod
+    def get_zhannei_videoUserId(cls, mark):
+        select_user_sql = f"""select user_id, channel from agc_channel_data where mark = '{mark}' and  channel  = '站内' ORDER BY id DESC;"""
+        user_list = MysqlHelper.get_values(select_user_sql, "prod")
+        return user_list
+
+    """
+    Check whether the given video_id already exists in the database
+    """
+    @classmethod
+    def select_videoUrl_id(cls, video_id, mark):
+        select_user_sql = f"""select video_id from agc_video_url where video_id='{video_id}' and mark='{mark}' ;"""
+        user_list = MysqlHelper.get_values(select_user_sql, "prod")
+        if user_list:
+            return True
+        else:
+            return False
+
+    """站内读取数据 将数据存储到oss上"""
+    @classmethod
+    def get_zhannei_videoList(cls, data):
+        try:
+            mark = data['mark']
+            mark_name = data['mark_name']
+            feishu_id = data['feishu_id']
+            channel_id = data['channel']
+            channel = "站内"
+            user_list = Material.insert_user(feishu_id, channel_id, mark, channel)
+
+            if len(user_list) == 0:
+                return
+            for account_id in user_list:
+                time.sleep(5)
+                Common.logger("zhannei").info(f"用户主页ID:{account_id}")
+                cookie = Material.get_houtai_cookie()
+                url = f"https://admin.piaoquantv.com/manager/video/detail/{account_id}"
+                payload = {}
+                headers = {
+                    'authority': 'admin.piaoquantv.com',
+                    'accept': 'application/json, text/plain, */*',
+                    'accept-language': 'zh-CN,zh;q=0.9',
+                    'cache-control': 'no-cache',
+                    'cookie': cookie,
+                    'pragma': 'no-cache',
+                    'referer': f'https://admin.piaoquantv.com/cms/post-detail/{account_id}/detail',
+                    'sec-ch-ua': '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
+                    'sec-ch-ua-mobile': '?0',
+                    'sec-ch-ua-platform': '"macOS"',
+                    'sec-fetch-dest': 'empty',
+                    'sec-fetch-mode': 'cors',
+                    'sec-fetch-site': 'same-origin',
+                    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
+                }
+
+                response = requests.request("GET", url, headers=headers, data=payload)
+                data = response.json()
+                code = data["code"]
+                if code != 0:
+                    Common.logger("video").info(
+                        f"未登录,请更换cookie,{data}")
+                    Feishu.bot('recommend', '管理后台', '管理后台cookie失效,请及时更换~', mark, mark_name)
+                    return
+                video_url = data["content"]["transedVideoPath"]
+                channel_name = mark+'/zhannei'
+                oss_object_key = Oss.video_sync_upload_oss(video_url, account_id, account_id, channel_name)
+                status = oss_object_key.get("status")
+                # Extract the OSS object key from the upload result
+                oss_object_key = oss_object_key.get("oss_object_key")
+                Common.logger("zhannei").info(f"站内视频链接oss发送成功,oss地址:{oss_object_key}")
+                if status == 200:
+                    cls.insert_videoUrl(account_id, account_id, oss_object_key, mark)
+                    Common.logger("zhannei").info(f"视频地址插入数据库成功,视频id:{account_id},用户主页id:{account_id},视频储存地址:{oss_object_key}")
+        except Exception as e:
+            Common.logger("zhannei").warning(f"抓取异常:{e}\n")
+            return
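As a rough usage sketch, get_zhannei_videoList consumes the task dict that data_assign_main.py builds from Material.get_all_gs_user("zhannei"); the values below are illustrative, and the response fields shown are only those the code reads, not an official admin-API schema:

# Illustrative only: keys mirror Material.get_all_gs_user("zhannei"); values are made up.
task = {
    "mark": "mark01",
    "mark_name": "owner",
    "feishu_id": "shtxxxx",
    "channel": "tab_zn",   # zn_link: the in-site script sheet tab
    "token": "token_xxxx",
    "sheet": None,         # None marks an in-site (zhannei) task
}
# The admin endpoint is expected to return roughly:
#   {"code": 0, "content": {"transedVideoPath": "https://.../video.mp4"}}
# A non-zero code triggers the "cookie expired" Feishu alert.
ZhanNeiAuthor.get_zhannei_videoList(task)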

+ 181 - 33
video_agc/agc_video_method.py

@@ -8,7 +8,7 @@ import time
 import urllib.parse
 
 import requests
-from datetime import datetime
+from datetime import datetime, timedelta
 
 sys.path.append(os.getcwd())
 from common.db import MysqlHelper
@@ -23,16 +23,27 @@ class AgcVidoe():
 
     # 获取未使用的视频链接
     @classmethod
-    def get_url_list(cls, user, mark, limit_count):
-        current_time = datetime.now()
-        formatted_time = current_time.strftime("%Y-%m-%d")
-        url_list = f"""SELECT a.video_id,a.account_id,a.oss_object_key FROM agc_video_url a WHERE NOT EXISTS (
-                    SELECT video_id
-                    FROM agc_video_deposit b
-                    WHERE a.oss_object_key = b.oss_object_key AND b.time = '{formatted_time}'
-                ) AND a.account_id = {user} and a.`status` = 1 and a.mark = '{mark}'  limit  {limit_count};"""
-        url_list = MysqlHelper.get_values(url_list, "prod")
-        return url_list
+    def get_url_list(cls, user_list, mark, limit_count):
+        for i in range(5):
+            user = random.choice(user_list)
+            current_time = datetime.now()
+            three_days_ago = current_time - timedelta(days=3)
+            formatted_current_time = current_time.strftime("%Y-%m-%d")
+            formatted_three_days_ago = three_days_ago.strftime("%Y-%m-%d")
+            url_list = f"""SELECT a.video_id,a.account_id,a.oss_object_key FROM agc_video_url a WHERE NOT EXISTS (
+                                SELECT video_id
+                                FROM agc_video_deposit b
+                                WHERE a.oss_object_key = b.oss_object_key AND b.time >= '{formatted_three_days_ago}' AND b.time <= '{formatted_current_time}'
+                            ) AND a.account_id = {user} and a.`status` = 1 and a.mark = '{mark}'  limit    {limit_count};"""
+
+            url_list = MysqlHelper.get_values(url_list, "prod")
+            if limit_count == 1:
+                if url_list:
+                    return url_list
+            else:
+                if len(url_list) >= 30:
+                    return url_list
+        return None
 
 
     # 随机生成id
@@ -62,6 +73,42 @@ class AgcVidoe():
         count = str(count).replace('(', '').replace(')', '').replace(',', '')
         return int(count)
 
+    # Get the number of follow-script videos already stored today
+    @classmethod
+    def get_link_gs_count(cls, mark):
+        current_time = datetime.now()
+        formatted_time = current_time.strftime("%Y-%m-%d")
+        count = f"""SELECT COUNT(*) AS total_count FROM ( SELECT audio, account_id FROM agc_video_deposit WHERE time = '{formatted_time}' and mark = '{mark}' GROUP BY audio, account_id) AS subquery;"""
+        count = MysqlHelper.get_values(count, "prod")
+        if count == None:
+            count = 0
+        count = str(count).replace('(', '').replace(')', '').replace(',', '')
+        return int(count)
+
+    # Get the number of follow-script off-site (站外) videos already stored today
+    @classmethod
+    def get_link_zw_count(cls, mark, platform):
+        current_time = datetime.now()
+        formatted_time = current_time.strftime("%Y-%m-%d")
+        count = f"""SELECT COUNT(*) AS total_count FROM ( SELECT audio, account_id FROM agc_video_deposit WHERE time = '{formatted_time}' AND platform != '{platform}' and mark = '{mark}' GROUP BY audio, account_id) AS subquery;"""
+        count = MysqlHelper.get_values(count, "prod")
+        if count == None:
+            count = 0
+        count = str(count).replace('(', '').replace(')', '').replace(',', '')
+        return int(count)
+
+    # Get the number of follow-script in-site (站内) videos already stored today
+    @classmethod
+    def get_link_zn_count(cls, mark, platform):
+        current_time = datetime.now()
+        formatted_time = current_time.strftime("%Y-%m-%d")
+        count = f"""SELECT COUNT(*) AS total_count FROM ( SELECT audio, account_id FROM agc_video_deposit WHERE time = '{formatted_time}' AND platform = '{platform}' and mark = '{mark}' GROUP BY audio, account_id) AS subquery;"""
+        count = MysqlHelper.get_values(count, "prod")
+        if count == None:
+            count = 0
+        count = str(count).replace('(', '').replace(')', '').replace(',', '')
+        return int(count)
+
     @classmethod
     def create_subtitle_file(cls, srt, s_path):
         # 创建临时字幕文件
@@ -75,13 +122,12 @@ class AgcVidoe():
 
     # 新生成视频上传到对应账号下
     @classmethod
-    def insert_piaoquantv(cls, oss_object_key, title_list, pq_ids_list):
+    def insert_piaoquantv(cls, oss_object_key, audio_title, pq_ids_list):
         for i in range(2):
-            title_list = [item for item in title_list if item is not None]
-            title = random.choice(title_list)
+
             url = "https://vlogapi.piaoquantv.com/longvideoapi/crawler/video/send"
             payload = dict(pageSource='vlog-pages/post/post-video-post', videoPath=oss_object_key, width='720',
-                           height='1280', fileExtensions='mp4', viewStatus='1', title=title,
+                           height='1280', fileExtensions='mp4', viewStatus='1', title=audio_title,
                            careModelStatus='1',
                            token='f04f58d6e664cbc9902660a1e8d20ce6cd7fdb0f', loginUid=pq_ids_list[i],
                            versionCode='719',
@@ -93,7 +139,7 @@ class AgcVidoe():
                            hotSenceType='1089', id='1050', channel='pq')
 
             payload['videoPath'] = oss_object_key
-            payload['title'] = title
+            payload['title'] = audio_title
             data = urllib.parse.urlencode(payload)
             headers = {
                 'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 15_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.44(0x18002c2d) NetType/WIFI Language/zh_CN',
@@ -102,8 +148,7 @@ class AgcVidoe():
                 'Content-Type': 'application/x-www-form-urlencoded',
                 'Cookie': 'JSESSIONID=A60D96E7A300A25EA05425B069C8B459'
             }
-            response = requests.post(url, data=data, headers=headers)
-            data = response.json()
+            requests.post(url, data=data, headers=headers)
         return True
 
     # 获取视频链接
@@ -140,7 +185,8 @@ class AgcVidoe():
                 Feishu.bot('recommend', '管理后台', '管理后台cookie失效,请及时更换~', mark, mark_name)
                 return ""
             audio_url = data["content"]["transedVideoPath"]
-            return audio_url
+            audio_title = data["content"]['title']
+            return audio_url, audio_title
         except Exception as e:
             Common.logger("video").warning(f"获取音频视频链接失败:{e}\n")
             return ""
@@ -327,6 +373,9 @@ class AgcVidoe():
         Common.logger("video").info(f"{mark}的{platform}:视频拼接成功啦~~~")
         return video_files
 
+
+
+    # Regular (non-follow) task
     @classmethod
     def video_stitching(cls, ex_list):
 
@@ -357,7 +406,7 @@ class AgcVidoe():
         channel = ['douyin', 'kuaishou', 'koubo']
         try:
             for platform in channel:
-                limit_count = 40
+                limit_count = 35
                 count = cls.get_link_count(mark, platform)
                 if platform == "douyin" and count >= yhmw_count:
                     continue
@@ -367,26 +416,18 @@ class AgcVidoe():
                     link = result[1][0]
                     limit_count = 1
                     if count >= kb_count or kb_count == 0:
-                        Feishu.bot('recommend', 'AGC完成通知', '今日自制视频拼接任务完成啦~', mark, mark_name)
+                        Feishu.bot('recommend', 'AGC完成通知', '今日常规自制视频拼接任务完成啦~', mark, mark_name)
                         return mark
                 # 获取音频类型+字幕+标题
-                uid, srt, title_list = Material.get_all_data(feishu_id, link, mark)
+                uid, srt, video_list = Material.get_all_data(feishu_id, link, mark)
                 # 获取已入库的用户id
                 user_id = cls.get_user_id(platform, mark)
-                if user_id:
-                    user = random.choice(user_id)
-                else:
-                    Common.logger("video").info(f"{mark}的{platform} 渠道无抓取的数据")
-                    return ""
-                user = str(user).replace('(', '').replace(')', '').replace(',', '')
-                Common.logger("video").info(f"{mark}的{platform}渠道获取的用户ID:{user}")
                 # 获取 未使用的视频链接
-                url_list = cls.get_url_list(user, mark, limit_count)
+                url_list = cls.get_url_list(user_id, mark, limit_count)
                 if url_list == None:
                     Common.logger("video").info(f"未使用视频链接为空:{url_list}")
                     return ''
                 videos = [list(item) for item in url_list]
-
                 # 下载视频
                 videos = Oss.get_oss_url(videos, video_path_url)
 
@@ -395,7 +436,7 @@ class AgcVidoe():
                     cls.create_subtitle_file(srt, s_path)
                     Common.logger("video").info(f"S{mark}的{platform}渠道RT 文件目录创建成功")
                 # 获取音频
-                audio_video = cls.get_audio_url(uid, mark, mark_name)
+                audio_video, audio_title = cls.get_audio_url(uid, mark, mark_name)
                 Common.logger("video").info(f"{mark}的{platform}渠道获取需要拼接的音频成功")
                 # 获取音频秒数
                 audio_duration = cls.get_audio_duration(audio_video)
@@ -431,10 +472,117 @@ class AgcVidoe():
                     cls.insert_videoAudio(video_files, uid, platform, mark)
                     Common.logger("video").info(f"{mark}的{platform}渠道完成已使用视频存入数据库")
                     Common.logger("video").info(f"{mark}的{platform}渠道开始视频添加到对应用户")
-                    piaoquantv = cls.insert_piaoquantv(oss_object_key, title_list, pq_ids_list)
+                    piaoquantv = cls.insert_piaoquantv(oss_object_key, audio_title, pq_ids_list)
                     if piaoquantv:
                         Common.logger("video").info(f"{mark}的{platform}渠道视频添加到对应用户成功")
                 return ''
+        except Exception as e:
+            Common.logger("video").warning(f"{mark}的视频拼接失败:{e}\n")
+            return ''
+
+    # Script-follow task
+    @classmethod
+    def video_gs_stitching(cls, ex_list):
+
+        pq_ids = ex_list["pq_id"]
+        pq_ids_list = pq_ids.split(',')  # account IDs
+        mark_name = ex_list['mark_name']  # owner
+        mark = ex_list["mark"]  # identifier
+        feishu_id = ex_list["feishu_id"]  # Feishu document ID
+        video_call = ex_list["video_call"]
+        parts = video_call.split(',')
+        result = []
+        for part in parts:
+            sub_parts = part.split('--')
+            result.append(sub_parts)
+        link = result[0][0]  # script link
+        count = result[0][1]  # number of videos to generate per channel
+        zd_count = ex_list["zd_count"]  # total number of videos to generate
+
+        # Total number already stored today
+        all_count = cls.get_link_gs_count(mark)
+        if all_count >= int(zd_count):
+            Feishu.bot('recommend', 'AGC完成通知', '今日脚本跟随视频拼接任务完成啦~', mark, mark_name)
+            return mark
+        # Get audio type + subtitles + title
+        uid, srt, video_list = Material.get_all_data(feishu_id, link, mark)
+        platform_list = ex_list["platform_list"] # 渠道
+        # 如果没有该文件目录则创建,有文件目录的话 则删除文件
+        s_path, v_path, video_path_url, v_oss_path = cls.create_folders(mark)
+        platform = ''
+        if platform_list:
+            platform_name_list = random.choice(platform_list)
+            platform_name = platform_name_list[1]
+            platform_url = platform_name_list[0]
+            if platform_name == "快手":
+                platform = 'kuaishou'
+            elif platform_name == "抖音":
+                platform = 'douyin'
+            zw_count = cls.get_link_zw_count(mark, "zhannei")
+            if zw_count >= int(count):
+                return
+            # Get all video material IDs
+            video_list = Material.get_user_id(feishu_id, platform_url)
+            limit_count = 35
+        else:
+            platform = 'zhannei'
+            zw_count = cls.get_link_zn_count(mark, platform)
+            if zw_count >= int(count):
+                return
+            limit_count = 1
+        url_list = cls.get_url_list(video_list, mark, limit_count)
+        if url_list == None:
+            Common.logger("video").info(f"S{mark}的{platform} 渠道 视频画面不足无法拼接")
+            return
+        videos = [list(item) for item in url_list]
+        try:
+            # Download the videos
+            videos = Oss.get_oss_url(videos, video_path_url)
+            if srt:
+                # Create a temporary subtitle file
+                cls.create_subtitle_file(srt, s_path)
+                Common.logger("video").info(f"{mark}的{platform}渠道SRT 文件目录创建成功")
+            # Get the audio
+            audio_video, audio_title = cls.get_audio_url(uid, mark, mark_name)
+            Common.logger("video").info(f"{mark}的{platform}渠道获取需要拼接的音频成功")
+            # Get the audio duration in seconds
+            audio_duration = cls.get_audio_duration(audio_video)
+            Common.logger("video").info(f"{mark}的{platform}渠道获取需要拼接的音频秒数为:{audio_duration}")
+            video_files = cls.concatenate_videos(videos, audio_duration, audio_video, platform, s_path, v_path, mark, v_oss_path)
+            if video_files == "":
+                Common.logger("video").info(f"{mark}的{platform}渠道使用拼接视频为空")
+                return ""
+            if os.path.isfile(v_oss_path):
+                Common.logger("video").info(f"{mark}的{platform}渠道新视频生成成功")
+            else:
+                Common.logger("video").info(f"{mark}的{platform}渠道新视频生成失败")
+                return ""
+            # Randomly generate a video oss_id
+            oss_id = cls.random_id()
+            # Get the duration of the newly generated video
+            v_path_duration = cls.get_audio_duration(v_oss_path)
+            if v_path_duration > audio_duration+3 or v_path_duration < audio_duration-3:
+                print(f"{mark}的{platform}渠道最终生成视频秒数错误,生成了:{v_path_duration}秒,实际秒数{audio_duration}")
+                Common.logger("video").info(f"{mark}的{platform}渠道最终生成视频秒数错误,生成了:{v_path_duration}秒,实际秒数{audio_duration}")
+                return ""
+            # Upload to OSS
+            Common.logger("video").info(f"{mark}的{platform}渠道上传到 OSS 生成视频id为:{oss_id}")
+            oss_object_key = Oss.stitching_sync_upload_oss(v_oss_path, oss_id)
+            status = oss_object_key.get("status")
+            if status == 200:
+                # Get the OSS video URL
+                oss_object_key = oss_object_key.get("oss_object_key")
+                Common.logger("video").info(f"{mark}的{platform}渠道拼接视频发送成功,OSS 地址:{oss_object_key}")
+                time.sleep(10)
+                # Record the used videos in the database
+                Common.logger("video").info(f"{mark}的{platform}渠道开始已使用视频存入数据库")
+                cls.insert_videoAudio(video_files, uid, platform, mark)
+                Common.logger("video").info(f"{mark}的{platform}渠道完成已使用视频存入数据库")
+                Common.logger("video").info(f"{mark}的{platform}渠道开始视频添加到对应用户")
+                piaoquantv = cls.insert_piaoquantv(oss_object_key, audio_title, pq_ids_list)
+                if piaoquantv:
+                    Common.logger("video").info(f"{mark}的{platform}渠道视频添加到对应用户成功")
+            return ''
         except Exception as e:
             Common.logger("video").warning(f"{mark}的视频拼接失败:{e}\n")
             return ''
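Putting the pieces together, the ex_list dict that video_gs_stitching receives is one entry of Material.feishu_gs_list(); a hedged example of its shape (all values invented for illustration):

# Illustrative only: keys mirror Material.feishu_gs_list(); the values are made up.
ex_list = {
    "mark": "mark01",
    "mark_name": "owner",
    "feishu_id": "shtxxxx",
    "pq_id": "111111,222222",               # at least two Piaoquan account IDs, split on ','
    "video_call": "tab_script--30",         # "<script sheet tab>--<count per channel>"
    "zd_count": "60",                       # daily cap compared against get_link_gs_count(mark)
    "sum_count": "60",
    "platform_list": [["tab_dy", "抖音"]],  # an empty list routes to the in-site (站内) branch
}
AgcVidoe.video_gs_stitching(ex_list)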