Kaynağa Gözat

增加抖音、视频号渠道抓取

zhangyong 10 ay önce
ebeveyn
işleme
0cd5d503d4

+ 22 - 21
common/aliyun_oss.py

@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # @Time: 2023/12/26
+import time
 from datetime import datetime
 from typing import Dict, Any,  Optional
 
@@ -13,13 +14,11 @@ OSS_BUCKET_ENDPOINT = "oss-cn-hangzhou-internal.aliyuncs.com"# 内网地址
 # OSS_BUCKET_ENDPOINT = "oss-cn-hangzhou.aliyuncs.com" # 外网地址
 OSS_BUCKET_NAME = "art-crawler"
 class Oss():
-    # 抓取视频上传到art-crawler
+
     @classmethod
-    def video_sync_upload_oss(cls, src_url: str,
-                        video_id: str,
-                        account_id: str,
-                        OSS_BUCKET_PATH: str,
-                        referer: Optional[str] = None) -> Dict[str, Any]:
+    def channel_upload_oss(cls, src_url: str,
+                              video_id: str,
+                              referer: Optional[str] = None) -> Dict[str, Any]:
         headers = {
             'Accept': '*/*',
             'Accept-Language': 'zh-CN,zh;q=0.9',
@@ -34,7 +33,7 @@ class Oss():
         file_content = response.content
         content_type = response.headers.get('Content-Type', 'application/octet-stream')
 
-        oss_object_key = f'{OSS_BUCKET_PATH}/{account_id}/{video_id}'
+        oss_object_key = f'channel/video/{video_id}'
         auth = oss2.Auth(OSS_ACCESS_KEY_ID, OSS_ACCESS_KEY_SECRET)
         bucket = oss2.Bucket(auth, OSS_BUCKET_ENDPOINT, OSS_BUCKET_NAME)
         response = bucket.put_object(oss_object_key, file_content, headers={'Content-Type': content_type})
@@ -65,20 +64,22 @@ class Oss():
         raise AssertionError(f'OSS上传失败,请求ID: \n{response.headers["x-oss-request-id"]}')
 
 
-    # 获取视频链接 将视频链接有效时间设置为1天
     @classmethod
-    def get_oss_url(cls, videos, video_path):
-        auth = oss2.Auth(OSS_ACCESS_KEY_ID, OSS_ACCESS_KEY_SECRET)
-        bucket = oss2.Bucket(auth, OSS_BUCKET_ENDPOINT, OSS_BUCKET_NAME)
-        list = []
-        for i in videos:
-            try:
-                # 获取指定路径下的对象列表
-                filename = i[2].split("/")[-1]
 -                bucket.get_object_to_file(i[2], f'{video_path}{filename}.mp4')
 -                list.append([i[0], i[1], i[2], f'{video_path}{filename}.mp4'])
-            except Exception:
-                continue
-        return list
+    def download_video_oss(cls, video_url, video_path_url, v_id):
+        video_path = video_path_url + str(v_id) + '.mp4'
+        oss_object_key = cls.channel_upload_oss(video_url, v_id)
+        time.sleep(2)
+        oss_object = oss_object_key.get("oss_object_key")
+        if oss_object:
+            auth = oss2.Auth(OSS_ACCESS_KEY_ID, OSS_ACCESS_KEY_SECRET)
+            bucket = oss2.Bucket(auth, OSS_BUCKET_ENDPOINT, OSS_BUCKET_NAME)
+            # 获取指定路径下的对象列表
+            bucket.get_object_to_file(oss_object, video_path)
+            time.sleep(5)
+            return video_path
+        else:
+            return video_path
+
+
 
 

+ 23 - 16
common/feishu_form.py

@@ -15,16 +15,15 @@ class Material():
     """
     @classmethod
     def feishu_list(cls):
-        summary = Feishu.get_values_batch("summary", "3e1295")
+        summary = Feishu.get_values_batch("summary", "bc154d")
         list = []
         for row in summary[1:]:
             mark = row[0]
             name = row[1]
             feishu_id = row[3]
             feishu_sheet = row[4]
-            pw_sheet = row[5]
-            pz_sheet = row[6]
-            number = {"mark": mark, "name": name, "feishu_id": feishu_id, "feishu_sheet": feishu_sheet, "pw_sheet": pw_sheet, "pz_sheet": pz_sheet}
+            cookie_sheet = row[5]
+            number = {"mark": mark, "name": name, "feishu_id": feishu_id, "feishu_sheet": feishu_sheet, "cookie_sheet": cookie_sheet}
             if mark:
                 list.append(number)
             else:
@@ -40,9 +39,9 @@ class Material():
         processed_list = []
 
         for row in data[1:]:
-            old_id = row[1]
-            video_id = row[2]
-            new_id = row[3]
+            channel_id = row[1]
+            channel_url = row[2]
+            piaoquan_id = row[3]
             number = row[4]
             video_share = row[5]
             video_ending = row[6]
@@ -55,22 +54,20 @@ class Material():
                     return len(item.split(separator))
                 return 0
 
-            old_id_total = count_items(str(old_id), ',')
-            video_id_total = count_items(str(video_id), ',')
-            new_id_total = count_items(str(new_id), ',')
+            video_id_total = count_items(str(channel_url), ',')
             title_total = count_items(str(title), '/')
             video_ending_total = count_items(str(video_ending), ',')
 
-            values = [old_id_total, video_id_total, new_id_total, video_share, video_ending_total, crop_tool, gg_duration, title_total]
+            values = [channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool, gg_duration, title_total]
             filtered_values = [str(value) for value in values if value is not None and value != "None"]
             task_mark = "_".join(map(str, filtered_values))
 
-            if new_id and new_id not in {'None', ''}:
+            if piaoquan_id and piaoquan_id not in {'None', ''}:
                 number_dict = {
                     "task_mark": task_mark,
-                    "old_id": old_id,
-                    "video_id": video_id,
-                    "new_id": new_id,
+                    "channel_id": channel_id,
+                    "channel_url": channel_url,
+                    "piaoquan_id": piaoquan_id,
                     "number": number,
                     "title": title,
                     "video_share": video_share,
@@ -118,7 +115,17 @@ class Material():
 
 
 
-
+    """
+    获取 cookie 信息
+    """
+    @classmethod
+    def get_cookie_data(cls, feishu_id, cookie_sheet, channel):
+        data = Feishu.get_values_batch(feishu_id, cookie_sheet)
+        for row in data[1:]:
+            channel_mask = row[0]
+            cookie = row[1]
+            if channel_mask == channel:
+                return cookie
 
 
 

+ 1 - 1
common/feishu_utils.py

@@ -24,7 +24,7 @@ class Feishu:
     @classmethod
     def spreadsheettoken(cls, crawler):
         if crawler == "summary":
-            return "S8jusgF83h8gEKtILW4cli2Bngf"
+            return "KsoMsyP2ghleM9tzBfmcEEXBnXg"
         else:
             return crawler
 

+ 1 - 0
common/mysql_db.py

@@ -64,3 +64,4 @@ class MysqlHelper:
 
 
 
+

+ 23 - 5
common/sql_help.py

@@ -14,10 +14,10 @@ class sqlCollect():
     视频信息写入库中
     """
     @classmethod
-    def insert_task(cls, task_mark, video_id, mark):
+    def insert_task(cls, task_mark, video_id, mark, channel):
         current_time = datetime.now()
         formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
-        insert_sql = f"""INSERT INTO pj_video_data (task_name, used_video_id, mark_name, data_time) values ('{task_mark}' ,'{video_id}','{mark}', '{formatted_time}')"""
+        insert_sql = f"""INSERT INTO pj_video_data (task_name, used_video_id, mark_name, data_time, channel) values ('{task_mark}' ,'{video_id}','{mark}', '{formatted_time}', '{channel}')"""
         MysqlHelper.update_values(
             sql=insert_sql
         )
@@ -26,16 +26,34 @@ class sqlCollect():
     判断该任务id是否用过
     """
     @classmethod
-    def is_used(cls, task_mark, video_id, mark_name):
+    def is_used(cls, task_mark, video_id, mark_name, channel):
         sql = """
             SELECT used_video_id
             FROM pj_video_data
-            WHERE used_video_id = %s AND task_name = %s AND mark_name = %s 
+            WHERE used_video_id = %s AND task_name = %s AND mark_name = %s AND channel = %s 
             ORDER BY data_time DESC
             LIMIT 1
         """
-        data = MysqlHelper.get_values(sql, (str(video_id), task_mark, mark_name))
+        data = MysqlHelper.get_values(sql, (str(video_id), task_mark, mark_name, channel))
         if len(data) == 0 or data == ():
             return True
         return False
 
+    @classmethod
+    def get_history_id(cls, channel, url):
+        """
+        从数据库表中读取 id
+        """
+        sql = f"""select name_id from accounts where name = %s and platform = %s and useful = 1 limit 1"""
+        data = MysqlHelper.get_values(sql, (url,  channel))
+        if data:
+            return data[0][0]
+        else:
+            return False
+
+    @classmethod
+    def insert_history_id(cls, account_name, target, channel):
+        insert_sql = f"""INSERT INTO accounts (name, name_id, platform, useful) values ("{account_name}", "{target}", "{channel}", 1 )"""
+        MysqlHelper.update_values(
+            sql=insert_sql
+        )

+ 0 - 0
data_channel/__init__.py


+ 76 - 0
data_channel/douyin.py

@@ -0,0 +1,76 @@
+import json
+import random
+import time
+
+import requests
+import urllib3
+from requests.adapters import HTTPAdapter
+from common import Material, Common, Feishu
+from common.sql_help import sqlCollect
+from data_channel.douyin_help import DouYinHelper
+
+
+class DY:
+
+    @classmethod
+    def get_dy_url(cls, task_mark, url_id, number, mark, feishu_id, cookie_sheet, channel_id, name):
+        list = []
+        next_cursor = 0
+        for i in range(3):
+            cookie = Material.get_cookie_data(feishu_id, cookie_sheet, channel_id)
+            time.sleep(random.randint(5, 10))
+            url = 'https://www.douyin.com/aweme/v1/web/aweme/post/'
+            headers = {
+                'Accept': 'application/json, text/plain, */*',
+                'Accept-Language': 'zh-CN,zh;q=0.9',
+                'Cache-Control': 'no-cache',
+                'Cookie': cookie,
+                'Pragma': 'no-cache',
+                'Referer': f'https://www.douyin.com/user/{url_id}',
+                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '
+                              'Chrome/118.0.0.0 Safari/537.36',
+            }
+            query = DouYinHelper.get_full_query(ua=headers['User-Agent'], extra_data={
+                'sec_user_id': url_id,
+                'max_cursor': next_cursor,
+                'locate_query': 'false',
+                'show_live_replay_strategy': '1',
+                'need_time_list': '1',
+                'time_list_query': '0',
+                'whale_cut_token': '',
+                'cut_version': '1',
+                'count': '18',
+                'publish_video_strategy_type': '2',
+            })
+            urllib3.disable_warnings()
+            s = requests.session()
+            s.mount('http://', HTTPAdapter(max_retries=3))
+            s.mount('https://', HTTPAdapter(max_retries=3))
+            response = requests.request(method='GET', url=url, headers=headers, params=query)
+            body = response.content.decode()
+            obj = json.loads(body)
+            has_more = True if obj.get('has_more', 0) == 1 else False
+            next_cursor = str(obj.get('max_cursor')) if has_more else None
+            data = obj.get('aweme_list', [])
+            if not data[0].get('search_impr').get('entity_type'):
+                Feishu.bot(mark, '机器自动改造消息通知', f'今日任务为空,请关注', name)
+                time.sleep(900)
+                continue
+            response.close()
+            for i in range(len(data)):
+                entity_type = data[i].get('search_impr').get('entity_type')
+                if entity_type == 'GENERAL':
+                    # is_top = data[i].get('is_top')  # 是否置顶
+                    video_id = data[i].get('aweme_id')  # 文章id
+                    status = sqlCollect.is_used(task_mark, video_id, mark, channel_id)
+                    if status:
+                        video_uri = data[i].get('video', {}).get('play_addr', {}).get('uri')
+                        ratio = f'{data[i].get("video", {}).get("height")}p'
+                        video_url = f'https://www.iesdouyin.com/aweme/v1/play/?video_id={video_uri}&ratio={ratio}&line=0'  # 视频链接
+                        cover_url = data[i].get('video').get('cover').get('url_list')[0]  # 视频封面
+                        all_data = {"video_id": video_id, "cover": cover_url, "video_url": video_url}
+                        list.append(all_data)
+                        if len(list) == int(number):
+                            Common.logger("log").info(f"获取抖音视频总数:{len(list)}\n")
+                            return list
+        return list

+ 138 - 0
data_channel/douyin_help.py

@@ -0,0 +1,138 @@
+import json
+import time
+from base64 import b64encode
+from functools import reduce
+from hashlib import md5
+from random import choice, randint
+from typing import Any, Dict, List, Optional
+from urllib.parse import urlencode
+
+
+class DouYinHelper(object):
+    ttwid_list = [
+        '1|G3wy_-RdLJnfG5P9zAcP54OM8_nTLZVrJxNi1lPzdmg|1693558867|5e43c47a424e939aaf7193b096e3c6f2274982ee64e9608c99c54d2a43982aca'
+    ]
+
+    @classmethod
+    def _0x30492c(cls, x: bytes, y: bytes, f: Optional[List[int]] = None) -> bytes:
+        """RC4加密, 可以用Crypto.Cipher.ARC4替代"""
+        c = 0
+        d = [i for i in range(256)]
+        for b in range(256):
+            c = (c + d[b] + x[b % len(x)]) % 256
+            e = d[b]
+            d[b] = d[c]
+            d[c] = e
+        t, c = 0, 0
+
+        if not f:
+            f = []
+        for i in range(len(y)):
+            t = (t + 1) % 256
+            c = (c + d[t]) % 256
+            e = d[t]
+            d[t] = d[c]
+            d[c] = e
+            f.append(y[i] ^ d[(d[t] + d[c]) % 256])
+        return bytes(f)
+
+    @classmethod
+    def _0x485470(cls, a: str) -> List[int]:
+        _0x583e81 = [0] * 103
+        for i in range(10):
+            _0x583e81[i + 48] = i
+        for j in range(10, 16):
+            _0x583e81[j + 87] = j
+
+        b = len(a) >> 1
+        e = b << 1
+        d = [0] * b
+        c = 0
+        for f in range(0, e, 2):
+            d[c] = _0x583e81[ord(a[f])] << 4 | _0x583e81[ord(a[f + 1])]
+            c += 1
+        return d
+
+    @classmethod
+    def calc_x_bogus(cls, ua: str, query: str, data: Optional[Dict[str, Any]] = None) -> str:
+        """计算X_Bogus参数"""
+        query = query.encode()
+        for _ in range(2):
+            query = md5(query).hexdigest()
+            query = bytes([int(query[i:i + 2], 16) for i in range(0, len(query), 2)])
+
+        data = json.dumps(data, separators=(',', ':'), ensure_ascii=False).encode() if data else b''
+        for _ in range(2):
+            data = md5(data).hexdigest()
+            data = bytes([int(data[i:i + 2], 16) for i in range(0, len(data), 2)])
+
+        a = b'\x00\x01\x0e'
+        ua = b64encode(cls._0x30492c(a, ua.encode())).decode()
+        ua = md5(ua.encode()).hexdigest()
+        ua = cls._0x485470(ua)
+
+        t = int(time.time())
+        fp = 2421646185  # 真实的canvas指纹
+        arr1 = [
+            64,
+            1 / 256,
+            1 % 256,
+            14,
+            query[14],
+            query[15],
+            data[14],
+            data[15],
+            ua[14],
+            ua[15],
+            t >> 24 & 255,
+            t >> 16 & 255,
+            t >> 8 & 255,
+            t >> 0 & 255,
+            fp >> 24 & 255,
+            fp >> 16 & 255,
+            fp >> 8 & 255,
+            fp >> 0 & 255,
+        ]
+        reduce_num = reduce(lambda x, y: int(x) ^ int(y), arr1)
+        arr1.append(reduce_num)
+        arr2 = [int(arr1[i]) for i in range(len(arr1))]
+
+        garble = cls._0x30492c(b'\xff', bytes(arr2), [2, 255])
+        m = 'Dkdpgh4ZKsQB80/Mfvw36XI1R25-WUAlEi7NLboqYTOPuzmFjJnryx9HVGcaStCe='
+        xb = ''
+        for i in range(0, len(garble), 3):
+            a, b, c = garble[i], garble[i + 1], garble[i + 2]
+            base_num = c | b << 8 | a << 16
+            c1 = m[(base_num & 16515072) >> 18]
+            c2 = m[(base_num & 258048) >> 12]
+            c3 = m[(base_num & 4032) >> 6]
+            c4 = m[(base_num & 63)]
+            xb += ''.join([c1, c2, c3, c4])
+        return xb
+
+    @classmethod
+    def get_full_query(cls, ua: str, extra_data: Dict[str, Any]) -> Dict[str, Any]:
+        ms_token = b64encode(bytes([randint(0, 255) for _ in range(94)])).decode()
+        ms_token = ms_token.replace('+', '-').replace('/', '_').rstrip('=')
+
+        data = {
+            'device_platform': 'webapp',
+            'aid': '6383',
+            'channel': 'channel_pc_web',
+            'pc_client_type': '1',
+            'version_code': '190500',
+            'version_name': '19.5.0',
+            'cookie_enabled': 'true',
+            'platform': 'PC',
+            'msToken': ms_token,
+        }
+        data.update(extra_data)
+        query = urlencode(data, safe='=')
+        x_bogus = cls.calc_x_bogus(ua=ua, query=query, data=None)
+        data.update({'X-Bogus': x_bogus})
+        return data
+
+    @classmethod
+    def get_cookie(cls):
+        ttwid = choice(cls.ttwid_list)
+        return f'ttwid={ttwid}'

+ 3 - 0
data_channel/kuaishou.py

@@ -0,0 +1,3 @@
+
+class KS:
+    pass

+ 4 - 12
common/piaoquan_utils.py → data_channel/piaoquan.py

@@ -99,7 +99,7 @@ class PQ:
     获取用户下的所有视频
     """
     @classmethod
-    def get_user_url(cls, task_mark, user_id, number, title, mark):
+    def get_pq_url(cls, task_mark, user_id, number, mark):
         url = f"https://admin.piaoquantv.com/manager/video/page?uid={user_id}&pageNum=1&pageSize=100"
 
         payload = {}
@@ -116,24 +116,16 @@ class PQ:
             list = []
             for url in content:
                 video_id = url["id"]
-                status = sqlCollect.is_used(task_mark, video_id, mark)
+                status = sqlCollect.is_used(task_mark, video_id, mark, "票圈")
                 if status:
-                    if title == '' or title == None:
-                        new_title = url["title"]
-                    else:
-                        if '/' in title:
-                            titles = title.split('/')
-                        else:
-                            titles = [title]
-                        new_title = random.choice(titles)
                     cover = url["coverImgPath"]
                     video_url = url["transedVideoPath"]
-                    all_data = {"video_id": video_id, "title": new_title, "cover": cover, "video_url": video_url}
+                    all_data = {"video_id": video_id, "cover": cover, "video_url": video_url}
                     list.append(all_data)
                     if len(list) == int(number):
                         Common.logger("log").info(f"获取视频总数:{len(list)}\n")
                         return list
-            Common.logger("log").info(f"获取视频总数:{len(list)}\n")
+            Common.logger("log").info(f"获取票圈视频总数:{len(list)}\n")
             return list
         except Exception as e:
             Common.logger("log").warning(f"获取音频视频链接失败:{e}\n")

+ 107 - 0
data_channel/shipinhao.py

@@ -0,0 +1,107 @@
+import json
+import random
+import time
+
+import requests
+
+from common import Common
+from common.sql_help import sqlCollect
+
+
+class SPH:
+
+    @classmethod
+    def find_target_user(cls, name, user_list):
+        """
+        在搜索到的账号列表中找目标账号
+        """
+        for obj in user_list:
+            if obj['contact']["nickname"] == name:
+                return obj
+            else:
+                continue
+        return False
+
+    @classmethod
+    def get_account_id(cls, account_name):
+        channel = 'shipinhao'
+        history_id = sqlCollect.get_history_id(channel, account_name)
+        if history_id:
+            return history_id
+        else:
+            url = "http://61.48.133.26:30001/Find_Video_Content"
+            payload = json.dumps({
+                "content": account_name,
+                "type": "19"
+            })
+            headers = {
+                'Content-Type': 'application/json'
+            }
+            response = requests.request("POST", url, headers=headers, data=payload)
+            info_list = response.json()['info_list']
+            if len(info_list) == 0:
+                return False
+            target_user = cls.find_target_user(name=account_name, user_list=info_list)
+            # 写入 MySql 数据库
+            if target_user:
+                target = target_user['contact']['username']
+                sqlCollect.insert_history_id(account_name, target, channel)
+
+                return target_user['contact']["username"]
+            else:
+                return False
+
+    @classmethod
+    def get_sph_url(cls, task_mark, url, number, mark):
+        account_id = cls.get_account_id(url)
+        if account_id:
+            url = "http://61.48.133.26:30001/FinderGetUpMasterNextPage"
+            last_buffer = ""
+            list = []
+            for i in range(5):
+                headers = {
+                    'Content-Type': 'application/json'
+                }
+                payload = json.dumps({
+                    "username": account_id,
+                    "last_buffer": last_buffer
+                })
+
+                response = requests.request("POST", url, headers=headers, data=payload)
+                time.sleep(random.randint(1, 5))
+                if "objectId" not in response.text or response.status_code != 200:
+                    continue
+                res_json = response.json()
+                if len(res_json["UpMasterHomePage"]) == 0:
+                    continue
+                if not res_json["UpMasterHomePage"]:
+                    continue
+                else:
+                    last_buffer = res_json.get('last_buffer')
+                    for obj in res_json["UpMasterHomePage"]:
+                        objectId = obj['objectId']
+                        status = sqlCollect.is_used(task_mark, objectId, mark, "票圈")
+                        if status:
+                            objectNonceId = obj['objectNonceId']
+                            url = "http://61.48.133.26:30001/GetFinderDownloadAddress"
+                            payload = json.dumps({
+                                "objectId": objectId,
+                                "objectNonceId": objectNonceId
+                            })
+                            headers = {
+                                'Content-Type': 'text/plain'
+                            }
+                            response = requests.request("POST", url, headers=headers, data=payload)
+                            time.sleep(random.randint(0, 1))
+                            video_obj = response.json()
+                            video_url = video_obj.get('DownloadAddress')
+                            cover = video_obj.get('thumb_url')
+                            all_data = {"video_id": objectId, "cover": cover, "video_url": video_url}
+                            list.append(all_data)
+                            if len(list) == int(number):
+                                Common.logger("log").info(f"获取视频号视频总数:{len(list)}\n")
+                                return list
+            return list
+        return []
+
+

+ 53 - 51
video_rewriting/video_prep.py

@@ -6,8 +6,10 @@ from datetime import datetime
 
 from common import Material, Feishu, Common, Oss
 from common.ffmpeg import FFmpeg
-from common.piaoquan_utils import PQ
+from data_channel.douyin import DY
+from data_channel.piaoquan import PQ
 from common.sql_help import sqlCollect
+from data_channel.shipinhao import SPH
 
 config = configparser.ConfigParser()
 config.read('./config.ini')
@@ -66,69 +68,70 @@ class getVideo:
         name = data["name"]
         feishu_id = data["feishu_id"]
         feishu_sheet = data["feishu_sheet"]
-        pw_sheet = data["pw_sheet"]
-        pz_sheet = data["pz_sheet"]
-
+        cookie_sheet = data["cookie_sheet"]
+        pz_sheet = '500Oe0'
+        pw_sheet = 'DgX7vC'
         task_data = Material.get_task_data(feishu_id, feishu_sheet)
         if len(task_data) == 0:
             Feishu.bot(mark, '机器自动改造消息通知', f'今日任务为空,请关注', name)
             return mark
-
         for task in task_data:
-            task_marks = task["task_mark"]  # 任务标示
-            old_id = str(task["old_id"])
-            video_id = str(task["video_id"])
-            new_id = str(task["new_id"])
+            task_mark = task["task_mark"]  # 任务标示
+            channel_id = str(task["channel_id"])
+            channel_urls = str(task["channel_url"])
+            piaoquan_id = str(task["piaoquan_id"])
             number = task["number"]  # 指定条数
             title = task["title"]
             video_share = task["video_share"]
             video_ending = task["video_ending"]
             crop_total = task["crop_total"]
             gg_duration_total = task["gg_duration_total"]
+            video_path_url = cls.create_folders(mark, str(task_mark))  # 创建目录
+
             if video_share and video_share != 'None':
                 video_share_list = video_share.split('/')
                 video_share_mark = video_share_list[0]
                 video_share_name = video_share_list[1]
-                zm = Material.get_pzsrt_data(feishu_id, pz_sheet, video_share_name)  # 获取srt
+                zm = Material.get_pzsrt_data("summary", pz_sheet, video_share_name)  # 获取srt
                 if zm == '':
-                    Feishu.bot(mark, '机器自动改造消息通知', f'{task_marks}任务下片中标示填写错误,请关注!!!!', name)
-
-            if ',' in new_id:
-                n_id = new_id.split(',')
+                    Feishu.bot(mark, '机器自动改造消息通知', f'{task_mark}任务下片中标示填写错误,请关注!!!!', name)
+            if ',' in channel_urls:
+                channel_url = channel_urls.split(',')
             else:
-                n_id = [new_id]
-            if old_id and old_id != 'None':
-                task_id = old_id.split(',')
-            else:
-                task_id = video_id.split(',')
-            values = [task_id[0], task_marks]
-            task_mark = "_".join(map(str, values))  # 最终任务标示
-            video_path_url = cls.create_folders(mark, str(task_mark))  # 创建目录
-            try:
-                count = 0  # 初始化计数器
-                for id in task_id:
-                    Common.logger("log").info(f"{task_mark}下的ID{id} 开始获取视频")
-                    time.sleep(1)
-                    if old_id and old_id != 'None':
-                        data_list = PQ.get_user_url(task_mark, id, number, title, mark)
-                    else:
-                        data_list = PQ.get_audio_url(task_mark, id, title, mark)
+                channel_url = [channel_urls]
+            for url in channel_url:
+                Common.logger("log").info(f"{task_mark}下的用户:{channel_url}开始获取视频")
+                if '/' in title:
+                    titles = title.split('/')
+                else:
+                    titles = [title]
+                if channel_id == "抖音":
+                    data_list = DY.get_dy_url(task_mark, url, number, mark, feishu_id, cookie_sheet, channel_id, name)
+                elif channel_id == "票圈":
+                    data_list = PQ.get_pq_url(task_mark, url, number, mark)
+                elif channel_id == "视频号":
+                    data_list = SPH.get_sph_url(task_mark, url, number, mark)
+                # elif channel_id == "快手":
+                #     pass
+                if len(data_list) == 0:
+                    Common.logger("log").info(f"{task_mark}下的视频ID{id} 已经改造过了")
+                    Feishu.bot(mark, '机器自动改造消息通知', f'{task_mark}任务下的用户ID{id},没有已经改造的视频了', name)
+                    cls.remove_files(video_path_url)
+                    continue
+                Common.logger("log").info(f"{task_mark}下的ID{id} 获取视频完成,共{len(data_list)}条")
 
-                    if len(data_list) == 0:
-                        Common.logger("log").info(f"{task_mark}下的视频ID{id} 已经改造过了")
-                        Feishu.bot(mark, '机器自动改造消息通知', f'{task_mark}任务下的用户ID{id},没有已经改造的视频了', name)
-                        cls.remove_files(video_path_url)
-                        continue
-                    Common.logger("log").info(f"{task_mark}下的ID{id} 获取视频完成,共{len(data_list)}条")
+                try:
                     for video in data_list:
                         v_id = video["video_id"]
-                        new_title = video["title"]
                         cover = video["cover"]
                         video_url = video["video_url"]
                         time.sleep(1)
                         pw_random_id = cls.random_id()
-                        new_video_path = PQ.download_video(video_url, video_path_url, v_id)  # 下载视频地址
-                        if new_video_path == '':
+                        if channel_id == "票圈":
+                            new_video_path = PQ.download_video(video_url, video_path_url, v_id)  # 下载视频地址
+                        else:
+                            new_video_path = Oss.download_video_oss(video_url, video_path_url, v_id)  # 下载视频地址
+                        if not os.path.isfile(new_video_path):
                             Common.logger("log").info(f"{task_mark}下的视频ID{id},{new_video_path}视频下载失败")
                             cls.remove_files(video_path_url)
                             continue
@@ -142,13 +145,13 @@ class getVideo:
                             else:
                                 video_ending_list = [video_ending]
                             ending = random.choice(video_ending_list)
-                            pw_list = Material.get_pwsrt_data(feishu_id, pw_sheet, ending)  # 获取srt
+                            pw_list = Material.get_pwsrt_data("summary", pw_sheet, ending)  # 获取srt
                             if pw_list:
                                 pw_id = pw_list["pw_id"]
                                 pw_srt = pw_list["pw_srt"]
                                 pw_url = PQ.get_pw_url(pw_id)
                             else:
-                                Feishu.bot(mark, '机器自动改造消息通知', f'{task_marks}任务下片尾标示错误,请关注!!!!', name)
+                                Feishu.bot(mark, '机器自动改造消息通知', f'{task_mark}任务下片尾标示错误,请关注!!!!', name)
                             for attempt in range(3):
                                 jpg_path = FFmpeg.video_png(new_video_path, video_path_url, pw_random_id)  # 生成视频最后一帧jpg
                                 if os.path.isfile(jpg_path):
@@ -188,14 +191,14 @@ class getVideo:
                         if status == 200:
                             oss_object_key = oss_object_key.get("oss_object_key")
                             time.sleep(1)
-                            code = PQ.insert_piaoquantv(oss_object_key, new_title, cover, n_id[count])
-
+                            new_title = random.choice(titles)
+                            code = PQ.insert_piaoquantv(oss_object_key, new_title, cover, piaoquan_id)
                             if code:
                                 Common.logger("log").info(f"{task_mark}下的视频ID{v_id}发送成功")
-                                sqlCollect.insert_task(task_mark, v_id, mark)  # 插入数据库
+                                sqlCollect.insert_task(task_mark, v_id, mark, channel_id)  # 插入数据库
                                 current_time = datetime.now()
                                 formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
-                                values = [[name, task_mark, v_id, n_id[count], new_title, str(code), formatted_time]]
+                                values = [[name, task_mark, v_id, piaoquan_id, new_title, str(code), formatted_time]]
                                 Feishu.insert_columns("ILb4sa0LahddRktnRipcu2vQnLb", "a74fc4", "ROWS", 1, 2)
                                 time.sleep(0.5)
                                 Feishu.update_values("ILb4sa0LahddRktnRipcu2vQnLb", "a74fc4", "A2:Z2", values)
@@ -203,10 +206,9 @@ class getVideo:
                         else:
                             cls.remove_files(video_path_url)
                             Common.logger("log").info(f"{task_mark}下的视频ID{id} 视频发送OSS失败 ")
-                    count += 1  # 每次迭代计数器加1
-                Feishu.bot(mark, '机器自动改造消息通知', f'{task_mark}任务改造完成,请关注', name)
-            except Exception as e:
-                cls.remove_files(video_path_url)
-                Common.logger("warning").warning(f"{name}的{task_mark}任务处理失败:{e}\n")
+                    Feishu.bot(mark, '机器自动改造消息通知', f'{task_mark}任务改造完成,请关注', name)
+                except Exception as e:
+                    cls.remove_files(video_path_url)
+                    Common.logger("warning").warning(f"{name}的{task_mark}任务处理失败:{e}\n")
         Feishu.bot(mark, '机器自动改造消息通知', f'你的任务全部完成,请关注!!!!!', name)
         return mark