zhangyong 4 months ago
parent
commit
7e86ac5ca8

+ 1 - 1
common/__init__.py

@@ -1,4 +1,4 @@
-from .common_log import Common
+# from .common_log import Common
 from .aliyun_oss import Oss
 from .feishu_form import Material
 from .feishu_utils import Feishu

+ 2 - 3
common/aliyun_log.py

@@ -8,8 +8,7 @@ from datetime import datetime
 from typing import Optional
 
 from aliyun.log import PutLogsRequest, LogClient, LogItem
-
-from common import Common
+from loguru import logger
 
 proxies = {"http": None, "https": None}
 
@@ -76,4 +75,4 @@ class AliyunLogger:
 
             client.put_logs(request)
         except Exception as e:
-            Common.logger('aliyun').error(f"写入日志失败: {e}")
+            logger.error(f"写入日志失败: {e}")

+ 52 - 52
common/common_log.py

@@ -1,52 +1,52 @@
-# -*- coding: utf-8 -*-
-# @Time: 2023/12/26
-"""
-公共方法,包含:生成log / 删除log / 下载方法 / 删除 weixinzhishu_chlsfiles / 过滤词库 / 保存视频信息至本地 txt / 翻译 / ffmpeg
-"""
-import os
-import sys
-
-sys.path.append(os.getcwd())
-from datetime import date, timedelta
-from datetime import datetime
-from loguru import logger
-
-proxies = {"http": None, "https": None}
-
-
-class Common:
-    # 统一获取当前时间 <class 'datetime.datetime'>  2022-04-14 20:13:51.244472
-    now = datetime.now()
-    # 昨天 <class 'str'>  2022-04-13
-    yesterday = (date.today() + timedelta(days=-1)).strftime("%Y-%m-%d")
-    # 今天 <class 'datetime.date'>  2022-04-14
-    today = date.today()
-    # 明天 <class 'str'>  2022-04-15
-    tomorrow = (date.today() + timedelta(days=1)).strftime("%Y-%m-%d")
-
-    # 使用 logger 模块生成日志
-    @staticmethod
-    def logger(log_type):
-        try:
-            """
-            使用 logger 模块生成日志
-            """
-            # 日志路径
-            log_dir = f"./logs/{log_type}/"
-            log_path = os.getcwd() + os.sep + log_dir
-            if not os.path.isdir(log_path):
-                os.makedirs(log_path)
-            # 日志文件名
-            log_name = f"{log_type}-{datetime.now().date().strftime('%Y-%m-%d')}.log"
-
-            # 日志不打印到控制台
-            logger.remove(handler_id=None)
-            # 初始化日志
-            logger.add(os.path.join(log_dir, log_name), level="INFO", rotation="00:00", retention="10 days", enqueue=True)
-
-            return logger
-        except Exception as e:
-            Common.logger("aly-logger").log(f"阿里云日志上报异常{e}")
-            return None
-
-
+# # -*- coding: utf-8 -*-
+# # @Time: 2023/12/26
+# """
+# 公共方法,包含:生成log / 删除log / 下载方法 / 删除 weixinzhishu_chlsfiles / 过滤词库 / 保存视频信息至本地 txt / 翻译 / ffmpeg
+# """
+# import os
+# import sys
+#
+# sys.path.append(os.getcwd())
+# from datetime import date, timedelta
+# from datetime import datetime
+# from loguru import logger
+#
+# proxies = {"http": None, "https": None}
+#
+#
+# class Common:
+#     # 统一获取当前时间 <class 'datetime.datetime'>  2022-04-14 20:13:51.244472
+#     now = datetime.now()
+#     # 昨天 <class 'str'>  2022-04-13
+#     yesterday = (date.today() + timedelta(days=-1)).strftime("%Y-%m-%d")
+#     # 今天 <class 'datetime.date'>  2022-04-14
+#     today = date.today()
+#     # 明天 <class 'str'>  2022-04-15
+#     tomorrow = (date.today() + timedelta(days=1)).strftime("%Y-%m-%d")
+#
+#     # 使用 logger 模块生成日志
+#     @staticmethod
+#     def logger(log_type):
+#         try:
+#             """
+#             使用 logger 模块生成日志
+#             """
+#             # 日志路径
+#             log_dir = f"./logs/{log_type}/"
+#             log_path = os.getcwd() + os.sep + log_dir
+#             if not os.path.isdir(log_path):
+#                 os.makedirs(log_path)
+#             # 日志文件名
+#             log_name = f"{log_type}-{datetime.now().date().strftime('%Y-%m-%d')}.log"
+#
+#             # 日志不打印到控制台
+#             logger.remove(handler_id=None)
+#             # 初始化日志
+#             logger.add(os.path.join(log_dir, log_name), level="INFO", rotation="00:00", retention="10 days", enqueue=True)
+#
+#             return logger
+#         except Exception as e:
+#             Common.logger("aly-logger").log(f"阿里云日志上报异常{e}")
+#             return None
+#
+#

+ 35 - 46
common/feishu_utils.py

@@ -8,9 +8,9 @@ import os
 import sys
 import requests
 import urllib3
+from loguru import logger
 
 sys.path.append(os.getcwd())
-from common import Common
 
 proxies = {"http": None, "https": None}
 
@@ -40,14 +40,10 @@ class Feishu:
         url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
         post_data = {"app_id": "cli_a13ad2afa438d00b",  # 这里账号密码是发布应用的后台账号及密码
                      "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
-
-        try:
-            urllib3.disable_warnings()
-            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
-            tenant_access_token = response.json()["tenant_access_token"]
-            return tenant_access_token
-        except Exception as e:
-            Common.logger("feishu").error("获取飞书 api token 异常:{}", e)
+        urllib3.disable_warnings()
+        response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
+        tenant_access_token = response.json()["tenant_access_token"]
+        return tenant_access_token
 
     # 获取表格元数据
     @classmethod
@@ -73,7 +69,7 @@ class Feishu:
             response = json.loads(r.content.decode("utf8"))
             return response
         except Exception as e:
-            Common.logger("feishu").error("获取表格元数据异常:{}", e)
+            logger.error("获取表格元数据异常:{}", e)
 
     # 读取工作表中所有数据
     @classmethod
@@ -84,26 +80,25 @@ class Feishu:
         :param sheetid: 哪张表
         :return: 所有数据
         """
-        try:
-            get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
-                                   + cls.spreadsheettoken(crawler) + "/values_batch_get"
-            headers = {
-                "Authorization": "Bearer " + cls.get_token(),
-                "Content-Type": "application/json; charset=utf-8"
-            }
-            params = {
-                "ranges": sheetid,
-                "valueRenderOption": "ToString",
-                "dateTimeRenderOption": "",
-                "user_id_type": "open_id"
-            }
-            urllib3.disable_warnings()
-            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
-            response = json.loads(r.content.decode("utf8"))
-            values = response["data"]["valueRanges"][0]["values"]
-            return values
-        except Exception as e:
-            Common.logger("feishu").error("读取工作表所有数据异常:{}", e)
+
+        get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                               + cls.spreadsheettoken(crawler) + "/values_batch_get"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            "ranges": sheetid,
+            "valueRenderOption": "ToString",
+            "dateTimeRenderOption": "",
+            "user_id_type": "open_id"
+        }
+        urllib3.disable_warnings()
+        r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
+        response = json.loads(r.content.decode("utf8"))
+        values = response["data"]["valueRanges"][0]["values"]
+        return values
+
 
     # 工作表,插入行或列
     @classmethod
@@ -136,9 +131,8 @@ class Feishu:
 
             urllib3.disable_warnings()
             r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
-            Common.logger("feishu").info("插入行或列:{}", r.json()["msg"])
         except Exception as e:
-            Common.logger("feishu").error("插入行或列异常:{}", e)
+            logger.error("插入行或列异常:{}", e)
 
     # 写入数据
     @classmethod
@@ -168,9 +162,8 @@ class Feishu:
             }
             urllib3.disable_warnings()
             r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
-            Common.logger("feishu").info("写入数据:{}", r.json()["msg"])
         except Exception as e:
-            Common.logger("feishu").error("写入数据异常:{}", e)
+            logger.error("写入数据异常:{}", e)
 
     # 合并单元格
     @classmethod
@@ -196,9 +189,8 @@ class Feishu:
             }
             urllib3.disable_warnings()
             r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
-            Common.logger("feishu").info("合并单元格:{}", r.json()["msg"])
         except Exception as e:
-            Common.logger("feishu").error("合并单元格异常:{}", e)
+            logger.error("合并单元格异常:{}", e)
 
     # 读取单元格数据
     @classmethod
@@ -229,10 +221,10 @@ class Feishu:
             }
             urllib3.disable_warnings()
             r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
-            # print(r.text)
+            # logger.error(r.text)
             return r.json()["data"]["valueRange"]["values"][0]
         except Exception as e:
-            Common.logger("feishu").error("读取单元格数据异常:{}", e)
+            logger.error("读取单元格数据异常:{}", e)
     # 获取表内容
     @classmethod
     def get_sheet_content(cls, crawler, sheet_id):
@@ -247,7 +239,7 @@ class Feishu:
                         content_list.append(y)
             return content_list
         except Exception as e:
-            Common.logger("feishu").error(f'get_sheet_content:{e}\n')
+            logger.error(f'get_sheet_content:{e}\n')
 
     # 删除行或列,可选 ROWS、COLUMNS
     @classmethod
@@ -279,9 +271,8 @@ class Feishu:
             }
             urllib3.disable_warnings()
             r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
-            Common.logger("feishu").info("删除视频数据:{}", r.json()["msg"])
         except Exception as e:
-            Common.logger("feishu").error("删除视频数据异常:{}", e)
+            logger.error("删除视频数据异常:{}", e)
 
     # 获取用户 ID
     @classmethod
@@ -311,7 +302,7 @@ class Feishu:
 
             return open_id
         except Exception as e:
-            Common.logger("feishu").error(f"get_userid异常:{e}\n")
+            logger.error(f"get_userid异常:{e}\n")
 
     # 飞书机器人
     @classmethod
@@ -369,9 +360,8 @@ class Feishu:
             })
             urllib3.disable_warnings()
             r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
-            Common.logger("feishu").info(f'触发机器人消息:{r.status_code}, {text}')
         except Exception as e:
-            Common.logger("feishu").error(f"bot异常:{e}\n")
+            logger.error(f"bot异常:{e}\n")
 
     # 飞书机器人-改造计划完成通知
     @classmethod
@@ -402,9 +392,8 @@ class Feishu:
             })
             urllib3.disable_warnings()
             r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
-            Common.logger("feishu").info(f'触发机器人消息:{r.status_code}, {text}')
         except Exception as e:
-            Common.logger("feishu").error(f"bot异常:{e}\n")
+            logger.error(f"bot异常:{e}\n")
 
 
 if __name__ == "__main__":

+ 0 - 3
common/mysql_db.py

@@ -2,10 +2,7 @@
 """
 数据库连接及操作
 """
-import redis
 import pymysql
-from common.common_log import Common
-# from common import Common
 
 class MysqlHelper:
     @classmethod

+ 2 - 3
common/scheduling_db.py

@@ -4,7 +4,6 @@
 数据库连接及操作
 """
 import pymysql
-from common.common_log import Common
 # from common import Common
 
 
@@ -78,7 +77,8 @@ class MysqlHelper:
             # 返回查询结果,元组
             return data
         except Exception as e:
-            Common.logger(log_type).error(f"get_values异常:{e}\n")
+            print(e)
+
 
     @classmethod
     def update_values(cls, log_type, crawler, sql, env, action=''):
@@ -94,7 +94,6 @@ class MysqlHelper:
             connect.commit()
             return res
         except Exception as e:
-            Common.logger(log_type).error(f"update_values异常,进行回滚操作:{e}\n")
             # 发生错误时回滚
             connect.rollback()
 

+ 4 - 12
data_channel/douyin.py

@@ -5,9 +5,8 @@ import time
 import requests
 import urllib3
 from requests.adapters import HTTPAdapter
-from common import Material, Common, Feishu, AliyunLogger
+from common import Material, Feishu, AliyunLogger
 from common.sql_help import sqlCollect
-from data_channel.data_help import dataHelp
 from data_channel.douyin_help import DouYinHelper
 
 
@@ -88,8 +87,7 @@ class DY:
                         duration = data[i].get('video', {}).get('duration', {})
                         duration = int(duration) // 1000
                         # duration = dataHelp.video_duration(video_url)
-                        Common.logger("dy").info(
-                            f"扫描:{task_mark},用户主页id:{url_id},视频id{video_id} ,分享:{share_count},点赞{digg_count}")
+
                         log_data = f"user:{url_id},,video_id:{video_id},,video_url:{video_url},,original_title:{old_title},,share_count:{share_count},,digg_count:{digg_count},,duration:{duration}"
                         AliyunLogger.logging(channel_id, name, url_id, video_id, "扫描到一条视频", "2001", log_data)
                         if status:
@@ -97,21 +95,17 @@ class DY:
                             continue
                         if share_count < 200:
                             AliyunLogger.logging(channel_id, name, url_id, video_id, "不符合规则:分享小于200", "2003", log_data)
-                            Common.logger("dy").info(
-                                f"不符合规则:{task_mark},用户主页id:{url_id},视频id{video_id} ,分享:{share_count},点赞{digg_count}")
+
                             continue
                         video_percent = '%.2f' % (share_count / digg_count)
                         special = float(0.15)
                         if float(video_percent) < special:
                             AliyunLogger.logging(channel_id, name, url_id, video_id, "不符合规则:分享/点赞小于0.15", "2003", log_data)
-                            Common.logger("dy").info(
-                                f"不符合规则:{task_mark},用户主页id:{url_id},视频id{video_id} ,分享:{share_count},点赞{digg_count} ")
+
                             continue
 
                         if int(duration) < 30 or int(duration) > 720:
                             AliyunLogger.logging(channel_id, name, url_id, video_id, "不符合规则:时长不符合规则大于720秒/小于30秒", "2003", log_data)
-                            Common.logger("dy").info(
-                                f"不符合规则:{task_mark},用户主页id:{url_id},视频id{video_id} ,分享:{share_count},点赞{digg_count} ,时长:{duration} ")
                             continue
                         cover_url = data[i].get('video').get('cover').get('url_list')[0]  # 视频封面
                         all_data = {"video_id": video_id, "cover": cover_url, "video_url": video_url,
@@ -119,7 +113,6 @@ class DY:
                         list.append(all_data)
                         AliyunLogger.logging(channel_id, name, url_id, video_id, "符合规则等待改造", "2004", log_data)
                         if len(list) == int(number):
-                            Common.logger(mark).info(f"获取抖音视频总数:{len(list)}\n")
                             return list
 
             return list
@@ -127,5 +120,4 @@ class DY:
             # Feishu.bot("liuzhaoheng", '机器自动改造消息通知', f'抖音-{name}cookie过期,请及时更换', '刘兆恒')
             # Feishu.bot("wangxueke", '机器自动改造消息通知', f'抖音-{name}cookie过期,请及时更换', '王雪珂')
             # Feishu.bot("xinxin", '机器自动改造消息通知', f'抖音-{name}cookie过期,请及时更换', '信欣')
-            Common.logger("dy").info(f"抖音历史数据获取失败:{exc}\n")
             return list

+ 1 - 11
data_channel/dy_keyword.py

@@ -1,7 +1,7 @@
 import requests
 import json
 
-from common import Common, AliyunLogger
+from common import AliyunLogger
 from common.sql_help import sqlCollect
 
 
@@ -49,7 +49,6 @@ class DyKeyword:
             response = response.json()
             code = response['code']
             if code != 0:
-                Common.logger("dy-key-word").info(f"抖音搜索词数据获取失败,接口为/dou_yin/keyword\n")
                 return list
             data = response['data']['data']
             for i in range(len(data)):
@@ -74,26 +73,18 @@ class DyKeyword:
                 duration = duration / 1000
                 log_data = f"user:{keyword},,video_id:{video_id},,video_url:{video_url},,original_title:{old_title},,share_count:{share_count},,digg_count:{digg_count},,duration:{duration}"
                 AliyunLogger.logging(channel_id, name, keyword, video_id, "扫描到一条视频", "2001", log_data)
-                Common.logger("dy-key-word").info(
-                    f"扫描:{task_mark},搜索词:{keyword},视频id{video_id} ,分享:{share_count},点赞{digg_count}")
                 if status:
                     AliyunLogger.logging(channel_id, name, keyword, video_id, "该视频已改造过", "2002", log_data)
                     continue
                 video_percent = '%.2f' % (int(share_count) / int(digg_count))
                 if int(share_count) < share_count_rule:
                     AliyunLogger.logging(channel_id, name, keyword, video_id, f"不符合规则:分享小于{share_count_rule}", "2003", log_data)
-                    Common.logger("dy-key-word").info(
-                        f"不符合规则:{task_mark},用户主页id:{keyword},视频id{video_id} ,分享:{share_count},点赞{digg_count} ,时长:{int(duration)} ")
                     continue
                 if float(video_percent) < special:
                     AliyunLogger.logging(channel_id, name, keyword, video_id, f"不符合规则:分享/点赞小于{special}", "2003", log_data)
-                    Common.logger("dy-key-word").info(
-                        f"不符合规则:{task_mark},用户主页id:{keyword},视频id{video_id} ,分享:{share_count},点赞{digg_count} ,时长:{int(duration)} ")
                     continue
                 if int(duration) < short_duration_rule or int(duration) > 720:
                     AliyunLogger.logging(channel_id, name, keyword, video_id, f"不符合规则:时长不符合规则大于720秒/小于{short_duration_rule}秒", "2003", log_data)
-                    Common.logger("dy-key-word").info(
-                        f"不符合规则:{task_mark},用户主页id:{keyword},视频id{video_id} ,分享:{share_count},点赞{digg_count} ,时长:{int(duration)} ")
                     continue
                 cover_url = data[i].get('video').get('cover').get('url_list')[0]  # 视频封面
                 all_data = {"video_id": video_id, "cover": cover_url, "video_url": video_url, "rule": video_percent,
@@ -102,7 +93,6 @@ class DyKeyword:
                 AliyunLogger.logging(channel_id, name, keyword, video_id, "符合规则等待改造", "2004", log_data)
             return list
         except Exception as exc:
-            Common.logger("dy-key-word").info(f"抖音搜索词{keyword}获取失败{exc}\n")
             return list
 
 

+ 2 - 17
data_channel/dy_ls.py

@@ -4,7 +4,7 @@ import time
 import requests
 import json
 
-from common import Common, Feishu, AliyunLogger
+from common import Feishu, AliyunLogger
 from common.sql_help import sqlCollect
 
 
@@ -31,7 +31,6 @@ class DYLS:
                 response = response.json()
                 code = response['code']
                 if code != 0:
-                    Common.logger("dy-ls").info(f"抖音历史数据获取失败,接口为/dou_yin/blogge\n")
                     return list
                 data_list = response['data']
                 next_cursor = str(data_list['next_cursor'])
@@ -60,8 +59,7 @@ class DYLS:
                         .replace("'", "").replace("#", "").replace("Merge", "")
                     log_data = f"user:{url_id},,video_id:{video_id},,video_url:{video_url},,original_title:{old_title},,share_count:{share_count},,digg_count:{digg_count},,duration:{duration}"
                     AliyunLogger.logging(channel_id, name, url_id, video_id, "扫描到一条视频", "2001", log_data)
-                    Common.logger("dy-ls").info(
-                        f"扫描:{task_mark},用户主页id:{url_id},视频id{video_id} ,分享:{share_count},点赞{digg_count}")
+
                     if status:
                         AliyunLogger.logging(channel_id, name, url_id, video_id, "该视频已改造过", "2002", log_data)
                         continue
@@ -69,18 +67,12 @@ class DYLS:
                     special = float(0.25)
                     if int(share_count) < 500:
                         AliyunLogger.logging(channel_id, name, url_id, video_id, "不符合规则:分享小于500", "2003", log_data)
-                        Common.logger("dy-ls").info(
-                            f"不符合规则:{task_mark},用户主页id:{url_id},视频id{video_id} ,分享:{share_count},点赞{digg_count} ,时长:{int(duration)} ")
                         continue
                     if float(video_percent) < special:
                         AliyunLogger.logging(channel_id, name, url_id, video_id, "不符合规则:分享/点赞小于0.25", "2003", log_data)
-                        Common.logger("dy-ls").info(
-                            f"不符合规则:{task_mark},用户主页id:{url_id},视频id{video_id} ,分享:{share_count},点赞{digg_count} ,时长:{int(duration)} ")
                         continue
                     if int(duration) < 30 or int(duration) > 720:
                         AliyunLogger.logging(channel_id, name, url_id, video_id, "不符合规则:时长不符合规则大于720秒/小于30秒", "2003", log_data)
-                        Common.logger("dy-ls").info(
-                            f"不符合规则:{task_mark},用户主页id:{url_id},视频id{video_id} ,分享:{share_count},点赞{digg_count} ,时长:{int(duration)} ")
                         continue
                     cover_url = data[i].get('video').get('cover').get('url_list')[0]  # 视频封面
                     all_data = {"video_id": video_id, "cover": cover_url, "video_url": video_url, "rule": video_percent,
@@ -88,13 +80,11 @@ class DYLS:
                     list.append(all_data)
                     AliyunLogger.logging(channel_id, name, url_id, video_id, "符合规则等待改造", "2004", log_data)
                     if len(list) == int(number):
-                        Common.logger("dy-ls").info(f"获取抖音历史视频总数:{len(list)}\n")
                         return list
 
                 if next_cursor == False:
                     return list
             except Exception as exc:
-                Common.logger("dy-ls").info(f"抖音历史数据获取失败:{exc}\n")
                 return list
             return list
         return list
@@ -143,8 +133,6 @@ class DYLS:
                         special = float(0.25)
                         duration = duration / 1000
                         if int(share_count) < 500 or float(video_percent) < special or int(duration) < 30 or int(duration) > 720:
-                            Common.logger("dy-ls").info(
-                                f"不符合规则:{task_mark},用户主页id:{url_id},视频id{video_id} ,分享:{share_count},点赞{good_count} ,时长:{int(duration)} ")
                             continue
                         video_url, image_url = cls.get_video(video_id)
                         if video_url:
@@ -152,17 +140,14 @@ class DYLS:
                                         "old_title": old_title}
                             list.append(all_data)
                             if len(list) == int(number):
-                                Common.logger("dy-ls").info(f"获取抖音历史视频总数:{len(list)}\n")
                                 return list
                         else:
-                            Common.logger("dy-ls").info(f"抖音历史获取url失败")
                             Feishu.finish_bot("dou_yin/detail接口无法获取到视频链接",
                                               "https://open.feishu.cn/open-apis/bot/v2/hook/575ca6a1-84b4-4a2f-983b-1d178e7b16eb",
                                               "【抖音异常提示 】")
                 if has_more == False:
                     return list
             except Exception as exc:
-                Common.logger("dy-ls").info(f"抖音历史数据获取失败:{exc}\n")
                 return list
 
     @classmethod

+ 1 - 12
data_channel/ks_keyword.py

@@ -3,7 +3,7 @@ import time
 import requests
 import json
 
-from common import Common, AliyunLogger, Feishu
+from common import AliyunLogger, Feishu
 from common.sql_help import sqlCollect
 
 
@@ -58,9 +58,7 @@ class KsKeyword:
                     Feishu.finish_bot(f"kuai_shou/keyword {response['msg']},cookie 过期需要更换",
                                       "https://open.feishu.cn/open-apis/bot/v2/hook/575ca6a1-84b4-4a2f-983b-1d178e7b16eb",
                                       "【快手搜索接口使用提示】")
-                    Common.logger("ks-key-word").info(f"快手搜索词数据获取失败,{response['msg']}\n")
                     return list
-                Common.logger("ks-key-word").info(f"快手搜索词数据获取失败,接口为kuai_shou/keyword\n")
                 return list
             data_list = response['data']['data']
             for data in data_list:
@@ -90,21 +88,13 @@ class KsKeyword:
                 if float(video_percent) < special:
                     AliyunLogger.logging(channel_id, name, keyword, photo_id, f"不符合规则:分享/浏览{special}", "2003", log_data)
 
-                    Common.logger("ks-key-word").info(
-                        f"不符合规则:{task_mark},用户主页id:{keyword},视频id{photo_id} ,分享:{share_count},浏览{view_count} ,时长:{int(duration)} ")
                     continue
                 if int(share_count) < share_count_rule:
                     AliyunLogger.logging(channel_id, name, keyword, photo_id, f"不符合规则:分享小于{share_count_rule}", "2003", log_data)
-
-                    Common.logger("ks-key-word").info(
-                        f"不符合规则:{task_mark},用户主页id:{keyword},视频id{photo_id} ,分享:{share_count},浏览{view_count} ,时长:{int(duration)} ")
                     continue
                 if int(duration) < short_duration_rule or int(duration) > 720:
                     AliyunLogger.logging(channel_id, name, keyword, photo_id, f"不符合规则:时长不符合规则大于720秒/小于{short_duration_rule}", "2003",
                                          log_data)
-
-                    Common.logger("ks-key-word").info(
-                        f"不符合规则:{task_mark},用户主页id:{keyword},视频id{photo_id} ,分享:{share_count},浏览{view_count} ,时长:{int(duration)} ")
                     continue
 
                 log_data = f"user:{keyword},,video_id:{photo_id},,video_url:{video_url},,original_title:{old_title},,share_count:{share_count},,view_count:{view_count},,duration:{duration}"
@@ -116,7 +106,6 @@ class KsKeyword:
 
             return list
         except Exception as exc:
-            Common.logger("ks-key-word").info(f"快手搜索词{keyword}获取失败{exc}\n")
             return list
 
     @classmethod

+ 1 - 12
data_channel/ks_ls.py

@@ -2,7 +2,7 @@ import random
 import time
 import requests
 import json
-from common import Common, Feishu, AliyunLogger
+from common import Feishu, AliyunLogger
 from common.sql_help import sqlCollect
 
 class KSLS:
@@ -59,21 +59,12 @@ class KSLS:
                     #     continue
                     if float(video_percent) < special:
                         AliyunLogger.logging(channel_id, name, url_id, photo_id, "不符合规则:分享/浏览小于0.0005", "2003", log_data)
-
-                        Common.logger("ks-ls").info(
-                            f"不符合规则:{task_mark},用户主页id:{url_id},视频id{photo_id} ,分享:{share_count},浏览{view_count} ,时长:{int(duration)} ")
                         continue
                     if int(share_count) < 100:
                         AliyunLogger.logging(channel_id, name, url_id, photo_id, "不符合规则:分享小于100", "2003", log_data)
-
-                        Common.logger("ks-ls").info(
-                            f"不符合规则:{task_mark},用户主页id:{url_id},视频id{photo_id} ,分享:{share_count},浏览{view_count} ,时长:{int(duration)} ")
                         continue
                     if int(duration) < 30 or (duration) > 720:
                         AliyunLogger.logging(channel_id, name, url_id, photo_id, "不符合规则:时长不符合规则大于720秒/小于30秒", "2003", log_data)
-
-                        Common.logger("ks-ls").info(
-                            f"不符合规则:{task_mark},用户主页id:{url_id},视频id{photo_id} ,分享:{share_count},浏览{view_count} ,时长:{int(duration)} ")
                         continue
                     video_url, image_url = cls.get_video(photo_id)
                     if video_url:
@@ -85,7 +76,6 @@ class KSLS:
                         AliyunLogger.logging(channel_id, name, url_id, photo_id, "符合规则等待改造", "2004", log_data)
 
                         if len(list) == int(number):
-                            Common.logger("ks-ls").info(f"获取快手历史视频总数:{len(list)}\n")
                             return list
                     else:
                         AliyunLogger.logging(channel_id, name, url_id, photo_id, "无法获取到视频链接", "2003", log_data)
@@ -94,7 +84,6 @@ class KSLS:
                     return list
                 return list
         except Exception as exc:
-            Common.logger("ks-ls").info(f"快手历史数据获取失败:{exc}\n")
             return list
 
     @classmethod

+ 121 - 121
data_channel/ks_pc_keyword.py

@@ -1,121 +1,121 @@
-import time
-
-import requests
-import json
-
-from common import Common, AliyunLogger, Feishu, Material
-from common.sql_help import sqlCollect
-from common.userAgent import get_random_user_agent
-
-
-class KsPcKeyword:
-    @classmethod
-    def get_key_word(cls, keyword, task_mark, mark, channel_id, name, task):
-        list = []
-        url = "https://www.kuaishou.com/graphql"
-
-        payload = json.dumps({
-            "operationName": "visionSearchPhoto",
-            "variables": {
-                "keyword": keyword,
-                "pcursor": "",
-                "page": "search"
-            },
-            "query": "fragment photoContent on PhotoEntity {\n  __typename\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  commentCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  riskTagContent\n  riskTagUrl\n}\n\nfragment recoPhotoFragment on recoPhotoEntity {\n  __typename\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  commentCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  riskTagContent\n  riskTagUrl\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    ...recoPhotoFragment\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nquery visionSearchPhoto($keyword: String, $pcursor: String, $searchSessionId: String, $page: String, $webPageArea: String) {\n  visionSearchPhoto(keyword: $keyword, pcursor: $pcursor, searchSessionId: $searchSessionId, page: $page, webPageArea: $webPageArea) {\n    result\n    llsid\n    webPageArea\n    feeds {\n      ...feedContent\n      __typename\n    }\n    searchSessionId\n    pcursor\n    aladdinBanner {\n      imgUrl\n      link\n      __typename\n    }\n    __typename\n  }\n}\n"
-        })
-        cookie = Material.get_cookie_data("KsoMsyP2ghleM9tzBfmcEEXBnXg", "U1gySe", "快手搜索-cookie")
-        headers = {
-            'Accept-Language': 'zh-CN,zh;q=0.9',
-            'Cache-Control': 'no-cache',
-            'Connection': 'keep-alive',
-            'Origin': 'https://www.kuaishou.com',
-            'Pragma': 'no-cache',
-            'User-Agent': get_random_user_agent("pc"),
-            'accept': '*/*',
-            'content-type': 'application/json',
-            'Cookie': cookie
-        }
-        try:
-            time.sleep(3)
-            # 代理信息
-            proxy = "http://spkbt3wnzw:cx6R=v5mQuBgqsQ4o7@cn.visitxiangtan.com:30000"
-            proxies = {
-                "http": proxy,
-                "https": proxy
-            }
-            response = requests.request("POST", url, headers=headers, data=payload, proxies=proxies)
-            text = response.text
-            if text:
-                response_dict = json.loads(text)
-                result = response_dict.get('result', None)
-                if result:
-                    log_type = ['liukunyu', 'wangxueke', 'xinxin']
-                    mark_name = ['刘坤宇', '王雪珂', '信欣']
-                    Feishu.bot(log_type, '快手关键词搜索', f'快手关键词搜索cookie过期,请及时更换', mark_name)
-                    time.sleep(10)
-                    return list
-            response = response.json()
-            data_list = response['data']['visionSearchPhoto']['feeds']
-            for data in data_list:
-                data = data['photo']
-                photo_id = data["id"]
-                status = sqlCollect.is_used(task_mark, photo_id, mark, channel_id)
-
-                view_count = data["viewCount"] if "viewCount" in data and data["viewCount"] else 0
-                like_count = data["likeCount"] if "likeCount" in data and data["likeCount"] else 0
-                like_count = cls.convert_to_number(like_count)
-                video_percent = '%.4f' % (int(like_count) / int(view_count))
-                special = 0.015
-                old_title = data["caption"]  # 标题
-                duration = data["duration"]
-                duration = int(duration) / 1000
-                video_url = data["photoUrl"]
-                image_url = data["coverUrl"]
-                log_data = f"user:{keyword},,video_id:{photo_id},,video_url:{video_url},original_title:{old_title},,like_count:{like_count},,view_count:{view_count},,duration:{duration}"
-                AliyunLogger.logging(channel_id, name, keyword, photo_id, "扫描到一条视频", "2001", log_data)
-                if status:
-                    AliyunLogger.logging(channel_id, name, keyword, photo_id, "该视频已改造过", "2001", log_data)
-                    continue
-                if int(view_count) < 1000:
-                    AliyunLogger.logging(channel_id, name, keyword, photo_id, f"不符合规则:浏览小于1000", "2003", log_data)
-                    Common.logger("ks-key-word").info(
-                        f"不符合规则:{task_mark},用户主页id:{keyword},视频id{photo_id} ,浏览:{view_count},浏览{view_count} ,时长:{int(duration)} ")
-                    continue
-                if float(video_percent) < special:
-                    AliyunLogger.logging(channel_id, name, keyword, photo_id, f"不符合规则:点赞/浏览{special}", "2003", log_data)
-                    Common.logger("ks-key-word").info(
-                        f"不符合规则:{task_mark},用户主页id:{keyword},视频id{photo_id} ,浏览:{view_count},浏览{view_count} ,时长:{int(duration)} ")
-                    continue
-                if int(duration) < 30 or int(duration) > 600:
-                    AliyunLogger.logging(channel_id, name, keyword, photo_id,
-                                         f"不符合规则:时长不符合规则大于600秒/小于30秒", "2003",
-                                         log_data)
-
-                    Common.logger("ks-key-word").info(
-                        f"不符合规则:{task_mark},用户主页id:{keyword},视频id{photo_id} ,浏览:{view_count},浏览{view_count} ,时长:{int(duration)} ")
-                    continue
-                AliyunLogger.logging(channel_id, name, keyword, photo_id, "符合规则等待改造", "2004", log_data)
-                all_data = {"video_id": photo_id, "cover": image_url, "video_url": video_url,
-                            "rule": '',
-                            "old_title": old_title}
-                list.append(all_data)
-            return list
-        except Exception as exc:
-            Common.logger("ks-key-word").info(f"快手搜索词{keyword}获取失败{exc}\n")
-            return list
-
-    @classmethod
-    def convert_to_number(cls, value):
-        if value.endswith("万"):
-            return float(value[:-1]) * 10000  # 去掉“万”并乘以 10000
-        return int(value)  # 处理其他格式
-
-
-if __name__ == '__main__':
-    keyword = '毛主席故居'
-    task_mark = '1'
-    mark = 'pl-gjc'
-    channel_id = '快手搜索'
-    name = '1'
-    task = {'combo': ['最新发布', '近1日', '1分钟内']}
-    KsPcKeyword.get_key_word(keyword, task_mark, mark, channel_id, name, task)
+# import time
+#
+# import requests
+# import json
+#
+# from common import Common, AliyunLogger, Feishu, Material
+# from common.sql_help import sqlCollect
+# from common.userAgent import get_random_user_agent
+#
+#
+# class KsPcKeyword:
+#     @classmethod
+#     def get_key_word(cls, keyword, task_mark, mark, channel_id, name, task):
+#         list = []
+#         url = "https://www.kuaishou.com/graphql"
+#
+#         payload = json.dumps({
+#             "operationName": "visionSearchPhoto",
+#             "variables": {
+#                 "keyword": keyword,
+#                 "pcursor": "",
+#                 "page": "search"
+#             },
+#             "query": "fragment photoContent on PhotoEntity {\n  __typename\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  commentCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  riskTagContent\n  riskTagUrl\n}\n\nfragment recoPhotoFragment on recoPhotoEntity {\n  __typename\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  commentCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  riskTagContent\n  riskTagUrl\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    ...recoPhotoFragment\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nquery visionSearchPhoto($keyword: String, $pcursor: String, $searchSessionId: String, $page: String, $webPageArea: String) {\n  visionSearchPhoto(keyword: $keyword, pcursor: $pcursor, searchSessionId: $searchSessionId, page: $page, webPageArea: $webPageArea) {\n    result\n    llsid\n    webPageArea\n    feeds {\n      ...feedContent\n      __typename\n    }\n    searchSessionId\n    pcursor\n    aladdinBanner {\n      imgUrl\n      link\n      __typename\n    }\n    __typename\n  }\n}\n"
+#         })
+#         cookie = Material.get_cookie_data("KsoMsyP2ghleM9tzBfmcEEXBnXg", "U1gySe", "快手搜索-cookie")
+#         headers = {
+#             'Accept-Language': 'zh-CN,zh;q=0.9',
+#             'Cache-Control': 'no-cache',
+#             'Connection': 'keep-alive',
+#             'Origin': 'https://www.kuaishou.com',
+#             'Pragma': 'no-cache',
+#             'User-Agent': get_random_user_agent("pc"),
+#             'accept': '*/*',
+#             'content-type': 'application/json',
+#             'Cookie': cookie
+#         }
+#         try:
+#             time.sleep(3)
+#             # 代理信息
+#             proxy = "http://spkbt3wnzw:cx6R=v5mQuBgqsQ4o7@cn.visitxiangtan.com:30000"
+#             proxies = {
+#                 "http": proxy,
+#                 "https": proxy
+#             }
+#             response = requests.request("POST", url, headers=headers, data=payload, proxies=proxies)
+#             text = response.text
+#             if text:
+#                 response_dict = json.loads(text)
+#                 result = response_dict.get('result', None)
+#                 if result:
+#                     log_type = ['liukunyu', 'wangxueke', 'xinxin']
+#                     mark_name = ['刘坤宇', '王雪珂', '信欣']
+#                     Feishu.bot(log_type, '快手关键词搜索', f'快手关键词搜索cookie过期,请及时更换', mark_name)
+#                     time.sleep(10)
+#                     return list
+#             response = response.json()
+#             data_list = response['data']['visionSearchPhoto']['feeds']
+#             for data in data_list:
+#                 data = data['photo']
+#                 photo_id = data["id"]
+#                 status = sqlCollect.is_used(task_mark, photo_id, mark, channel_id)
+#
+#                 view_count = data["viewCount"] if "viewCount" in data and data["viewCount"] else 0
+#                 like_count = data["likeCount"] if "likeCount" in data and data["likeCount"] else 0
+#                 like_count = cls.convert_to_number(like_count)
+#                 video_percent = '%.4f' % (int(like_count) / int(view_count))
+#                 special = 0.015
+#                 old_title = data["caption"]  # 标题
+#                 duration = data["duration"]
+#                 duration = int(duration) / 1000
+#                 video_url = data["photoUrl"]
+#                 image_url = data["coverUrl"]
+#                 log_data = f"user:{keyword},,video_id:{photo_id},,video_url:{video_url},original_title:{old_title},,like_count:{like_count},,view_count:{view_count},,duration:{duration}"
+#                 AliyunLogger.logging(channel_id, name, keyword, photo_id, "扫描到一条视频", "2001", log_data)
+#                 if status:
+#                     AliyunLogger.logging(channel_id, name, keyword, photo_id, "该视频已改造过", "2001", log_data)
+#                     continue
+#                 if int(view_count) < 1000:
+#                     AliyunLogger.logging(channel_id, name, keyword, photo_id, f"不符合规则:浏览小于1000", "2003", log_data)
+#                     Common.logger("ks-key-word").info(
+#                         f"不符合规则:{task_mark},用户主页id:{keyword},视频id{photo_id} ,浏览:{view_count},浏览{view_count} ,时长:{int(duration)} ")
+#                     continue
+#                 if float(video_percent) < special:
+#                     AliyunLogger.logging(channel_id, name, keyword, photo_id, f"不符合规则:点赞/浏览{special}", "2003", log_data)
+#                     Common.logger("ks-key-word").info(
+#                         f"不符合规则:{task_mark},用户主页id:{keyword},视频id{photo_id} ,浏览:{view_count},浏览{view_count} ,时长:{int(duration)} ")
+#                     continue
+#                 if int(duration) < 30 or int(duration) > 600:
+#                     AliyunLogger.logging(channel_id, name, keyword, photo_id,
+#                                          f"不符合规则:时长不符合规则大于600秒/小于30秒", "2003",
+#                                          log_data)
+#
+#                     Common.logger("ks-key-word").info(
+#                         f"不符合规则:{task_mark},用户主页id:{keyword},视频id{photo_id} ,浏览:{view_count},浏览{view_count} ,时长:{int(duration)} ")
+#                     continue
+#                 AliyunLogger.logging(channel_id, name, keyword, photo_id, "符合规则等待改造", "2004", log_data)
+#                 all_data = {"video_id": photo_id, "cover": image_url, "video_url": video_url,
+#                             "rule": '',
+#                             "old_title": old_title}
+#                 list.append(all_data)
+#             return list
+#         except Exception as exc:
+#             Common.logger("ks-key-word").info(f"快手搜索词{keyword}获取失败{exc}\n")
+#             return list
+#
+#     @classmethod
+#     def convert_to_number(cls, value):
+#         if value.endswith("万"):
+#             return float(value[:-1]) * 10000  # 去掉“万”并乘以 10000
+#         return int(value)  # 处理其他格式
+#
+#
+# if __name__ == '__main__':
+#     keyword = '毛主席故居'
+#     task_mark = '1'
+#     mark = 'pl-gjc'
+#     channel_id = '快手搜索'
+#     name = '1'
+#     task = {'combo': ['最新发布', '近1日', '1分钟内']}
+#     KsPcKeyword.get_key_word(keyword, task_mark, mark, channel_id, name, task)

+ 1 - 2
data_channel/ks_xcx.py

@@ -1,9 +1,8 @@
 import json
-import time
 
 import requests
 
-from common import Feishu, AliyunLogger
+from common import AliyunLogger
 from common.sql_help import sqlCollect
 
 

+ 1 - 10
data_channel/ks_xcx_keyword.py

@@ -3,7 +3,7 @@ import time
 import requests
 import json
 
-from common import Common, AliyunLogger, Feishu
+from common import AliyunLogger, Feishu
 from common.sql_help import sqlCollect
 
 
@@ -60,9 +60,7 @@ class KsXCXKeyword:
                     Feishu.finish_bot(f"kuai_shou/keyword {response['msg']},cookie 过期需要更换",
                                       "https://open.feishu.cn/open-apis/bot/v2/hook/575ca6a1-84b4-4a2f-983b-1d178e7b16eb",
                                       "【快手搜索接口使用提示】")
-                    Common.logger("ks-key-word").info(f"快手搜索词数据获取失败,{response['msg']}\n")
                     return list
-                Common.logger("ks-key-word").info(f"快手搜索词数据获取失败,接口为kuai_shou/keyword\n")
                 return list
             data_list = response['data']['data']
             for data in data_list:
@@ -92,21 +90,15 @@ class KsXCXKeyword:
                 if float(video_percent) < special:
                     AliyunLogger.logging(channel_id, name, keyword, photo_id, f"不符合规则:分享/浏览{special}", "2003", log_data)
 
-                    Common.logger("ks-key-word").info(
-                        f"不符合规则:{task_mark},用户主页id:{keyword},视频id{photo_id} ,分享:{share_count},浏览{view_count} ,时长:{int(duration)} ")
                     continue
                 if int(share_count) < share_count_rule:
                     AliyunLogger.logging(channel_id, name, keyword, photo_id, f"不符合规则:分享小于{share_count_rule}", "2003", log_data)
 
-                    Common.logger("ks-key-word").info(
-                        f"不符合规则:{task_mark},用户主页id:{keyword},视频id{photo_id} ,分享:{share_count},浏览{view_count} ,时长:{int(duration)} ")
                     continue
                 if int(duration) < short_duration_rule or int(duration) > 720:
                     AliyunLogger.logging(channel_id, name, keyword, photo_id, f"不符合规则:时长不符合规则大于720秒/小于{short_duration_rule}", "2003",
                                          log_data)
 
-                    Common.logger("ks-key-word").info(
-                        f"不符合规则:{task_mark},用户主页id:{keyword},视频id{photo_id} ,分享:{share_count},浏览{view_count} ,时长:{int(duration)} ")
                     continue
 
                 log_data = f"user:{keyword},,video_id:{photo_id},,video_url:{video_url},,original_title:{old_title},,share_count:{share_count},,view_count:{view_count},,duration:{duration}"
@@ -118,7 +110,6 @@ class KsXCXKeyword:
             print(list)
             return list
         except Exception as exc:
-            Common.logger("ks-key-word").info(f"快手搜索词{keyword}获取失败{exc}\n")
             return list
 
     @classmethod

+ 6 - 15
data_channel/kuaishou.py

@@ -5,7 +5,7 @@ import json
 import urllib3
 from requests.adapters import HTTPAdapter
 
-from common import Feishu, Material, Common, AliyunLogger
+from common import Feishu, Material, AliyunLogger
 from common.sql_help import sqlCollect
 from data_channel.data_help import dataHelp
 
@@ -32,7 +32,7 @@ class KS:
                     share_count = data.get("share_count")
                     return int(share_count)
             except KeyError as e:
-                Common.logger("ks").info(f"获取分享数据失败:{e}\n")
+                continue
         return 0
 
     @classmethod
@@ -129,35 +129,26 @@ class KS:
                     # log_data = f"user:{url_id},,video_id:{video_id},,video_url:{video_url},,original_title:{old_title},,view_count:{view_count},,duration:{duration}"
 
                     AliyunLogger.logging(channel_id, name, url_id, video_id, "扫描到一条视频", "2001", log_data)
-                    Common.logger("ks").info(
-                        f"扫描:{task_mark},用户主页id:{url_id},视频id{video_id} ,播放数:{view_count} ,分享数:{share_count},时长:{duration} ")
-                    # if status:
-                    #     AliyunLogger.logging(channel_id, name, url_id, video_id, "该视频已改造过", "2002", log_data)
-                    #     continue
+                    if status:
+                        AliyunLogger.logging(channel_id, name, url_id, video_id, "该视频已改造过", "2002", log_data)
+                        continue
                     special = float(0.001)
                     if float(video_percent) < special:
                         AliyunLogger.logging(channel_id, name, url_id, video_id, "不符合规则:分享/浏览小于0.001", "2003", log_data)
-                        Common.logger("ks").info(
-                            f"不符合规则:{task_mark},用户主页id:{url_id},视频id{video_id} ,播放数:{view_count} ,分享数:{share_count},时长:{duration} ")
+
                         continue
                     if share_count < 500:
                         AliyunLogger.logging(channel_id, name, url_id, video_id, "不符合规则:分享小于500", "2003", log_data)
-                        Common.logger("ks").info(
-                            f"不符合规则:{task_mark},用户主页id:{url_id},视频id{video_id} ,播放数:{view_count} ,分享数:{share_count},时长:{duration} ")
                         continue
                     if duration < 30 or duration > 720:
                         AliyunLogger.logging(channel_id, name, url_id, video_id, "不符合规则:时长不符合规则大于720秒/小于30秒", "2003", log_data)
-                        Common.logger("ks").info(
-                            f"不符合规则:{task_mark},用户主页id:{url_id},视频id{video_id} ,播放数:{view_count} ,分享数:{share_count},时长:{duration} ")
                         continue
                     all_data = {"video_id": video_id, "cover": cover_url, "video_url": video_url, "rule": video_percent, "old_title": old_title}
                     list.append(all_data)
                     AliyunLogger.logging(channel_id, name, url_id, video_id, "符合规则等待改造", "2004", log_data)
                     if len(list) == int(number):
-                        Common.logger(mark).info(f"获取快手视频总数:{len(list)}\n")
                         return list
             except Exception as exc:
-                Common.logger("ks").warning(f"{name}的快手获取数据失败:{exc}\n")
                 # Feishu.bot("wangxueke", '机器自动改造消息通知', f'快手-{name}cookie过期,请及时更换', 'wangxueke')
                 # Feishu.bot("liuzhaoheng", '机器自动改造消息通知', f'快手-{name}cookie过期,请及时更换', '刘兆恒')
                 return list

+ 325 - 325
data_channel/kuaishouchuangzuozhe.py

@@ -1,325 +1,325 @@
-import json
-import os
-from hashlib import md5
-import requests
-import time
-from urllib.parse import urlencode
-from datetime import datetime, timedelta
-
-from common import Oss, Feishu, Common, AliyunLogger
-from common.sql_help import sqlCollect
-
-headers = {
-    'Accept-Language': 'zh-cn',
-    'Connection': 'keep-alive',
-    'Content-Type': 'application/x-www-form-urlencoded',
-    'Host': 'creator-app.kuaishou.com',
-    'User-Agent': 'kwai-android aegon/3.12.1',
-}
-class KsFeedVideo:
-    CATEGORY_IDS = {
-        1: "生活",
-        2: "才艺",
-        # 3: "时尚",
-        # 4: "宠物",
-        5: "读书",
-        # 6: "二次元",
-        7: "家居",
-        # 8: "数码",
-        9: "搞笑",
-        10: "健康",
-        11: "旅游",
-        12: "美食",
-        # 13: "美妆",
-        # 14: "汽车",
-        15: "亲子",
-        16: "情感",
-        # 17: "三农",
-        # 18: "摄影",
-        # 19: "舞蹈",
-        # 20: "颜值",
-        # 21: "音乐",
-        # 22: "影视",
-        # 23: "短剧",
-        # 24: "游戏",
-        25: "运动",
-        26: "资讯",
-        27: "人文"
-    }
-    current_category_index = 0
-
-    @staticmethod
-    def calculate_sig(data):
-        src = ''.join([f'{key}={data[key]}' for key in sorted(data.keys())])
-        salt = '08d8eece8e83'
-        return md5(f'{src}{salt}'.encode()).hexdigest()
-
-
-    """
-    切换品类
-    """
-    @classmethod
-    def switch_category(cls):
-        if cls.current_category_index >= len(cls.CATEGORY_IDS):
-            cls.current_category_index = 0
-        category_id = list(cls.CATEGORY_IDS.keys())[cls.current_category_index]
-        url = 'https://creator-app.kuaishou.com/rest/bamboo/inspiration/n/category/confirm/optimize'
-        data = {
-            'isRecommendChange': False,
-            'categoryId': category_id,
-            # 'kuaishou.api_st': "Cg9rdWFpc2hvdS5hcGkuc3QSkAGMQoIK2ZpwlQszYISTxSFxzugi58w2U5gpPqa6an0eU6MFcVsXq2rd_K16UTItZ_OzPV-4jmVN5rNXKXW9jL97JV79Y9PqxaR9xOIr1TEyDzpOq2GM-0W1QRW3M8Li_J6NZ5t1hRFCWHBlOESjiBWs7vq4m1bq_ml0dZ6pgEDfpsWNpBaLRzwZwOO1mD4LqO4aEokh6uHql0RmmtbfoBF25r7QOyIgqNv0TBf6mlwS3bjE0K6sl08M1mMPjW1PB9e0Qr494H8oBTAB",
-            'kuaishou.api_st': 'Cg9rdWFpc2hvdS5hcGkuc3QSkAGMQoIK2ZpwlQszYISTxSFxzugi58w2U5gpPqa6an0eU6MFcVsXq2rd_K16UTItZ_OzPV-4jmVN5rNXKXW9jL97JV79Y9PqxaR9xOIr1TEyDzpOq2GM-0W1QRW3M8Li_J6NZ5t1hRFCWHBlOESjiBWs7vq4m1bq_ml0dZ6pgEDfpsWNpBaLRzwZwOO1mD4LqO4aEokh6uHql0RmmtbfoBF25r7QOyIgqNv0TBf6mlwS3bjE0K6sl08M1mMPjW1PB9e0Qr494H8oBTAB;region_ticket=RT_FAC86448E713714136C088FFCC4431455D1FA7E05A6D25DAD4E4B8CC011FB6E8294169DD9',
-            'client_key': '214c9979',
-        }
-        sig = cls.calculate_sig(data)
-        data['sig'] = sig
-        response = requests.post(url=url, headers=headers, data=data)
-        body = response.content.decode()
-        cls.current_category_index += 1
-        return body
-
-    """
-    获取feed流信息
-    """
-    @classmethod
-    def get_feed_list(cls):
-        cls.switch_category()
-        url = 'https://creator-app.kuaishou.com/rest/bamboo/inspiration/n/feed'
-        data = {
-            'cs': False,
-            'kuaishou.api_st': 'Cg9rdWFpc2hvdS5hcGkuc3QSkAGMQoIK2ZpwlQszYISTxSFxzugi58w2U5gpPqa6an0eU6MFcVsXq2rd_K16UTItZ_OzPV-4jmVN5rNXKXW9jL97JV79Y9PqxaR9xOIr1TEyDzpOq2GM-0W1QRW3M8Li_J6NZ5t1hRFCWHBlOESjiBWs7vq4m1bq_ml0dZ6pgEDfpsWNpBaLRzwZwOO1mD4LqO4aEokh6uHql0RmmtbfoBF25r7QOyIgqNv0TBf6mlwS3bjE0K6sl08M1mMPjW1PB9e0Qr494H8oBTAB;region_ticket=RT_FAC86448E713714136C088FFCC4431455D1FA7E05A6D25DAD4E4B8CC011FB6E8294169DD9',
-            # 'kuaishou.api_st': "Cg9rdWFpc2hvdS5hcGkuc3QSkAGMQoIK2ZpwlQszYISTxSFxzugi58w2U5gpPqa6an0eU6MFcVsXq2rd_K16UTItZ_OzPV-4jmVN5rNXKXW9jL97JV79Y9PqxaR9xOIr1TEyDzpOq2GM-0W1QRW3M8Li_J6NZ5t1hRFCWHBlOESjiBWs7vq4m1bq_ml0dZ6pgEDfpsWNpBaLRzwZwOO1mD4LqO4aEokh6uHql0RmmtbfoBF25r7QOyIgqNv0TBf6mlwS3bjE0K6sl08M1mMPjW1PB9e0Qr494H8oBTAB",
-            'client_key': '214c9979',
-        }
-        sig = cls.calculate_sig(data)
-        data['sig'] = sig
-        response = requests.post(url=url, headers=headers, data=data)
-        body = response.content.decode()
-        return body
-
-    """
-    获取观众画像
-    """
-    @classmethod
-    def analyze_photo(cls, photo_id):
-        url = 'https://creator-app.kuaishou.com/rest/bamboo/inspiration/n/photo/analysis'
-
-        headers = {
-            'Accept-Language': 'zh-cn',
-            'Connection': 'keep-alive',
-            'Content-Type': 'application/x-www-form-urlencoded',
-            'Host': 'creator-app.kuaishou.com',
-        }
-        data = {
-            'photoId': photo_id,
-            'client_key': '214c9979',
-        }
-        sig = cls.calculate_sig(data)
-        data['sig'] = sig
-        response = requests.post(url=url, headers=headers, data=data)
-        body = response.content.decode()
-        json_body = json.loads(body)
-        user_range = json_body['data']['play']['userRange']
-        if len(user_range) == 0:
-            return False, "无画像"
-        age_range = user_range['ageRange']
-        value = age_range[5]['value']
-        value = int(value.strip('%'))
-        if value >= 40:
-            return False, value
-        else:
-            return True, value
-
-    """
-    视频时长转换成秒
-    """
-    @classmethod
-    def milliseconds_to_seconds(cls, milliseconds):
-        seconds = milliseconds / 1000
-        return int(seconds)
-
-    """
-    判断当前视频是否在90天内
-    """
-    @classmethod
-    def get_video_data(cls, timestamp_str):
-        timestamp = datetime.strptime(timestamp_str, "%Y-%m-%d %H:%M:%S")
-        # 获取当前时间
-        current_time = datetime.now()
-        difference = current_time - timestamp
-        if difference <= timedelta(days=90):
-            return False
-        else:
-            return True
-    """
-    获取票圈ID
-    """
-    @classmethod
-    def get_id_by_category(cls, category_name):
-        category_list = [
-            {"id": 71502003, "category": "生活"},
-            {"id": 71502004, "category": "才艺"},
-            {"id": 71502005, "category": "时尚"},
-            {"id": 71502006, "category": "宠物"},
-            {"id": 71502007, "category": "读书"},
-            {"id": 71502008, "category": "二次元"},
-            {"id": 71502009, "category": "家居"},
-            {"id": 71502010, "category": "数码"},
-            {"id": 71502011, "category": "搞笑"},
-            {"id": 71502012, "category": "健康"},
-            {"id": 71502013, "category": "旅游"},
-            {"id": 71502014, "category": "美食"},
-            {"id": 71502015, "category": "美妆"},
-            {"id": 71502016, "category": "汽车"},
-            {"id": 71502018, "category": "亲子"},
-            {"id": 71502019, "category": "情感"},
-            {"id": 71502020, "category": "三农"},
-            {"id": 71502021, "category": "摄影"},
-            {"id": 71502022, "category": "舞蹈"},
-            {"id": 71502023, "category": "颜值"},
-            {"id": 71502024, "category": "音乐"},
-            {"id": 71502025, "category": "影视"},
-            {"id": 71502026, "category": "短剧"},
-            {"id": 71502027, "category": "游戏"},
-            {"id": 71502028, "category": "运动"},
-            {"id": 71502029, "category": "资讯"},
-            {"id": 71502030, "category": "人文"}
-        ]
-        for category in category_list:
-            if category['category'] == category_name:
-                return category['id']
-        return None
-
-    """
-    新生成视频上传到对应账号下
-    """
-    @classmethod
-    def insert_piaoquantv(cls, new_video_path, new_title, n_id, cover):
-
-        url = "https://vlogapi.piaoquantv.com/longvideoapi/crawler/video/send"
-        headers = {
-            'User-Agent': 'PQSpeed/486 CFNetwork/1410.1 Darwin/22.6.0',
-            'cookie': 'JSESSIONID=4DEA2B5173BB9A9E82DB772C0ACDBC9F; JSESSIONID=D02C334150025222A0B824A98B539B78',
-            'referer': 'http://appspeed.piaoquantv.com',
-            'token': '524a8bc871dbb0f4d4717895083172ab37c02d2f',
-            'accept-language': 'zh-CN,zh-Hans;q=0.9',
-            'Content-Type': 'application/x-www-form-urlencoded'
-        }
-        payload = {
-            'coverImgPath': cover,
-            'deviceToken': '9ef064f2f7869b3fd67d6141f8a899175dddc91240971172f1f2a662ef891408',
-            'fileExtensions': 'MP4',
-            'loginUid': n_id,
-            'networkType': 'Wi-Fi',
-            'platform': 'iOS',
-            'requestId': 'fb972cbd4f390afcfd3da1869cd7d001',
-            'sessionId': '362290597725ce1fa870d7be4f46dcc2',
-            'subSessionId': '362290597725ce1fa870d7be4f46dcc2',
-            'title': new_title,
-            'token': '524a8bc871dbb0f4d4717895083172ab37c02d2f',
-            'uid': n_id,
-            'versionCode': '486',
-            'versionName': '3.4.12',
-            'videoFromScene': '1',
-            'videoPath': new_video_path,
-            'viewStatus': '1'
-        }
-        encoded_payload = urlencode(payload)
-        response = requests.request("POST", url, headers=headers, data=encoded_payload)
-        data = response.json()
-        code = data["code"]
-        if code == 0:
-            new_video_id = data["data"]["id"]
-            return new_video_id
-        else:
-            return None
-
-    @classmethod
-    def get_data(cls, channel_id, name):
-        number = 1
-        list = []
-        for category_id, category_name in cls.CATEGORY_IDS.items():
-            try:
-                feed_data = cls.get_feed_list()
-                feed_data = json.loads(feed_data)
-                feeds = feed_data['feeds']
-                for feed in feeds:
-                    photo_id = feed["photo_id"]  # 视频ID
-                    status = sqlCollect.ks_is_used(photo_id)
-
-                    user_name = feed["user_name"]  # 用户名
-                    user_sex = feed["user_sex"]  # 性别 F为女,U为男
-                    time_data = feed["time"]  # 发布时间
-                    caption = feed["caption"]  # 标题
-                    view_count = feed["view_count"]  # 浏览数
-                    like_count = feed["like_count"]  # 点赞数
-                    share_count = feed["share_count"]  # 分享数
-                    duration = feed["duration"]  # 时长/秒
-                    duration = cls.milliseconds_to_seconds(duration)
-                    main_mv_url = feed["main_mv_url"]  # 视频链接
-                    thumbnail_url = feed["thumbnail_url"]  # 视频封面
-                    user_id = feed["user_id"]  # 用户id非用户主页id
-                    log_data = f"user:{user_name},,video_id:{photo_id},,video_url:{main_mv_url},,original_title:{caption},,share_count:{share_count},,view_count:{view_count},,duration:{duration}"
-                    AliyunLogger.logging(channel_id, name, user_name, photo_id, "扫描到一条视频", "2001", log_data)
-                    value, age = cls.analyze_photo(photo_id)
-                    if status:
-                        AliyunLogger.logging(channel_id, name, user_name, photo_id, "该视频已改造过", "2001", log_data)
-                        continue
-                    if value:
-                        AliyunLogger.logging(channel_id, name, user_name, photo_id, f"不符合规则:50+年龄占比小于40%,实际占比{age}", "2003", log_data)
-                        sqlCollect.insert_ks_data(user_name, user_sex, time_data, caption, view_count, like_count,
-                                                  share_count, duration, main_mv_url, thumbnail_url, user_id, '1',
-                                                  photo_id, category_name, age, oss_object=None, video_uid=None)
-
-                        continue
-                    video_percent = '%.4f' % (share_count / view_count)
-                    special = float(0.0005)
-                    if float(video_percent) < special:
-                        AliyunLogger.logging(channel_id, name, user_name, photo_id, "不符合规则:分享/浏览小于0.0005", "2003", log_data)
-
-                        sqlCollect.insert_ks_data(user_name, user_sex, time_data, caption, view_count, like_count, share_count, duration, main_mv_url, thumbnail_url, user_id, '1', photo_id, category_name, age, oss_object=None, video_uid=None)
-                        continue
-                    if share_count < 100:
-                        AliyunLogger.logging(channel_id, name, user_name, photo_id, "不符合规则:分享小于100", "2003", log_data)
-
-                        sqlCollect.insert_ks_data(user_name, user_sex, time_data, caption, view_count, like_count,
-                                                  share_count, duration, main_mv_url, thumbnail_url, user_id, '1',
-                                                  photo_id, category_name, age, oss_object=None, video_uid=None)
-                        continue
-                    if duration < 30 or duration > 720:
-                        AliyunLogger.logging(channel_id, name, user_name, photo_id, "不符合规则:时长不符合规则大于720秒/小于30秒", "2003", log_data)
-                        sqlCollect.insert_ks_data(user_name, user_sex, time_data, caption, view_count, like_count,
-                                                  share_count, duration, main_mv_url, thumbnail_url, user_id, '1',
-                                                  photo_id, category_name, age, oss_object=None, video_uid=None)
-                        continue
-
-                    sqlCollect.insert_ks_data(user_name, user_sex, time_data, caption, view_count,
-                                              like_count, share_count, duration, main_mv_url, thumbnail_url,
-                                              user_id, '0', photo_id, category_name, age, oss_object=None, video_uid=None)
-                    all_data = {"video_id": photo_id, "cover": thumbnail_url, "video_url": main_mv_url, "rule": video_percent,
-                                "old_title": caption}
-                    AliyunLogger.logging(channel_id, name, user_name, photo_id, "符合规则等待改造", "2004", log_data)
-
-                    list.append(all_data)
-                    current_time = datetime.now()
-                    formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
-                    values = [
-                        [category_name, user_name, photo_id, user_sex, caption, view_count, like_count, share_count, duration,
-                         main_mv_url, thumbnail_url, user_id, age, '', '', time_data, formatted_time]]
-                    Feishu.insert_columns("PlcisKhObhzmBothRutc65sJnph", "8fQxFv", "ROWS", 2, 3)
-                    time.sleep(0.5)
-                    Feishu.update_values("PlcisKhObhzmBothRutc65sJnph", "8fQxFv", "A3:Z3", values)
-                    if len(list) == int(number):
-                        Common.logger("ks-czz").info(f"获取快手创作者视频总数:{len(list)}\n")
-                        return list
-                time.sleep(5)
-            except Exception as exc:
-                print(f"异常信息: {exc}")
-                return list
-        return list
-
-
-
-# Example usage:
-if __name__ == "__main__":
-    KsFeedVideo.get_data(1)
+# import json
+# import os
+# from hashlib import md5
+# import requests
+# import time
+# from urllib.parse import urlencode
+# from datetime import datetime, timedelta
+#
+# from common import Oss, Feishu, Common, AliyunLogger
+# from common.sql_help import sqlCollect
+#
+# headers = {
+#     'Accept-Language': 'zh-cn',
+#     'Connection': 'keep-alive',
+#     'Content-Type': 'application/x-www-form-urlencoded',
+#     'Host': 'creator-app.kuaishou.com',
+#     'User-Agent': 'kwai-android aegon/3.12.1',
+# }
+# class KsFeedVideo:
+#     CATEGORY_IDS = {
+#         1: "生活",
+#         2: "才艺",
+#         # 3: "时尚",
+#         # 4: "宠物",
+#         5: "读书",
+#         # 6: "二次元",
+#         7: "家居",
+#         # 8: "数码",
+#         9: "搞笑",
+#         10: "健康",
+#         11: "旅游",
+#         12: "美食",
+#         # 13: "美妆",
+#         # 14: "汽车",
+#         15: "亲子",
+#         16: "情感",
+#         # 17: "三农",
+#         # 18: "摄影",
+#         # 19: "舞蹈",
+#         # 20: "颜值",
+#         # 21: "音乐",
+#         # 22: "影视",
+#         # 23: "短剧",
+#         # 24: "游戏",
+#         25: "运动",
+#         26: "资讯",
+#         27: "人文"
+#     }
+#     current_category_index = 0
+#
+#     @staticmethod
+#     def calculate_sig(data):
+#         src = ''.join([f'{key}={data[key]}' for key in sorted(data.keys())])
+#         salt = '08d8eece8e83'
+#         return md5(f'{src}{salt}'.encode()).hexdigest()
+#
+#
+#     """
+#     切换品类
+#     """
+#     @classmethod
+#     def switch_category(cls):
+#         if cls.current_category_index >= len(cls.CATEGORY_IDS):
+#             cls.current_category_index = 0
+#         category_id = list(cls.CATEGORY_IDS.keys())[cls.current_category_index]
+#         url = 'https://creator-app.kuaishou.com/rest/bamboo/inspiration/n/category/confirm/optimize'
+#         data = {
+#             'isRecommendChange': False,
+#             'categoryId': category_id,
+#             # 'kuaishou.api_st': "Cg9rdWFpc2hvdS5hcGkuc3QSkAGMQoIK2ZpwlQszYISTxSFxzugi58w2U5gpPqa6an0eU6MFcVsXq2rd_K16UTItZ_OzPV-4jmVN5rNXKXW9jL97JV79Y9PqxaR9xOIr1TEyDzpOq2GM-0W1QRW3M8Li_J6NZ5t1hRFCWHBlOESjiBWs7vq4m1bq_ml0dZ6pgEDfpsWNpBaLRzwZwOO1mD4LqO4aEokh6uHql0RmmtbfoBF25r7QOyIgqNv0TBf6mlwS3bjE0K6sl08M1mMPjW1PB9e0Qr494H8oBTAB",
+#             'kuaishou.api_st': 'Cg9rdWFpc2hvdS5hcGkuc3QSkAGMQoIK2ZpwlQszYISTxSFxzugi58w2U5gpPqa6an0eU6MFcVsXq2rd_K16UTItZ_OzPV-4jmVN5rNXKXW9jL97JV79Y9PqxaR9xOIr1TEyDzpOq2GM-0W1QRW3M8Li_J6NZ5t1hRFCWHBlOESjiBWs7vq4m1bq_ml0dZ6pgEDfpsWNpBaLRzwZwOO1mD4LqO4aEokh6uHql0RmmtbfoBF25r7QOyIgqNv0TBf6mlwS3bjE0K6sl08M1mMPjW1PB9e0Qr494H8oBTAB;region_ticket=RT_FAC86448E713714136C088FFCC4431455D1FA7E05A6D25DAD4E4B8CC011FB6E8294169DD9',
+#             'client_key': '214c9979',
+#         }
+#         sig = cls.calculate_sig(data)
+#         data['sig'] = sig
+#         response = requests.post(url=url, headers=headers, data=data)
+#         body = response.content.decode()
+#         cls.current_category_index += 1
+#         return body
+#
+#     """
+#     获取feed流信息
+#     """
+#     @classmethod
+#     def get_feed_list(cls):
+#         cls.switch_category()
+#         url = 'https://creator-app.kuaishou.com/rest/bamboo/inspiration/n/feed'
+#         data = {
+#             'cs': False,
+#             'kuaishou.api_st': 'Cg9rdWFpc2hvdS5hcGkuc3QSkAGMQoIK2ZpwlQszYISTxSFxzugi58w2U5gpPqa6an0eU6MFcVsXq2rd_K16UTItZ_OzPV-4jmVN5rNXKXW9jL97JV79Y9PqxaR9xOIr1TEyDzpOq2GM-0W1QRW3M8Li_J6NZ5t1hRFCWHBlOESjiBWs7vq4m1bq_ml0dZ6pgEDfpsWNpBaLRzwZwOO1mD4LqO4aEokh6uHql0RmmtbfoBF25r7QOyIgqNv0TBf6mlwS3bjE0K6sl08M1mMPjW1PB9e0Qr494H8oBTAB;region_ticket=RT_FAC86448E713714136C088FFCC4431455D1FA7E05A6D25DAD4E4B8CC011FB6E8294169DD9',
+#             # 'kuaishou.api_st': "Cg9rdWFpc2hvdS5hcGkuc3QSkAGMQoIK2ZpwlQszYISTxSFxzugi58w2U5gpPqa6an0eU6MFcVsXq2rd_K16UTItZ_OzPV-4jmVN5rNXKXW9jL97JV79Y9PqxaR9xOIr1TEyDzpOq2GM-0W1QRW3M8Li_J6NZ5t1hRFCWHBlOESjiBWs7vq4m1bq_ml0dZ6pgEDfpsWNpBaLRzwZwOO1mD4LqO4aEokh6uHql0RmmtbfoBF25r7QOyIgqNv0TBf6mlwS3bjE0K6sl08M1mMPjW1PB9e0Qr494H8oBTAB",
+#             'client_key': '214c9979',
+#         }
+#         sig = cls.calculate_sig(data)
+#         data['sig'] = sig
+#         response = requests.post(url=url, headers=headers, data=data)
+#         body = response.content.decode()
+#         return body
+#
+#     """
+#     获取观众画像
+#     """
+#     @classmethod
+#     def analyze_photo(cls, photo_id):
+#         url = 'https://creator-app.kuaishou.com/rest/bamboo/inspiration/n/photo/analysis'
+#
+#         headers = {
+#             'Accept-Language': 'zh-cn',
+#             'Connection': 'keep-alive',
+#             'Content-Type': 'application/x-www-form-urlencoded',
+#             'Host': 'creator-app.kuaishou.com',
+#         }
+#         data = {
+#             'photoId': photo_id,
+#             'client_key': '214c9979',
+#         }
+#         sig = cls.calculate_sig(data)
+#         data['sig'] = sig
+#         response = requests.post(url=url, headers=headers, data=data)
+#         body = response.content.decode()
+#         json_body = json.loads(body)
+#         user_range = json_body['data']['play']['userRange']
+#         if len(user_range) == 0:
+#             return False, "无画像"
+#         age_range = user_range['ageRange']
+#         value = age_range[5]['value']
+#         value = int(value.strip('%'))
+#         if value >= 40:
+#             return False, value
+#         else:
+#             return True, value
+#
+#     """
+#     视频时长转换成秒
+#     """
+#     @classmethod
+#     def milliseconds_to_seconds(cls, milliseconds):
+#         seconds = milliseconds / 1000
+#         return int(seconds)
+#
+#     """
+#     判断当前视频是否在90天内
+#     """
+#     @classmethod
+#     def get_video_data(cls, timestamp_str):
+#         timestamp = datetime.strptime(timestamp_str, "%Y-%m-%d %H:%M:%S")
+#         # 获取当前时间
+#         current_time = datetime.now()
+#         difference = current_time - timestamp
+#         if difference <= timedelta(days=90):
+#             return False
+#         else:
+#             return True
+#     """
+#     获取票圈ID
+#     """
+#     @classmethod
+#     def get_id_by_category(cls, category_name):
+#         category_list = [
+#             {"id": 71502003, "category": "生活"},
+#             {"id": 71502004, "category": "才艺"},
+#             {"id": 71502005, "category": "时尚"},
+#             {"id": 71502006, "category": "宠物"},
+#             {"id": 71502007, "category": "读书"},
+#             {"id": 71502008, "category": "二次元"},
+#             {"id": 71502009, "category": "家居"},
+#             {"id": 71502010, "category": "数码"},
+#             {"id": 71502011, "category": "搞笑"},
+#             {"id": 71502012, "category": "健康"},
+#             {"id": 71502013, "category": "旅游"},
+#             {"id": 71502014, "category": "美食"},
+#             {"id": 71502015, "category": "美妆"},
+#             {"id": 71502016, "category": "汽车"},
+#             {"id": 71502018, "category": "亲子"},
+#             {"id": 71502019, "category": "情感"},
+#             {"id": 71502020, "category": "三农"},
+#             {"id": 71502021, "category": "摄影"},
+#             {"id": 71502022, "category": "舞蹈"},
+#             {"id": 71502023, "category": "颜值"},
+#             {"id": 71502024, "category": "音乐"},
+#             {"id": 71502025, "category": "影视"},
+#             {"id": 71502026, "category": "短剧"},
+#             {"id": 71502027, "category": "游戏"},
+#             {"id": 71502028, "category": "运动"},
+#             {"id": 71502029, "category": "资讯"},
+#             {"id": 71502030, "category": "人文"}
+#         ]
+#         for category in category_list:
+#             if category['category'] == category_name:
+#                 return category['id']
+#         return None
+#
+#     """
+#     新生成视频上传到对应账号下
+#     """
+#     @classmethod
+#     def insert_piaoquantv(cls, new_video_path, new_title, n_id, cover):
+#
+#         url = "https://vlogapi.piaoquantv.com/longvideoapi/crawler/video/send"
+#         headers = {
+#             'User-Agent': 'PQSpeed/486 CFNetwork/1410.1 Darwin/22.6.0',
+#             'cookie': 'JSESSIONID=4DEA2B5173BB9A9E82DB772C0ACDBC9F; JSESSIONID=D02C334150025222A0B824A98B539B78',
+#             'referer': 'http://appspeed.piaoquantv.com',
+#             'token': '524a8bc871dbb0f4d4717895083172ab37c02d2f',
+#             'accept-language': 'zh-CN,zh-Hans;q=0.9',
+#             'Content-Type': 'application/x-www-form-urlencoded'
+#         }
+#         payload = {
+#             'coverImgPath': cover,
+#             'deviceToken': '9ef064f2f7869b3fd67d6141f8a899175dddc91240971172f1f2a662ef891408',
+#             'fileExtensions': 'MP4',
+#             'loginUid': n_id,
+#             'networkType': 'Wi-Fi',
+#             'platform': 'iOS',
+#             'requestId': 'fb972cbd4f390afcfd3da1869cd7d001',
+#             'sessionId': '362290597725ce1fa870d7be4f46dcc2',
+#             'subSessionId': '362290597725ce1fa870d7be4f46dcc2',
+#             'title': new_title,
+#             'token': '524a8bc871dbb0f4d4717895083172ab37c02d2f',
+#             'uid': n_id,
+#             'versionCode': '486',
+#             'versionName': '3.4.12',
+#             'videoFromScene': '1',
+#             'videoPath': new_video_path,
+#             'viewStatus': '1'
+#         }
+#         encoded_payload = urlencode(payload)
+#         response = requests.request("POST", url, headers=headers, data=encoded_payload)
+#         data = response.json()
+#         code = data["code"]
+#         if code == 0:
+#             new_video_id = data["data"]["id"]
+#             return new_video_id
+#         else:
+#             return None
+#
+#     @classmethod
+#     def get_data(cls, channel_id, name):
+#         number = 1
+#         list = []
+#         for category_id, category_name in cls.CATEGORY_IDS.items():
+#             try:
+#                 feed_data = cls.get_feed_list()
+#                 feed_data = json.loads(feed_data)
+#                 feeds = feed_data['feeds']
+#                 for feed in feeds:
+#                     photo_id = feed["photo_id"]  # 视频ID
+#                     status = sqlCollect.ks_is_used(photo_id)
+#
+#                     user_name = feed["user_name"]  # 用户名
+#                     user_sex = feed["user_sex"]  # 性别 F为女,U为男
+#                     time_data = feed["time"]  # 发布时间
+#                     caption = feed["caption"]  # 标题
+#                     view_count = feed["view_count"]  # 浏览数
+#                     like_count = feed["like_count"]  # 点赞数
+#                     share_count = feed["share_count"]  # 分享数
+#                     duration = feed["duration"]  # 时长/秒
+#                     duration = cls.milliseconds_to_seconds(duration)
+#                     main_mv_url = feed["main_mv_url"]  # 视频链接
+#                     thumbnail_url = feed["thumbnail_url"]  # 视频封面
+#                     user_id = feed["user_id"]  # 用户id非用户主页id
+#                     log_data = f"user:{user_name},,video_id:{photo_id},,video_url:{main_mv_url},,original_title:{caption},,share_count:{share_count},,view_count:{view_count},,duration:{duration}"
+#                     AliyunLogger.logging(channel_id, name, user_name, photo_id, "扫描到一条视频", "2001", log_data)
+#                     value, age = cls.analyze_photo(photo_id)
+#                     if status:
+#                         AliyunLogger.logging(channel_id, name, user_name, photo_id, "该视频已改造过", "2001", log_data)
+#                         continue
+#                     if value:
+#                         AliyunLogger.logging(channel_id, name, user_name, photo_id, f"不符合规则:50+年龄占比小于40%,实际占比{age}", "2003", log_data)
+#                         sqlCollect.insert_ks_data(user_name, user_sex, time_data, caption, view_count, like_count,
+#                                                   share_count, duration, main_mv_url, thumbnail_url, user_id, '1',
+#                                                   photo_id, category_name, age, oss_object=None, video_uid=None)
+#
+#                         continue
+#                     video_percent = '%.4f' % (share_count / view_count)
+#                     special = float(0.0005)
+#                     if float(video_percent) < special:
+#                         AliyunLogger.logging(channel_id, name, user_name, photo_id, "不符合规则:分享/浏览小于0.0005", "2003", log_data)
+#
+#                         sqlCollect.insert_ks_data(user_name, user_sex, time_data, caption, view_count, like_count, share_count, duration, main_mv_url, thumbnail_url, user_id, '1', photo_id, category_name, age, oss_object=None, video_uid=None)
+#                         continue
+#                     if share_count < 100:
+#                         AliyunLogger.logging(channel_id, name, user_name, photo_id, "不符合规则:分享小于100", "2003", log_data)
+#
+#                         sqlCollect.insert_ks_data(user_name, user_sex, time_data, caption, view_count, like_count,
+#                                                   share_count, duration, main_mv_url, thumbnail_url, user_id, '1',
+#                                                   photo_id, category_name, age, oss_object=None, video_uid=None)
+#                         continue
+#                     if duration < 30 or duration > 720:
+#                         AliyunLogger.logging(channel_id, name, user_name, photo_id, "不符合规则:时长不符合规则大于720秒/小于30秒", "2003", log_data)
+#                         sqlCollect.insert_ks_data(user_name, user_sex, time_data, caption, view_count, like_count,
+#                                                   share_count, duration, main_mv_url, thumbnail_url, user_id, '1',
+#                                                   photo_id, category_name, age, oss_object=None, video_uid=None)
+#                         continue
+#
+#                     sqlCollect.insert_ks_data(user_name, user_sex, time_data, caption, view_count,
+#                                               like_count, share_count, duration, main_mv_url, thumbnail_url,
+#                                               user_id, '0', photo_id, category_name, age, oss_object=None, video_uid=None)
+#                     all_data = {"video_id": photo_id, "cover": thumbnail_url, "video_url": main_mv_url, "rule": video_percent,
+#                                 "old_title": caption}
+#                     AliyunLogger.logging(channel_id, name, user_name, photo_id, "符合规则等待改造", "2004", log_data)
+#
+#                     list.append(all_data)
+#                     current_time = datetime.now()
+#                     formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
+#                     values = [
+#                         [category_name, user_name, photo_id, user_sex, caption, view_count, like_count, share_count, duration,
+#                          main_mv_url, thumbnail_url, user_id, age, '', '', time_data, formatted_time]]
+#                     Feishu.insert_columns("PlcisKhObhzmBothRutc65sJnph", "8fQxFv", "ROWS", 2, 3)
+#                     time.sleep(0.5)
+#                     Feishu.update_values("PlcisKhObhzmBothRutc65sJnph", "8fQxFv", "A3:Z3", values)
+#                     if len(list) == int(number):
+#                         Common.logger("ks-czz").info(f"获取快手创作者视频总数:{len(list)}\n")
+#                         return list
+#                 time.sleep(5)
+#             except Exception as exc:
+#                 print(f"异常信息: {exc}")
+#                 return list
+#         return list
+#
+#
+#
+# # Example usage:
+# if __name__ == "__main__":
+#     KsFeedVideo.get_data(1)

+ 1 - 7
data_channel/piaoquan.py

@@ -7,7 +7,7 @@ import cffi
 import requests
 from urllib.parse import urlencode, urlparse
 
-from common import Common, AliyunLogger, Feishu
+from common import AliyunLogger, Feishu
 from common.sql_help import sqlCollect
 
 
@@ -43,7 +43,6 @@ class PQ:
             video_url = data["content"]["transedVideoPath"]
             return video_url
         except Exception as e:
-            Common.logger("video").warning(f"获取视频链接失败:{e}\n")
             return ""
 
     """
@@ -95,7 +94,6 @@ class PQ:
                 return list
             return list
         except Exception as e:
-            Common.logger("video").warning(f"获取视频链接失败:{e}\n")
             return ""
 
     """
@@ -136,12 +134,9 @@ class PQ:
                 all_data = {"video_id": video_id, "cover": cover, "video_url": video_url, "rule": "无", "old_title": old_title}
                 list.append(all_data)
                 if len(list) == int(number):
-                    Common.logger("pq").info(f"获取视频总数:{len(list)}\n")
                     return list
-            Common.logger("pq").info(f"获取票圈视频总数:{len(list)}\n")
             return list
         except Exception as e:
-            Common.logger("pq").warning(f"获取音频视频链接失败:{e}\n")
             return list
 
     """
@@ -302,7 +297,6 @@ class PQ:
             data = bytes(ffi.buffer(c_data, len(data))[:])
             return data
         except Exception as e:
-            Common.logger("dd-sph").info(f"decrypt_video获取异常,异常信息{e}")
             return data
 
     """

+ 1 - 10
data_channel/shipinhao.py

@@ -4,9 +4,8 @@ import time
 
 import requests
 
-from common import Common, AliyunLogger, Feishu
+from common import AliyunLogger, Feishu
 from common.sql_help import sqlCollect
-from data_channel.data_help import dataHelp
 
 
 class SPH:
@@ -99,8 +98,6 @@ class SPH:
 
                         log_data = f"user:{url_id},,video_id:{objectId},,video_url:{video_url},,original_title:{old_title},,share_count:{share_cnt},,like_count:{like_cnt},,duration:{duration}"
                         AliyunLogger.logging(channel_id, name, url_id, objectId, "扫描到一条视频", "2001", log_data)
-                        Common.logger("sph").info(
-                            f"扫描:{task_mark},用户主页id:{url_id},视频id{objectId} ,分享:{share_cnt},点赞:{like_cnt}")
                         if status:
                             AliyunLogger.logging(channel_id, name, url_id, objectId, "该视频已改造过", "2002", log_data)
                             continue
@@ -108,8 +105,6 @@ class SPH:
                         special = float(0.25)
                         if like_cnt >= 30000 or like_cnt >= 50000 or (share_cnt >= 300 and float(video_percent) >= special):
                             if int(duration) < 30 or int(duration) > 720:
-                                Common.logger("sph").info(
-                                    f"任务:{task_mark},用户主页id:{url_id},视频id{objectId} ,分享:{share_cnt},点赞:{like_cnt} ,时长:{duration} ")
                                 AliyunLogger.logging(channel_id, name, url, objectId, "不符合规则:时长不符合规则大于720秒/小于30秒",
                                                      "2003", log_data)
 
@@ -118,13 +113,9 @@ class SPH:
                             list.append(all_data)
                             AliyunLogger.logging(channel_id, name, url_id, objectId, "符合规则等待改造", "2004", log_data)
                             if len(list) == int(number):
-                                Common.logger(mark).info(f"获取视频号视频总数:{len(list)}\n")
                                 return list
                         else:
                             AliyunLogger.logging(channel_id, name, url_id, objectId, "不符合规则:点赞小于30000/50000 或 分享/点赞小于0.25和分享小于300", "2003", log_data)
-
-                            Common.logger("sph").info(
-                                     f"不符合规则:{task_mark},用户主页id:{url_id},视频id{objectId} ,分享:{share_cnt},点赞:{like_cnt}")
                             continue
                     return list
             except Exception as e:

+ 1 - 9
data_channel/sph_keyword.py

@@ -4,7 +4,7 @@ import time
 import requests
 import json
 
-from common import Common, Feishu, AliyunLogger
+from common import Feishu, AliyunLogger
 from common.sql_help import sqlCollect
 
 
@@ -39,7 +39,6 @@ class SphKeyword:
                 Feishu.finish_bot(f"shi_pin_hao/keyword {response['msg']}",
                                   "https://open.feishu.cn/open-apis/bot/v2/hook/575ca6a1-84b4-4a2f-983b-1d178e7b16eb",
                                   "【视频号搜索接口使用提示】")
-                Common.logger("sph-key-word").info(f"视频号搜索词数据获取失败,{response['msg']}\n")
                 return list
             data_list = response['data']['data']
             for data in data_list:
@@ -58,8 +57,6 @@ class SphKeyword:
                 video_url = items["videoUrl"]
                 log_data = f"user:{keyword},,video_id:{video_id},,video_url:{video_url},,original_title:{old_title},,digg_count:{digg_count},,duration:{duration}"
                 AliyunLogger.logging(channel_id, name, keyword, video_id, "扫描到一条视频", "2001", log_data)
-                Common.logger("sph-key-word").info(
-                    f"扫描:{task_mark},搜索词:{keyword},视频id{video_id},点赞{digg_count}")
                 status = sqlCollect.is_used(task_mark, video_id, mark, channel_id)
                 if status:
                     AliyunLogger.logging(channel_id, name, keyword, video_id, "该视频已改造过", "2002", log_data)
@@ -67,13 +64,9 @@ class SphKeyword:
                 if int(digg_count) < 2000:
                     AliyunLogger.logging(channel_id, name, keyword, video_id, f"不符合规则:点赞小于2000", "2003",
                                          log_data)
-                    Common.logger("sph-key-word").info(
-                        f"不符合规则:{task_mark},用户主页id:{keyword},视频id{video_id} ,点赞{digg_count} ,时长:{int(duration)} ")
                     continue
                 if int(duration) < 30 or int(duration) > 900:
                     AliyunLogger.logging(channel_id, name, keyword, video_id, f"不符合规则:时长不符合规则大于900秒/小于30秒", "2003", log_data)
-                    Common.logger("sph-key-word").info(
-                        f"不符合规则:{task_mark},用户主页id:{keyword},视频id{video_id} 点赞{digg_count} ,时长:{int(duration)} ")
                     continue
                 all_data = {"video_id": video_id, "cover": cover_url, "video_url": video_url, "rule": '',
                             "old_title": old_title}
@@ -81,7 +74,6 @@ class SphKeyword:
                 AliyunLogger.logging(channel_id, name, keyword, video_id, "符合规则等待改造", "2004", log_data)
             return list
         except Exception as exc:
-            Common.logger("sph-key-word").info(f"视频号搜索词{keyword}获取失败{exc}\n")
             return list
 
 

+ 1 - 13
data_channel/sph_ls.py

@@ -1,4 +1,4 @@
-from common import Common, AliyunLogger
+from common import AliyunLogger
 from common.sql_help import sqlCollect
 
 
@@ -19,8 +19,6 @@ class SPHLS:
                 oss_url = data[4]
                 oss_cover = data[5]
                 duration = int(float(data[6]))
-                Common.logger("sph-ls").info(
-                    f"扫描:{task_mark},用户主页id:{url},视频id{video_id} ,分享:{share_cnt},点赞:{like_cnt},时长:{duration}")
                 log_data = f"user:{url},,video_id:{video_id},,video_url:{oss_url},,original_title:{old_title},,share_count:{share_cnt},,like_count:{like_cnt},,duration:{duration}"
                 AliyunLogger.logging(channel_id, name, url, video_id, "扫描到一条视频", "2001", log_data)
 
@@ -29,32 +27,22 @@ class SPHLS:
                     continue
                 if share_cnt < 300:
                     AliyunLogger.logging(channel_id, name, url, video_id, "不符合规则:分享小于300", "2003", log_data)
-
-                    Common.logger("sph-ls").info(
-                        f"任务:{task_mark},用户主页id:{url},视频id{video_id} ,分享:{share_cnt},点赞:{like_cnt} ,时长:{duration} ")
                     continue
                 if share_cnt < like_cnt:
                     AliyunLogger.logging(channel_id, name, url, video_id, "不符合规则:分享小于点赞", "2003", log_data)
-
-                    Common.logger("sph-ls").info(
-                        f"任务:{task_mark},用户主页id:{url},视频id{video_id} ,分享:{share_cnt},点赞:{like_cnt} ,时长:{duration} ")
                     continue
                 if duration < 30 or duration > 720:
                     AliyunLogger.logging(channel_id, name, url, video_id, "不符合规则:时长不符合规则大于720秒/小于30秒", "2003",
                                          log_data)
-                    Common.logger("sph-ls").info(
-                        f"任务:{task_mark},用户主页id:{url},视频id{video_id} ,分享:{share_cnt},点赞:{like_cnt} ,时长:{duration} ")
                     continue
                 all_data = {"video_id": video_id, "cover": oss_cover, "video_url": oss_url, "rule": '',
                             "old_title": old_title}
                 list.append(all_data)
                 AliyunLogger.logging(channel_id, name, url, video_id, "符合规则等待改造", "2004", log_data)
                 if len(list) == int(number):
-                    Common.logger("sph-ls").info(f"获取视频号视频总数:{len(list)}\n")
                     return list
             return list
         else:
-            Common.logger("sph-ls").info(f"{url}无数据\n")
             return list
 
 

+ 7 - 4
job_czz_ks.py

@@ -1,5 +1,8 @@
 
 import time
+
+from loguru import logger
+
 from common import Material
 
 from video_rewriting.video_processor import VideoProcessor
@@ -8,12 +11,12 @@ def video_task_start():
     data = Material.feishu_list()[9]
     while True:
         try:
-            print("开始执行任务")
+            logger.info("[+] 开始执行任务")
             mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
-            time.sleep(10)  # 根据 mark 是否为空设置延迟
+            logger.info(f"[+] 返回用户名: {mark}")
+            time.sleep(10 if mark else 120)  # 根据 mark 是否为空设置延迟
         except Exception as e:
-            print("处理任务时出现异常:", e)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             time.sleep(10)
             continue
 if __name__ == '__main__':

+ 7 - 4
job_dd_sph.py

@@ -1,5 +1,8 @@
 
 import time
+
+from loguru import logger
+
 from common import Material
 
 from video_rewriting.video_processor import VideoProcessor
@@ -8,12 +11,12 @@ def video_task_start():
     data = Material.feishu_list()[12]
     while True:
         try:
-            print("开始执行任务")
+            logger.info("[+] 开始执行任务")
             mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
-            time.sleep(120 if mark else 120)  # 根据 mark 是否为空设置延迟
+            logger.info(f"[+] 返回用户名: {mark}")
+            time.sleep(10 if mark else 120)  # 根据 mark 是否为空设置延迟
         except Exception as e:
-            print("处理任务时出现异常:", e)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             time.sleep(10)
             continue
 if __name__ == '__main__':

+ 6 - 3
job_fj.py

@@ -1,6 +1,9 @@
 import os
 import time
 import threading
+
+from loguru import logger
+
 from common import Material
 from video_rewriting.video_processor import VideoProcessor
 
@@ -18,12 +21,12 @@ def video_task_start():
     """处理视频任务,返回用户名并根据结果决定延迟时间"""
     while True:
         try:
-            print("开始执行任务")
+            logger.info("[+] 开始执行任务")
             mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
+            logger.info(f"[+] 返回用户名: {mark}")
             time.sleep(10 if mark else 120)  # 根据 mark 是否为空设置延迟
         except Exception as e:
-            print("处理任务时出现异常:", e)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             time.sleep(10)
             continue
 if __name__ == '__main__':

+ 5 - 4
job_keyword_dy.py

@@ -2,6 +2,7 @@
 import time
 
 import schedule
+from loguru import logger
 
 from common import Material
 from common.sql_help import sqlCollect
@@ -17,16 +18,16 @@ def video_task_start():
             sheet_count = Material.get_count_restrict("抖音关键词搜索")
             if sheet_count:
                 if count >= int(sheet_count):
-                    print(f"到了限制数{count}")
+                    logger.info(f"[+] 到了限制数{count}")
                     time.sleep(100)
                     continue
-            print("开始执行任务")
+            logger.info("[+] 开始执行任务")
             mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
+            logger.info(f"[+] 返回用户名: {mark}")
             time.sleep(5)
             continue
         except Exception as e:
-            print("处理任务时出现异常:", e)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             time.sleep(10)
             continue
 def schedule_tasks():

+ 8 - 5
job_keyword_ks.py

@@ -1,5 +1,8 @@
 
 import time
+
+from loguru import logger
+
 from common import Material
 from common.sql_help import sqlCollect
 
@@ -14,16 +17,16 @@ def video_task_start():
             sheet_count = Material.get_count_restrict("快手关键词搜索")
             if sheet_count:
                 if count >= int(sheet_count):
-                    print(f"到了限制数{count}")
+                    logger.info(f"[+] 到了限制数{count}")
                     time.sleep(3600)
                     continue
-            print("开始执行任务")
+            logger.info(f"[+] 开始执行任务")
+
             mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
-            # time.sleep(10 if mark else 120)  # 根据 mark 是否为空设置延迟
+            logger.info(f"[+] 返回用户名: {mark}")
         except Exception as e:
-            print("处理任务时出现异常:", e)
-            # time.sleep(10)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             continue
 if __name__ == '__main__':
     video_task_start()

+ 8 - 5
job_keyword_sph.py

@@ -1,5 +1,8 @@
 
 import time
+
+from loguru import logger
+
 from common import Material
 from common.sql_help import sqlCollect
 
@@ -14,15 +17,15 @@ def video_task_start():
             sheet_count = Material.get_count_restrict("视频号关键词搜索")
             if sheet_count:
                 if count >= int(sheet_count):
-                    print(f"到了限制数{count}")
+                    logger.info(f"[+] 到了限制数{count}")
                     time.sleep(3600)
                     continue
-            print("开始执行任务")
+            logger.info(f"[+] 开始执行任务")
+
             mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
-            # time.sleep(10 if mark else 120)  # 根据 mark 是否为空设置延迟
+            logger.info(f"[+] 返回用户名: {mark}")
         except Exception as e:
-            print("处理任务时出现异常:", e)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             time.sleep(10)
             continue
 if __name__ == '__main__':

+ 7 - 5
job_ks_feed.py

@@ -1,3 +1,5 @@
+from loguru import logger
+
 from common import Material
 from common.sql_help import sqlCollect
 import time
@@ -12,15 +14,15 @@ def video_task_start():
             sheet_count = Material.get_count_restrict("快手推荐流")
             if sheet_count:
                 if count >= int(sheet_count):
-                    print(f"到了限制数{count}")
+                    logger.info(f"[+] 到了限制数{count}")
                     time.sleep(3600)
                     continue
-            print("开始执行任务")
+            logger.info(f"[+] 开始执行任务")
+
             mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
-            time.sleep(120 if mark else 120)  # 根据 mark 是否为空设置延迟
+            logger.info(f"[+] 返回用户名: {mark}")
         except Exception as e:
-            print("处理任务时出现异常:", e)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             time.sleep(10)
             continue
 if __name__ == '__main__':

+ 7 - 4
job_ks_xcx.py

@@ -1,5 +1,8 @@
 
 import time
+
+from loguru import logger
+
 from common import Material
 
 from video_rewriting.video_processor import VideoProcessor
@@ -8,12 +11,12 @@ def video_task_start():
     data = Material.feishu_list()[19]
     while True:
         try:
-            print("开始执行任务")
+            logger.info("[+] 开始执行任务")
             mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
-            time.sleep(60 if mark else 60)  # 根据 mark 是否为空设置延迟
+            logger.info(f"[+] 返回用户名: {mark}")
+            time.sleep(10 if mark else 120)  # 根据 mark 是否为空设置延迟
         except Exception as e:
-            print("处理任务时出现异常:", e)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             time.sleep(10)
             continue
 if __name__ == '__main__':

+ 6 - 3
job_lq.py

@@ -1,5 +1,8 @@
 
 import time
+
+from loguru import logger
+
 from common import Material
 
 from video_rewriting.video_processor import VideoProcessor
@@ -8,12 +11,12 @@ def video_task_start():
     data = Material.feishu_list()[4]
     while True:
         try:
-            print("开始执行任务")
+            logger.info("[+] 开始执行任务")
             mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
+            logger.info(f"[+] 返回用户名: {mark}")
             time.sleep(10 if mark else 120)  # 根据 mark 是否为空设置延迟
         except Exception as e:
-            print("处理任务时出现异常:", e)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             time.sleep(10)
             continue
 if __name__ == '__main__':

+ 6 - 3
job_lsy.py

@@ -1,5 +1,8 @@
 
 import time
+
+from loguru import logger
+
 from common import Material
 
 from video_rewriting.video_processor import VideoProcessor
@@ -8,12 +11,12 @@ def video_task_start():
     data = Material.feishu_list()[6]
     while True:
         try:
-            print("开始执行任务")
+            logger.info("[+] 开始执行任务")
             mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
+            logger.info(f"[+] 返回用户名: {mark}")
             time.sleep(10 if mark else 120)  # 根据 mark 是否为空设置延迟
         except Exception as e:
-            print("处理任务时出现异常:", e)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             time.sleep(10)
             continue
 if __name__ == '__main__':

+ 6 - 3
job_lt.py

@@ -1,5 +1,8 @@
 
 import time
+
+from loguru import logger
+
 from common import Material
 
 from video_rewriting.video_processor import VideoProcessor
@@ -8,12 +11,12 @@ def video_task_start():
     data = Material.feishu_list()[2]
     while True:
         try:
-            print("开始执行任务")
+            logger.info("[+] 开始执行任务")
             mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
+            logger.info(f"[+] 返回用户名: {mark}")
             time.sleep(10 if mark else 120)  # 根据 mark 是否为空设置延迟
         except Exception as e:
-            print("处理任务时出现异常:", e)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             time.sleep(10)
             continue
 if __name__ == '__main__':

+ 9 - 6
job_pl_dy.py

@@ -1,5 +1,8 @@
 
 import time
+
+from loguru import logger
+
 from common import Material
 from common.sql_help import sqlCollect
 
@@ -14,15 +17,15 @@ def video_task_start():
             sheet_count = Material.get_count_restrict("抖音品类账号")
             if sheet_count:
                 if count >= int(sheet_count):
-                    print(f"到了限制数{count}")
+                    logger.info(f"[+] 到了限制数{count}")
                     time.sleep(3600)
                     continue
-            print("开始执行任务")
-            mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
-            time.sleep(10 if mark else 120)  # 根据 mark 是否为空设置延迟
+            logger.info(f"[+] 开始执行任务")
+
+            mark = VideoProcessor.main(data)
+            logger.info(f"[+] 返回用户名: {mark}")
         except Exception as e:
-            print("处理任务时出现异常:", e)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             time.sleep(10)
             continue
 if __name__ == '__main__':

+ 0 - 20
job_pl_dy_1.py

@@ -1,20 +0,0 @@
-
-import time
-from common import Material
-
-from video_rewriting.video_processor import VideoProcessor
-def video_task_start():
-    """处理视频任务,返回用户名并根据结果决定延迟时间"""
-    data = Material.feishu_list()[5]
-    while True:
-        try:
-            print("开始执行任务")
-            mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
-            time.sleep(10 if mark else 120)  # 根据 mark 是否为空设置延迟
-        except Exception as e:
-            print("处理任务时出现异常:", e)
-            time.sleep(10)
-            continue
-if __name__ == '__main__':
-    video_task_start()

+ 11 - 9
job_pl_ks.py

@@ -1,5 +1,8 @@
 
 import time
+
+from loguru import logger
+
 from common import Material
 from common.sql_help import sqlCollect
 
@@ -12,17 +15,16 @@ def video_task_start():
             count = sqlCollect.get_name_count("快手品类账号")
             count = int(count[0][0])
             sheet_count = Material.get_count_restrict("快手品类账号")
-            if sheet_count:
-                if count >= int(sheet_count):
-                    print(f"到了限制数{count}")
-                    time.sleep(3600)
-                    continue
-            print("开始执行任务")
+            if sheet_count and count >= int(sheet_count):
+                logger.info(f"[+] 到了限制数{count}")
+                time.sleep(3600)
+                continue
+            logger.info(f"[+] 开始执行任务")
+
             mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
-            time.sleep(10 if mark else 120)  # 根据 mark 是否为空设置延迟
+            logger.info(f"[+] 返回用户名: {mark}")
         except Exception as e:
-            print("处理任务时出现异常:", e)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             time.sleep(10)
             continue
 if __name__ == '__main__':

+ 11 - 9
job_sph.py

@@ -1,5 +1,8 @@
 
 import time
+
+from loguru import logger
+
 from common import Material
 from common.sql_help import sqlCollect
 
@@ -12,17 +15,16 @@ def video_task_start():
             count = sqlCollect.get_name_count("视频号品类账号")
             count = int(count[0][0])
             sheet_count = Material.get_count_restrict("视频号品类账号")
-            if sheet_count:
-                if count >= int(sheet_count):
-                    print(f"到了限制数{count}")
-                    time.sleep(3600)
-                    continue
-            print("开始执行任务")
+            if sheet_count and count >= int(sheet_count):
+                logger.info(f"[+] 到了限制数{count}")
+                time.sleep(3600)
+                continue
+            logger.info(f"[+] 开始执行任务")
+
             mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
-            time.sleep(10 if mark else 120)  # 根据 mark 是否为空设置延迟
+            logger.info(f"[+] 返回用户名: {mark}")
         except Exception as e:
-            print("处理任务时出现异常:", e)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             time.sleep(10)
             continue
 if __name__ == '__main__':

+ 11 - 9
job_sph_feed.py

@@ -1,5 +1,8 @@
 
 import time
+
+from loguru import logger
+
 from common import Material
 from common.sql_help import sqlCollect
 
@@ -12,17 +15,16 @@ def video_task_start():
             count = sqlCollect.get_feed_count("视频号推荐流")
             count = int(count[0][0])
             sheet_count = Material.get_count_restrict("视频号推荐流")
-            if sheet_count:
-                if count >= int(sheet_count):
-                    print(f"到了限制数{count}")
-                    time.sleep(3600)
-                    continue
-            print("开始执行任务")
+            if sheet_count and count >= int(sheet_count):
+                logger.info(f"[+] 到了限制数{count}")
+                time.sleep(3600)
+                continue
+            logger.info(f"[+] 开始执行任务")
+
             mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
-            time.sleep(120 if mark else 120)  # 根据 mark 是否为空设置延迟
+            logger.info(f"[+] 返回用户名: {mark}")
         except Exception as e:
-            print("处理任务时出现异常:", e)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             time.sleep(10)
             continue
 if __name__ == '__main__':

+ 6 - 3
job_wxk.py

@@ -1,5 +1,8 @@
 
 import time
+
+from loguru import logger
+
 from common import Material
 
 from video_rewriting.video_processor import VideoProcessor
@@ -8,12 +11,12 @@ def video_task_start():
     data = Material.feishu_list()[0]
     while True:
         try:
-            print("开始执行任务")
+            logger.info("[+] 开始执行任务")
             mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
+            logger.info(f"[+] 返回用户名: {mark}")
             time.sleep(10 if mark else 120)  # 根据 mark 是否为空设置延迟
         except Exception as e:
-            print("处理任务时出现异常:", e)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             time.sleep(10)
             continue
 if __name__ == '__main__':

+ 6 - 3
job_wyt.py

@@ -1,5 +1,8 @@
 
 import time
+
+from loguru import logger
+
 from common import Material
 
 from video_rewriting.video_processor import VideoProcessor
@@ -8,12 +11,12 @@ def video_task_start():
     data = Material.feishu_list()[7]
     while True:
         try:
-            print("开始执行任务")
+            logger.info("[+] 开始执行任务")
             mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
+            logger.info(f"[+] 返回用户名: {mark}")
             time.sleep(10 if mark else 120)  # 根据 mark 是否为空设置延迟
         except Exception as e:
-            print("处理任务时出现异常:", e)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             time.sleep(10)
             continue
 if __name__ == '__main__':

+ 6 - 3
job_xx.py

@@ -1,5 +1,8 @@
 
 import time
+
+from loguru import logger
+
 from common import Material
 
 from video_rewriting.video_processor import VideoProcessor
@@ -8,12 +11,12 @@ def video_task_start():
     data = Material.feishu_list()[8]
     while True:
         try:
-            print("开始执行任务")
+            logger.info("[+] 开始执行任务")
             mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
+            logger.info(f"[+] 返回用户名: {mark}")
             time.sleep(10 if mark else 120)  # 根据 mark 是否为空设置延迟
         except Exception as e:
-            print("处理任务时出现异常:", e)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             time.sleep(10)
             continue
 if __name__ == '__main__':

+ 6 - 3
job_yht.py

@@ -1,5 +1,8 @@
 
 import time
+
+from loguru import logger
+
 from common import Material
 
 from video_rewriting.video_processor import VideoProcessor
@@ -8,12 +11,12 @@ def video_task_start():
     data = Material.feishu_list()[3]
     while True:
         try:
-            print("开始执行任务")
+            logger.info("[+] 开始执行任务")
             mark = VideoProcessor.main(data)
-            print(f"返回用户名: {mark}")
+            logger.info(f"[+] 返回用户名: {mark}")
             time.sleep(10 if mark else 120)  # 根据 mark 是否为空设置延迟
         except Exception as e:
-            print("处理任务时出现异常:", e)
+            logger.error(f"[+] 处理任务时出现异常: {e}")
             time.sleep(10)
             continue
 if __name__ == '__main__':

+ 1 - 12
qpl_channel/sph_crawling_data.py

@@ -4,7 +4,7 @@ import time
 
 import requests
 
-from common import Material, Oss, Common, Feishu
+from common import Material, Oss, Feishu
 from common.sql_help import sqlCollect
 from data_channel.data_help import dataHelp
 from data_channel.shipinhao import SPH
@@ -19,7 +19,6 @@ class SphHistory:
         if user_list == None:
             return
         for user in user_list:
-            Common.logger("sph_crawling").info(f"{user}开始获取数据")
             account_id = SPH.get_account_id(user)
             if account_id == False:
                 print(f"{account_id}:没有获取到视频account_id,无法抓取数据")
@@ -39,7 +38,6 @@ class SphHistory:
 
                     response = requests.request("POST", url, headers=headers, data=payload)
                     time.sleep(random.randint(1, 5))
-                    Common.logger("sph_crawling").info(f"{user}获取第{count}页视频")
                     count += 1
                     if response.text == "" or response.text == None:
                         break
@@ -58,7 +56,6 @@ class SphHistory:
                     last_buffer = res_json.get('last_buffer')
                     try:
                         for obj in res_json["UpMasterHomePage"]:
-                            Common.logger("sph_crawling").info(f"{user}扫描到一条数据")
                             objectId = obj['objectId']
                             object_id = sqlCollect.sph_data_info_v_id(objectId, "视频号")
                             if object_id:
@@ -83,14 +80,10 @@ class SphHistory:
                                 continue
                             v_id = f"sph/{objectId}"
                             try:
-                                Common.logger("sph_crawling").info(f"{user}视频ID:{objectId},视频链接:{video_url}开始发送oss")
                                 oss_video_key = Oss.channel_upload_oss(video_url, v_id)  # 视频发送OSS
                                 oss_video_key = oss_video_key.get("oss_object_key")
-                                Common.logger("sph_crawling").info(f"{user}视频发送oss成功,视频oss地址{oss_video_key}")
-                                Common.logger("sph_crawling").info(f"{user}视频ID:{objectId},封面链接:{cover}开始发送oss")
                                 oss_cover_key = Oss.channel_upload_oss(cover, f"sph/{objectId}.jpg")  # 视频发送OSS
                                 oss_cover_key = oss_cover_key.get("oss_object_key")
-                                Common.logger("sph_crawling").info(f"{user}封面发送oss成功,封面oss地址{oss_video_key}")
                                 create_time = obj['createtime']  # 发布时间
                             except:
                                 continue
@@ -102,12 +95,9 @@ class SphHistory:
                             comment_count = obj['comment_count']  # 评论数
                             fav_count = obj['fav_count']  # 大拇指点赞数
                             sqlCollect.sph_data_info('视频号', objectId, video_url, cover, video_title, str(share_cnt), str(like_cnt), oss_video_key, oss_cover_key, nick_name, user_name, comment_count, fav_count, create_time,duration)
-                            Common.logger("sph_crawling").info(f"{nick_name}插入数据成功")
                     except Exception as e:
-                        Common.logger("sph_crawling").info(f"{user}异常,异常信息{e}")
                         continue
                 sqlCollect.update_sph_channel_user_status(user)
-                Common.logger("sph_crawling").info(f"{user}用户抓取完成")
                 count = sqlCollect.sph_data_info_count(user, "视频号")
                 text = (
                     f"**{user}抓取完成:共抓了{count[0]}条数据**\n"
@@ -116,7 +106,6 @@ class SphHistory:
                                   "https://open.feishu.cn/open-apis/bot/v2/hook/029fa989-9847-4574-8e1b-5c396e665f16",
                                   "【 视频号历史数据抓取通知 】")
             except Exception as e:
-                Common.logger("sph_crawling").info(f"{user}异常,异常信息{e}")
                 Feishu.finish_bot(e,
                                   "https://open.feishu.cn/open-apis/bot/v2/hook/029fa989-9847-4574-8e1b-5c396e665f16",
                                   "【 视频号抓取异常通知 】")

+ 1 - 3
video_job.py

@@ -5,7 +5,7 @@ import re
 import schedule
 import time
 import threading
-from common import Material, Common, Feishu
+from common import Material, Feishu
 # 控制读写速度的参数
 from video_rewriting.video_prep import getVideo
 
@@ -24,14 +24,12 @@ def video_task_start(data):
     user_data_mark = data["mark"]
     # 开始准备执行生成视频脚本
     if user_data_mark is not None and user_data_mark in today:
-        Common.logger("log").info(f"视频脚本参数中的用户名 {user_data_mark} 今天已经返回过,不再启动线程。今天已经返回的用户名:{user_data_mark}")
         print(f"视频脚本参数中的用户名 {user_data_mark} 今天已经返回过,不再启动线程。")
         return
     mark = getVideo.video_task(data)
     print(f"返回用户名{mark}")
     if mark:
         today.append(mark)
-        Common.logger("log").info(f"返回用户名{mark}")
 
 # data = Material.feishu_list()
 # video_task_start(data[0])

+ 15 - 20
video_rewriting/video_processor.py

@@ -13,7 +13,7 @@ from common.gpt4o_mini_help import GPT4oMini
 from common.redis import get_data, get_first_value_with_prefix, increment_key
 from common.tag_video import Tag
 from common.tts_help import TTS
-from common import Material, Feishu, Common, Oss, AliyunLogger
+from common import Material, Feishu, Oss, AliyunLogger
 from common.ffmpeg import FFmpeg
 from data_channel.douyin import DY
 from data_channel.dy_keyword import DyKeyword
@@ -23,7 +23,7 @@ from data_channel.ks_ls import KSLS
 from data_channel.ks_xcx import KSXCX
 from data_channel.ks_xcx_keyword import KsXCXKeyword
 from data_channel.kuaishou import KS
-from data_channel.kuaishouchuangzuozhe import KsFeedVideo
+# from data_channel.kuaishouchuangzuozhe import KsFeedVideo
 from data_channel.piaoquan import PQ
 from common.sql_help import sqlCollect
 from data_channel.shipinhao import SPH
@@ -156,7 +156,6 @@ class VideoProcessor:
                     Feishu.finish_bot(text,
                                       "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703",
                                       "【 机器改造通知 】")
-                    Common.logger(mark).info(f"{name}的{task_mark}下的视频{url},标题为空,使用兜底标题生成片尾")
                 time.sleep(1)
                 pw_random_id = cls.random_id()
                 logger.info(f"[+] {name}的{task_mark}下的ID{url} 开始下载视频")
@@ -292,7 +291,6 @@ class VideoProcessor:
                 if name == "单点视频":
                     sphdd_status = sqlCollect.update_shp_dd_vid(v_id)
                     if sphdd_status == 1:
-                        Common.logger(mark).info(f"{name}的{task_mark}下的ID{url} 视频修改已使用,状态已修改")
                         from_user_name = video['from_user_name']  # 来源用户
                         from_group_name = video['from_group_name']  # 来源群组
                         source = video['source']  # 渠道
@@ -309,8 +307,6 @@ class VideoProcessor:
                         Feishu.finish_bot(text, "https://open.feishu.cn/open-apis/bot/v2/hook/d2f751a8-5b0a-49ca-a306-1fda142707a9", "【 有一条新的内容改造成功 】")
                 if name == "快手推荐流" or name == "视频号推荐流":
                     feed_status = sqlCollect.update_feed_vid(v_id)
-                    if feed_status == 1:
-                        Common.logger(mark).info(f"{name}的{task_mark}下的ID{url} 视频修改已使用,状态已修改")
                 if channel_id == "快手历史" or channel_id == "抖音历史" or channel_id == "视频号历史":
                     explain = "历史爆款"
                 else:
@@ -369,9 +365,8 @@ class VideoProcessor:
                     tag = f"{tag_first},{tag_channel}"
                     tag_status = Tag.video_tag( code, tag )
                     if tag_status == 0:
-                        Common.logger(mark).info(f"{name}的{task_mark}下的ID{url}下的票圈视频{code},写入标签成功")
+                        logger.info(f"{name}的{task_mark}下的ID{url}下的票圈视频{code},写入标签成功")
                     log_data = f"user:{url},,video_id:{v_id},,video_url:{video_url},,ai_title:{new_title},,voice:{voice},,tag:{tag}"
-                    # log_data = f"user:{url},,video_id:{v_id},,video_url:{video_url},,ai_title:{new_title},,voice:{voice},,first_category:{first_category},,tag:{tag}"
                     values = [
                         [
                             name,
@@ -488,8 +483,8 @@ class VideoProcessor:
             return SPH.get_sph_url(task_mark, url, number, mark, channel_id, name)
         elif channel_id == "快手":
             return KS.get_ks_url(task_mark, url, number, mark, feishu_id, cookie_sheet, channel_id, name)
-        elif channel_id == "快手创作者版":
-            return KsFeedVideo.get_data(channel_id, name)
+        # elif channel_id == "快手创作者版":
+        #     return KsFeedVideo.get_data(channel_id, name)
         elif channel_id == "单点视频":
             return SPHDD.get_sphdd_data(url, channel_id, name)
         elif channel_id == "抖音历史":
@@ -627,16 +622,16 @@ class VideoProcessor:
             return
         task = json.loads(data)
         try:
-            limit_number = task["limit_number"]
-            if limit_number:
-                task_mark = task["task_mark"]
-                makr_count = sqlCollect.get_mark_count(task_mark)
-                if int(limit_number) <= int(makr_count[0][0]):
-                    AliyunLogger.logging((task["channel_id"]), name, task["channel_url"], '', f"{task_mark}标识任务每日指定条数已足够,指定条数{limit_number},实际生成条数{int(makr_count[0][0])}",
-                                         "1111")
-                    logger.info(f"[+] {task_mark}标识任务每日指定条数已足够,指定条数{limit_number},实际生成条数{int(makr_count[0][0])}")
-
-                    return
+            # limit_number = task["limit_number"]
+            # if limit_number:
+            #     task_mark = task["task_mark"]
+            #     makr_count = sqlCollect.get_mark_count(task_mark)
+            #     if int(limit_number) <= int(makr_count[0][0]):
+            #         AliyunLogger.logging((task["channel_id"]), name, task["channel_url"], '', f"{task_mark}标识任务每日指定条数已足够,指定条数{limit_number},实际生成条数{int(makr_count[0][0])}",
+            #                              "1111")
+            #         logger.info(f"[+] {task_mark}标识任务每日指定条数已足够,指定条数{limit_number},实际生成条数{int(makr_count[0][0])}")
+            #
+            #         return
             if mark == 'dy-pl-gjc' and task['channel_id'] == '抖音搜索':
                 mark_count = 'dyss-count'
                 count = get_first_value_with_prefix(mark_count)

+ 722 - 722
video_rewriting/video_processor1.py

@@ -1,722 +1,722 @@
-import configparser
-import json
-import os
-import random
-import re
-import shutil
-import time
-from datetime import datetime
-
-from common.gpt4o_mini_help import GPT4oMini
-from common.redis import get_data, get_first_value_with_prefix, increment_key
-from common.tag_video import Tag
-from common.tts_help import TTS
-from common import Material, Feishu, Common, Oss, AliyunLogger
-from common.ffmpeg import FFmpeg
-from data_channel.douyin import DY
-from data_channel.dy_keyword import DyKeyword
-from data_channel.dy_ls import DYLS
-from data_channel.ks_feed import KSFeed
-from data_channel.ks_keyword import KsKeyword
-from data_channel.ks_ls import KSLS
-from data_channel.ks_xcx import KSXCX
-from data_channel.ks_xcx_keyword import KsXCXKeyword
-from data_channel.kuaishou import KS
-from data_channel.kuaishouchuangzuozhe import KsFeedVideo
-from data_channel.piaoquan import PQ
-from common.sql_help import sqlCollect
-from data_channel.shipinhao import SPH
-
-# 读取配置文件
-from data_channel.shipinhaodandian import SPHDD
-from data_channel.sph_feed import SPHFeed
-from data_channel.sph_keyword import SphKeyword
-from data_channel.sph_ls import SPHLS
-
-config = configparser.ConfigParser()
-config.read('./config.ini')
-
-
-class VideoProcessor:
-
-    """
-    视频处理类,包含创建文件夹、生成随机ID、删除文件和处理视频任务等方法。
-    """
-
-    @classmethod
-    def create_folders(cls, mark):
-        """
-        根据标示和任务标示创建目录
-        """
-        id = cls.random_id()
-        video_path_url = config['PATHS']['VIDEO_PATH'] + mark + "/" + str(id) + "/"
-        if not os.path.exists(video_path_url):
-            os.makedirs(video_path_url)
-        return video_path_url
-
-    @classmethod
-    def random_id(cls):
-        """
-        随机生成ID
-        """
-        now = datetime.now()
-        rand_num = random.randint(10000, 99999)
-        return f"{now.strftime('%Y%m%d%H%M%S')}{rand_num}"
-
-    @classmethod
-    def remove_files(cls, mark):
-        """
-        删除指定目录下的所有文件和子目录
-        """
-        path = config['PATHS']['VIDEO_PATH'] + mark + "/"
-        # 删除目录下的所有内容
-        if os.path.exists(path):
-            # 遍历目录下的所有文件和子目录
-            for filename in os.listdir(path):
-                file_path = os.path.join(path, filename)
-                try:
-                    if os.path.isfile(file_path) or os.path.islink(file_path):
-                        os.unlink(file_path)  # 删除文件或符号链接
-                    elif os.path.isdir(file_path):
-                        shutil.rmtree(file_path)  # 删除子目录及其所有内容
-                except Exception as e:
-                    print(f'Failed to delete {file_path}. Reason: {e}')
-
-
-
-    @classmethod
-    def process_task(cls, task, mark, name, feishu_id, cookie_sheet):
-        """
-        处理单个任务
-        """
-        try:
-            task_mark = task["task_mark"]
-            channel_id = str(task["channel_id"])
-            url = str(task["channel_url"])
-            piaoquan_id = str(task["piaoquan_id"])
-            number = task["number"]
-            title = task["title"]
-            video_share = task["video_share"]
-            video_ending = task["video_ending"]
-            crop_total = task["crop_total"]
-            gg_duration_total = task["gg_duration_total"]
-            voice = task['voice']
-            tags = task['tags']
-            if voice:
-                if ',' in voice:
-                    voices = voice.split(',')
-                else:
-                    voices = [voice]
-                voice = random.choice(voices)
-            else:
-                voice = "zhifeng_emo"
-            zm = Material.get_pzsrt_data("summary", "500Oe0", video_share)
-            Common.logger(mark).info(f"{name}的{task_mark}下{channel_id}的用户:{url}开始获取视频")
-            new_count = None
-            # if name in ['快手品类账号', '抖音品类账号', '抖音品类账号-1', '视频号品类账号']:
-            #     new_count = OdpsDataCount.main(channel_id, name, url)
-            data_list = cls.get_data_list(
-                channel_id, task_mark, url,
-                number,
-                mark, feishu_id, cookie_sheet, name, task
-            )
-            if not data_list:
-                AliyunLogger.logging(channel_id, name, url, "", "无改造视频", "4000")
-                Common.logger(mark).info(f"{name}的{task_mark}下{channel_id}的视频ID{url} 无改造视频")
-                text = (
-                    f"**通知类型**: 没有改造的视频\n"
-                    f"**负责人**: {name}\n"
-                    f"**渠道**: {channel_id}\n"
-                    f"**视频主页ID**: {url}\n"
-                )
-                Feishu.finish_bot(text, "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703",
-                                  "【 机器改造通知 】")
-                return
-            if new_count:
-                sqlCollect.insert_spider_supply_targetcnt(channel_id, name, url, number, new_count, str(len(data_list)))
-                current_time = datetime.now()
-                formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
-                values = [
-                    [
-                        name,
-                        channel_id,
-                        url,
-                        str(number),
-                        str(new_count),
-                        str(len(data_list)),
-                        formatted_time
-                    ]
-                ]
-                Feishu.insert_columns("Z5xLsdyyxh3abntTTvUc9zw8nYd", "099da8", "ROWS", 1, 2)
-                time.sleep(0.5)
-                Feishu.update_values("Z5xLsdyyxh3abntTTvUc9zw8nYd", "099da8", "A2:Z2", values)
-            Common.logger(mark).info(f"{name}的{task_mark}下的ID{url} 获取视频完成,共{len(data_list)}条")
-            try:
-                for video in data_list:
-                    # limit_number = task["limit_number"]
-                    # if limit_number:
-                    #     task_mark = task["task_mark"]
-                    #     makr_count = sqlCollect.get_mark_count(task_mark)
-                    #     if int(limit_number) <= int(makr_count[0][0]):
-                    #         AliyunLogger.logging((task["channel_id"]), name, task["channel_url"], '',
-                    #                              f"{task_mark}标识任务每日指定条数已足够,指定条数{limit_number},实际生成条数{int(makr_count[0][0])}",
-                    #                              "1111")
-                    #         return
-                    cls.remove_files(mark)
-                    video_path_url = cls.create_folders(mark)
-                    new_title = cls.generate_title(video, title)
-                    v_id = video["video_id"]
-                    cover = video["cover"]
-                    video_url = video["video_url"]
-                    old_title = video['old_title']
-                    rule = video['rule']
-                    if not old_title:
-                        old_title = '这个视频,分享给我的老友,祝愿您能幸福安康'
-                        text = (
-                            f"**通知类型**: 标题为空,使用兜底标题生成片尾\n"
-                            f"**负责人**: {name}\n"
-                            f"**渠道**: {channel_id}\n"
-                            f"**视频主页ID**: {url}\n"
-                            f"**视频Video_id**: {v_id}\n"
-                        )
-                        Feishu.finish_bot(text,
-                                          "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703",
-                                          "【 机器改造通知 】")
-                        Common.logger(mark).info(f"{name}的{task_mark}下的视频{url},标题为空,使用兜底标题生成片尾")
-                    time.sleep(1)
-                    pw_random_id = cls.random_id()
-                    Common.logger(mark).info(f"{name}的{task_mark}下的ID{url} 开始下载视频")
-                    new_video_path = cls.download_and_process_video(channel_id, video_url, video_path_url, v_id,
-                                                                    crop_total, gg_duration_total, pw_random_id, new_title, mark, video)
-                    if not os.path.isfile(new_video_path) or os.path.getsize(new_video_path) == 0:
-                        AliyunLogger.logging(channel_id, name, url, v_id, "视频下载失败", "3002", f"video_url:{video_url}")
-                        text = (
-                            f"**通知类型**: 视频下载失败\n"
-                            f"**负责人**: {name}\n"
-                            f"**渠道**: {channel_id}\n"
-                            f"**视频主页ID**: {url}\n"
-                            f"**视频Video_id**: {v_id}\n"
-                        )
-                        Feishu.finish_bot(text,
-                                          "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703",
-                                          "【 机器改造通知 】")
-                        continue
-                    if video_ending and video_ending != 'None':
-                        new_video_path = cls.handle_video_ending(new_video_path, video_ending, old_title, pw_random_id, video_path_url, mark, task_mark, url, name, video_share, zm, voice)
-                        if new_video_path == None:
-                            if name == "单点视频":
-                                sqlCollect.update_shp_dd_vid_4(v_id)
-                                from_user_name = video['from_user_name']  # 来源用户
-                                from_group_name = video['from_group_name']  # 来源群组
-                                source = video['source']  # 渠道
-                                text = (
-                                    f"**渠道**: {source}\n"
-                                    f"**来源用户**: {from_user_name}\n"
-                                    f"**来源群组**: {from_group_name}\n"
-                                    f"**原视频链接**: {video['video_url']}\n"
-                                    f"**原视频封面**: {video['cover']}\n"
-                                    f"**原视频标题**: {video['old_title']}\n"
-                                )
-                                AliyunLogger.logging(channel_id, name, url, v_id, "视频下载失败", "3002")
-                                Feishu.finish_bot(text,
-                                                  "https://open.feishu.cn/open-apis/bot/v2/hook/493b3d4c-5fae-4a9d-980b-1dd86636524e",
-                                                  "【 视频下载失败,跳过该视频 】")
-                            if name == "快手推荐流" or name == "视频号推荐流":
-                                sqlCollect.update_feed_vid_2(v_id)
-                                Feishu.finish_bot(text,
-                                                  "https://open.feishu.cn/open-apis/bot/v2/hook/493b3d4c-5fae-4a9d-980b-1dd86636524e",
-                                                  "【 视频下载失败,跳过该视频 】")
-                            continue
-                    else:
-                        if video_share and video_share != 'None':
-                            new_video_path = FFmpeg.single_video(new_video_path, video_path_url, zm)
-                    if not os.path.isfile(new_video_path) or os.path.getsize(new_video_path) == 0:
-                        AliyunLogger.logging(channel_id, name, url, v_id, "视频改造失败", "3001")
-                        text = (
-                            f"**通知类型**: 视频改造失败\n"
-                            f"**负责人**: {name}\n"
-                            f"**渠道**: {channel_id}\n"
-                            f"**视频主页ID**: {url}\n"
-                            f"**视频Video_id**: {v_id}\n"
-                        )
-                        Feishu.finish_bot(text,
-                                          "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703",
-                                          "【 机器改造通知 】")
-                        continue
-                    # 上传视频和封面,并更新数据库
-                    code = cls.upload_video_and_thumbnail(new_video_path, cover, v_id, new_title, task_mark, name, piaoquan_id,
-                                                   video_path_url, mark, channel_id, url, old_title, title, rule, video)
-                    # 更新已使用的视频号状态
-                    pq_url = f'https://admin.piaoquantv.com/cms/post-detail/{code}/detail'  # 站内视频链接
-                    if name == "单点视频":
-                        sphdd_status = sqlCollect.update_shp_dd_vid(v_id)
-                        if sphdd_status == 1:
-                            Common.logger(mark).info(f"{name}的{task_mark}下的ID{url} 视频修改已使用,状态已修改")
-                            from_user_name = video['from_user_name']  # 来源用户
-                            from_group_name = video['from_group_name']  # 来源群组
-                            source = video['source']  # 渠道
-                            channel_id = source
-                            text = (
-                                f"**站内视频链接**: {pq_url}\n"
-                                f"**渠道**: {source}\n"
-                                f"**来源用户**: {from_user_name}\n"
-                                f"**来源群组**: {from_group_name}\n"
-                                f"**原视频链接**: {video['video_url']}\n"
-                                f"**原视频封面**: {video['cover']}\n"
-                                f"**原视频标题**: {video['old_title']}\n"
-                            )
-                            Feishu.finish_bot(text, "https://open.feishu.cn/open-apis/bot/v2/hook/d2f751a8-5b0a-49ca-a306-1fda142707a9", "【 有一条新的内容改造成功 】")
-                    if name == "快手推荐流" or name == "视频号推荐流":
-                        feed_status = sqlCollect.update_feed_vid(v_id)
-                        if feed_status == 1:
-                            Common.logger(mark).info(f"{name}的{task_mark}下的ID{url} 视频修改已使用,状态已修改")
-                    if channel_id == "快手历史" or channel_id == "抖音历史" or channel_id == "视频号历史":
-                        explain = "历史爆款"
-                    else:
-                        explain = "新供给"
-                    current_time = datetime.now()
-                    formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
-                    if name == "品类关键词搜索":
-                        first_category = task["first_category"]
-                        keyword_principal = task["keyword_name"]
-                        tag_first = f"一级品类_{first_category}"
-                        tag_keyword = f"关键词_{url}"
-                        if channel_id == "抖音搜索":
-                            tag_channel = "来源_抖音关键词"
-                        elif channel_id == "快手搜索":
-                            tag_channel = "来源_快手关键词"
-                        elif channel_id == "视频号搜索":
-                            tag_channel = "来源_视频号关键词"
-                        tag = f"{tag_first},{tag_keyword},{tag_channel}"
-                        tag_status = Tag.video_tag(code, tag)
-                        if tag_status == 0:
-                            Common.logger(mark).info(f"{name}的{task_mark}下的ID{url}下的票圈视频{code},写入标签成功")
-                        secondary_category = task["secondary_category"]
-                        log_data = f"user:{url},,video_id:{v_id},,video_url:{video_url},,ai_title:{new_title},,voice:{voice},,first_category:{first_category},,secondary_category:{secondary_category},,keyword_principal:{keyword_principal},,tag:{tag}"
-                        values = [
-                            [
-                                name,
-                                task_mark,
-                                channel_id,
-                                url,
-                                str(v_id),
-                                piaoquan_id,
-                                old_title,
-                                title if title in ["原标题", "AI标题"] else "",
-                                new_title,
-                                str(code),
-                                formatted_time,
-                                str(rule),
-                                explain,
-                                voice,
-                                first_category,
-                                secondary_category,
-                                keyword_principal,
-                                pq_url
-                            ]
-                        ]
-                    elif name == "抖音品类账号-1" or name == "抖音品类账号" or name == "视频号品类账号" or name == "快手品类账号":
-                        first_category = task["first_category"]
-                        tag_first = f"一级品类_{first_category}"
-                        if channel_id == "抖音" or channel_id == "抖音历史":
-                            tag_channel = "来源_抖音品类账号"
-                        elif channel_id == "快手" or channel_id == "快手历史":
-                            tag_channel = "来源_快手品类账号"
-                        elif channel_id == "视频号" or channel_id == "视频号历史":
-                            tag_channel = "来源_视频号品类账号"
-                        tag = f"{tag_first},{tag_channel}"
-                        tag_status = Tag.video_tag( code, tag )
-                        if tag_status == 0:
-                            Common.logger(mark).info(f"{name}的{task_mark}下的ID{url}下的票圈视频{code},写入标签成功")
-                        log_data = f"user:{url},,video_id:{v_id},,video_url:{video_url},,ai_title:{new_title},,voice:{voice},,tag:{tag}"
-                        # log_data = f"user:{url},,video_id:{v_id},,video_url:{video_url},,ai_title:{new_title},,voice:{voice},,first_category:{first_category},,tag:{tag}"
-                        values = [
-                            [
-                                name,
-                                task_mark,
-                                channel_id,
-                                url,
-                                str( v_id ),
-                                piaoquan_id,
-                                old_title,
-                                title if title in ["原标题", "AI标题"] else "",
-                                new_title,
-                                str( code ),
-                                formatted_time,
-                                str( rule ),
-                                explain,
-                                voice,
-                                first_category,
-                                pq_url
-                            ]
-                        ]
-
-                    else:
-                        log_data = f"user:{url},,video_id:{v_id},,video_url:{video_url},,ai_title:{new_title},,voice:{voice}"
-                        values = [
-                            [
-                                name,
-                                task_mark,
-                                channel_id,
-                                url,
-                                str(v_id),
-                                piaoquan_id,
-                                old_title,
-                                title if title in ["原标题", "AI标题"] else "",
-                                new_title,
-                                str(code),
-                                formatted_time,
-                                str(rule),
-                                explain,
-                                voice
-                            ]
-                        ]
-                    AliyunLogger.logging(channel_id, name, url, v_id, "视频改造成功", "1000", log_data, str(code))
-                    text = (
-                        f"**通知类型**: 视频改造成功\n"
-                        f"**站内视频链接**: {pq_url}\n"
-                        f"**负责人**: {name}\n"
-                        f"**渠道**: {channel_id}\n"
-                        f"**视频主页ID**: {url}\n"
-                        f"**视频Video_id**: {v_id}\n"
-                        f"**使用音频音色**: {voice}\n"
-                    )
-                    Feishu.finish_bot(text,
-                                      "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703",
-                                      "【 机器改造通知 】")
-                    if tags:
-                        Tag.video_tag(code, tags)
-                    if values:
-                        if name == "王雪珂":
-                            sheet = "vfhHwj"
-                        elif name == "鲁涛":
-                            sheet = "FhewlS"
-                        elif name == "范军":
-                            sheet = "B6dCfS"
-                        elif name == "余海涛":
-                            sheet = "mfBrNT"
-                        elif name == "罗情":
-                            sheet = "2J3PwN"
-                        elif name == "王玉婷":
-                            sheet = "bBHFwC"
-                        elif name == "刘诗雨":
-                            sheet = "fBdxIQ"
-                        elif name == "信欣":
-                            sheet = "lPe1eT"
-                        elif name == "快手创作者版品类推荐流":
-                            sheet = "k7l7nQ"
-                        elif name == "抖音品类账号":
-                            sheet = "ZixHmf"
-                        elif name == "抖音品类账号-1":
-                            sheet = "61kvW7"
-                        elif name == "视频号品类账号":
-                            sheet = "b0uLWw"
-                        elif name == "单点视频":
-                            sheet = "ptgCXW"
-                        elif name == "快手品类账号":
-                            sheet = "ibjoMx"
-                        elif name == "品类关键词搜索":
-                            sheet = "rBAJT8"
-                        elif name == "快手推荐流":
-                            sheet = "9Ii8lw"
-                        elif name == "视频号推荐流":
-                            sheet = "hMBv7T"
-                        elif name == "快手小程序":
-                            sheet = "GeDT6Q"
-                        Feishu.insert_columns("ILb4sa0LahddRktnRipcu2vQnLb", sheet, "ROWS", 1, 2)
-                        time.sleep(0.5)
-                        Feishu.update_values("ILb4sa0LahddRktnRipcu2vQnLb", sheet, "A2:Z2", values)
-            except Exception as e:
-                AliyunLogger.logging(channel_id, name, url, video["video_id"], f"改造失败{e}", "3001", log_data)
-                Common.logger(mark).error(f"{name}的{task_mark}任务处理失败:{e}")
-        except Exception as e:
-            AliyunLogger.logging(channel_id, name, url, video["video_id"], f"改造失败{e}", "3001", log_data)
-            Common.logger(mark).error(f"{name}的{task_mark}任务处理失败:{e}")
-
-
-    @classmethod
-    def get_data_list(cls, channel_id, task_mark, url, number, mark, feishu_id, cookie_sheet, name, task):
-        """
-        根据渠道ID获取数据列表
-        """
-        if channel_id == "抖音":
-            return DY.get_dy_url(task_mark, url, number, mark, feishu_id, cookie_sheet, channel_id, name)
-        elif channel_id == "票圈":
-            return PQ.get_pq_url(task_mark, url, number, mark, channel_id, name)
-        elif channel_id == "视频号":
-            return SPH.get_sph_url(task_mark, url, number, mark, channel_id, name)
-        elif channel_id == "快手":
-            return KS.get_ks_url(task_mark, url, number, mark, feishu_id, cookie_sheet, channel_id, name)
-        elif channel_id == "快手创作者版":
-            return KsFeedVideo.get_data(channel_id, name)
-        elif channel_id == "单点视频":
-            return SPHDD.get_sphdd_data(url, channel_id, name)
-        elif channel_id == "抖音历史":
-            return DYLS.get_dy_zr_list(task_mark, url, number, mark, channel_id, name)
-        elif channel_id == "快手历史":
-            return KSLS.get_ksls_list(task_mark, url, number, mark, channel_id, name)
-        elif channel_id == "视频号历史":
-            return SPHLS.get_sphls_data(task_mark, url, number, mark, channel_id, name)
-        elif channel_id == '抖音搜索':
-            return DyKeyword.get_key_word(url, task_mark, mark, channel_id, name, task)
-        elif channel_id == '快手搜索':
-            return KsXCXKeyword.get_key_word(url, task_mark, mark, channel_id, name, task)
-        elif channel_id == '视频号搜索':
-            return SphKeyword.get_key_word(url, task_mark, mark, channel_id, name)
-        elif channel_id == '快手推荐流':
-            return KSFeed.get_feed_date()
-        elif channel_id == '视频号推荐流':
-            return SPHFeed.get_feed_date()
-        elif channel_id == '快手小程序':
-            return KSXCX.get_xcx_date()
-
-
-    @classmethod
-    def generate_title(cls, video, title):
-        """
-        生成新标题
-        """
-        if video['old_title']:
-            new_title = video['old_title'].strip().replace("\n", "") \
-                .replace("/", "").replace("\\", "").replace("\r", "") \
-                .replace(":", "").replace("*", "").replace("?", "") \
-                .replace("?", "").replace('"', "").replace("<", "") \
-                .replace(">", "").replace("|", "").replace(" ", "") \
-                .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
-                .replace("'", "").replace("#", "").replace("Merge", "")
-        else:
-            return '这个视频,分享给我的老友,祝愿您能幸福安康'
-        if title == "原标题":
-            if not new_title:
-                new_title = '这个视频,分享给我的老友,祝愿您能幸福安康'
-        elif title == "AI标题":
-            if not new_title:
-                new_title = '这个视频,分享给我的老友,祝愿您能幸福安康'
-            else:
-                new_title = GPT4oMini.get_ai_mini_title(new_title)
-        else:
-            titles = title.split('/') if '/' in title else [title]
-            new_title = random.choice(titles)
-        return new_title
-
-    @classmethod
-    def download_and_process_video(cls, channel_id, video_url, video_path_url, v_id, crop_total, gg_duration_total,
-                                   pw_random_id, new_title, mark, video):
-        """
-        下载并处理视频
-        """
-        if channel_id == "单点视频":
-            new_video_path = PQ.dd_sph_download_video(video_url, video_path_url, v_id, video, channel_id)
-        elif channel_id == "视频号":
-            new_video_path = PQ.sph_download_video(video_url, video_path_url, v_id, video)
-            if new_video_path == None:
-                return None
-            Common.logger(mark).info(f"{channel_id}视频下载成功: {new_video_path}")
-        elif channel_id == "票圈" or channel_id == "快手创作者版" or channel_id == '视频号搜索' or channel_id == "快手推荐流":
-            new_video_path = PQ.download_video(video_url, video_path_url, v_id)
-            if new_video_path == None:
-                return None
-            Common.logger(mark).info(f"{channel_id}视频下载成功: {new_video_path}")
-        elif channel_id == "抖音" or channel_id == "抖音历史" or channel_id == "抖音搜索":
-            new_video_path = PQ.download_dy_video(video_url, video_path_url, v_id)
-            if new_video_path == None:
-                return None
-            Common.logger(mark).info(f"{channel_id}视频下载成功: {new_video_path}")
-        elif channel_id == "视频号历史":
-            new_video_path = Oss.download_sph_ls(video_url, video_path_url, v_id)
-        else:
-            Common.logger(mark).info(f"视频准备下载")
-            new_video_path = Oss.download_video_oss(video_url, video_path_url, v_id)
-        if not os.path.isfile(new_video_path)  or os.path.getsize(new_video_path) == 0:
-            return None
-        Common.logger(mark).info(f"视频下载成功: {new_video_path}")
-        if crop_total and crop_total != 'None':  # 判断是否需要裁剪
-            new_video_path = FFmpeg.video_crop(new_video_path, video_path_url, pw_random_id)
-        if gg_duration_total and gg_duration_total != 'None':  # 判断是否需要指定视频时长
-            new_video_path = FFmpeg.video_ggduration(new_video_path, video_path_url, pw_random_id,
-                                                     gg_duration_total)
-        width, height = FFmpeg.get_w_h_size(new_video_path)
-        if width < height:  # 判断是否需要修改为竖屏
-            new_video_path = FFmpeg.update_video_h_w(new_video_path, video_path_url, pw_random_id)
-            new_title_re = re.sub(r'[^\w\s\u4e00-\u9fff,。!?]', '', new_title)
-            if len(new_title_re) > 12:
-                new_title_re = '\n'.join(
-                    [new_title_re[i:i + 12] for i in range(0, len(new_title_re), 12)])
-            new_video_path = FFmpeg.add_video_zm(new_video_path, video_path_url, pw_random_id, new_title_re)
-        return new_video_path
-
-    @classmethod
-    def handle_video_ending(cls, new_video_path, video_ending, old_title, pw_random_id, video_path_url, mark, task_mark, url, name, video_share, zm, voice):
-        """
-        处理视频片尾
-        """
-        if video_ending == "AI片尾引导":
-            pw_srt_text = GPT4oMini.get_ai_mini_pw(old_title)
-            if pw_srt_text:
-
-                pw_url = TTS.get_pw_zm(pw_srt_text, voice)
-                if pw_url:
-                    pw_mp3_path = TTS.download_mp3(pw_url, video_path_url, pw_random_id)
-                    # oss_mp3_key = Oss.mp3_upload_oss(pw_mp3_path, pw_random_id)
-                    # oss_mp3_key = oss_mp3_key.get("oss_object_key")
-                    # new_pw_path = f"http://art-crawler.oss-cn-hangzhou.aliyuncs.com/{oss_mp3_key}"
-                    # print(f"mp3地址:{new_pw_path}")
-                    # pw_url_sec = FFmpeg.get_video_duration(pw_mp3_path)
-                    pw_srt = TTS.getSrt(pw_url)
-                    Common.logger(mark).info(f"{name}的{task_mark}下的视频{url},获取AI片尾srt成功")
-                else:
-                    Common.logger(mark).info(f"{name}的{task_mark}下的视频{url},获取AI片尾失败")
-                    return None
-            else:
-                Common.logger(mark).info(f"{name}的{task_mark}下的视频{url},获取AI片尾失败")
-
-                return None
-        else:
-            if ',' in video_ending:
-                video_ending_list = video_ending.split(',')
-            else:
-                video_ending_list = [video_ending]
-            ending = random.choice(video_ending_list)
-            pw_list = Material.get_pwsrt_data("summary", "DgX7vC", ending)  # 获取srt
-            if pw_list:
-                pw_id = pw_list["pw_id"]
-                pw_srt = pw_list["pw_srt"]
-                pw_url = PQ.get_pw_url(pw_id)
-                pw_mp3_path = FFmpeg.get_video_mp3(pw_url, video_path_url, pw_random_id)
-            else:
-                Feishu.bot(mark, '机器自动改造消息通知', f'{task_mark}任务下片尾标示错误,请关注!!!!', name)
-        for attempt in range(3):
-            jpg_path = FFmpeg.video_png(new_video_path, video_path_url, pw_random_id)  # 生成视频最后一帧jpg
-            if os.path.isfile(jpg_path):
-                Common.logger(mark).info(f"{name}的{task_mark}下的视频{url},生成视频最后一帧成功")
-                break
-            time.sleep(1)
-        for attempt in range(3):
-            Common.logger(mark).info(f"{name}的{task_mark}下的视频{url},获取mp3成功")
-            pw_path = FFmpeg.pw_video(jpg_path, video_path_url, pw_mp3_path, pw_srt, pw_random_id,
-                                      pw_mp3_path)  # 生成片尾视频
-            if os.path.isfile(pw_path):
-                Common.logger(mark).info(f"{task_mark}下的视频{url},生成片尾视频成功")
-                break
-            time.sleep(1)
-        pw_video_list = [new_video_path, pw_path]
-        Common.logger(mark).info(f"{task_mark}下的视频{url},视频与片尾开始拼接")
-        video_path = FFmpeg.concatenate_videos(pw_video_list, video_path_url)  # 视频与片尾拼接到一起
-        if os.path.exists(video_path) or os.path.getsize(video_path) != 0:
-            Common.logger(mark).info(f"{name}的{task_mark}下的视频{url},视频与片尾拼接成功")
-            time.sleep(1)
-            if video_share and video_share != 'None':
-                new_video_path = FFmpeg.single_video(video_path, video_path_url, zm)
-            else:
-                new_video_path = video_path
-            return new_video_path
-        else:
-            return new_video_path
-
-
-
-
-    @classmethod
-    def upload_video_and_thumbnail(cls, new_video_path: str, cover: str, v_id, new_title: str, task_mark: str, name: str, piaoquan_id,
-                                   video_path_url: str, mark: str, channel_id: str, url: str, old_title: str, title, rule: str, video):
-        """
-        上传视频和封面到OSS,并更新数据库
-        """
-        try:
-            oss_id = cls.random_id()
-            Common.logger(mark).info(f"{name}的{task_mark},开始发送oss")
-            oss_object_key = Oss.stitching_sync_upload_oss(new_video_path, oss_id)  # 视频发送OSS
-            Common.logger(mark).info(f"{name}的{task_mark},发送oss成功{oss_object_key}")
-            status = oss_object_key.get("status")
-            if status == 200:
-                oss_object_key = oss_object_key.get("oss_object_key")
-                time.sleep(1)
-                jpg_path = None
-                if channel_id == "快手历史" or channel_id == "快手" or channel_id == '快手搜索' or channel_id == '视频号':
-                    jpg = None
-                elif channel_id == "视频号历史":
-                    jpg_path = Oss.download_sph_ls( cover, video_path_url, v_id )
-                elif channel_id == '单点视频':
-                    if video['source'] != "快手":
-                        jpg_path = PQ.download_video_jpg( cover, video_path_url, v_id )  # 下载视频封面
-                if jpg_path and os.path.isfile( jpg_path ):
-                    oss_jpg_key = Oss.stitching_fm_upload_oss( jpg_path, oss_id )  # 封面上传OSS
-                    jpg = oss_jpg_key.get( "oss_object_key" )
-                else:
-                    jpg = None
-                code = PQ.insert_piaoquantv(oss_object_key, new_title, jpg, piaoquan_id)
-                Common.logger(mark).info(f"{name}的{task_mark}下的视频ID{v_id}发送成功")
-                sqlCollect.insert_task(task_mark, v_id, mark, channel_id)  # 插入数据库
-                current_time = datetime.now()
-                formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
-                if name == "单点视频":
-                    url = str(rule)
-                sqlCollect.insert_machine_making_data(name, task_mark, channel_id, url, v_id, piaoquan_id, new_title, code,
-                                                      formatted_time, old_title, oss_object_key)
-
-                return code
-        except Exception as e:
-            Common.logger(mark).error(f"{name}的{task_mark}上传视频和封面到OSS,并更新数据库失败:{e}\n")
-            AliyunLogger.logging(channel_id, name, url, video["video_id"], "改造失败-上传视频和封面到OSS", "3001")
-            return
-
-
-    @classmethod
-    def main(cls, data):
-        """
-        主函数,初始化任务并使用线程池处理任务。
-        """
-        mark = data["mark"]
-        name = data["name"]
-        feishu_id = data["feishu_id"]
-        feishu_sheet = data["feishu_sheet"]
-        cookie_sheet = data["cookie_sheet"]
-        data = get_data(mark, feishu_id, feishu_sheet)
-        if not data:
-            Common.logger("redis").error(f"{mark}任务开始新的一轮\n")
-            return
-        task = json.loads(data)
-        try:
-            limit_number = task["limit_number"]
-            if limit_number:
-                task_mark = task["task_mark"]
-                makr_count = sqlCollect.get_mark_count(task_mark)
-                if int(limit_number) <= int(makr_count[0][0]):
-                    AliyunLogger.logging((task["channel_id"]), name, task["channel_url"], '', f"{task_mark}标识任务每日指定条数已足够,指定条数{limit_number},实际生成条数{int(makr_count[0][0])}",
-                                         "1111")
-                    return
-            if mark == 'dy-pl-gjc' and task['channel_id'] == '抖音搜索':
-                mark_count = 'dyss-count'
-                count = get_first_value_with_prefix(mark_count)
-                increment_key(mark_count)
-                if int(count) >= 300:
-                    return "抖音搜索上限"
-            if mark == 'ks-pl-gjc':
-                mark_count = 'ksss-count'
-                count = get_first_value_with_prefix(mark_count)
-                increment_key(mark_count)
-                if int(count) >= 300:
-                    return "快手搜索上限"
-            if mark == 'sph-pl-gjc':
-                mark_count = 'ss-sph-count'
-                count = get_first_value_with_prefix(mark_count)
-                increment_key(mark_count)
-                if int(count) >= 300:
-                    time.sleep(10)
-                    return "视频号搜索上限"
-            if mark == 'sph-plzh'and task['channel_id'] == '视频号':
-                mark_count = 'sph-count'
-                count = get_first_value_with_prefix(mark_count)
-                increment_key(mark_count)
-                if int(count) >= 400:
-                    time.sleep(10)
-                    return "视频号获取用户主页视频上限"
-            VideoProcessor.process_task(task, mark, name, feishu_id, cookie_sheet)
-            return mark
-        except Exception as e:
-            AliyunLogger.logging((task["channel_id"]), name, task["channel_url"],'', f"用户抓取异常:{e}", "3001")
-            return mark
-
-
-# if __name__ == "__main__":
-#     main()
+# import configparser
+# import json
+# import os
+# import random
+# import re
+# import shutil
+# import time
+# from datetime import datetime
+#
+# from common.gpt4o_mini_help import GPT4oMini
+# from common.redis import get_data, get_first_value_with_prefix, increment_key
+# from common.tag_video import Tag
+# from common.tts_help import TTS
+# from common import Material, Feishu,  Oss, AliyunLogger
+# from common.ffmpeg import FFmpeg
+# from data_channel.douyin import DY
+# from data_channel.dy_keyword import DyKeyword
+# from data_channel.dy_ls import DYLS
+# from data_channel.ks_feed import KSFeed
+# from data_channel.ks_keyword import KsKeyword
+# from data_channel.ks_ls import KSLS
+# from data_channel.ks_xcx import KSXCX
+# from data_channel.ks_xcx_keyword import KsXCXKeyword
+# from data_channel.kuaishou import KS
+# from data_channel.kuaishouchuangzuozhe import KsFeedVideo
+# from data_channel.piaoquan import PQ
+# from common.sql_help import sqlCollect
+# from data_channel.shipinhao import SPH
+#
+# # 读取配置文件
+# from data_channel.shipinhaodandian import SPHDD
+# from data_channel.sph_feed import SPHFeed
+# from data_channel.sph_keyword import SphKeyword
+# from data_channel.sph_ls import SPHLS
+#
+# config = configparser.ConfigParser()
+# config.read('./config.ini')
+#
+#
+# class VideoProcessor:
+#
+#     """
+#     视频处理类,包含创建文件夹、生成随机ID、删除文件和处理视频任务等方法。
+#     """
+#
+#     @classmethod
+#     def create_folders(cls, mark):
+#         """
+#         根据标示和任务标示创建目录
+#         """
+#         id = cls.random_id()
+#         video_path_url = config['PATHS']['VIDEO_PATH'] + mark + "/" + str(id) + "/"
+#         if not os.path.exists(video_path_url):
+#             os.makedirs(video_path_url)
+#         return video_path_url
+#
+#     @classmethod
+#     def random_id(cls):
+#         """
+#         随机生成ID
+#         """
+#         now = datetime.now()
+#         rand_num = random.randint(10000, 99999)
+#         return f"{now.strftime('%Y%m%d%H%M%S')}{rand_num}"
+#
+#     @classmethod
+#     def remove_files(cls, mark):
+#         """
+#         删除指定目录下的所有文件和子目录
+#         """
+#         path = config['PATHS']['VIDEO_PATH'] + mark + "/"
+#         # 删除目录下的所有内容
+#         if os.path.exists(path):
+#             # 遍历目录下的所有文件和子目录
+#             for filename in os.listdir(path):
+#                 file_path = os.path.join(path, filename)
+#                 try:
+#                     if os.path.isfile(file_path) or os.path.islink(file_path):
+#                         os.unlink(file_path)  # 删除文件或符号链接
+#                     elif os.path.isdir(file_path):
+#                         shutil.rmtree(file_path)  # 删除子目录及其所有内容
+#                 except Exception as e:
+#                     print(f'Failed to delete {file_path}. Reason: {e}')
+#
+#
+#
+#     @classmethod
+#     def process_task(cls, task, mark, name, feishu_id, cookie_sheet):
+#         """
+#         处理单个任务
+#         """
+#         try:
+#             task_mark = task["task_mark"]
+#             channel_id = str(task["channel_id"])
+#             url = str(task["channel_url"])
+#             piaoquan_id = str(task["piaoquan_id"])
+#             number = task["number"]
+#             title = task["title"]
+#             video_share = task["video_share"]
+#             video_ending = task["video_ending"]
+#             crop_total = task["crop_total"]
+#             gg_duration_total = task["gg_duration_total"]
+#             voice = task['voice']
+#             tags = task['tags']
+#             if voice:
+#                 if ',' in voice:
+#                     voices = voice.split(',')
+#                 else:
+#                     voices = [voice]
+#                 voice = random.choice(voices)
+#             else:
+#                 voice = "zhifeng_emo"
+#             zm = Material.get_pzsrt_data("summary", "500Oe0", video_share)
+#             Common.logger(mark).info(f"{name}的{task_mark}下{channel_id}的用户:{url}开始获取视频")
+#             new_count = None
+#             # if name in ['快手品类账号', '抖音品类账号', '抖音品类账号-1', '视频号品类账号']:
+#             #     new_count = OdpsDataCount.main(channel_id, name, url)
+#             data_list = cls.get_data_list(
+#                 channel_id, task_mark, url,
+#                 number,
+#                 mark, feishu_id, cookie_sheet, name, task
+#             )
+#             if not data_list:
+#                 AliyunLogger.logging(channel_id, name, url, "", "无改造视频", "4000")
+#                 Common.logger(mark).info(f"{name}的{task_mark}下{channel_id}的视频ID{url} 无改造视频")
+#                 text = (
+#                     f"**通知类型**: 没有改造的视频\n"
+#                     f"**负责人**: {name}\n"
+#                     f"**渠道**: {channel_id}\n"
+#                     f"**视频主页ID**: {url}\n"
+#                 )
+#                 Feishu.finish_bot(text, "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703",
+#                                   "【 机器改造通知 】")
+#                 return
+#             if new_count:
+#                 sqlCollect.insert_spider_supply_targetcnt(channel_id, name, url, number, new_count, str(len(data_list)))
+#                 current_time = datetime.now()
+#                 formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
+#                 values = [
+#                     [
+#                         name,
+#                         channel_id,
+#                         url,
+#                         str(number),
+#                         str(new_count),
+#                         str(len(data_list)),
+#                         formatted_time
+#                     ]
+#                 ]
+#                 Feishu.insert_columns("Z5xLsdyyxh3abntTTvUc9zw8nYd", "099da8", "ROWS", 1, 2)
+#                 time.sleep(0.5)
+#                 Feishu.update_values("Z5xLsdyyxh3abntTTvUc9zw8nYd", "099da8", "A2:Z2", values)
+#             Common.logger(mark).info(f"{name}的{task_mark}下的ID{url} 获取视频完成,共{len(data_list)}条")
+#             try:
+#                 for video in data_list:
+#                     # limit_number = task["limit_number"]
+#                     # if limit_number:
+#                     #     task_mark = task["task_mark"]
+#                     #     makr_count = sqlCollect.get_mark_count(task_mark)
+#                     #     if int(limit_number) <= int(makr_count[0][0]):
+#                     #         AliyunLogger.logging((task["channel_id"]), name, task["channel_url"], '',
+#                     #                              f"{task_mark}标识任务每日指定条数已足够,指定条数{limit_number},实际生成条数{int(makr_count[0][0])}",
+#                     #                              "1111")
+#                     #         return
+#                     cls.remove_files(mark)
+#                     video_path_url = cls.create_folders(mark)
+#                     new_title = cls.generate_title(video, title)
+#                     v_id = video["video_id"]
+#                     cover = video["cover"]
+#                     video_url = video["video_url"]
+#                     old_title = video['old_title']
+#                     rule = video['rule']
+#                     if not old_title:
+#                         old_title = '这个视频,分享给我的老友,祝愿您能幸福安康'
+#                         text = (
+#                             f"**通知类型**: 标题为空,使用兜底标题生成片尾\n"
+#                             f"**负责人**: {name}\n"
+#                             f"**渠道**: {channel_id}\n"
+#                             f"**视频主页ID**: {url}\n"
+#                             f"**视频Video_id**: {v_id}\n"
+#                         )
+#                         Feishu.finish_bot(text,
+#                                           "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703",
+#                                           "【 机器改造通知 】")
+#                         Common.logger(mark).info(f"{name}的{task_mark}下的视频{url},标题为空,使用兜底标题生成片尾")
+#                     time.sleep(1)
+#                     pw_random_id = cls.random_id()
+#                     Common.logger(mark).info(f"{name}的{task_mark}下的ID{url} 开始下载视频")
+#                     new_video_path = cls.download_and_process_video(channel_id, video_url, video_path_url, v_id,
+#                                                                     crop_total, gg_duration_total, pw_random_id, new_title, mark, video)
+#                     if not os.path.isfile(new_video_path) or os.path.getsize(new_video_path) == 0:
+#                         AliyunLogger.logging(channel_id, name, url, v_id, "视频下载失败", "3002", f"video_url:{video_url}")
+#                         text = (
+#                             f"**通知类型**: 视频下载失败\n"
+#                             f"**负责人**: {name}\n"
+#                             f"**渠道**: {channel_id}\n"
+#                             f"**视频主页ID**: {url}\n"
+#                             f"**视频Video_id**: {v_id}\n"
+#                         )
+#                         Feishu.finish_bot(text,
+#                                           "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703",
+#                                           "【 机器改造通知 】")
+#                         continue
+#                     if video_ending and video_ending != 'None':
+#                         new_video_path = cls.handle_video_ending(new_video_path, video_ending, old_title, pw_random_id, video_path_url, mark, task_mark, url, name, video_share, zm, voice)
+#                         if new_video_path == None:
+#                             if name == "单点视频":
+#                                 sqlCollect.update_shp_dd_vid_4(v_id)
+#                                 from_user_name = video['from_user_name']  # 来源用户
+#                                 from_group_name = video['from_group_name']  # 来源群组
+#                                 source = video['source']  # 渠道
+#                                 text = (
+#                                     f"**渠道**: {source}\n"
+#                                     f"**来源用户**: {from_user_name}\n"
+#                                     f"**来源群组**: {from_group_name}\n"
+#                                     f"**原视频链接**: {video['video_url']}\n"
+#                                     f"**原视频封面**: {video['cover']}\n"
+#                                     f"**原视频标题**: {video['old_title']}\n"
+#                                 )
+#                                 AliyunLogger.logging(channel_id, name, url, v_id, "视频下载失败", "3002")
+#                                 Feishu.finish_bot(text,
+#                                                   "https://open.feishu.cn/open-apis/bot/v2/hook/493b3d4c-5fae-4a9d-980b-1dd86636524e",
+#                                                   "【 视频下载失败,跳过该视频 】")
+#                             if name == "快手推荐流" or name == "视频号推荐流":
+#                                 sqlCollect.update_feed_vid_2(v_id)
+#                                 Feishu.finish_bot(text,
+#                                                   "https://open.feishu.cn/open-apis/bot/v2/hook/493b3d4c-5fae-4a9d-980b-1dd86636524e",
+#                                                   "【 视频下载失败,跳过该视频 】")
+#                             continue
+#                     else:
+#                         if video_share and video_share != 'None':
+#                             new_video_path = FFmpeg.single_video(new_video_path, video_path_url, zm)
+#                     if not os.path.isfile(new_video_path) or os.path.getsize(new_video_path) == 0:
+#                         AliyunLogger.logging(channel_id, name, url, v_id, "视频改造失败", "3001")
+#                         text = (
+#                             f"**通知类型**: 视频改造失败\n"
+#                             f"**负责人**: {name}\n"
+#                             f"**渠道**: {channel_id}\n"
+#                             f"**视频主页ID**: {url}\n"
+#                             f"**视频Video_id**: {v_id}\n"
+#                         )
+#                         Feishu.finish_bot(text,
+#                                           "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703",
+#                                           "【 机器改造通知 】")
+#                         continue
+#                     # 上传视频和封面,并更新数据库
+#                     code = cls.upload_video_and_thumbnail(new_video_path, cover, v_id, new_title, task_mark, name, piaoquan_id,
+#                                                    video_path_url, mark, channel_id, url, old_title, title, rule, video)
+#                     # 更新已使用的视频号状态
+#                     pq_url = f'https://admin.piaoquantv.com/cms/post-detail/{code}/detail'  # 站内视频链接
+#                     if name == "单点视频":
+#                         sphdd_status = sqlCollect.update_shp_dd_vid(v_id)
+#                         if sphdd_status == 1:
+#                             Common.logger(mark).info(f"{name}的{task_mark}下的ID{url} 视频修改已使用,状态已修改")
+#                             from_user_name = video['from_user_name']  # 来源用户
+#                             from_group_name = video['from_group_name']  # 来源群组
+#                             source = video['source']  # 渠道
+#                             channel_id = source
+#                             text = (
+#                                 f"**站内视频链接**: {pq_url}\n"
+#                                 f"**渠道**: {source}\n"
+#                                 f"**来源用户**: {from_user_name}\n"
+#                                 f"**来源群组**: {from_group_name}\n"
+#                                 f"**原视频链接**: {video['video_url']}\n"
+#                                 f"**原视频封面**: {video['cover']}\n"
+#                                 f"**原视频标题**: {video['old_title']}\n"
+#                             )
+#                             Feishu.finish_bot(text, "https://open.feishu.cn/open-apis/bot/v2/hook/d2f751a8-5b0a-49ca-a306-1fda142707a9", "【 有一条新的内容改造成功 】")
+#                     if name == "快手推荐流" or name == "视频号推荐流":
+#                         feed_status = sqlCollect.update_feed_vid(v_id)
+#                         if feed_status == 1:
+#                             Common.logger(mark).info(f"{name}的{task_mark}下的ID{url} 视频修改已使用,状态已修改")
+#                     if channel_id == "快手历史" or channel_id == "抖音历史" or channel_id == "视频号历史":
+#                         explain = "历史爆款"
+#                     else:
+#                         explain = "新供给"
+#                     current_time = datetime.now()
+#                     formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
+#                     if name == "品类关键词搜索":
+#                         first_category = task["first_category"]
+#                         keyword_principal = task["keyword_name"]
+#                         tag_first = f"一级品类_{first_category}"
+#                         tag_keyword = f"关键词_{url}"
+#                         if channel_id == "抖音搜索":
+#                             tag_channel = "来源_抖音关键词"
+#                         elif channel_id == "快手搜索":
+#                             tag_channel = "来源_快手关键词"
+#                         elif channel_id == "视频号搜索":
+#                             tag_channel = "来源_视频号关键词"
+#                         tag = f"{tag_first},{tag_keyword},{tag_channel}"
+#                         tag_status = Tag.video_tag(code, tag)
+#                         if tag_status == 0:
+#                             Common.logger(mark).info(f"{name}的{task_mark}下的ID{url}下的票圈视频{code},写入标签成功")
+#                         secondary_category = task["secondary_category"]
+#                         log_data = f"user:{url},,video_id:{v_id},,video_url:{video_url},,ai_title:{new_title},,voice:{voice},,first_category:{first_category},,secondary_category:{secondary_category},,keyword_principal:{keyword_principal},,tag:{tag}"
+#                         values = [
+#                             [
+#                                 name,
+#                                 task_mark,
+#                                 channel_id,
+#                                 url,
+#                                 str(v_id),
+#                                 piaoquan_id,
+#                                 old_title,
+#                                 title if title in ["原标题", "AI标题"] else "",
+#                                 new_title,
+#                                 str(code),
+#                                 formatted_time,
+#                                 str(rule),
+#                                 explain,
+#                                 voice,
+#                                 first_category,
+#                                 secondary_category,
+#                                 keyword_principal,
+#                                 pq_url
+#                             ]
+#                         ]
+#                     elif name == "抖音品类账号-1" or name == "抖音品类账号" or name == "视频号品类账号" or name == "快手品类账号":
+#                         first_category = task["first_category"]
+#                         tag_first = f"一级品类_{first_category}"
+#                         if channel_id == "抖音" or channel_id == "抖音历史":
+#                             tag_channel = "来源_抖音品类账号"
+#                         elif channel_id == "快手" or channel_id == "快手历史":
+#                             tag_channel = "来源_快手品类账号"
+#                         elif channel_id == "视频号" or channel_id == "视频号历史":
+#                             tag_channel = "来源_视频号品类账号"
+#                         tag = f"{tag_first},{tag_channel}"
+#                         tag_status = Tag.video_tag( code, tag )
+#                         if tag_status == 0:
+#                             Common.logger(mark).info(f"{name}的{task_mark}下的ID{url}下的票圈视频{code},写入标签成功")
+#                         log_data = f"user:{url},,video_id:{v_id},,video_url:{video_url},,ai_title:{new_title},,voice:{voice},,tag:{tag}"
+#                         # log_data = f"user:{url},,video_id:{v_id},,video_url:{video_url},,ai_title:{new_title},,voice:{voice},,first_category:{first_category},,tag:{tag}"
+#                         values = [
+#                             [
+#                                 name,
+#                                 task_mark,
+#                                 channel_id,
+#                                 url,
+#                                 str( v_id ),
+#                                 piaoquan_id,
+#                                 old_title,
+#                                 title if title in ["原标题", "AI标题"] else "",
+#                                 new_title,
+#                                 str( code ),
+#                                 formatted_time,
+#                                 str( rule ),
+#                                 explain,
+#                                 voice,
+#                                 first_category,
+#                                 pq_url
+#                             ]
+#                         ]
+#
+#                     else:
+#                         log_data = f"user:{url},,video_id:{v_id},,video_url:{video_url},,ai_title:{new_title},,voice:{voice}"
+#                         values = [
+#                             [
+#                                 name,
+#                                 task_mark,
+#                                 channel_id,
+#                                 url,
+#                                 str(v_id),
+#                                 piaoquan_id,
+#                                 old_title,
+#                                 title if title in ["原标题", "AI标题"] else "",
+#                                 new_title,
+#                                 str(code),
+#                                 formatted_time,
+#                                 str(rule),
+#                                 explain,
+#                                 voice
+#                             ]
+#                         ]
+#                     AliyunLogger.logging(channel_id, name, url, v_id, "视频改造成功", "1000", log_data, str(code))
+#                     text = (
+#                         f"**通知类型**: 视频改造成功\n"
+#                         f"**站内视频链接**: {pq_url}\n"
+#                         f"**负责人**: {name}\n"
+#                         f"**渠道**: {channel_id}\n"
+#                         f"**视频主页ID**: {url}\n"
+#                         f"**视频Video_id**: {v_id}\n"
+#                         f"**使用音频音色**: {voice}\n"
+#                     )
+#                     Feishu.finish_bot(text,
+#                                       "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703",
+#                                       "【 机器改造通知 】")
+#                     if tags:
+#                         Tag.video_tag(code, tags)
+#                     if values:
+#                         if name == "王雪珂":
+#                             sheet = "vfhHwj"
+#                         elif name == "鲁涛":
+#                             sheet = "FhewlS"
+#                         elif name == "范军":
+#                             sheet = "B6dCfS"
+#                         elif name == "余海涛":
+#                             sheet = "mfBrNT"
+#                         elif name == "罗情":
+#                             sheet = "2J3PwN"
+#                         elif name == "王玉婷":
+#                             sheet = "bBHFwC"
+#                         elif name == "刘诗雨":
+#                             sheet = "fBdxIQ"
+#                         elif name == "信欣":
+#                             sheet = "lPe1eT"
+#                         elif name == "快手创作者版品类推荐流":
+#                             sheet = "k7l7nQ"
+#                         elif name == "抖音品类账号":
+#                             sheet = "ZixHmf"
+#                         elif name == "抖音品类账号-1":
+#                             sheet = "61kvW7"
+#                         elif name == "视频号品类账号":
+#                             sheet = "b0uLWw"
+#                         elif name == "单点视频":
+#                             sheet = "ptgCXW"
+#                         elif name == "快手品类账号":
+#                             sheet = "ibjoMx"
+#                         elif name == "品类关键词搜索":
+#                             sheet = "rBAJT8"
+#                         elif name == "快手推荐流":
+#                             sheet = "9Ii8lw"
+#                         elif name == "视频号推荐流":
+#                             sheet = "hMBv7T"
+#                         elif name == "快手小程序":
+#                             sheet = "GeDT6Q"
+#                         Feishu.insert_columns("ILb4sa0LahddRktnRipcu2vQnLb", sheet, "ROWS", 1, 2)
+#                         time.sleep(0.5)
+#                         Feishu.update_values("ILb4sa0LahddRktnRipcu2vQnLb", sheet, "A2:Z2", values)
+#             except Exception as e:
+#                 AliyunLogger.logging(channel_id, name, url, video["video_id"], f"改造失败{e}", "3001", log_data)
+#                 Common.logger(mark).error(f"{name}的{task_mark}任务处理失败:{e}")
+#         except Exception as e:
+#             AliyunLogger.logging(channel_id, name, url, video["video_id"], f"改造失败{e}", "3001", log_data)
+#             Common.logger(mark).error(f"{name}的{task_mark}任务处理失败:{e}")
+#
+#
+#     @classmethod
+#     def get_data_list(cls, channel_id, task_mark, url, number, mark, feishu_id, cookie_sheet, name, task):
+#         """
+#         根据渠道ID获取数据列表
+#         """
+#         if channel_id == "抖音":
+#             return DY.get_dy_url(task_mark, url, number, mark, feishu_id, cookie_sheet, channel_id, name)
+#         elif channel_id == "票圈":
+#             return PQ.get_pq_url(task_mark, url, number, mark, channel_id, name)
+#         elif channel_id == "视频号":
+#             return SPH.get_sph_url(task_mark, url, number, mark, channel_id, name)
+#         elif channel_id == "快手":
+#             return KS.get_ks_url(task_mark, url, number, mark, feishu_id, cookie_sheet, channel_id, name)
+#         elif channel_id == "快手创作者版":
+#             return KsFeedVideo.get_data(channel_id, name)
+#         elif channel_id == "单点视频":
+#             return SPHDD.get_sphdd_data(url, channel_id, name)
+#         elif channel_id == "抖音历史":
+#             return DYLS.get_dy_zr_list(task_mark, url, number, mark, channel_id, name)
+#         elif channel_id == "快手历史":
+#             return KSLS.get_ksls_list(task_mark, url, number, mark, channel_id, name)
+#         elif channel_id == "视频号历史":
+#             return SPHLS.get_sphls_data(task_mark, url, number, mark, channel_id, name)
+#         elif channel_id == '抖音搜索':
+#             return DyKeyword.get_key_word(url, task_mark, mark, channel_id, name, task)
+#         elif channel_id == '快手搜索':
+#             return KsXCXKeyword.get_key_word(url, task_mark, mark, channel_id, name, task)
+#         elif channel_id == '视频号搜索':
+#             return SphKeyword.get_key_word(url, task_mark, mark, channel_id, name)
+#         elif channel_id == '快手推荐流':
+#             return KSFeed.get_feed_date()
+#         elif channel_id == '视频号推荐流':
+#             return SPHFeed.get_feed_date()
+#         elif channel_id == '快手小程序':
+#             return KSXCX.get_xcx_date()
+#
+#
+#     @classmethod
+#     def generate_title(cls, video, title):
+#         """
+#         生成新标题
+#         """
+#         if video['old_title']:
+#             new_title = video['old_title'].strip().replace("\n", "") \
+#                 .replace("/", "").replace("\\", "").replace("\r", "") \
+#                 .replace(":", "").replace("*", "").replace("?", "") \
+#                 .replace("?", "").replace('"', "").replace("<", "") \
+#                 .replace(">", "").replace("|", "").replace(" ", "") \
+#                 .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
+#                 .replace("'", "").replace("#", "").replace("Merge", "")
+#         else:
+#             return '这个视频,分享给我的老友,祝愿您能幸福安康'
+#         if title == "原标题":
+#             if not new_title:
+#                 new_title = '这个视频,分享给我的老友,祝愿您能幸福安康'
+#         elif title == "AI标题":
+#             if not new_title:
+#                 new_title = '这个视频,分享给我的老友,祝愿您能幸福安康'
+#             else:
+#                 new_title = GPT4oMini.get_ai_mini_title(new_title)
+#         else:
+#             titles = title.split('/') if '/' in title else [title]
+#             new_title = random.choice(titles)
+#         return new_title
+#
+#     @classmethod
+#     def download_and_process_video(cls, channel_id, video_url, video_path_url, v_id, crop_total, gg_duration_total,
+#                                    pw_random_id, new_title, mark, video):
+#         """
+#         下载并处理视频
+#         """
+#         if channel_id == "单点视频":
+#             new_video_path = PQ.dd_sph_download_video(video_url, video_path_url, v_id, video, channel_id)
+#         elif channel_id == "视频号":
+#             new_video_path = PQ.sph_download_video(video_url, video_path_url, v_id, video)
+#             if new_video_path == None:
+#                 return None
+#             Common.logger(mark).info(f"{channel_id}视频下载成功: {new_video_path}")
+#         elif channel_id == "票圈" or channel_id == "快手创作者版" or channel_id == '视频号搜索' or channel_id == "快手推荐流":
+#             new_video_path = PQ.download_video(video_url, video_path_url, v_id)
+#             if new_video_path == None:
+#                 return None
+#             Common.logger(mark).info(f"{channel_id}视频下载成功: {new_video_path}")
+#         elif channel_id == "抖音" or channel_id == "抖音历史" or channel_id == "抖音搜索":
+#             new_video_path = PQ.download_dy_video(video_url, video_path_url, v_id)
+#             if new_video_path == None:
+#                 return None
+#             Common.logger(mark).info(f"{channel_id}视频下载成功: {new_video_path}")
+#         elif channel_id == "视频号历史":
+#             new_video_path = Oss.download_sph_ls(video_url, video_path_url, v_id)
+#         else:
+#             Common.logger(mark).info(f"视频准备下载")
+#             new_video_path = Oss.download_video_oss(video_url, video_path_url, v_id)
+#         if not os.path.isfile(new_video_path)  or os.path.getsize(new_video_path) == 0:
+#             return None
+#         Common.logger(mark).info(f"视频下载成功: {new_video_path}")
+#         if crop_total and crop_total != 'None':  # 判断是否需要裁剪
+#             new_video_path = FFmpeg.video_crop(new_video_path, video_path_url, pw_random_id)
+#         if gg_duration_total and gg_duration_total != 'None':  # 判断是否需要指定视频时长
+#             new_video_path = FFmpeg.video_ggduration(new_video_path, video_path_url, pw_random_id,
+#                                                      gg_duration_total)
+#         width, height = FFmpeg.get_w_h_size(new_video_path)
+#         if width < height:  # 判断是否需要修改为竖屏
+#             new_video_path = FFmpeg.update_video_h_w(new_video_path, video_path_url, pw_random_id)
+#             new_title_re = re.sub(r'[^\w\s\u4e00-\u9fff,。!?]', '', new_title)
+#             if len(new_title_re) > 12:
+#                 new_title_re = '\n'.join(
+#                     [new_title_re[i:i + 12] for i in range(0, len(new_title_re), 12)])
+#             new_video_path = FFmpeg.add_video_zm(new_video_path, video_path_url, pw_random_id, new_title_re)
+#         return new_video_path
+#
+#     @classmethod
+#     def handle_video_ending(cls, new_video_path, video_ending, old_title, pw_random_id, video_path_url, mark, task_mark, url, name, video_share, zm, voice):
+#         """
+#         处理视频片尾
+#         """
+#         if video_ending == "AI片尾引导":
+#             pw_srt_text = GPT4oMini.get_ai_mini_pw(old_title)
+#             if pw_srt_text:
+#
+#                 pw_url = TTS.get_pw_zm(pw_srt_text, voice)
+#                 if pw_url:
+#                     pw_mp3_path = TTS.download_mp3(pw_url, video_path_url, pw_random_id)
+#                     # oss_mp3_key = Oss.mp3_upload_oss(pw_mp3_path, pw_random_id)
+#                     # oss_mp3_key = oss_mp3_key.get("oss_object_key")
+#                     # new_pw_path = f"http://art-crawler.oss-cn-hangzhou.aliyuncs.com/{oss_mp3_key}"
+#                     # print(f"mp3地址:{new_pw_path}")
+#                     # pw_url_sec = FFmpeg.get_video_duration(pw_mp3_path)
+#                     pw_srt = TTS.getSrt(pw_url)
+#                     Common.logger(mark).info(f"{name}的{task_mark}下的视频{url},获取AI片尾srt成功")
+#                 else:
+#                     Common.logger(mark).info(f"{name}的{task_mark}下的视频{url},获取AI片尾失败")
+#                     return None
+#             else:
+#                 Common.logger(mark).info(f"{name}的{task_mark}下的视频{url},获取AI片尾失败")
+#
+#                 return None
+#         else:
+#             if ',' in video_ending:
+#                 video_ending_list = video_ending.split(',')
+#             else:
+#                 video_ending_list = [video_ending]
+#             ending = random.choice(video_ending_list)
+#             pw_list = Material.get_pwsrt_data("summary", "DgX7vC", ending)  # 获取srt
+#             if pw_list:
+#                 pw_id = pw_list["pw_id"]
+#                 pw_srt = pw_list["pw_srt"]
+#                 pw_url = PQ.get_pw_url(pw_id)
+#                 pw_mp3_path = FFmpeg.get_video_mp3(pw_url, video_path_url, pw_random_id)
+#             else:
+#                 Feishu.bot(mark, '机器自动改造消息通知', f'{task_mark}任务下片尾标示错误,请关注!!!!', name)
+#         for attempt in range(3):
+#             jpg_path = FFmpeg.video_png(new_video_path, video_path_url, pw_random_id)  # 生成视频最后一帧jpg
+#             if os.path.isfile(jpg_path):
+#                 Common.logger(mark).info(f"{name}的{task_mark}下的视频{url},生成视频最后一帧成功")
+#                 break
+#             time.sleep(1)
+#         for attempt in range(3):
+#             Common.logger(mark).info(f"{name}的{task_mark}下的视频{url},获取mp3成功")
+#             pw_path = FFmpeg.pw_video(jpg_path, video_path_url, pw_mp3_path, pw_srt, pw_random_id,
+#                                       pw_mp3_path)  # 生成片尾视频
+#             if os.path.isfile(pw_path):
+#                 Common.logger(mark).info(f"{task_mark}下的视频{url},生成片尾视频成功")
+#                 break
+#             time.sleep(1)
+#         pw_video_list = [new_video_path, pw_path]
+#         Common.logger(mark).info(f"{task_mark}下的视频{url},视频与片尾开始拼接")
+#         video_path = FFmpeg.concatenate_videos(pw_video_list, video_path_url)  # 视频与片尾拼接到一起
+#         if os.path.exists(video_path) or os.path.getsize(video_path) != 0:
+#             Common.logger(mark).info(f"{name}的{task_mark}下的视频{url},视频与片尾拼接成功")
+#             time.sleep(1)
+#             if video_share and video_share != 'None':
+#                 new_video_path = FFmpeg.single_video(video_path, video_path_url, zm)
+#             else:
+#                 new_video_path = video_path
+#             return new_video_path
+#         else:
+#             return new_video_path
+#
+#
+#
+#
+#     @classmethod
+#     def upload_video_and_thumbnail(cls, new_video_path: str, cover: str, v_id, new_title: str, task_mark: str, name: str, piaoquan_id,
+#                                    video_path_url: str, mark: str, channel_id: str, url: str, old_title: str, title, rule: str, video):
+#         """
+#         上传视频和封面到OSS,并更新数据库
+#         """
+#         try:
+#             oss_id = cls.random_id()
+#             Common.logger(mark).info(f"{name}的{task_mark},开始发送oss")
+#             oss_object_key = Oss.stitching_sync_upload_oss(new_video_path, oss_id)  # 视频发送OSS
+#             Common.logger(mark).info(f"{name}的{task_mark},发送oss成功{oss_object_key}")
+#             status = oss_object_key.get("status")
+#             if status == 200:
+#                 oss_object_key = oss_object_key.get("oss_object_key")
+#                 time.sleep(1)
+#                 jpg_path = None
+#                 if channel_id == "快手历史" or channel_id == "快手" or channel_id == '快手搜索' or channel_id == '视频号':
+#                     jpg = None
+#                 elif channel_id == "视频号历史":
+#                     jpg_path = Oss.download_sph_ls( cover, video_path_url, v_id )
+#                 elif channel_id == '单点视频':
+#                     if video['source'] != "快手":
+#                         jpg_path = PQ.download_video_jpg( cover, video_path_url, v_id )  # 下载视频封面
+#                 if jpg_path and os.path.isfile( jpg_path ):
+#                     oss_jpg_key = Oss.stitching_fm_upload_oss( jpg_path, oss_id )  # 封面上传OSS
+#                     jpg = oss_jpg_key.get( "oss_object_key" )
+#                 else:
+#                     jpg = None
+#                 code = PQ.insert_piaoquantv(oss_object_key, new_title, jpg, piaoquan_id)
+#                 Common.logger(mark).info(f"{name}的{task_mark}下的视频ID{v_id}发送成功")
+#                 sqlCollect.insert_task(task_mark, v_id, mark, channel_id)  # 插入数据库
+#                 current_time = datetime.now()
+#                 formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
+#                 if name == "单点视频":
+#                     url = str(rule)
+#                 sqlCollect.insert_machine_making_data(name, task_mark, channel_id, url, v_id, piaoquan_id, new_title, code,
+#                                                       formatted_time, old_title, oss_object_key)
+#
+#                 return code
+#         except Exception as e:
+#             Common.logger(mark).error(f"{name}的{task_mark}上传视频和封面到OSS,并更新数据库失败:{e}\n")
+#             AliyunLogger.logging(channel_id, name, url, video["video_id"], "改造失败-上传视频和封面到OSS", "3001")
+#             return
+#
+#
+#     @classmethod
+#     def main(cls, data):
+#         """
+#         主函数,初始化任务并使用线程池处理任务。
+#         """
+#         mark = data["mark"]
+#         name = data["name"]
+#         feishu_id = data["feishu_id"]
+#         feishu_sheet = data["feishu_sheet"]
+#         cookie_sheet = data["cookie_sheet"]
+#         data = get_data(mark, feishu_id, feishu_sheet)
+#         if not data:
+#             Common.logger("redis").error(f"{mark}任务开始新的一轮\n")
+#             return
+#         task = json.loads(data)
+#         try:
+#             limit_number = task["limit_number"]
+#             if limit_number:
+#                 task_mark = task["task_mark"]
+#                 makr_count = sqlCollect.get_mark_count(task_mark)
+#                 if int(limit_number) <= int(makr_count[0][0]):
+#                     AliyunLogger.logging((task["channel_id"]), name, task["channel_url"], '', f"{task_mark}标识任务每日指定条数已足够,指定条数{limit_number},实际生成条数{int(makr_count[0][0])}",
+#                                          "1111")
+#                     return
+#             if mark == 'dy-pl-gjc' and task['channel_id'] == '抖音搜索':
+#                 mark_count = 'dyss-count'
+#                 count = get_first_value_with_prefix(mark_count)
+#                 increment_key(mark_count)
+#                 if int(count) >= 300:
+#                     return "抖音搜索上限"
+#             if mark == 'ks-pl-gjc':
+#                 mark_count = 'ksss-count'
+#                 count = get_first_value_with_prefix(mark_count)
+#                 increment_key(mark_count)
+#                 if int(count) >= 300:
+#                     return "快手搜索上限"
+#             if mark == 'sph-pl-gjc':
+#                 mark_count = 'ss-sph-count'
+#                 count = get_first_value_with_prefix(mark_count)
+#                 increment_key(mark_count)
+#                 if int(count) >= 300:
+#                     time.sleep(10)
+#                     return "视频号搜索上限"
+#             if mark == 'sph-plzh'and task['channel_id'] == '视频号':
+#                 mark_count = 'sph-count'
+#                 count = get_first_value_with_prefix(mark_count)
+#                 increment_key(mark_count)
+#                 if int(count) >= 400:
+#                     time.sleep(10)
+#                     return "视频号获取用户主页视频上限"
+#             VideoProcessor.process_task(task, mark, name, feishu_id, cookie_sheet)
+#             return mark
+#         except Exception as e:
+#             AliyunLogger.logging((task["channel_id"]), name, task["channel_url"],'', f"用户抓取异常:{e}", "3001")
+#             return mark
+#
+#
+# # if __name__ == "__main__":
+# #     main()

+ 1 - 2
xssy_channel/dy_rdb_nrxs.py

@@ -4,7 +4,7 @@ import time
 from datetime import datetime
 
 import requests
-from common import Feishu, Material, Common
+from common import Feishu, Material
 from common.sql_help import sqlCollect
 from xssy_channel.sph_jr_nrxs import SphNrxs
 
@@ -95,7 +95,6 @@ class DyRdbNrxs:
                 return None
         except Exception as e:
             Feishu.bot("xinxin", '热点宝提醒', f'热点宝平台 cookie 失效了,请及时更换', 'xinxin')
-            Common.logger("dy_rdb_nrxs").error(f"用户名:{uid}视频号加热bot异常:{e}\n")
             return
 
 

+ 1 - 8
xssy_channel/sph_jr_nrxs.py

@@ -6,7 +6,7 @@ import requests
 import json
 import re
 
-from common import Material, Feishu, Common
+from common import Material, Feishu
 from common.sql_help import sqlCollect
 
 class SphNrxs:
@@ -146,7 +146,6 @@ class SphNrxs:
                     if appid:
                         return appid
                 except Exception as e:
-                    Common.logger("sph_nrxs").info(f"{user}获取腾讯互选平台appid异常,异常信息{e}")
                     continue
 
             else:
@@ -185,7 +184,6 @@ class SphNrxs:
             return
         try:
             for obj in res_json["UpMasterHomePage"]:
-                Common.logger("sph_nrxs").info(f"{user}扫描到一条数据")
                 objectId = obj['objectId']
                 object_id = sqlCollect.sph_data_info_v_id(objectId, "视频号")
                 if object_id:
@@ -215,14 +213,10 @@ class SphNrxs:
                 # fav_count = obj['fav_count']  # 大拇指点赞数
                 video_percent = '%.2f' % (share_cnt / like_cnt)
                 special = float(0.25)
-                Common.logger("sph_nrxs").info(
-                    f"扫描:原用户主页名:{uid},溯源用户主页id:{url},视频id{objectId} ,分享:{share_cnt},点赞:{like_cnt} ,时长:{duration},视频链接:{video_url}")
                 if share_cnt >= 300 and float(video_percent) >= special and int(duration) >= 30:
-                    Common.logger("sph_nrxs").info(f"{nick_name}符合规则")
                     return nick_name
             return None
         except Exception as e:
-            Common.logger("sph_nrxs").info(f"{user}异常,异常信息{e}")
             return None
 
 
@@ -315,7 +309,6 @@ class SphNrxs:
                 return None
         except Exception as e:
             Feishu.bot("xinxin", '视频号加热提醒', f'视频号加热平台 cookie 失效了,请及时更换', 'xinxin')
-            Common.logger("sph_nrxs").error(f"用户名:{uid}视频号加热bot异常:{e}\n")
             return None
 
     """获取需溯源账号"""