
Debug Shipinhao (WeChat Channels) fetching

zhangyong committed 9 months ago
parent commit 39b1042461
5 files changed, 100 insertions(+), 5 deletions(-)
  1. data_channel/douyin.py (+1 −1)
  2. data_channel/kuaishou.py (+1 −1)
  3. data_channel/shipinhao.py (+1 −1)
  4. job_xx.py (+90 −0)
  5. video_rewriting/video_prep.py (+7 −2)

+ 1 - 1
data_channel/douyin.py

@@ -79,7 +79,7 @@ class DY:
                         duration = dataHelp.video_duration(video_url)
                         if int(duration) >= 45:
                             cover_url = data[i].get('video').get('cover').get('url_list')[0]  # 视频封面
-                            all_data = {"video_id": video_id, "cover": cover_url, "video_url": video_url}
+                            all_data = {"video_id": video_id, "cover": cover_url, "video_url": video_url, "rule": video_percent}
                             list.append(all_data)
                             if len(list) == int(number):
                                 Common.logger("log").info(f"获取抖音视频总数:{len(list)}\n")

+ 1 - 1
data_channel/kuaishou.py

@@ -80,7 +80,7 @@ class KS:
                         continue
                     duration = dataHelp.video_duration(video_url)
                     if int(duration) >= 45:
-                        all_data = {"video_id": video_id, "cover": cover_url, "video_url": video_url}
+                        all_data = {"video_id": video_id, "cover": cover_url, "video_url": video_url, "rule": realLikeCount}
                         list.append(all_data)
                         if len(list) == int(number):
                             Common.logger("log").info(f"获取快手视频总数:{len(list)}\n")

+ 1 - 1
data_channel/shipinhao.py

@@ -107,7 +107,7 @@ class SPH:
                             duration = dataHelp.video_duration(video_url)
                             if int(duration) >= 45:
                                 cover = video_obj.get('thumb_url')
-                                all_data = {"video_id": objectId, "cover": cover, "video_url": video_url}
+                                all_data = {"video_id": objectId, "cover": cover, "video_url": video_url, "rule": video_percent}
                                 list.append(all_data)
                                 if len(list) == int(number):
                                     Common.logger("log").info(f"获取视频号视频总数:{len(list)}\n")
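
The three hunks above make the same change: the metric each fetcher filters on (video_percent for Douyin and Shipinhao, realLikeCount for Kuaishou) now travels with the payload under a shared "rule" key so it can be logged and exported downstream. A minimal sketch of the resulting entry, with hypothetical placeholder values:

    # Illustrative only: the dict each channel appends to its result list.
    video_percent = 0.85  # hypothetical filter metric; Kuaishou passes realLikeCount here
    all_data = {
        "video_id": "1234567890",                      # placeholder platform id
        "cover": "https://example.com/cover.jpg",      # placeholder cover URL
        "video_url": "https://example.com/video.mp4",  # placeholder video URL
        "rule": video_percent,                         # field added by this commit
    }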

+ 90 - 0
job_xx.py

@@ -0,0 +1,90 @@
+import os
+import concurrent.futures
+import re
+
+import schedule
+import time
+import threading
+from common import Material, Common, Feishu
+# Parameters that throttle read/write speed
+from video_rewriting.video_prep import getVideo
+
+MAX_BPS = 120 * 1024 * 1024  # 120MB/s
+MAX_WORKERS = os.cpu_count() * 2  # maximum number of worker threads in the pool
+READ_WRITE_CHUNK_SIZE = 1024 * 1024  # chunk size of each read/write (1 MB)
+SLEEP_INTERVAL = READ_WRITE_CHUNK_SIZE / MAX_BPS  # delay applied to each read/write
+# Global lock used to synchronize read/write operations
+lock = threading.Lock()
+# Usernames that have already been returned today
+today = []
+
+
+def video_task_start(data):
+    # global today
+    # user_data_mark = data["mark"]
+    # # 开始准备执行生成视频脚本
+    # if user_data_mark is not None and user_data_mark in today:
+    #     Common.logger("log").info(f"视频脚本参数中的用户名 {user_data_mark} 今天已经返回过,不再启动线程。今天已经返回的用户名:{user_data_mark}")
+    #     print(f"视频脚本参数中的用户名 {user_data_mark} 今天已经返回过,不再启动线程。")
+    #     return
+    mark = getVideo.video_task(data)
+    print(f"返回用户名{mark}")
+    # if mark:
+    #     today.append(mark)
+    #     Common.logger("log").info(f"返回用户名{mark}")
+
+# data = Material.feishu_list()
+# video_task_start(data[0])
+
+
+def controlled_io_operation(data):
+    with lock:
+        start_time = time.time()
+        time.sleep(SLEEP_INTERVAL)
+        end_time = time.time()
+        elapsed_time = end_time - start_time
+        if elapsed_time < SLEEP_INTERVAL:
+            time.sleep(SLEEP_INTERVAL - elapsed_time)
+    video_task_start(data)
+
+
+
+
+def video_start():
+    print("开始执行生成视频脚本.")
+
+    data = Material.feishu_list()
+    data = data[8]
+    with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
+        futures = {executor.submit(controlled_io_operation, data)}
+        for future in concurrent.futures.as_completed(futures):
+            try:
+                future.result()
+                print("处理结果: 成功")
+            except concurrent.futures.TimeoutError:
+                print("任务超时,已取消.")
+            except Exception as e:
+                print("处理任务时出现异常:", e)
+    print("执行生成视频脚本结束.")
+
+def usernames_today():
+    today.clear()
+    print("today 已清空")
+
+
+video_start()
+
+
+# Scheduled task setup
+schedule.every().day.at("01:00").do(usernames_today)
+
+
+schedule.every(6).hours.do(video_start)
+
+
+
+while True:
+    schedule.run_pending()
+    time.sleep(1)
+
+
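
For reference, the throttling constants at the top of job_xx.py work out to roughly an 8 ms pause per 1 MB chunk; a quick check of the arithmetic using the values from the file:

    # SLEEP_INTERVAL is derived directly from the chunk size and the bandwidth cap.
    MAX_BPS = 120 * 1024 * 1024          # 120 MB/s
    READ_WRITE_CHUNK_SIZE = 1024 * 1024  # 1 MB per chunk
    SLEEP_INTERVAL = READ_WRITE_CHUNK_SIZE / MAX_BPS
    print(SLEEP_INTERVAL)                # 1/120 s ≈ 0.0083 s per chunk

Note that controlled_io_operation sleeps while holding the module-level lock, so the delay serializes task start-up across threads; the submitted video tasks themselves only run concurrently once the lock is released.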

+ 7 - 2
video_rewriting/video_prep.py

@@ -122,16 +122,19 @@ class getVideo:
                         v_id = video["video_id"]
                         cover = video["cover"]
                         video_url = video["video_url"]
+                        rule = video['rule']
                         time.sleep(1)
                         pw_random_id = cls.random_id()
                         if channel_id == "票圈":
                             new_video_path = PQ.download_video(video_url, video_path_url, v_id)  # 下载视频地址
                         else:
                             new_video_path = Oss.download_video_oss(video_url, video_path_url, v_id)  # 下载视频地址
+                            Common.logger("log").info(f"{task_mark}下的视频{url},{new_video_path}视频下载成功")
                         if not os.path.isfile(new_video_path):
                             Common.logger("log").info(f"{task_mark}下的视频{url},{new_video_path}视频下载失败")
                             cls.remove_files(video_path_url)
                             continue
+                        Common.logger("log").info(f"{task_mark}下的视频{url},{new_video_path}视频下载成功")
                         if crop_total and crop_total != 'None':  # 判断是否需要裁剪
                             new_video_path = FFmpeg.video_crop(new_video_path, video_path_url, pw_random_id)
                         if gg_duration_total and gg_duration_total != 'None':  # 判断是否需要指定视频时长
@@ -195,7 +198,7 @@ class getVideo:
                                 sqlCollect.insert_task(task_mark, v_id, mark, channel_id)  # 插入数据库
                                 current_time = datetime.now()
                                 formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
-                                values = [[name, task_mark, channel_id, url, v_id, piaoquan_id, new_title, str(code), formatted_time]]
+                                values = [[name, task_mark, channel_id, url, v_id, piaoquan_id, new_title, str(code), formatted_time, str(rule)]]
                                 # 使用锁保护表格插入操作
                                 with lock:
                                     if name == "王雪珂":
@@ -214,6 +217,8 @@ class getVideo:
                                         sheet = "bBHFwC"
                                     elif name == "刘诗雨":
                                         sheet = "fBdxIQ"
+                                    elif name == "信欣":
+                                        sheet = "lPe1eT"
                                     Feishu.insert_columns("ILb4sa0LahddRktnRipcu2vQnLb", sheet, "ROWS", 1, 2)
                                     time.sleep(0.5)
                                     Feishu.update_values("ILb4sa0LahddRktnRipcu2vQnLb", sheet, "A2:Z2", values)
@@ -226,7 +231,7 @@ class getVideo:
                     cls.remove_files(video_path_url)
                     Common.logger("warning").warning(f"{name}的{task_mark}任务处理失败:{e}\n")
 
-        batch_size = 2
+        batch_size = 1
         with concurrent.futures.ThreadPoolExecutor(max_workers=batch_size) as executor:
             index = 0
             while index < len(task_data):
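
A small maintainability note on the earlier hunk that extends the name-to-sheet chain (adding 信欣 → lPe1eT): the mapping could equally be kept as a dict. A sketch using only the two pairs visible in this diff; the rest of the existing elif chain is elided here and would carry over unchanged:

    # Sketch only: dict form of the visible name -> Feishu sheet-id pairs.
    SHEET_BY_NAME = {
        "刘诗雨": "fBdxIQ",
        "信欣": "lPe1eT",  # entry added by this commit
        # ...remaining authors from the existing elif chain
    }
    sheet = SHEET_BY_NAME.get(name)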