zhangyong 2 months ago
parent
commit
52d2779730

+ 19 - 0
Dockerfile

@@ -0,0 +1,19 @@
+FROM python:3.11-slim
+
+WORKDIR /app
+
+COPY . .
+
+ENV TZ=Asia/Shanghai
+
+RUN apt-get update && apt-get install -y --no-install-recommends curl jq wget xz-utils nscd libgl-dev libglib2.0-dev fonts-wqy-zenhei \
+    && apt-get clean && rm -rf /var/lib/apt/lists/* \
+    && pip install -r requirements.txt --no-cache-dir \
+    && wget -O /tmp/ffmpeg-static.tar.xz https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-amd64-static.tar.xz \
+    && tar -xJf /tmp/ffmpeg-static.tar.xz -C /usr/local/ \
+    && rm /tmp/ffmpeg-static.tar.xz \
+    && ln -s /usr/local/ffmpeg-*-amd64-static/ffprobe /usr/local/bin/ffprobe \
+    && ln -s /usr/local/ffmpeg-*-amd64-static/ffmpeg /usr/local/bin/ffmpeg \
+    && mkdir -p /app/cache
+
+#ENTRYPOINT ["python", "/app/carry_data_redis.py"]
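A quick way to confirm the static ffmpeg build is wired up correctly is a version probe inside the built image; a minimal Python sketch (an editorial illustration, assuming only the symlinks created above):

import subprocess

# Both binaries should resolve via the /usr/local/bin symlinks created in the Dockerfile.
for tool in ("ffmpeg", "ffprobe"):
    result = subprocess.run([tool, "-version"], capture_output=True, text=True, check=True)
    print(result.stdout.splitlines()[0])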

+ 48 - 0
docker-compose.yml

@@ -0,0 +1,48 @@
+services:
+  consumption_studio:
+    build:
+      context: .
+      dockerfile: Dockerfile
+    image: tool_job
+    container_name: tool_worker1
+    restart: unless-stopped
+    environment:
+      - ENV=prod
+    networks:
+      - tool_net
+    entrypoint: "python /app/workers/consumption_work_studio.py"
+  select_studio:
+    image: tool_job
+    restart: unless-stopped
+    environment:
+      - ENV=prod
+    networks:
+      - tool_net
+    entrypoint: "python /app/workers/select_work_studio.py"
+  select:
+    image: tool_job
+    restart: unless-stopped
+    env_file:
+      - product.env
+    volumes:
+      - ./sh:/app/sh
+      # the entry script queries the Docker API for its replica index, so mount the socket
+      - /var/run/docker.sock:/var/run/docker.sock
+    networks:
+      - tool_net
+    deploy:
+      replicas: 9
+    entrypoint: sh /app/sh/select.sh
+  consumption:
+    image: tool_job
+    restart: unless-stopped
+    env_file:
+      - product.env
+    volumes:
+      - ./sh:/app/sh
+      # the entry script queries the Docker API for its replica index, so mount the socket
+      - /var/run/docker.sock:/var/run/docker.sock
+    networks:
+      - tool_net
+    deploy:
+      replicas: 9
+    entrypoint: sh /app/sh/consumption.sh
+networks:
+  tool_net:
+    name: tool_net

+ 11 - 0
product.env

@@ -0,0 +1,11 @@
+ENV=prod
+
+FS_DATA_1=范军,4a768d,task:carry_data_redis_fj
+FS_DATA_2=鲁涛,EZef39,task:carry_data_redis_lt
+FS_DATA_3=余海涛,Frush6,task:carry_data_redis_yht
+FS_DATA_4=罗情,wolznW,task:carry_data_redis_lq
+FS_DATA_5=刘诗雨,5MXdSK,task:carry_data_redis_lsy
+FS_DATA_6=王媛,Nv8E4z,task:carry_data_redis_wy
+FS_DATA_7=周仙琴,2WIcBU,task:carry_data_redis_zxq
+FS_DATA_8=信欣,v0fFCb,task:carry_data_redis_xx
+FS_DATA_9=邓锋,DEpi6V,task:carry_data_redis_df
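Each FS_DATA_N value is a comma-separated triple, and the nine entries line up with the nine replicas declared in docker-compose.yml. Judging from how sh/select.sh exports it, the fields appear to be principal name, Feishu sheet id, and Redis task key (an inference, not confirmed by this diff); a hypothetical parse:

import os

# FS_DATA is exported by the entry scripts from FS_DATA_<replica index>.
# Field meaning is inferred: principal name, Feishu sheet id, Redis task key.
name, fs_sheet, redis_task_key = os.environ["FS_DATA"].split(",")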

+ 12 - 0
requirements.txt

@@ -0,0 +1,12 @@
+aliyun-log-python-sdk==0.9.12
+google-generativeai==0.8.3
+loguru==0.7.2
+mutagen==1.47.0
+odps==3.5.1
+opencv-python==4.10.0.84
+oss2==2.19.1
+redis==5.1.1
+requests==2.32.3
+schedule==1.2.2
+pymysql==1.0.2
+orjson==3.10.13

+ 8 - 0
sh/consumption.sh

@@ -0,0 +1,8 @@
+#!/bin/sh
+
+export CONTAINER_INFO="$(curl -s --unix-socket /var/run/docker.sock http://docker/containers/$HOSTNAME/json)"
+export CONTAINER_INDEX="$(echo "$CONTAINER_INFO" | jq -r '.Name' | awk -F'-' '{print $NF}')"
+echo "export FS_DATA=$(eval echo \$"FS_DATA_${CONTAINER_INDEX}")" >> /root/.bashrc
+. /root/.bashrc
+
+python /app/workers/consumption_work.py

+ 8 - 0
sh/select.sh

@@ -0,0 +1,8 @@
+#!/bin/sh
+
+export CONTAINER_INFO="$(curl -s --unix-socket /var/run/docker.sock http://docker/containers/$HOSTNAME/json)"
+export CONTAINER_INDEX="$(echo "$CONTAINER_INFO" | jq -r '.Name' | awk -F'-' '{print $NF}')"
+echo "export FS_DATA=$(eval echo \$"FS_DATA_${CONTAINER_INDEX}")" >> /root/.bashrc
+. /root/.bashrc
+
+python /app/workers/select_work.py
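The same replica-index lookup can be done from Python over the Docker socket; a rough stdlib-only sketch (an editorial illustration, not part of this commit; HTTP/1.0 keeps the response unchunked, error handling omitted):

import json
import socket

def replica_index() -> str:
    """Ask the Docker API for this container's name; return the trailing replica number."""
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.connect("/var/run/docker.sock")
    hostname = socket.gethostname()  # the container id, same value $HOSTNAME holds in the shell
    s.sendall(f"GET /containers/{hostname}/json HTTP/1.0\r\nHost: docker\r\n\r\n".encode())
    data = b""
    while chunk := s.recv(4096):
        data += chunk
    s.close()
    body = data.split(b"\r\n\r\n", 1)[1]
    name = json.loads(body)["Name"]  # e.g. "/tool-consumption-3"
    return name.rsplit("-", 1)[-1]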

+ 0 - 0
utils/__init__.py


+ 66 - 0
utils/aliyun_log.py

@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+"""
+公共方法,包含:生成log / 删除log
+"""
+import json
+from typing import Optional
+from aliyun.log import PutLogsRequest, LogClient, LogItem
+from loguru import logger
+
+proxies = {"http": None, "https": None}
+
+
+class AliyunLogger:
+
+    # Write a log entry to Aliyun SLS
+    @staticmethod
+    def logging(channel: str,
+                principal: str,
+                channel_user: str,
+                channel_video_id: str,
+                message: str,
+                code: str,
+                data: Optional[str] = None,
+                pq_video_id: Optional[str] = None):
+        """
+        Write a log entry to Aliyun SLS
+        Test store: https://sls.console.aliyun.com/lognext/project/crawler-log-dev/logsearch/crawler-log-dev
+        Prod store: https://sls.console.aliyun.com/lognext/project/crawler-log-prod/logsearch/crawler-log-prod
+        """
+        accessKeyId = "LTAIWYUujJAm7CbH"
+        accessKey = "RfSjdiWwED1sGFlsjXv0DlfTnZTG1P"
+
+        project = "crawler-log-prod"
+        logstore = "rewriting-log"
+        endpoint = "cn-hangzhou.log.aliyuncs.com"
+        try:
+            contents = [
+                ("principal", principal),
+                ("channel", channel),
+                ("channel_user", str(channel_user) if channel_user is not None else ""),
+                ("channel_video_id", str(channel_video_id) if channel_video_id is not None else ""),
+                ("message", str(message) if message is not None else ""),
+                ("code", str(code) if code is not None else ""),
+                ("data", json.dumps(data, ensure_ascii=False) if data else ""),
+                ("pq_video_id", pq_video_id if pq_video_id else "")
+            ]
+            # Create the LogClient instance
+            client = LogClient(endpoint, accessKeyId, accessKey)
+            log_group = []
+            log_item = LogItem()
+            log_item.set_contents(contents)
+            log_group.append(log_item)
+            # Send the log entry
+            request = PutLogsRequest(
+                project=project,
+                logstore=logstore,
+                topic="",
+                source="",
+                logitems=log_group,
+                compress=False,
+            )
+
+            client.put_logs(request)
+        except Exception as e:
+            logger.error(f"[+] 阿里云日志写入失败{e}")
+
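Callers treat it as fire-and-forget; a hypothetical invocation (all field values are illustrative):

AliyunLogger.logging(
    channel="抖音",
    principal="张三",
    channel_user="user_123",
    channel_video_id="7300000000000000000",
    message="视频获取成功",
    code="1000",
    data={"video_url": "https://example.com/v.mp4"},  # serialized via json.dumps inside logging()
    pq_video_id="123456",
)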

+ 134 - 0
utils/aliyun_oss.py

@@ -0,0 +1,134 @@
+# -*- coding: utf-8 -*-
+# @Time: 2023/12/26
+import time
+import uuid
+from datetime import datetime
+from typing import Dict, Any, Optional
+import oss2
+import requests
+OSS_ACCESS_KEY_ID = "LTAIP6x1l3DXfSxm"
+OSS_ACCESS_KEY_SECRET = "KbTaM9ars4OX3PMS6Xm7rtxGr1FLon"
+# OSS_BUCKET_ENDPOINT = "oss-cn-hangzhou-internal.aliyuncs.com"  # internal endpoint
+OSS_BUCKET_ENDPOINT = "oss-cn-hangzhou.aliyuncs.com"  # public endpoint
+OSS_BUCKET_NAME = "art-crawler"
+class Oss():
+
+    @classmethod
+    def channel_upload_oss(cls, src_url: str,
+                              video_id: str,
+                              referer: Optional[str] = None) -> Dict[str, Any]:
+        headers = {
+            'Accept': '*/*',
+            'Accept-Language': 'zh-CN,zh;q=0.9',
+            'Cache-Control': 'no-cache',
+            'Pragma': 'no-cache',
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '
+                          'Chrome/117.0.0.0 Safari/537.36',
+        }
+        if referer:
+            headers.update({'Referer': referer})
+        response = requests.request(url=src_url, method='GET', headers=headers, timeout=30)
+        file_content = response.content
+        content_type = response.headers.get('Content-Type', 'application/octet-stream')
+
+        oss_object_key = f'carry/video/{video_id}'
+        auth = oss2.Auth(OSS_ACCESS_KEY_ID, OSS_ACCESS_KEY_SECRET)
+        bucket = oss2.Bucket(auth, OSS_BUCKET_ENDPOINT, OSS_BUCKET_NAME)
+        response = bucket.put_object(oss_object_key, file_content, headers={'Content-Type': content_type})
+
+        if 'Content-Length' in response.headers:
+            return {
+                'status': response.status,
+                'oss_object_key': oss_object_key}
+        raise AssertionError(f'OSS上传失败,请求ID: \n{response.headers["x-oss-request-id"]}')
+
+    """
+    视频发送到art-pubbucket
+    """
+    @classmethod
+    def stitching_sync_upload_oss(cls, src_url: str,
+                        video_id: str) -> Dict[str, Any]:
+        oss_object_key = f'carry/video/{video_id}'
+        auth = oss2.Auth(OSS_ACCESS_KEY_ID, OSS_ACCESS_KEY_SECRET)
+        bucket = oss2.Bucket(auth, OSS_BUCKET_ENDPOINT, "art-pubbucket")
+        response = bucket.put_object_from_file(oss_object_key, src_url)
+
+        if 'Content-Length' in response.headers:
+            return {
+                'status': response.status,
+                'oss_object_key': oss_object_key,
+                'save_oss_timestamp': int(datetime.now().timestamp() * 1000),
+            }
+        raise AssertionError(f'OSS上传失败,请求ID: \n{response.headers["x-oss-request-id"]}')
+
+    """
+    封面发送到art-pubbucket
+    """
+    @classmethod
+    def stitching_fm_upload_oss(cls, src_url: str,
+                                  video_id: str) -> Dict[str, Any]:
+        oss_object_key = f'jq_oss/jpg/{video_id}.jpg'
+        auth = oss2.Auth(OSS_ACCESS_KEY_ID, OSS_ACCESS_KEY_SECRET)
+        bucket = oss2.Bucket(auth, OSS_BUCKET_ENDPOINT, "art-pubbucket")
+        response = bucket.put_object_from_file(oss_object_key, src_url)
+
+        if 'Content-Length' in response.headers:
+            return {
+                'status': response.status,
+                'oss_object_key': oss_object_key,
+                'save_oss_timestamp': int(datetime.now().timestamp() * 1000),
+            }
+        raise AssertionError(f'OSS上传失败,请求ID: \n{response.headers["x-oss-request-id"]}')
+
+    """
+    封面发送到art-pubbucket
+    """
+    @classmethod
+    def mp3_upload_oss(cls, src_url: str,
+                                video_id: str) -> Dict[str, Any]:
+        oss_object_key = f'jq_audio/audio/{video_id}.mp3'
+        auth = oss2.Auth(OSS_ACCESS_KEY_ID, OSS_ACCESS_KEY_SECRET)
+        bucket = oss2.Bucket(auth, OSS_BUCKET_ENDPOINT, "art-crawler")
+        response = bucket.put_object_from_file(oss_object_key, src_url)
+
+        if 'Content-Length' in response.headers:
+            return {
+                'status': response.status,
+                'oss_object_key': oss_object_key,
+                'save_oss_timestamp': int(datetime.now().timestamp() * 1000),
+            }
+        raise AssertionError(f'OSS上传失败,请求ID: \n{response.headers["x-oss-request-id"]}')
+
+
+    @classmethod
+    def download_video_oss(cls, url, file_path):
+        video_path = file_path + 'video.mp4'
+        oss_object_key = cls.channel_upload_oss(url, str(uuid.uuid4()))
+        time.sleep(2)
+        oss_object = oss_object_key.get("oss_object_key")
+        if oss_object:
+            auth = oss2.Auth(OSS_ACCESS_KEY_ID, OSS_ACCESS_KEY_SECRET)
+            bucket = oss2.Bucket(auth, OSS_BUCKET_ENDPOINT, OSS_BUCKET_NAME)
+            # download the object to a local file
+            bucket.get_object_to_file(oss_object, video_path)
+            time.sleep(5)
+            return video_path
+        else:
+            return video_path
+
+    @classmethod
+    def download_sph_ls(cls, video_url, video_path_url, v_id):
+        if "jpg" in video_url:
+            video_path = video_path_url + str(v_id) + '.jpg'
+        else:
+            video_path = video_path_url + str(v_id) + '.mp4'
+        auth = oss2.Auth(OSS_ACCESS_KEY_ID, OSS_ACCESS_KEY_SECRET)
+        bucket = oss2.Bucket(auth, OSS_BUCKET_ENDPOINT, OSS_BUCKET_NAME)
+        # download the object to a local file
+        bucket.get_object_to_file(video_url, video_path)
+        time.sleep(5)
+        return video_path
+
+
+if __name__ == '__main__':
+    Oss.download_sph_ls('channel/video/sph/14374775553517295881.jpg','asa','1')
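A sketch of the typical upload round-trip (the source URL and uuid key are placeholders):

import uuid

result = Oss.channel_upload_oss(
    src_url="https://example.com/some_video.mp4",
    video_id=str(uuid.uuid4()),
)
print(result["status"], result["oss_object_key"])  # e.g. 200 carry/video/<uuid>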

+ 95 - 0
utils/download_video.py

@@ -0,0 +1,95 @@
+import os
+import time
+import uuid
+import requests
+
+
+class DownLoad:
+    @classmethod
+    def download_video(cls, video_url, video_path_url, tag_transport_channel, video_id):
+        video = video_path_url + 'video.mp4'
+        if tag_transport_channel == "抖音":
+            headers = {
+                'accept': '*/*',
+                'accept-encoding': 'identity;q=1, *;q=0',
+                'accept-language': 'zh-CN,zh;q=0.9',
+                'cache-control': 'no-cache',
+                'connection': 'keep-alive',
+                'pragma': 'no-cache',
+                'range': 'bytes=0-',
+                'referer': f'https://www.douyin.com/video/{video_id}',
+                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
+            }
+            payload = {}
+            for i in range(3):
+                try:
+                    response = requests.request("GET", video_url, headers=headers, data=payload, timeout=120)
+                    if response.status_code == 206:
+                        # open the file in binary write mode and
+                        # write the response body to it
+                        with open(video, "wb") as file:
+                            file.write(response.content)
+                        return video
+                except Exception:
+                    continue  # retry instead of returning a file that was never written
+            return video
+        else:
+            for i in range(3):
+                payload = {}
+                headers = {}
+                try:
+                    response = requests.request("GET", video_url, headers=headers, data=payload, timeout=240)
+                    if response.status_code == 200:
+                        # open the file in binary write mode and
+                        # write the response body to it
+                        with open(video, "wb") as file:
+                            file.write(response.content)
+                        return video
+                except Exception:
+                    continue  # retry on errors instead of aborting all attempts
+            return video
+
+    @classmethod
+    def download_m3u8_video(cls, url, file_path):
+        r = requests.get(url)
+        if r.status_code != 200:
+            return False
+        m3u8_list = r.text.split('\n')
+        m3u8_list = [i for i in m3u8_list if i and i[0] != '#']
+
+        ts_list = []
+        for ts_url in m3u8_list:
+            ts_url = url.rsplit('/', 1)[0] + '/' + ts_url
+            ts_list.append(ts_url)
+        with open(file_path, 'wb') as f:
+            for ts_url in ts_list:
+                r = requests.get(ts_url)
+                if r.status_code == 200:
+                    f.write(r.content)
+        return True
+
+    @classmethod
+    def convert_ts_to_mp4(cls, ts_file_path, mp4_file_path):
+        os.system(f'ffmpeg -i "{ts_file_path}" -c copy "{mp4_file_path}"')
+
+    @classmethod
+    def download_pq_video(cls, video_path_url, video_url_list):
+        video_list = []
+        for video_url in video_url_list:
+            video = f'{video_path_url}{str(uuid.uuid4())}.mp4'
+            try:
+                payload = {}
+                headers = {}
+                response = requests.request("GET", video_url, headers=headers, data=payload, timeout=60)
+                if response.status_code == 200:
+                    # open the file in binary write mode and
+                    # write the response body to it
+                    with open(video, "wb") as file:
+                        file.write(response.content)
+                    video_list.append(video)
+                time.sleep(1)
+            except Exception:
+                continue
+        return video_list
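An illustrative call for the Douyin branch (URL and id are placeholders; a 206 partial-content response is expected because of the Range header):

path = DownLoad.download_video(
    video_url="https://example.com/douyin_video.mp4",  # placeholder
    video_path_url="/app/cache/",
    tag_transport_channel="抖音",
    video_id="7300000000000000000",
)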

+ 149 - 0
utils/dy_ks_get_url.py

@@ -0,0 +1,149 @@
+import html
+import json
+import os
+import random
+import re
+import time
+import uuid
+import requests
+from datetime import datetime
+from urllib.parse import urlparse, parse_qs
+from loguru import logger
+
+from utils.aliyun_log import AliyunLogger
+from utils.feishu_utils import Feishu
+
+
+class Dy_KS:
+
+    @classmethod
+    def get_text_dy_video(cls, url):
+        max_retries = 3
+        retry_count = 0
+        while retry_count < max_retries:
+            try:
+                if "&vid=" in url:
+                    parsed_url = urlparse(url)
+                    params = parse_qs(parsed_url.query)
+                    video_id = params.get('vid', [None])[0]
+                elif "?modal_id=" in url:
+                    parsed_url = urlparse(url)
+                    params = parse_qs(parsed_url.query)
+                    video_id = params.get('modal_id', [None])[0]
+                else:
+                    headers = {
+                        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;'
+                                  'q=0.8,application/signed-exchange;v=b3;q=0.7',
+                        'Accept-Language': 'zh-CN,zh;q=0.9',
+                        'Cache-Control': 'no-cache',
+                        'Pragma': 'no-cache',
+                        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '
+                                      'Chrome/127.0.0.0 Safari/537.36',
+                    }
+                    response = requests.request(url=url, method='GET', headers=headers, allow_redirects=False, timeout=30)
+                    location = response.headers.get('Location', None)
+                    video_id = re.search(r'/video/(\d+)/?', location.split('?')[0] if location else url).group(1)
+                url = "http://8.217.192.46:8889/crawler/dou_yin/detail"
+                if not video_id or not video_id.strip():
+                    return None, None, None
+                payload = json.dumps({
+                    "content_id": str(video_id)
+                })
+                headers = {
+                    'Content-Type': 'application/json'
+                }
+
+                response = requests.request("POST", url, headers=headers, data=payload, timeout= 60)
+                response = response.json()
+                code = response["code"]
+                if code == 0:
+                    data = response["data"]["data"]
+                    video_url = data["video_url_list"][0]["video_url"]
+                    original_title = data["title"]
+                    return video_url, original_title, video_id
+                if code == 22002:
+                    if '抖音内容已被删除或无法访问' in response['msg']:
+                        return "作品不存在", None, None
+            except Exception as e:
+                retry_count += 1
+                logger.error(f"[+] 抖音{url}获取视频链接失败,失败信息{e}")
+                time.sleep(1)
+        return None, None, None
+
+    @classmethod
+    def get_text_ks_video(cls, url):
+        try:
+            headers = {
+                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;'
+                          'q=0.8,application/signed-exchange;v=b3;q=0.7',
+                'Accept-Language': 'zh-CN,zh;q=0.9',
+                'Cache-Control': 'no-cache',
+                'Pragma': 'no-cache',
+                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '
+                              'Chrome/127.0.0.0 Safari/537.36',
+            }
+            response = requests.request(url=url, method='GET', headers=headers, allow_redirects=False, timeout=30)
+            location = response.headers.get('Location', None)
+            video_id = re.search(r'/(f|photo|short-video|long-video)/(.*)/?',
+                                 location.split('?')[0] if location else url).group(2)
+            url = "http://8.217.192.46:8889/crawler/kuai_shou/detail"
+            if not video_id or not video_id.strip():
+                return None, None, None
+            payload = json.dumps({
+                "content_id": str(video_id)
+            })
+            headers = {
+                'Content-Type': 'application/json'
+            }
+            time.sleep(random.uniform(10, 50))
+            response = requests.request("POST", url, headers=headers, data=payload, timeout= 30)
+            response = response.json()
+            code = response["code"]
+            if code == 0:
+                data = response["data"]["data"]
+                content_type = data['content_type']
+                if content_type == 'note':
+                    return "note","note"
+                video_url = data["video_url_list"][0]["video_url"]
+                original_title = data["title"]
+                return video_url, original_title, video_id
+            elif code == 27006:
+                if "作品不存在" in response['msg'] or "内容不存在" in response['msg'] or "私密作品" in response['msg']:
+                    return "作品不存在", None, None
+            time.sleep(3)
+            return None, None, None
+        except Exception as e:
+            logger.error(f"[+] 快手{url}获取视频链接失败,失败信息{e}")
+            return None, None, None
+
+    @classmethod
+    def get_video_url(cls, data, principal):
+        try:
+            url = data['video_url']
+            if "&vid=" in url or "?modal_id=" in url:
+                host = urlparse(url).netloc
+            else:
+                msg = html.unescape(url).split('?')[0]
+                pattern = re.search(r'https?://[^\s<>"\'\u4e00-\u9fff]+', msg)
+                if not pattern:
+                    return "重新处理",None,None,None
+                url = pattern.group()
+                host = urlparse(url).netloc
+            if host in ['v.douyin.com', 'www.douyin.com', 'www.iesdouyin.com']:
+                tag_transport_channel = "抖音"
+                logger.info(f"[+] {url}开始获取抖音视频链接")
+                url, original_title, video_id = cls.get_text_dy_video(url=url)
+            elif host in ['v.kuaishou.com', 'www.kuaishou.com', 'v.m.chenzhongtech.com', 'creater.eozatvmq.com']:
+                tag_transport_channel = "快手"
+                logger.info(f"[+] {url}开始获取快手视频链接")
+                url, original_title, video_id = cls.get_text_ks_video(url=url)
+            else:
+                logger.error(f"[+] {url}该链接不是抖/快 不做处理")
+                AliyunLogger.logging(data["name"], principal, "", data["video_url"],
+                                     "不是抖/快不做处理", "1001", str(data))
+                return "链接不是抖/快",None,None,None
+            if url == "作品不存在":
+                return "作品不存在",None,None,None
+            return url, original_title, video_id, tag_transport_channel
+        except Exception as e:
+            logger.error(f"[+] 获取视频链接异常{e}")
+            return "重新处理", None, None, None

+ 83 - 0
utils/feishu_form.py

@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+import json
+import os
+import sys
+
+from utils.feishu_utils import Feishu
+
+sys.path.append(os.getcwd())
+
+
+class Material():
+    """
+    获取品类对应负责人任务明细
+    """
+    @classmethod
+    def get_carry_data(cls, dt, FS_SHEET,NAME):
+        data = Feishu.get_values_batch( "Wj0TsRKc0hZrHQtmtg4cZZIwn0c", FS_SHEET )
+        processed_list = []
+        try:
+            for row in data[2:]:
+                activate_data = row[4]  # launch date
+                if not activate_data:
+                    continue
+                if int(activate_data) != int(dt):
+                    continue
+                channel_mark = row[0]
+                pq_ids = row[2]
+                pq_label = row[3]  # in-site label
+                video_url = row[5]
+                title_category = row[6]  # title category
+                tag_transport_channel = row[7]  # transport platform
+                tag_transport_scene = row[8]  # transport scene
+                tag_transport_keyword = row[9]  # keywords
+                tag = row[10]  # tags
+                transform_rule = row[11]  # transform rule
+                video_share = row[12]  # mid-video share
+                trailer_share = row[13]  # trailer share
+                trailer_share_audio = row[14]  # trailer-share audio
+                video_clipping = row[15]  # cropping
+                video_clipping_time = row[16]  # crop seconds
+                title_transform = row[17]  # title transform
+                number_dict = {
+                    "channel_mark": channel_mark,
+                    "name":NAME,
+                    "pq_ids": pq_ids,
+                    "pq_label": pq_label,
+                    "activate_data": activate_data,
+                    "video_url": video_url,
+                    "title_category": title_category,
+                    "tag_transport_channel": tag_transport_channel,
+                    "tag_transport_scene": tag_transport_scene,
+                    "tag_transport_keyword": tag_transport_keyword,
+                    "tag": tag,
+                    "transform_rule": transform_rule,
+                    "video_share": video_share,
+                    "trailer_share": trailer_share,
+                    "trailer_share_audio": trailer_share_audio,
+                    "video_clipping": video_clipping,
+                    "video_clipping_time": video_clipping_time,
+                    "title_transform": title_transform,
+                    "dt":dt
+                }
+                processed_list.append(json.dumps(number_dict, ensure_ascii=False))
+            return processed_list
+        except Exception:
+            return processed_list
+
+    @classmethod
+    def get_propmt_data(cls, trailer_share):
+        try:
+            if "AI片尾引导" not in trailer_share:
+                trailer_share = "AI片尾引导"
+            data = Feishu.get_values_batch( "Wj0TsRKc0hZrHQtmtg4cZZIwn0c", "Yxg7EK" )
+            for row in data[1:]:
+                name = row[0]
+                if trailer_share == name:
+                    return row[1]
+            return
+        except Exception:
+            return None
+
+
+
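get_carry_data compares the launch-date column to dt as integers, so dt is presumably a day stamp like 20250122; an illustrative call (sheet id and owner taken from product.env, on the unverified assumption that the middle FS_DATA field is the sheet id):

import json

tasks = Material.get_carry_data(20250122, "4a768d", "范军")
for task_json in tasks:
    task = json.loads(task_json)
    print(task["channel_mark"], task["video_url"])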

+ 379 - 0
utils/feishu_utils.py

@@ -0,0 +1,379 @@
+# -*- coding: utf-8 -*-
+# @Time: 2023/12/26
+"""
+飞书表配置: token 鉴权 / 增删改查 / 机器人报警
+"""
+import json
+import os
+import sys
+import requests
+import urllib3
+from loguru import logger
+
+sys.path.append(os.getcwd())
+
+proxies = {"http": None, "https": None}
+
+
+class Feishu:
+    """
+    编辑飞书云文档
+    """
+    succinct_url = "https://w42nne6hzg.feishu.cn/sheets/"
+    # 飞书路径token
+    @classmethod
+    def spreadsheettoken(cls, crawler):
+        if crawler == "summary":
+            return "KsoMsyP2ghleM9tzBfmcEEXBnXg"
+        else:
+            return crawler
+
+
+
+    # Fetch the Feishu API token
+    @classmethod
+    def get_token(cls):
+        """
+        Fetch the Feishu tenant access token
+        :return:
+        """
+        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
+        post_data = {"app_id": "cli_a13ad2afa438d00b",  # 这里账号密码是发布应用的后台账号及密码
+                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
+
+        try:
+            urllib3.disable_warnings()
+            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
+            tenant_access_token = response.json()["tenant_access_token"]
+            return tenant_access_token
+        except Exception as e:
+            logger.error(f"[+] 飞书获取飞书 api token 异常:{e}")
+
+
+    # Fetch spreadsheet metadata
+    @classmethod
+    def get_metainfo(cls, crawler):
+        """
+        Fetch spreadsheet metadata
+        :return:
+        """
+        try:
+            get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                               + cls.spreadsheettoken(crawler) + "/metainfo"
+
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            params = {
+                "extFields": "protectedRange",  # 额外返回的字段,extFields=protectedRange时返回保护行列信息
+                "user_id_type": "open_id"  # 返回的用户id类型,可选open_id,union_id
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
+            response = json.loads(r.content.decode("utf8"))
+            return response
+        except Exception as e:
+            logger.error(f"[+] 飞书获取表格元数据异常:{e}")
+
+    # Read all data from a worksheet
+    @classmethod
+    def get_values_batch(cls, crawler, sheetid):
+        """
+        Read all data from a worksheet
+        :param crawler: which crawler's document
+        :param sheetid: which sheet
+        :return: all values
+        """
+        try:
+            get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                                   + cls.spreadsheettoken(crawler) + "/values_batch_get"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            params = {
+                "ranges": sheetid,
+                "valueRenderOption": "ToString",
+                "dateTimeRenderOption": "",
+                "user_id_type": "open_id"
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
+            response = json.loads(r.content.decode("utf8"))
+            values = response["data"]["valueRanges"][0]["values"]
+            return values
+        except Exception as e:
+            logger.error(f"[+] 飞书读取工作表所有数据异常:{e}")
+
+    # Insert rows or columns into a worksheet
+    @classmethod
+    def insert_columns(cls, crawler, sheetid, majordimension, startindex, endindex):
+        """
+        Insert rows or columns into a worksheet
+        :param crawler: which crawler's document
+        :param sheetid: which worksheet
+        :param majordimension: ROWS or COLUMNS
+        :param startindex: start position
+        :param endindex: end position
+        """
+        try:
+            insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                                 + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            body = {
+                "dimension": {
+                    "sheetId": sheetid,
+                    "majorDimension": majordimension,  # 默认 ROWS ,可选 ROWS、COLUMNS
+                    "startIndex": startindex,  # 开始的位置
+                    "endIndex": endindex  # 结束的位置
+                },
+                "inheritStyle": "AFTER"  # BEFORE 或 AFTER,不填为不继承 style
+            }
+
+            urllib3.disable_warnings()
+            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False, timeout=10)
+        except Exception as e:
+            logger.error(f"[+] 飞书插入行或列异常:{e}")
+
+    # Write data
+    @classmethod
+    def update_values(cls, crawler, sheetid, ranges, values):
+        """
+        Write data into a worksheet
+        :param crawler: which crawler's document
+        :param sheetid: which worksheet
+        :param ranges: cell range in A1 notation
+        :param values: the data to write, as a list of rows
+        """
+        try:
+            update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                                + cls.spreadsheettoken(crawler) + "/values_batch_update"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            body = {
+                "valueRanges": [
+                    {
+                        "range": sheetid + "!" + ranges,
+                        "values": values
+                    },
+                ],
+            }
+            urllib3.disable_warnings()
+            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False, timeout=10)
+        except Exception as e:
+            logger.error(f"[+] 飞书写入数据异常:{e}")
+
+    # Read a single cell
+    @classmethod
+    def get_range_value(cls, crawler, sheetid, cell):
+        """
+        Read the contents of a cell
+        :param crawler: which crawler's document
+        :param sheetid: which worksheet
+        :param cell: which cell
+        :return: cell contents
+        """
+        try:
+            get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                                  + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            params = {
+                "valueRenderOption": "FormattedValue",
+
+                # dateTimeRenderOption=FormattedString formats dates/times by their cell format (numbers are left as-is) and returns the formatted string
+                "dateTimeRenderOption": "",
+
+                # user id type to return; open_id or union_id
+                "user_id_type": "open_id"
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False, timeout=10)
+            return r.json()["data"]["valueRange"]["values"][0]
+        except Exception as e:
+            logger.error(f"[+] 飞书读取单元格数据异常:{e}")
+    # Fetch all sheet contents as a flat list
+    @classmethod
+    def get_sheet_content(cls, crawler, sheet_id):
+        try:
+            sheet = Feishu.get_values_batch(crawler, sheet_id)
+            content_list = []
+            for x in sheet:
+                for y in x:
+                    if y is not None:
+                        content_list.append(y)
+            return content_list
+        except Exception as e:
+            logger.error(f"[+] 飞书get_sheet_content:{e}")
+
+    # Delete rows or columns; ROWS or COLUMNS
+    @classmethod
+    def dimension_range(cls, crawler, sheetid, major_dimension, startindex, endindex):
+        """
+        Delete rows or columns
+        :param crawler: which crawler's document
+        :param sheetid: which worksheet
+        :param major_dimension: default ROWS; ROWS or COLUMNS
+        :param startindex: start position
+        :param endindex: end position
+        :return:
+        """
+        try:
+            dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                                  + cls.spreadsheettoken(crawler) + "/dimension_range"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            body = {
+                "dimension": {
+                    "sheetId": sheetid,
+                    "majorDimension": major_dimension,
+                    "startIndex": startindex,
+                    "endIndex": endindex
+                }
+            }
+            urllib3.disable_warnings()
+            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
+        except Exception as e:
+            logger.error(f"[+] 飞书删除视频数据异常:{e}")
+
+    # Look up a user's open_id by name
+    @classmethod
+    def get_userid(cls, username):
+        try:
+            url = "https://open.feishu.cn/open-apis/user/v1/batch_get_id?"
+            headers = {
+                "Authorization": "Bearer " + cls.get_token(),
+                "Content-Type": "application/json; charset=utf-8"
+            }
+            name_phone_dict = {
+                "xinxin": "15546206651",
+                "muxinyi": "13699208058",
+                "wangxueke": "13513479926",
+                "yuzhuoyi": "18624010360",
+                "luojunhui": "18801281360",
+                "fanjun": "15200827642",
+                "zhangyong": "17600025055",
+                'liukunyu': "18810931977"
+            }
+            username = name_phone_dict.get(username)
+
+            data = {"mobiles": [username]}
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=data, verify=False, proxies=proxies)
+            open_id = r.json()["data"]["mobile_users"][username][0]["open_id"]
+
+            return open_id
+        except Exception as e:
+            logger.error(f"[+] 飞书get_userid异常:{e}")
+
+    # Feishu alert bot
+    @classmethod
+    def bot(cls, log_type, crawler, text, mark_name):
+        try:
+
+            headers = {'Content-Type': 'application/json'}
+            if crawler == "机器自动改造消息通知":
+                url = "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=bc154d"
+                users = f"<at id=" + str(cls.get_userid(log_type)) + f">{mark_name}</at>"
+            elif crawler == "快手关键词搜索":
+                url = "https://open.feishu.cn/open-apis/bot/v2/hook/e7697dc6-5254-4411-8b59-3cd0742bf703"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=U1gySe"
+                users = "".join([f'<at id="{cls.get_userid(type)}">{name}</at>' for type, name in
+                                 zip(log_type, mark_name)])
+                # users = f"<at id=" + str(cls.get_userid(log_type)) + f">{mark_name}</at>"
+            else:
+                url = "https://open.feishu.cn/open-apis/bot/v2/hook/7928f182-08c1-4c4d-b2f7-82e10c93ca80"
+                sheet_url = "https://w42nne6hzg.feishu.cn/sheets/KsoMsyP2ghleM9tzBfmcEEXBnXg?sheet=bc154d"
+                users = f"<at id=" + str(cls.get_userid(log_type)) + f">{mark_name}</at>"
+            data = json.dumps({
+                "msg_type": "interactive",
+                "card": {
+                    "config": {
+                        "wide_screen_mode": True,
+                        "enable_forward": True
+                    },
+                    "elements": [{
+                        "tag": "div",
+                        "text": {
+                            "content": users + text,
+                            "tag": "lark_md"
+                        }
+                    }, {
+                        "actions": [{
+                            "tag": "button",
+                            "text": {
+                                "content": "详情,点击~~~~~",
+                                "tag": "lark_md"
+                            },
+                            "url": sheet_url,
+                            "type": "default",
+                            "value": {}
+                        }],
+                        "tag": "action"
+                    }],
+                    "header": {
+                        "title": {
+                            "content": "📣消息提醒",
+                            "tag": "plain_text"
+                        }
+                    }
+                }
+            })
+            urllib3.disable_warnings()
+            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
+        except Exception as e:
+            logger.error(f"[+] 飞书bot异常:{e}")
+
+
+    # Feishu bot: transform-plan completion notice
+    @classmethod
+    def finish_bot(cls, text, url, content):
+        try:
+            headers = {'Content-Type': 'application/json'}
+            data = json.dumps({
+                "msg_type": "interactive",
+                "card": {
+                    "config": {
+                        "wide_screen_mode": True,
+                        "enable_forward": True
+                    },
+                    "elements": [{
+                        "tag": "div",
+                        "text": {
+                            "content": text,
+                            "tag": "lark_md"
+                        }
+                    }],
+                    "header": {
+                        "title": {
+                            "content": content,
+                            "tag": "plain_text"
+                        }
+                    }
+                }
+            })
+            urllib3.disable_warnings()
+            r = requests.post(url, headers=headers, data=data, verify=False, proxies=proxies)
+        except Exception as e:
+            logger.error(f"[+] 飞书bot异常:{e}")
+
+if __name__ == "__main__":
+    Feishu.bot('recommend', '抖音', '测试: 抖音cookie失效,请及时更换')
+
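update_values addresses cells in A1 notation relative to a sheet id; an illustrative write against the summary document (sheet id and values are placeholders):

# "summary" resolves to the KsoMsyP2ghleM9tzBfmcEEXBnXg document via spreadsheettoken()
Feishu.update_values("summary", "bc154d", "A2:B2", [["标题", "已处理"]])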

+ 496 - 0
utils/ffmpeg.py

@@ -0,0 +1,496 @@
+import asyncio
+import json
+import os
+import time
+from typing import List
+
+import cv2
+import requests
+from loguru import logger
+from mutagen.mp3 import MP3
+
+
+
+class FFmpeg():
+
+    """
+    时间转换
+    """
+    @classmethod
+    def seconds_to_srt_time(cls, seconds):
+        hours = int(seconds // 3600)
+        minutes = int((seconds % 3600) // 60)
+        seconds = seconds % 60
+        milliseconds = int((seconds - int(seconds)) * 1000)
+        return f"{hours:02d}:{minutes:02d}:{int(seconds):02d},{milliseconds:03d}"
+
+    """
+    获取单个视频时长
+    """
+    @classmethod
+    def get_video_duration(cls, video_url):
+        cap = cv2.VideoCapture(video_url)
+        duration = 0
+        if cap.isOpened():
+            rate = cap.get(cv2.CAP_PROP_FPS)
+            frame_num = cap.get(cv2.CAP_PROP_FRAME_COUNT)
+            if rate > 0:
+                duration = int(frame_num / rate)
+        cap.release()
+        return duration
+
+
+    # """
+    # 获取视频文件的时长(秒)
+    # """
+    # @classmethod
+    # def get_videos_duration(cls, video_file):
+    #     result = cls.asyncio_run_subprocess(["ffprobe", "-v", "error", "-show_entries", "format=duration",
+    #          "-of", "default=noprint_wrappers=1:nokey=1", video_file], timeout=10)
+    #     return float(result)
+
+    """
+    获取视频宽高
+    """
+    @classmethod
+    def get_w_h_size(cls, new_video_path):
+        try:
+            # probe the original width/height of the video
+            ffprobe_cmd = cls.asyncio_run_subprocess(["ffprobe", "-v", "error", "-select_streams", "v:0", "-show_entries", "stream=width,height", "-of", "csv=p=0", new_video_path], timeout=10)
+            output_decoded = ffprobe_cmd.strip()
+            split_output = [value for value in output_decoded.split(',') if value.strip()]
+            width, height = map(int, split_output)  # ffprobe emits width,height in that order
+            return width, height
+        except ValueError as e:
+            return 1920, 1080
+
+
+    """
+    视频裁剪
+    """
+    @classmethod
+    def video_crop(cls, video_path, file_path):
+        crop_url = file_path + 'crop.mp4'
+        try:
+            # probe the original width/height of the video
+            ffprobe_cmd = cls.asyncio_run_subprocess(
+                ["ffprobe", "-v", "error", "-select_streams", "v:0", "-show_entries", "stream=width,height", "-of",
+                 "csv=p=0", video_path], timeout=10)
+            width, height = map(int, ffprobe_cmd.strip().split(','))
+            # compute the cropped height
+            new_height = int(height * 0.8)
+
+            # build the FFmpeg command: crop the video to 80% of its original height
+            cls.asyncio_run_subprocess(
+                [
+                "ffmpeg",
+                "-i", video_path,
+                "-vf", f"crop={width}:{new_height}",
+                "-c:v", "libx264",
+                "-c:a", "aac",
+                "-y",
+                crop_url
+                ],timeout=240)
+            return crop_url
+        except Exception as e:
+            return crop_url
+
+    """
+    视频截断
+    """
+    @classmethod
+    def video_ggduration(cls, video_path, file_path, gg_duration_total):
+        gg_duration_url = file_path + 'gg_duration.mp4'
+        # get the total video duration
+        try:
+            total_duration = cls.get_video_duration(video_path)
+            if total_duration == 0:
+                return gg_duration_url
+            duration = int(total_duration) - int(gg_duration_total)
+            if int(total_duration) < int(gg_duration_total):
+                return gg_duration_url
+            cls.asyncio_run_subprocess([
+                "ffmpeg",
+                "-i", video_path,
+                "-c:v", "libx264",
+                "-c:a", "aac",
+                "-t", str(duration),
+                "-y",
+                gg_duration_url
+            ], timeout= 360)
+            return gg_duration_url
+        except Exception as e:
+            return gg_duration_url
+
+    """
+     截取原视频最后一帧
+    """
+    @classmethod
+    def video_png(cls, video_path, file_path):
+        # extract the last frame of the video as a jpg
+        jpg_url = file_path + 'png.jpg'
+        try:
+            cls.asyncio_run_subprocess(
+                ["ffmpeg", "-sseof", "-1", '-i', video_path, '-frames:v', '1',  "-y", jpg_url], timeout=120)
+            return jpg_url
+        except Exception as e:
+            return jpg_url
+
+    """
+    获取视频音频
+    """
+    @classmethod
+    def get_video_mp3(cls, video_file, video_path_url, pw_random_id):
+        pw_mp3_path = video_path_url + str(pw_random_id) +'pw_video.mp3'
+        try:
+            cls.asyncio_run_subprocess([
+                'ffmpeg',
+                '-i', video_file,
+                '-q:a', '0',
+                '-map', 'a',
+                pw_mp3_path
+            ], timeout=120)
+            time.sleep(1)
+            return pw_mp3_path
+        except Exception as e:
+            return pw_mp3_path
+
+    """横屏视频改为竖屏"""
+    @classmethod
+    def update_video_h_w(cls, video_path, file_path):
+        video_h_w_path = file_path +'video_h_w_video.mp4'
+        try:
+            cls.asyncio_run_subprocess(["ffmpeg" ,"-i" ,video_path ,"-vf" ,"scale=640:ih*640/iw,pad=iw:iw*16/9:(ow-iw)/2:(oh-ih)/2" ,video_h_w_path],timeout=420)
+            return video_h_w_path
+        except Exception as e:
+            return video_h_w_path
+
+    """视频转为640像素"""
+    @classmethod
+    def video_640(cls, video_path, file_path):
+        video_url = file_path + 'pixelvideo.mp4'
+        try:
+            cls.asyncio_run_subprocess(["ffmpeg" ,"-i" ,video_path ,"-vf" ,"scale=360:640" ,video_url],timeout=420)
+            return video_url
+        except Exception as e:
+            return video_url
+
+    @classmethod
+    def concatenate_videos(cls, videos_paths, file_path):
+        video_url = file_path + 'rg_pw.mp4'
+        list_filename = file_path + 'rg_pw.txt'
+        with open(list_filename, "w") as f:
+            for video_path in videos_paths:
+                f.write(f"file '{video_path}'\n")
+        try:
+            cls.asyncio_run_subprocess(
+                ["ffmpeg", "-f", "concat", "-safe", "0", "-i", list_filename, "-c", "copy", video_url], timeout=420)
+            logger.info(f"[+] 视频转为640像素成功")
+            return video_url
+        except Exception as e:
+            return video_url
+
+    """视频拼接到一起"""
+    @classmethod
+    def h_b_video(cls, video_path, pw_path, file_path):
+        video_url = file_path + 'hbvideo.mp4'
+        try:
+            cls.asyncio_run_subprocess(["ffmpeg","-i", video_path, "-i", pw_path, "-filter_complex" ,"[0:v]scale=360:640[v1]; [1:v]scale=360:640[v2]; [v1][0:a][v2][1:a]concat=n=2:v=1:a=1[outv][outa]" ,"-map" ,"[outv]" ,"-map" ,"[outa]" ,video_url],timeout=500)
+            return video_url
+        except Exception as e:
+            return video_url
+
+    """横屏视频顶部增加字幕"""
+    @classmethod
+    def add_video_zm(cls, new_video_path, video_path_url, pw_random_id, new_text):
+        single_video_srt = video_path_url + str(pw_random_id) +'video_zm.srt'
+        single_video_txt = video_path_url + str(pw_random_id) +'video_zm.txt'
+        single_video = video_path_url + str(pw_random_id) +'video_zm.mp4'
+        try:
+            duration = cls.get_video_duration(new_video_path)
+            if duration == 0:
+                return new_video_path
+            start_time = cls.seconds_to_srt_time(0)
+            end_time = cls.seconds_to_srt_time(duration)
+            # zm = '致敬伟大的教员,为整个民族\n感谢老人家历史向一代伟人'
+            with open(single_video_txt, 'w') as f:
+                f.write(f"file '{new_video_path}'\n")
+            with open(single_video_srt, 'w') as f:
+                f.write(f"1\n{start_time} --> {end_time}\n{new_text}\n\n")
+            subtitle_cmd = f"subtitles={single_video_srt}:force_style='Fontsize=12,Fontname=wqy-zenhei,Outline=2,PrimaryColour=&H00FFFF,SecondaryColour=&H000000,Bold=1,MarginV=225'"
+            draw = f"{subtitle_cmd}"
+            cls.asyncio_run_subprocess([
+                "ffmpeg",
+                "-f", "concat",
+                "-safe", "0",
+                "-i", single_video_txt,
+                "-c:v", "libx264",
+                "-c:a", "aac",
+                "-vf", draw,
+                "-y",
+                single_video
+            ],timeout=500)
+            # subprocess.run(ffmpeg_cmd)
+            return single_video
+        except Exception as e:
+            return single_video
+
+    """获取mp3时长"""
+    @classmethod
+    def get_mp3_duration(cls, file_path):
+        audio = MP3(file_path)
+        duration = audio.info.length
+        if duration:
+            return int(duration)
+        return 0
+
+
+    """
+     生成片尾视频
+    """
+    @classmethod
+    def pw_video(cls, jpg_path, file_path, pw_mp3_path, pw_srt):
+        # 添加音频到图片
+        """
+        jpg_url 图片地址
+        pw_video 提供的片尾视频
+        pw_duration  提供的片尾视频时长
+        new_video_path 视频位置
+        subtitle_cmd 字幕
+        pw_url 生成视频地址
+        :return:
+        """
+        pw_srt_path = file_path +'pw_video.srt'
+        with open(pw_srt_path, 'w') as f:
+            f.write(pw_srt)
+        pw_url_path = file_path + 'pw_video.mp4'
+        try:
+            pw_duration = cls.get_mp3_duration(pw_mp3_path)
+            if pw_duration == 0:
+                return pw_url_path
+            time.sleep(2)
+            # add subtitles; fonts: wqy-zenhei / Hiragino Sans GB
+            height = 1080
+            margin_v = int(height) // 8  # adjust the gap between subtitles and background as needed
+            subtitle_cmd = f"subtitles={pw_srt_path}:force_style='Fontsize=13,Fontname=wqy-zenhei,Outline=0,PrimaryColour=&H000000,SecondaryColour=&H000000,Bold=1,MarginV={margin_v}'"
+            bg_position_offset = (int(360) - 360//8) / 1.75
+            background_cmd = f"drawbox=y=(ih-{int(360)}/2-{bg_position_offset}):color=yellow@1.0:width=iw:height={int(360)}/4:t=fill"
+            if "mp4" in jpg_path:
+                pw_path_txt = file_path + 'pw_path_video.txt'
+                with open(pw_path_txt, 'w') as f:
+                    f.write(f"file '{jpg_path}'\n")
+                cls.asyncio_run_subprocess([
+                    "ffmpeg",
+                    "-f", "concat",
+                    "-safe", "0",
+                    "-i", f"{pw_path_txt}",  # 视频序列输入的文本文件
+                    "-i", pw_mp3_path,  # 音频文件
+                    "-c:v", "libx264",  # 视频编码格式
+                    "-t", str(pw_duration),  # 输出视频的持续时间
+                    "-c:a", "aac",  # 音频编码格式
+                    "-b:v", "260k",  # 视频比特率
+                    "-b:a", "96k",  # 音频比特率
+                    "-threads", "2",  # 线程数
+                    "-vf", f"{background_cmd},{subtitle_cmd}",  # 视频过滤器(背景和字幕)
+                    "-map", "0:v:0",  # 映射视频流来自第一个输入文件(视频)
+                    "-map", "1:a:0",  # 映射音频流来自第二个输入文件(音频)
+                    "-y",  # 强制覆盖输出文件
+                    pw_url_path  # 输出文件路径
+                ], timeout=500)
+            else:
+                cls.asyncio_run_subprocess([
+                    'ffmpeg',
+                    '-loop', '1',
+                    '-i', jpg_path,  # input image file
+                    '-i', pw_mp3_path,  # input audio file
+                    '-c:v', 'libx264',  # video codec
+                    '-t', str(pw_duration),  # output duration, same as the audio duration
+                    '-pix_fmt', 'yuv420p',  # pixel format
+                    '-c:a', 'aac',  # audio codec
+                    '-strict', 'experimental',  # allow the experimental encoder
+                    '-shortest',  # keep the output length in line with the audio
+                    '-vf', f"{background_cmd},{subtitle_cmd}",  # video filters (background box and subtitles)
+                    pw_url_path  # output video file path
+                ], timeout=500)
+            if os.path.exists(pw_srt_path):
+                os.remove(pw_srt_path)
+            return pw_url_path
+        except Exception as e:
+            return pw_url_path
+
+
+    """
+    单个视频拼接
+    """
+    @classmethod
+    def single_video(cls, video_path, file_path, zm):
+        single_video_url = file_path + 'single_video.mp4'
+        single_video_srt = file_path + 'single_video.srt'
+        # get the duration
+        try:
+            duration = cls.get_video_duration(video_path)
+            if duration == 0:
+                return single_video_url
+            start_time = cls.seconds_to_srt_time(2)
+            end_time = cls.seconds_to_srt_time(duration)
+            single_video_txt = file_path + 'single_video.txt'
+            with open(single_video_txt, 'w') as f:
+                f.write(f"file '{video_path}'\n")
+            if zm:
+                with open(single_video_srt, 'w') as f:
+                    f.write(f"1\n{start_time} --> {end_time}\n<font color=\"red\">\u2764\uFE0F</font>{zm}\n\n")
+                subtitle_cmd = f"subtitles={single_video_srt}:force_style='Fontsize=14,Fontname=wqy-zenhei,Outline=2,PrimaryColour=&H00FFFF,SecondaryColour=&H000000,Bold=1,MarginV=20'"
+            else:
+                # no subtitle text: use the null passthrough filter so -vf stays valid
+                subtitle_cmd = "null"
+            # number of threads
+            num_threads = 5
+            # build the FFmpeg command to render the video
+            cls.asyncio_run_subprocess([
+                    "ffmpeg",
+                    "-f", "concat",
+                    "-safe", "0",
+                    "-i",  f"{single_video_txt}",
+                    "-c:v", "libx264",
+                    "-c:a", "aac",
+                    '-b:v', '260k',
+                    "-b:a", "96k",
+                    "-threads", str(num_threads),
+                    "-vf", subtitle_cmd,
+                    "-y",
+                     single_video_url
+            ], timeout=400)
+            if os.path.exists(single_video_srt):
+                os.remove(single_video_srt)
+            return single_video_url
+        except Exception as e:
+            return single_video_url
+
+    @classmethod
+    def asyncio_run_subprocess(cls, params: List[str], timeout: int = 30) -> str:
+        async def run_subprocess():
+            process = await asyncio.create_subprocess_exec(
+                params[0],
+                *params[1:],
+                stdout=asyncio.subprocess.PIPE,
+                stderr=asyncio.subprocess.PIPE,
+            )
+            try:
+                out, err = await asyncio.wait_for(process.communicate(), timeout=timeout)
+                if process.returncode != 0:
+                    raise IOError(err)
+                return out.decode()
+            except asyncio.TimeoutError:
+                process.kill()
+                out, err = await process.communicate()
+                raise IOError(err)
+        return asyncio.run(run_subprocess())
+
+
+    @classmethod
+    def get_http_duration(cls, videos_path):
+        total_duration = 0
+        for video_path in videos_path:
+            url = "http://61.48.133.26:5555/api/v1/ffmpeg/get_meta"
+            payload = json.dumps({
+                "url": video_path,
+                "referer": ""
+            })
+            headers = {
+                'Authorization': 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2lkIjoiNGNhMTI4ZGYtYWMzMy00NWQ2LTg3MmEtMDAzOTk4MGVhM2ViIiwibmFtZSI6Inp5IiwiZXhwIjoyMDUwOTI3MjExfQ.k_rvuESjA62RgPDiLniVgJyLJn3Q8C1Y_AGq3CPRuKI',
+                'Content-Type': 'application/json'
+            }
+
+            try:
+                response = requests.request("POST", url, headers=headers, data=payload, timeout=30)
+                response =  response.json()
+                duration = response['data']['streams'][0]['duration']
+                total_duration += int(float(duration))
+            except Exception as e:
+                print(f"Error processing {video_path}: {e}")
+        return total_duration
+
+
+if __name__ == '__main__':
+    file_path = '/Users/z/Downloads/478de0b6-4e52-44a5-a5d4-967b2cf8ce49'
+    jpg_path = '/Users/z/Downloads/478de0b6-4e52-44a5-a5d4-967b2cf8ce49rg_pixelvideo.mp4'
+    mp3_path='/Users/z/Downloads/478de0b6-4e52-44a5-a5d4-967b2cf8ce49pw_video.mp3'
+    pw_srt = """1
+00:00:00,000 --> 00:00:02,842
+这个视频揭示了中国近代历史上
+
+2
+00:00:02,842 --> 00:00:05,685
+一个鲜为人知却又极为重要的故
+
+3
+00:00:05,685 --> 00:00:05,888
+事
+
+4
+00:00:05,888 --> 00:00:07,106
+真是让人震惊
+
+5
+00:00:07,106 --> 00:00:07,715
+看完后
+
+6
+00:00:07,715 --> 00:00:10,354
+我不禁对历史有了更深的思考
+
+7
+00:00:10,354 --> 00:00:12,588
+让我们一起重温这段历史
+
+8
+00:00:12,588 --> 00:00:14,212
+提醒自己珍惜当下
+
+9
+00:00:14,212 --> 00:00:17,055
+我相信很多朋友也会对这个话题
+
+10
+00:00:17,055 --> 00:00:17,664
+感兴趣
+
+11
+00:00:17,664 --> 00:00:20,506
+请把这个视频分享到你们的群聊
+
+12
+00:00:20,506 --> 00:00:20,709
+中
+
+13
+00:00:20,709 --> 00:00:22,740
+让更多人了解这段历史
+
+14
+00:00:22,820 --> 00:00:23,824
+共鸣与反思
+
+15
+00:00:23,824 --> 00:00:25,430
+是我们共同的责任
+
+16
+00:00:25,430 --> 00:00:28,242
+也许我们能从中汲取更多的智慧
+
+17
+00:00:28,242 --> 00:00:28,844
+与力量
+
+18
+00:00:28,844 --> 00:00:29,848
+快动动手指
+
+19
+00:00:29,848 --> 00:00:32,659
+让我们一起分享这段重要的历史
+
+20
+00:00:32,659 --> 00:00:32,860
+吧"""
+    FFmpeg.pw_video(jpg_path, file_path, mp3_path, pw_srt)
+
+
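seconds_to_srt_time is the glue between the probed durations and the SRT files written above; a couple of quick checks (the ffprobe call assumes the binary is on PATH):

assert FFmpeg.seconds_to_srt_time(75.5) == "00:01:15,500"
print(FFmpeg.asyncio_run_subprocess(["ffprobe", "-version"], timeout=10).splitlines()[0])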

+ 76 - 0
utils/google_ai_studio.py

@@ -0,0 +1,76 @@
+import os
+import time
+import uuid
+from typing import Optional
+
+import google.generativeai as genai
+import orjson
+import requests
+from google.generativeai.types import (HarmBlockThreshold, HarmCategory)
+from loguru import logger
+
+
+CACHE_DIR = '/app/cache/'
+# CACHE_DIR = '/Users/z/Downloads/'
+# PROXY_ADDR = 'http://localhost:1081'
+# os.environ['http_proxy'] = PROXY_ADDR
+# os.environ['https_proxy'] = PROXY_ADDR
+
+class GoogleAI(object):
+
+    @classmethod
+    def download_video(cls, video_link: str) -> Optional[str]:
+        file_path = os.path.join(CACHE_DIR, f'{str(uuid.uuid4())}.mp4')
+        for _ in range(3):
+            try:
+                response = requests.get(url=video_link, timeout=60)
+                if response.status_code == 200:
+                    with open(file_path, 'wb') as f:
+                        f.write(response.content)
+                    logger.info(f'[内容分析] 视频链接: {video_link}, 存储地址: {file_path}')
+                    return file_path
+            except Exception:
+                time.sleep(1)
+                continue
+        return
+
+    @classmethod
+    def run(cls, api_key, video_path):
+        try:
+            genai.configure(api_key=api_key)
+            video = genai.upload_file(path=video_path, mime_type='video/mp4')
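+            # Uploaded files start in the PROCESSING state; poll until Google
+            # marks the file ACTIVE, since generate_content rejects inactive files.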
+            while video.state.name == 'PROCESSING':
+                time.sleep(1)
+                video = genai.get_file(name=video.name)
+            if video.state.name != 'ACTIVE':
+                genai.delete_file(name=video.name)
+                return
+            model = genai.GenerativeModel(
+                model_name='gemini-1.5-flash',
+                generation_config=genai.GenerationConfig(response_mime_type='application/json'),
+                safety_settings={
+                    HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
+                },
+            )
+            response = model.generate_content(
+                contents=[
+                    video,
+                    "你是一名专业的短视频分析师,请你输出这个视频的完整口播,只输出文字即可。使用一下JSON格式输出:{'text': string}",
+                ],
+                stream=False,
+                request_options={
+                    'timeout': 600,
+                },
+            )
+            text = orjson.loads(response.text.strip())['text']
+            genai.delete_file(name=video.name)
+            return text
+        except Exception as e:
+            logger.error(f"[内容分析] 处理异常,异常信息{e}")
+            return
+
+
+if __name__ == '__main__':
+    # run() uploads a local file, so fetch the remote video into the cache first.
+    path = GoogleAI.download_video('http://rescdn.yishihui.com/jq_oss/video/2025012215472528213')
+    if path:
+        print(GoogleAI.run('AIzaSyAwGqthDADh5NPVe3BMcOJBQkJaf0HWBuQ', path))
+

+ 210 - 0
utils/gpt4o_help.py

@@ -0,0 +1,210 @@
+import json
+import random
+import re
+import time
+
+import requests
+
+
+
+class GPT4o:
+    @classmethod
+    def extract_title(cls, data):
+        # Step 1: the response may arrive as a JSON string; try to parse it.
+        if isinstance(data, str):
+            try:
+                data = json.loads(data)
+            except json.JSONDecodeError:
+                pass  # not valid JSON, keep the raw string
+
+        # Step 2: locate the content field in the expected response shape.
+        content = None
+        if isinstance(data, dict):
+            if 'data' in data and 'choices' in data['data']:
+                choices = data['data']['choices']
+                if choices and 'message' in choices[0]:
+                    content = choices[0]['message'].get('content', '')
+        elif isinstance(data, str):  # a plain string that failed JSON parsing
+            content = data
+
+        # Step 3: extract the title with a regex (accepts ASCII or full-width colon).
+        if content:
+            match = re.search(r'title[::]"(.*?)"', content)
+            if match:
+                return match.group(1)
+        return None
+
+    @classmethod
+    def get_ai_title(cls, old_title):
+
+        content = '''
+                    CONTEXT
+                    你是一名短视频标题优化专家,任务是为短视频生成吸引力高且符合规范的标题。
+                    OBJECTIVE
+                    基于优秀标题要求及示例,生成新的、吸引用户注意的标题。
+                    SCALE
+                    如果原标题无法按照规则生成新标题则重新尝试5次,失败后返回None
+                    TIME
+                    在标题生成请求提出后的1分钟内生成。
+                    ACTOR
+                    短视频标题优化专家
+                    RESOURCES
+                    - 原标题列表
+                    - 优秀标题示例
+                    - 标题生成规范和约束条件
+                    RESPONSE
+                    为每个原标题生成符合规范的新标题。
+                    生成示例标题案例及要求
+                    1. 标题开头包含醒目emoji🔴,整体字符长度必须控制在10-25个字
+                    2. 识别需要修改的标题中可能感兴趣的人群,并在标题中表现出来。人群示例如:群友们、退休人员、50~70后等
+                    3. 标题中可增加一些对内容的观点/态度,用人格化的方式表达,示例:太香了、老外至今难以相信
+                    4. 标题结尾可以根据标题内容增加一些引导语,格式参考示例但不必局限于示例。示例:你们见过吗、你听对不对、说的太好了、请听、太神奇了
+                    5. 对于包含#话题和@人的标题,若标题中包含其他元素,则去除#话题和@人元素后利用其他元素生成标题。若标题去除#话题和@人外无其他元素,则仅利用#话题的内容生成标题
+                    Goodcase示例:
+                    ⭕老外至今难以相信,中国人竟能把大桥建到天上,穿入云中
+                    🔴未来酒店体验,群友们,请看!
+                    ⭕六七十年代的《忠字舞》,你们见过吗?
+                    🔴哈哈哈!大哥说的太好了!太真实了
+                    🔴今天,请记住那1700个集体赴死的年轻人,平均23岁!
+                    🔴这才叫老同学聚会,到了这个年纪,还能聚在一起真不容易!
+                    🔴百善孝为先,心凉了捂不热了
+                    🔴养儿不如养狗,一件真实的事!
+                    🔴让人受益的一段话,写得真好!
+                    🔴“处暑三劝”!发给最好的朋友劝一劝!
+                    🔴世间公道自在人心,善恶有报,人生智慧建议收藏!
+                    🔴坐着电梯登上山顶!这怕是只有中国人敢想敢做!
+                    🔴人老了!就应该这样去活!
+                    生成约束
+                    1. 标题不能包含#话题标签和@人名。
+                    2. 如果原标题无法按照规则生成新标题则重新尝试5次,失败后返回None。
+                    3. 不能编造:不能加入原标题没有的实体信息,如原标题没有养老金,不能在生成的标题中出现养老金。
+                    4. 标题内不能使用强引导分享点击的词句,如:快来看看、大家都听一听、值得一看、都看看吧、你也来看看吧、大家注意、都听听等。
+                    5. 不能使用无实质信息和强烈诱导点击、紧急、夸张、震惊的描述,避免使用“震惊国人”、“速看”、“太震撼了”等类似描述。
+                    6. 标题需要简洁、清晰,不要使用网络流行语,如:太燃了、佛系、躺平、内卷等。
+                    badcase示例:
+                    🌸绝对不能错过,快打开看看,越快越好
+                            所有老年人一定要看
+                            天大的好消息,5月开始实施❗
+                            就在刚刚,中国突然传出重磅消息,所有人都不敢相信! 🚩
+                            丧尽天良!为什么生病的人越来越多,原来吃的是这些 🎈
+                            今年的端午节太特殊,一辈子难遇一次!一定要看!错过别后悔
+                            好消息来了,千万别划走!
+                            紧急!已爆发,错过就晚了😱 
+                输出格式:{title:""}
+        '''
+        url = "http://aigc.piaoquantv.com/aigc-server/aigc/conversation"
+        headers = {
+            "Content-Type": "application/json"
+        }
+        payload = {
+            "auth": "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",
+            "openAiGptParam": {
+                "model": "gpt-4o",
+                "temperature": 0.5,
+                "messages": [
+                    {
+                        "role": "system",
+                        "content": content
+                    },
+                    {
+                        "role": "user",
+                        "content": old_title
+                    }
+                ]
+            }
+        }
+        wait_time = random.uniform(1, 2)
+        time.sleep(wait_time)
+        response = requests.post(url, headers=headers, json=payload, timeout=30)
+        text = response.text
+        title = cls.extract_title(text)
+        if title:
+            return title
+        else:
+            return "这个视频,分享给我的老友,祝愿您能幸福安康"
+
+
+    @classmethod
+    def get_ai_pw(cls, title):
+        max_retries = 3
+        for attempt in range(max_retries):
+
+            content = '''
+                        请针对微信平台视频类小程序场景,面向人群是中国中老年人,在单聊、群聊场景,对不同类型的内容,生成结尾引导分享的脚本。引导用户发生更多的分享行为。要求优化过程综合考虑下面的要求:  
+                        第一.根据下面的示例case,总结分享引导脚本特点,生成分享引导脚本  
+                        示例case1: 
+                        标题:几十年前的老歌,现在很少听到了!
+                        返回分享引导脚本: 
+                        这个视频真是难得,太美太好听了,看完真是回忆满满,让人怀念那个时代,多么淳朴多么美好,今天大家都在群里转发这个视频,看了都说,那时候虽然手里不宽裕,但心里都满怀希望,那股子拼劲,真是让人热血沸腾,老朋友你还记得吗?如果你也跟我一样深深的怀念,赶紧把这个视频转发到你所有群里去吧,让大家伙都看看吧,多多转发,感恩有你!
+                        示例case2: 
+                        标题:🔺令人尊敬的老人❗❗这才是我们应该追的明星❗
+                        返回分享引导脚本: 
+                        这个视频发出来5天了,转发的人却寥寥无几,试想一次明星出轨就全民皆知,这样中国真正的正能量却无人关注,真是太讽刺了!如果你看到了这个视频,请分享到你加入的每个群,向杨老这种无私奉献的大爱致敬!千万别让那些低调做好事的人心寒!转发传递下去,让世界都看到,中华民族的正能量!
+                        第二:生成过程有如下的规范、约束和建议: 
+                        1.不能编造:不能加入原标题没有的实体信息,如原标题没有养老金,不能在生成的标题中出现养老金; 
+                        2.不能使用强烈的诱导、夸张词汇:在提高分享吸引力的同时,严格落实不使用“必做”、“错过悔恨终生”类描述; 
+                        3.分享引导阅读总字数控制在150个字以内,总时长控制30秒到1分钟,20-30句话,每句话字符长度控制在4-10字;
+                        4.分享引导如果无法生成文字描述,直接返回None
+                        输出格式:要求输出格式为符合RFC8259标准的JSON格式的字符串,不要以代码块形式返回,返回一个string类型的分享引导脚本内容。
+                        '''
+
+            url = "http://aigc.piaoquantv.com/aigc-server/aigc/conversation"
+            headers = {
+                "Content-Type": "application/json"
+            }
+            payload = {
+                "auth": "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",
+                "openAiGptParam": {
+                    "model": "gpt-4o",
+                    "temperature": 0.5,
+                    "messages": [
+                        {
+                            "role": "system",
+                            "content": content
+                        },
+                        {
+                            "role": "user",
+                            "content": title
+                        }
+                    ]
+                }
+            }
+            wait_time = random.uniform(5, 20)
+            time.sleep(wait_time)
+            response = requests.post(url, headers=headers, json=payload, timeout=30)
+            pw_srt_text = '老友们,这个视频说的太好了,请你在看完之后,不要私藏,分享给身边的亲人朋友,转发传递这份美好,这不仅是分享一个好视频,更是在传递一份深厚的祝福和好运,愿每个看到这消息的人,都能收获健康长寿和平安,希望您在看完后能够回传给我,并且把这条视频再传给3个群和3个好友,祝愿你我,健健康康,平平安安,因为3个数字代表着健康和平安,这么好的视频,千万不要在你的手里终止了,分享给群友们,他们看到以后一定会感谢你的,感谢分享,感谢传递'
+
+            try:
+                response = response.json()
+                content = response['data']['choices'][0]['message']['content']
+                pattern = re.compile(r'"分享引导脚本":\s*"(.*?)"')
+                match = pattern.search(content)
+                if match:
+                    pw = match.group(1)
+                    # strip any stray Latin letters from the generated script
+                    if re.search(r'[a-zA-Z]', pw):
+                        pw = re.sub(r'[a-zA-Z]', '', pw)
+                    # the regex group never captures the quotes, so check both forms
+                    if pw in ('None', '"None"') or len(pw) <= 10:
+                        return pw_srt_text
+                    return pw
+                else:
+                    if content:
+                        if re.search(r'[a-zA-Z]', content):
+                            content = re.sub(r'[a-zA-Z]', '', content)
+                        if "分享引导脚本" in content:
+                            parsed_data = json.loads(content.strip('"'))
+                            content = parsed_data["分享引导脚本"]
+                        if content == '"None"' or len(content) <= 10:
+                            return pw_srt_text
+                        return content
+                    if attempt == max_retries - 1:
+                        return pw_srt_text
+            except Exception:
+                if attempt == max_retries - 1:
+                    return pw_srt_text
+
+
+if __name__ == '__main__':
+    title = '新加坡的退休金,全国统一!!'
+    newtitle = GPT4o.get_ai_pw(title)
+    print(newtitle)

+ 238 - 0
utils/gpt4o_mini_help.py

@@ -0,0 +1,238 @@
+import json
+import time
+
+import requests
+
+
+class GPT4oMini:
+
+    @classmethod
+    def get_ai_mini_pw(cls, title, prompt):
+        if not prompt:
+            prompt = (
+                "请针对微信平台视频类小程序场景,面向人群是中国中老年人,在单聊、群聊场景,"
+                "根据以下示例生成结尾引导分享的脚本,引导老年用户分享给好友和群,必须有6个带引导分享的句子,以老年人祝福的语气结尾。\n"
+                "示例case1:\n"
+                "标题: 几十年前的老歌,现在很少听到了!\n"
+                "返回分享引导脚本: 这个视频真是难得,太美太好听了,看完真是回忆满满,"
+                "让人怀念那个时代,多么淳朴多么美好,今天大家都在群里转发这个视频,"
+                "老朋友你还记得吗?如果你也跟我一样深深的怀念,赶紧把这个视频转发到你所有群里去吧,"
+                "有多少群就转发多少群,不要私藏,"
+                "每一次分享都是一次奉献,一份真诚的情感,"
+                "转发给每一位都怀念当年岁月的老友,他们都会感谢你的,"
+                "多多转发,情谊多多!"
+                "祝每位老友健健康康,平平安安\n\n"
+                "示例case2:\n"
+                "标题: 🔺令人尊敬的老人❗❗这才是我们应该追的明星❗\n"
+                "返回分享引导脚本: 这个视频发出来5天了,转发的人却寥寥无几,"
+                "试想一次明星出轨就全民皆知,这样中国真正的正能量却无人关注,真是太讽刺了!"
+                "如果你看到了这个视频,请分享到你加入的每个群,向杨老这种无私奉献的大爱致敬!"
+                "发给5个群和5个好友,"
+                "千万别让那些低调做好事的人心寒!转发传递下去,让世界都看到,中华民族的正能量!"
+                "我已经转发了,也希望你能帮助传递,"
+                "赠人玫瑰,手留余香,分享好视频,收获好福气\n\n"
+            )
+        url = "http://aigc-api.cybertogether.net/aigc/dev/test/gpt"
+        payload = json.dumps({
+            "imageList": [],
+            "model": "gpt-4o-mini-2024-07-18",
+            "prompt": (
+                prompt +
+                f"请分析该标题,标题为:{title},返回结尾分享引导脚本。"
+            ),
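+            # Ask the gateway for strict JSON (json_schema) so the reply can be
+            # parsed directly instead of scraped with regexes.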
+            "responseFormat": {
+                "type": "json_schema",
+                "json_schema": {
+                    "strict": True,
+                    "name": "share_script_result",
+                    "schema": {
+                        "type": "object",
+                        "properties": {
+                            "分享引导脚本": {
+                                "type": "string",
+                                "description": "生成的结尾分享引导脚本"
+                            }
+                        },
+                        "required": ["分享引导脚本"],
+                        "additionalProperties": False
+                    }
+                }
+            }
+        })
+        headers = {'Content-Type': 'application/json'}
+        try:
+            response = requests.post(url, headers=headers, data=payload, timeout=30)
+            response_data = response.json()
+
+            data = json.loads(response_data.get('data', '{}'))
+            pw = data["分享引导脚本"]
+            return pw
+        except Exception as e:
+            pw_srt_text = '老友们,这个视频说的太好了,请你在看完之后,不要私藏,分享给身边的亲人朋友,转发传递这份美好,这不仅是分享一个好视频,更是在传递一份深厚的祝福和好运,愿每个看到这消息的人,都能收获健康长寿和平安,希望您在看完后能够回传给我,并且把这条视频再传给3个群和3个好友,祝愿你我,健健康康,平平安安,因为3个数字代表着健康和平安,这么好的视频,千万不要在你的手里终止了,分享给群友们,他们看到以后一定会感谢你的,感谢分享,感谢传递'
+            return pw_srt_text
+
+    @classmethod
+    def get_ai_mini_title(cls, title):
+        url = "http://aigc-api.cybertogether.net/aigc/dev/test/gpt"
+        payload = json.dumps({
+            "imageList": [],
+            "model": "gpt-4o-mini-2024-07-18",
+            "prompt": (
+                        "CONTEXT"
+                        "你是一名短视频标题优化专家,任务是为短视频生成吸引力高且符合规范的标题。"
+                        "OBJECTIVE"
+                        "基于优秀标题要求及示例,生成新的、吸引用户注意的标题。"
+                        "TIME"
+                        "在标题生成请求提出后的1分钟内生成。"
+                        "ACTOR"
+                        "短视频标题优化专家"
+                        "RESOURCES"
+                        "- 原标题列表"
+                        "- 优秀标题示例"
+                        "- 标题生成规范和约束条件"
+                        "RESPONSE"
+                        "为每个原标题生成符合规范的新标题。"
+                        "生成示例标题案例及要求"
+                        "1. 标题开头包含醒目emoji🔴,整体字符长度必须控制在10-25个字"
+                        "2. 识别需要修改的标题中可能感兴趣的人群,并在标题中表现出来。人群示例如:群友们、退休人员、50~70后等"
+                        "3. 标题中可增加一些对内容的观点/态度,用人格化的方式表达,示例:太香了、老外至今难以相信"
+                        "4. 标题结尾可以根据标题内容增加一些引导语,格式参考示例但不必局限于示例。示例:你们见过吗、你听对不对、说的太好了、请听、太神奇了"
+                        "5. 对于包含#话题和@人的标题,则去除@人元素后,利用剩下的全部信息生成标题。"
+                        "6. 不能编造、新增原标题没有的信息,如原标题没有养老金,不能在生成的标题中出现养老金。"
+                        "7. 标题内不能使用强引导分享点击的词句,如:快来看看、大家都听一听、值得一看、都看看吧、你也来看看吧、大家注意、都听听等。"
+                        "8. 不能使用无实质信息和强烈诱导点击、紧急、夸张、震惊的描述,避免使用“震惊国人”、“速看”、“太震撼了”等类似描述。"
+                        "9. 标题需要简洁、清晰,不要使用网络流行语,如:太燃了、佛系、躺平、内卷等。"
+                    
+                        "Goodcase示例:"
+                        "⭕老外至今难以相信,中国人竟能把大桥建到天上,穿入云中"
+                        "🔴未来酒店体验,群友们,请看!"
+                        "⭕六七十年代的《忠字舞》,你们见过吗?"
+                        "🔴哈哈哈!大哥说的太好了!太真实了"
+                        "🔴今天,请记住那1700个集体赴死的年轻人,平均23岁!"
+                        "🔴这才叫老同学聚会,到了这个年纪,还能聚在一起真不容易!"
+                        "🔴百善孝为先,心凉了捂不热了"
+                        "🔴养儿不如养狗,一件真实的事!"
+                        "🔴让人受益的一段话,写得真好!"
+                        "🔴“处暑三劝”!发给最好的朋友劝一劝!"
+                        "🔴世间公道自在人心,善恶有报,人生智慧建议收藏!"
+                        "🔴坐着电梯登上山顶!这怕是只有中国人敢想敢做!"
+                        "🔴人老了!就应该这样去活!"
+                    
+                        "badcase示例:"
+                        "🌸绝对不能错过,快打开看看,越快越好"
+                        "所有老年人一定要看"
+                        "天大的好消息,5月开始实施❗"
+                        "就在刚刚,中国突然传出重磅消息,所有人都不敢相信! 🚩"
+                        "丧尽天良!为什么生病的人越来越多,原来吃的是这些 🎈"
+                        "今年的端午节太特殊,一辈子难遇一次!一定要看!错过别后悔"
+                        "好消息来了,千万别划走!"
+                        "紧急!已爆发,错过就晚了😱"
+                    f"请分析该标题,标题为:{title},返回新的标题。"
+                    ),
+            "responseFormat": {
+                "type": "json_schema",
+                "json_schema": {
+                    "strict": True,
+                    "name": "share_script_result",
+                    "schema": {
+                        "type": "object",
+                        "properties": {
+                            "新标题": {
+                                "type": "string",
+                                "description": "生成新的标题"
+                            }
+                        },
+                        "required": ["新标题"],
+                        "additionalProperties": False
+                    }
+                }
+            }
+        })
+        headers = {'Content-Type': 'application/json'}
+        max_retries = 3
+        retry_count = 0
+        while retry_count < max_retries:
+            try:
+                response = requests.post(url, headers=headers, data=payload, timeout=30)
+                response_data = response.json()
+
+                data = json.loads(response_data.get('data', '{}'))
+                new_title = data["新标题"]
+                return new_title
+            except Exception as e:
+                retry_count += 1
+                # logger.error(f"尝试第 {retry_count} 次失败,错误: {e}")
+                time.sleep(1)  # 延迟1秒后重试
+        return "这个视频,分享给我的老友,祝愿您能幸福安康"
+
+    @classmethod
+    def get_content_understanding_pw(cls, pw):
+        """AI标题"""
+        url = "http://aigc-api.cybertogether.net//aigc/dev/test/gpt"
+        payload = json.dumps({
+            "imageList": [],
+            "model": "gpt-4o-mini-2024-07-18",
+            "prompt": (
+                "你是一名专业的短视频分析师,工作是帮助短视频平台的视频撰写视频结尾用于引导用户分享视频的文案。视频的主要用户为60岁以上中国老年人,请你理解视频内容,根据规则选择选择模板,并结合视频内容输出用于引导用户分享视频的文案。"
+
+                "请注意:"
+                "1、总的内容输出在100~150字之间,绝对不能低于100字,绝对不能超过150字,如果超过150字,可以针对模板进行适当删减。"
+                "2、针对视频核心内容的提炼,内容需要适配老年人的阅读习惯和理解能力,禁止出现太燃了、佛系、躺平、内卷、炸裂等形容词,以及阶级固化等专业名词。"
+                "3、模板中[]内的内容填充,不要超过15个字。"
+                
+                "规则:"
+                "1、视频内容是否围绕着健康科普/安全防范/政策解读/生活技巧/情感激励等知识科普/信息通知的主题?若是,则根据视频内容在“科普/信息分享模板”中,选择合适的模板进行输出,无需处理其他规则。若不是,则继续处理规则2。"
+                "2、视频内容是否围绕着对感动/温情/趣味/祝福、罕见画面、社会正能量等正面主题?若是,则根据视频内容在“正面情绪模板”中,选择合适的模板进行输出,无需处理其他规则。若不是,则继续处理规则3。"
+                "3、频内容是否围绕着对社会风气/现象的不满/批判、对生活的不满等主题?若是,则根据视频内容在“负面情绪模板”中,选择合适的模板进行输出,无需处理其他规则。若不是,则继续处理规则4。"
+                "4、若视频同时不符合规则1、2、3,则根据视频内容,在所有模板中选择适配的模板进行输出。"
+                
+                "输出内容包含:"
+                "1、视频内容符合规则1/2/3/4中的哪一条?"
+                "2、你选择的是哪一类模板,具体哪一条模板?"
+                "3、引导语文案"
+                
+                "模板List:"
+                "“科普/信息分享模板”"
+                "模板1:健康科普类 ""这个视频讲解的[健康知识]真是太实用了,很多人都不知道原来[具体内容]还有这么多讲究。看完后我才明白,平时我们[具体行为]都做错了。群里的朋友们都说,这些知识太及时了,简直是生活中的智慧宝典。老朋友们,你们[相关问题]做对了吗?如果觉得这个视频对你有帮助,请马上转发到3个群里,让更多人了解正确的[健康知识]。记住,健康是最重要的,您的转发可能会帮助一位需要的朋友,让我们一起关爱健康,守护幸福。多多分享,感恩有你。"
+                "模板2:安全防范类 ""各位老友们要注意了,这个视频提醒的[安全隐患]太重要了,现在的[具体威胁]真是防不胜防。看完后我心惊肉跳,原来我们身边竟然有这么多危险。为了家人的安全,请立即把这个视频转发到至少3个群里,让大家都提高警惕。特别是家里有老人和孩子的,一定要多加注意。记住,一个不小心就可能造成无法挽回的损失。您的一次转发,可能就会挽救一个家庭。让我们一起守护平安,转发传递,功德无量。"
+                "模板3:政策解读类 ""重要通知!这个关于[最新政策]的视频太及时了,从[具体日期]开始实施的新规定,将给我们的生活带来重大变化。很多老年朋友还不知道这些新政策,可能会影响到我们的切身利益。为了不让身边的朋友吃亏,请大家一定要把这个视频转发到至少3个群里。让更多人了解这些新变化,提前做好准备。您的分享就是对他人的关爱,让我们互帮互助,共同进步。转发分享,福气多多。"
+                "模板4:生活技巧类 ""看完这个视频,我才知道原来[日常行为]还可以这样做,太神奇了!视频里教的这些[具体技巧],简直是生活小妙招。群里的朋友们都说学到了,这些方法太实用了。特别是对我们这些[特定人群]来说,掌握这些技巧后,生活会变得更加便利。如果你也觉得有用,请马上转发到3个群,分享给身边的朋友,让大家一起学习这些生活智慧。记住,分享就是快乐,您的转发一定会帮助到需要的人。多多转发,感恩有你。"
+                "模板5:情感激励类 ""这个视频的内容太有意义了,让我深受触动。在这个快节奏的社会里,我们常常忽视了[具体内容]的重要性。视频里说的每一句话都说到心坎里,相信看完的朋友们都会有同感。为了让更多人受益,请把这个视频转发到至少3个群里。特别是那些正在经历[具体困境]的朋友,看了一定会很有启发。记住,您的一次分享,可能就会改变一个人的生活。让我们用爱心传递温暖,共同创造更美好的明天。转发分享,福气满满。"
+                
+                "“正面情绪模板”"
+                "模板1:感动温情类 ""亲爱的老朋友们,这个视频真是太[感动/温馨/暖心]了,看到[具体内容]的画面,让人心里暖暖的,相信您和我一样被深深打动。这份[幸福/感动/温情]太珍贵了,值得与更多人分享。请您一定要把这个视频转发到您所在的3个微信群,让更多的家人朋友感受这份美好。您的分享就是一份关爱,群友们看到后一定会感谢您的。多一次转发,多一份温暖,让我们一起传递这份感动,还在等什么,赶紧行动起来吧!"
+                "模板2:趣味互动类 ""老朋友们,这个[有趣/精彩]的视频里,您觉得[具体内容]哪一个最棒呢?快来说说您的看法吧!看完后千万别私藏,一定要把这么精彩的内容转发分享到您所在的3个微信群,让群友们也一起来欣赏评论。这么有意思的视频,大家看到后一定会眼前一亮,纷纷感谢您的分享。记住,您的每一次转发都能带来好运哦!多多转发,福气多多,快乐多多。动动手指,传递欢乐,一起分享精彩时刻!"
+                "模板3:祝福传递类 ""亲爱的朋友,这个充满[祝福/好运/福气]的视频,我第一时间就想到要分享给您。据说看到这个视频的人,只要转发给八个好友或三个群,就能收获满满的[福气/好运/健康]。我已经把这份祝福传递给了最重要的朋友,其中就包括您。请您也一定要把这份美好传递下去,转发到您所在的微信群,让更多人感受这份祝福。记住,千万不要让祝福在您手中终止,多多转发,幸福多多!"
+                "模板4:文化传承类 ""老友们,这段展现[中华文化/传统技艺/民族精神]的视频太珍贵了!看到[具体内容],让人深感自豪。作为炎黄子孙,我们要把这份文化瑰宝传承下去。请您一定要把视频转发到3个群,让更多人了解我们的传统文化。您的每一次转发,都是对中华文化的传播,都是对传统的守护。让我们共同努力,让中华文化发扬光大。转发视频,传承文明,让更多人感受中华文化的魅力!"
+                "模板5:稀奇见闻类 ""亲爱的朋友,这个展现[自然奇观/稀有现象/独特事物]的视频实在太难得了!这么[神奇/罕见/精彩]的画面,错过太可惜了。我第一时间就想分享给身边的好友,也包括您。请您看完后也别忘了转发分享到您所在的3个微信群,让群友们也大开眼界。这么稀奇的视频,绝对不能在您这里终止啊!据说转发的人都能沾上好运气呢。多多转发,惊喜多多,让我们一起分享这份难得的奇观,传递这份惊喜!"
+                
+                "“负面情绪模板”"
+                "模板1:真实共鸣型 ""这个视频说出了我们的心里话,[视频核心内容提炼]真是太真实了。相信很多朋友看了都会有同感,这样难得的真话不常见。如果您也觉得说得对,请一定要转发分享给3个群的群友们看看。这样的真实声音,明天可能就听不到了。您的每一次转发,都是在发声,都是在传递真相。让我们一起行动起来,点点转发,让更多人听到这样的心声。"
+                "模板2:善意提醒型 ""老朋友们,这个视频太重要了,[视频核心内容提炼]对我们每个人都很有帮助。看完后请您一定要转发分享到3个群,让所有的家人朋友都能从中受益。您的转发是对身边人的关心,是在传递祝福,他们看完一定会感谢您的。记得多多转发,这样明天您想再看的时候就能马上找到了。让我们一起把这份关爱传递下去,让更多人受益。祝愿您和家人平安健康!"
+                "模板3:社会责任型 ""这个视频提到的[视频核心内容],关系到每个人的切身利益。如果您也认同,请动动手指转发到3个群,让所有人都能看到。这不仅仅是一个视频的传播,更是一份社会责任。请老友们多多转发,让更多人了解这个问题,引起全社会的重视。千万不要让这个视频在您这里停止传播,您的每一次转发都是在为社会尽一份力。让我们共同行动,让社会变得更好。"
+                "模板4:情感共振型 ""看完这个视频,[视频核心内容提炼]真是让人感触良多。群里的朋友们都在转发,说看完后特别有感触。这样充满智慧和温度的内容值得分享,请您也加入传播的队伍,转发到您的群里,让更多人感受这份温暖。每一次转发都是在传递一份爱与理解,都是在为这个社会增添一份温暖。让我们一起把这份美好传递下去,让更多人受益。"
+                "模板5:紧迫感召型 ""这个视频太重要了,[视频核心内容提炼]说得太对了!如果您看到这个视频,说明大家都在转发支持。请您立即行动起来,转发到您所有的群,一人行动十人知晓,百人传递,千人支持。现在就是行动的最好时机,明天再想看可能就找不到了。让我们一起努力,让这个声音传得更远。您的每一次转发,都是在发声,赶快行动起来! "
+                f"请分析该内容,视频脚本内容为:{pw},返回新的片尾。"
+            ),
+            "responseFormat": {
+                "type": "json_schema",
+                "json_schema": {
+                    "strict": True,
+                    "name": "share_script_result",
+                    "schema": {
+                        "type": "object",
+                        "properties": {
+                            "新片尾": {
+                                "type": "string",
+                                "description": "生成新的片尾"
+                            }
+                        },
+                        "required": ["新片尾"],
+                        "additionalProperties": False
+                    }
+                }
+            }
+        })
+        headers = {'Content-Type': 'application/json'}
+        response = requests.post(url, headers=headers, data=payload, timeout=30)
+        response_data = response.json()
+
+        data = json.loads(response_data.get('data', '{}'))
+        new_pw = data["新片尾"]
+        if new_pw:
+            return new_pw
+        else:
+            return None
+
+if __name__ == '__main__':
+    GPT4oMini.get_ai_mini_pw("这段话说出了多少人的心声 #老百姓的心声 #老百姓关心的话题 #农民的心声 #老百姓不容易","AI片尾引导1")

+ 67 - 0
utils/mysql_db.py

@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+"""
+Database connection and helper operations.
+"""
+import pymysql
+
+class MysqlHelper:
+    @classmethod
+    def connect_mysql(cls):
+        # Create a Connection object representing one database connection.
+        connection = pymysql.connect(
+            # host="rm-bp1159bu17li9hi94.mysql.rds.aliyuncs.com",  # internal endpoint
+            host="rm-bp1159bu17li9hi94ro.mysql.rds.aliyuncs.com",  # public endpoint
+            port=3306,
+            user="crawler",
+            passwd="crawler123456@",
+            db="piaoquan-crawler",
+            charset="utf8")  # table text is utf8-encoded
+        return connection
+
+    @classmethod
+    def get_values(cls, sql, params=None):
+        connect = None
+        try:
+            connect = cls.connect_mysql()
+            mysql = connect.cursor()
+            if params:
+                mysql.execute(sql, params)
+            else:
+                mysql.execute(sql)
+            # fetchall() returns a tuple of row tuples, one per record
+            return mysql.fetchall()
+        except Exception as e:
+            print(f"get_values异常:{e}\n")
+        finally:
+            # close the connection on success and on error alike
+            if connect:
+                connect.close()
+
+    @classmethod
+    def update_values(cls, sql):
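+        # NOTE: executes the raw SQL string with no parameter binding; callers
+        # interpolate values themselves, so inputs must be escaped upstream.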
+        connect = cls.connect_mysql()
+        mysql = connect.cursor()
+        try:
+            res = mysql.execute(sql)
+            # commit, otherwise the write is not persisted
+            connect.commit()
+            return res
+        except Exception:
+            # roll back on any error
+            connect.rollback()
+        finally:
+            # always close, even when returning from the try block
+            connect.close()

+ 46 - 0
utils/odps_data.py

@@ -0,0 +1,46 @@
+import json
+import datetime
+import math
+import random
+
+from odps import ODPS
+
+# ODPS service configuration
+ODPS_CONFIG = {
+    'ENDPOINT': 'http://service.cn.maxcompute.aliyun.com/api',
+    'ACCESSID': 'LTAIWYUujJAm7CbH',
+    'ACCESSKEY': 'RfSjdiWwED1sGFlsjXv0DlfTnZTG1P',
+    'PROJECT': 'loghubods'
+}
+class OdpsDataCount:
+    @classmethod
+    def get_data_count(cls, dt):
+        odps = ODPS(
+            access_id=ODPS_CONFIG['ACCESSID'],
+            secret_access_key=ODPS_CONFIG['ACCESSKEY'],
+            project=ODPS_CONFIG['PROJECT'],
+            endpoint=ODPS_CONFIG['ENDPOINT']
+        )
+        data_values = []
+        try:
+            sql = f'SELECT videoid,time,type,channel FROM loghubods.transport_spider_recommend_video_hour WHERE dt = "{dt}" AND channel = "搬运工具"'
+            with odps.execute_sql(sql).open_reader() as reader:
+                for row in reader:
+                    data_values.append(json.dumps({"videoid": row[0], "time": row[1], "type": row[2], "channel": row[3], "dt": str(dt)}, ensure_ascii=False))
+        except Exception as e:
+            print(f"An error occurred: {e}")
+            return data_values
+        return data_values
+
+    @classmethod
+    def main(cls):
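+        # Pull the previous hour's partition (dt format: YYYYMMDDHH) and
+        # randomly keep half of the rows.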
+        dt = (datetime.datetime.now() - datetime.timedelta(hours=1)).strftime('%Y%m%d%H')
+        data_count = cls.get_data_count(dt=dt)
+        sample_size = math.ceil(len(data_count) / 2)
+        random_selection = random.sample(data_count, sample_size)
+
+        print(len(random_selection))
+        return random_selection
+
+if __name__ == '__main__':
+    OdpsDataCount.main()

+ 113 - 0
utils/piaoquan.py

@@ -0,0 +1,113 @@
+import json
+import time
+
+import requests
+from urllib.parse import urlencode
+
+
+
+class PQ:
+
+    """
+    新生成视频上传到对应账号下
+    """
+    @classmethod
+    def insert_piaoquantv(cls, new_video_path, new_title, n_id, cover):
+        url = "https://vlogapi.piaoquantv.com/longvideoapi/crawler/video/send?muid=999"
+        headers = {
+            'User-Agent': 'PQSpeed/486 CFNetwork/1410.1 Darwin/22.6.0',
+            'cookie': 'JSESSIONID=4DEA2B5173BB9A9E82DB772C0ACDBC9F; JSESSIONID=D02C334150025222A0B824A98B539B78',
+            'referer': 'http://appspeed.piaoquantv.com',
+            'token': '524a8bc871dbb0f4d4717895083172ab37c02d2f',
+            'accept-language': 'zh-CN,zh-Hans;q=0.9',
+            'Content-Type': 'application/x-www-form-urlencoded'
+        }
+        payload = {
+            'deviceToken': '9ef064f2f7869b3fd67d6141f8a899175dddc91240971172f1f2a662ef891408',
+            'fileExtensions': 'MP4',
+            'loginUid': n_id,
+            'networkType': 'Wi-Fi',
+            'platform': 'iOS',
+            'requestId': 'fb972cbd4f390afcfd3da1869cd7d001',
+            'sessionId': '362290597725ce1fa870d7be4f46dcc2',
+            'subSessionId': '362290597725ce1fa870d7be4f46dcc2',
+            'title': new_title,
+            'token': '524a8bc871dbb0f4d4717895083172ab37c02d2f',
+            'uid': n_id,
+            'versionCode': '486',
+            'versionName': '3.4.12',
+            'videoFromScene': '1',
+            'videoPath': new_video_path,
+            'viewStatus': '1'
+        }
+        if cover:
+            payload['coverImgPath'] = cover
+        encoded_payload = urlencode(payload)
+        response = requests.request("POST", url, headers=headers, data=encoded_payload, timeout=10)
+        data = response.json()
+        code = data["code"]
+        if code == 0:
+            new_video_id = data["data"]["id"]
+            return new_video_id
+        return None
+
+    @classmethod
+    def get_pq_oss_path(cls, video_id):
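+        # Look up a PQ video's OSS path, cover image and title, retrying the
+        # request up to three times before giving up.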
+        try:
+            url = "https://longvideoapi.piaoquantv.com/longvideoapi/openapi/video/getBaseInfo"
+
+            payload = json.dumps({
+                "videoId": int(video_id)
+            })
+            headers = {
+                'Content-Type': 'application/json',
+                'Cookie': 'JSESSIONID=658158EABFCF6AC9B9BB0D8B61897A88'
+            }
+            for i in range(3):
+                response = requests.request("POST", url, headers=headers, data=payload, timeout=30)
+                response = response.json()
+                code = response['code']
+                if code == 0:
+                    data = response['data']
+                    video_path = data["videoPath"]
+                    cover_path = data["coverImgPath"]
+                    title = data["title"]
+
+                    return video_path, cover_path, title
+            return None, None, None
+        except Exception as e:
+            return None, None, None
+
+    @classmethod
+    def get_pq_oss(cls, video_id_list):
+        url_list = []
+        for video_id in video_id_list:
+            try:
+                url = "https://longvideoapi.piaoquantv.com/longvideoapi/openapi/video/getBaseInfo"
+
+                payload = json.dumps({
+                    "videoId": int(video_id)
+                })
+                headers = {
+                    'Content-Type': 'application/json',
+                    'Cookie': 'JSESSIONID=658158EABFCF6AC9B9BB0D8B61897A88'
+                }
+
+                response = requests.request("POST", url, headers=headers, data=payload, timeout=10)
+                response = response.json()
+                code = response['code']
+                if code == 0:
+                    data = response['data']
+                    video_path = data["videoPath"]
+                    url_list.append(f"http://rescdn.yishihui.com/{video_path}")
+                    time.sleep(1)
+                continue
+            except Exception as e:
+                time.sleep(1)
+                continue
+        return url_list
+
+
+if __name__ == '__main__':
+    rg_pw = "47969744,47969804,47969813,47969815,47969816"
+    PQ.get_pq_oss(rg_pw)

+ 30 - 0
utils/redis.py

@@ -0,0 +1,30 @@
+import redis
+
+
+class RedisHelper(object):
+    # The pool is held on the class so every RedisHelper instance shares a
+    # single connection pool.
+    _pool: redis.ConnectionPool = None
+
+    def _get_pool(self) -> redis.ConnectionPool:
+        if RedisHelper._pool is None:
+            RedisHelper._pool = redis.ConnectionPool(
+                host="r-bp1mb0v08fqi4hjffupd.redis.rds.aliyuncs.com",  # public endpoint
+                port=6379,
+                db=0,
+                password="Wqsd@2019",
+                max_connections=100)
+        return self._pool
+
+    def get_client(self) -> redis.Redis:
+        pool = self._get_pool()
+        client = redis.Redis(connection_pool=pool)
+        return client
+
+    def close(self):
+        if self._pool:
+            self._pool.disconnect(inuse_connections=True)

+ 18 - 0
utils/sql_help.py

@@ -0,0 +1,18 @@
+import os
+import sys
+
+# make sure the repo root is on the import path before pulling in utils
+sys.path.append(os.getcwd())
+
+from utils.mysql_db import MysqlHelper
+
+
+class sqlCollect():
+    @classmethod
+    def insert_machine_making_data(cls, name: str, task_mark: str, channel_id: str, url: str, v_id: str, piaoquan_id: str, new_title: str, code: str, formatted_time, old_title: str, oss_object_key: str):
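+        # NOTE: values are interpolated straight into the SQL text (no parameter
+        # binding), so inputs containing double quotes would break the statement.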
+        insert_sql = f"""INSERT INTO machine_making_data (name, task_mark, channel, user, v_id, pq_uid, title, pq_vid, data_time, old_title, oss_object_key) values ("{name}", "{task_mark}", "{channel_id}", "{url}", "{v_id}" , "{piaoquan_id}", "{new_title}", "{code}", "{formatted_time}", "{old_title}", "{oss_object_key}")"""
+        MysqlHelper.update_values(
+            sql=insert_sql
+        )

+ 23 - 0
utils/tag_video.py

@@ -0,0 +1,23 @@
+import requests
+import json
+
+class Tag:
+    @classmethod
+    def video_tag(cls, pq_id: str, tag: str):
+        try:
+            url = "https://admin.piaoquantv.com/manager/video/tag/addVideoTags"
+
+            payload = json.dumps({
+                "videoId": pq_id,
+                "tagNames": tag
+            })
+            headers = {
+                'Content-Type': 'application/json'
+            }
+
+            response = requests.request("POST", url, headers=headers, data=payload, timeout=10)
+            response = response.json()
+            code = response['code']
+            return code
+        except Exception:
+            # treat any failure as a non-zero code
+            return 1

+ 207 - 0
utils/tts_help.py

@@ -0,0 +1,207 @@
+from datetime import timedelta
+
+import requests
+import json
+import random
+import re
+import time
+
+
+
+class TTS:
+    @classmethod
+    def get_pw_zm(cls, text, voice):
+        max_retries = 3
+        for attempt in range(max_retries):
+            url = "http://api.piaoquantv.com/produce-center/speechSynthesis"
+            payload = json.dumps({
+                "params": {
+                    "text": text,
+                    "voice": voice,
+                    # "vocie": "zhiyuan",
+                    "format": "pcm",
+                    "volume": 90,
+                    "speechRate": 80,
+                    "pitchRate": 0
+                }
+            })
+
+            headers = {
+                'Content-Type': 'application/json'
+            }
+            wait_time = random.uniform(1, 10)
+            time.sleep(wait_time)
+            try:
+                response = requests.request("POST", url, headers=headers, data=payload, timeout=60)
+                response = response.json()
+                code = response["code"]
+                if code == 0:
+                    mp3 = response["data"]
+                    return mp3
+            except Exception:
+                if attempt == max_retries - 1:
+                    return None
+        return None
+
+    """
+    Download the audio file to local storage.
+    """
+    @classmethod
+    def download_mp3(cls, pw_url, file_path):
+        pw_mp3_path = file_path + 'pw_video.mp3'
+        for i in range(3):
+            response = requests.request("GET", pw_url, timeout=30)
+            if response.status_code == 200:
+                # open in binary write mode and persist the audio
+                with open(f"{pw_mp3_path}", "wb") as file:
+                    file.write(response.content)
+                return pw_mp3_path
+        return None
+
+    @classmethod
+    def get_srt_format(cls, pw_srt_text, pw_url_sec):
+        segments = re.split(r'(,|。|!|?)', pw_srt_text)
+        segments = [segments[i] + segments[i + 1] for i in range(0, len(segments) - 1, 2)]
+        if not segments:
+            # no sentence delimiters found; treat the whole text as one segment
+            segments = [pw_srt_text]
+        pw_url_sec = int(pw_url_sec) + 1
+        # spread the audio duration evenly across the segments
+        num_segments = len(segments)
+        duration_per_segment = pw_url_sec / num_segments
+        srt_content = ""
+        start_time = 0.0
+        for i, segment in enumerate(segments):
+            end_time = start_time + duration_per_segment
+            srt_content += f"{i + 1}\n"
+            srt_content += f"{int(start_time // 3600):02}:{int((start_time % 3600) // 60):02}:{int(start_time % 60):02},{int((start_time % 1) * 1000):03} --> "
+            srt_content += f"{int(end_time // 3600):02}:{int((end_time % 3600) // 60):02}:{int(end_time % 60):02},{int((end_time % 1) * 1000):03}\n"
+            srt_content += f"{segment.strip()}\n\n"
+            start_time = end_time
+
+        print(srt_content)
+        return srt_content
+
+    @classmethod
+    def process_srt(cls, srt):
+        lines = srt.strip().split('\n')
+        processed_lines = []
+
+        for line in lines:
+            if re.match(r'^\d+$', line):
+                processed_lines.append(line)
+            elif re.match(r'^\d{2}:\d{2}:\d{2}\.\d{1,3}-->\d{2}:\d{2}:\d{2}\.\d{1,3}$', line):
+                processed_lines.append(line.replace('-->', ' --> '))
+            else:
+                # strip trailing punctuation and add the blank line SRT expects
+                line = re.sub(r'[,。!?;、]$', '', line)
+                processed_lines.append(line + '\n')
+
+        return '\n'.join(processed_lines)
+
+    @classmethod
+    def parse_timecode(cls, timecode):
+        h, m, s = map(float, timecode.replace(',', '.').split(':'))
+        return timedelta(hours=h, minutes=m, seconds=s)
+
+    @classmethod
+    def format_timecode(cls, delta):
+        total_seconds = delta.total_seconds()
+        hours, remainder = divmod(total_seconds, 3600)
+        minutes, seconds = divmod(remainder, 60)
+        return f"{int(hours):02}:{int(minutes):02}:{seconds:06.3f}".replace('.', ',')
+
+    @classmethod
+    def split_subtitle(cls, subtitle_string):
+        max_len = 14
+        lines = subtitle_string.strip().split('\n')
+        subtitles = []
+        for i in range(0, len(lines), 4):
+            sub_id = int(lines[i].strip())
+            timecode_line = lines[i + 1].strip()
+            start_time, end_time = timecode_line.split(' --> ')
+            text = lines[i + 2].strip()
+            # drop stray Latin letters; skip cues that end up empty to avoid a
+            # division by zero below
+            if re.search(r'[a-zA-Z]', text):
+                text = re.sub(r'[a-zA-Z]', '', text)
+            if not text:
+                continue
+            start_delta = cls.parse_timecode(start_time)
+            end_delta = cls.parse_timecode(end_time)
+            total_duration = (end_delta - start_delta).total_seconds()
+            char_duration = total_duration / len(text)
+
+            current_start = start_delta
+            for j in range(0, len(text), max_len):
+                segment = text[j:j + max_len]
+                current_end = current_start + timedelta(seconds=char_duration * len(segment))
+                subtitles.append((sub_id, current_start, current_end, segment))
+                current_start = current_end
+                sub_id += 1
+
+        return subtitles
+
+    @classmethod
+    def generate_srt(cls, subtitles):
+        srt_content = ''
+        for idx, sub in enumerate(subtitles, start=1):
+            srt_content += f"{idx}\n"
+            srt_content += f"{cls.format_timecode(sub[1])} --> {cls.format_timecode(sub[2])}\n"
+            srt_content += f"{sub[3]}\n\n"
+        return srt_content.strip()
+
+    @classmethod
+    def getSrt(cls, mp3_id):
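+        # Fetch the machine-generated SRT for an audio URL, then normalize it and
+        # re-wrap the cues to 14 characters via split_subtitle/generate_srt.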
+        url = "http://api.piaoquantv.com/produce-center/srt/get/content"
+
+        payload = json.dumps({
+            "params": {
+                "resourceChannel": "outer",
+                "videoPath": mp3_id
+            }
+        })
+        headers = {
+            'User-Agent': 'Apifox/1.0.0 (https://apifox.com)',
+            'Content-Type': 'application/json',
+            'Accept': '*/*',
+            'Host': 'api-internal.piaoquantv.com',
+            'Connection': 'keep-alive'
+        }
+
+        response = requests.request("POST", url, headers=headers, data=payload, timeout=30)
+        time.sleep(1)
+        data_list = response.json()
+        code = data_list["code"]
+        if code == 0:
+            srt = data_list["data"]
+            if srt:
+                srt = srt.replace("/n", "\n")
+                new_srt = cls.process_srt(srt)
+                result = cls.split_subtitle(new_srt)
+                # assemble the final SRT-formatted content
+                srt_content = cls.generate_srt(result)
+                return srt_content
+            else:
+                return None
+        else:
+            return None
+
+
+if __name__ == '__main__':
+    # text = "真是太实用了,分享给身边的准妈妈们吧!这些孕期禁忌一定要记住,赶紧转发给更多人,帮助更多的宝妈们。一起为宝宝的健康加油!"
+    # mp3 = TTS.get_pw_zm(text)
+    # print(mp3)
+    # command = [
+    #   'ffmpeg',
+    #   '-i', mp3,
+    #   '-q:a', '0',
+    #   '-map', 'a',
+    #   # '-codec:a', 'libmp3lame',  # 指定 MP3 编码器
+    #   "/Users/tzld/Desktop/video_rewriting/path/pw_video.mp3"
+    # ]
+    # subprocess.run(command)
+    # print("完成")
+    video_file = 'http://clipres.yishihui.com/longvideo/crawler/voice/pre/20240821/37fbb8cfc7f1439b8d8a032a1d01d37f1724219959925.mp3'
+    TTS.getSrt(video_file)
+    # result = subprocess.run(
+    #     ["ffprobe", "-v", "error", "-show_entries", "format=duration",
+    #      "-of", "default=noprint_wrappers=1:nokey=1", video_file],
+    #     capture_output=True, text=True)
+    # print(float(result.stdout))

+ 0 - 0
workers/__init__.py


+ 481 - 0
workers/consumption_work.py

@@ -0,0 +1,481 @@
+import json
+import os
+import random
+import re
+import sys
+import time
+import uuid
+from datetime import datetime
+import orjson
+from apscheduler.schedulers.blocking import BlockingScheduler
+from apscheduler.triggers.interval import IntervalTrigger
+from loguru import logger
+sys.path.append('/app')
+from utils.redis import RedisHelper
+from utils.aliyun_log import AliyunLogger
+from utils.aliyun_oss import Oss
+from utils.download_video import DownLoad
+from utils.dy_ks_get_url import Dy_KS
+from utils.feishu_form import Material
+from utils.feishu_utils import Feishu
+from utils.ffmpeg import FFmpeg
+from utils.gpt4o_mini_help import GPT4oMini
+from utils.piaoquan import PQ
+from utils.sql_help import sqlCollect
+from utils.tag_video import Tag
+from utils.tts_help import TTS
+
+
+
+ENV = os.getenv('ENV', 'dev')
+# /app/cache is created by the Dockerfile; fall back to a local folder for dev runs.
+CACHE_DIR = '/app/cache/' if ENV == 'prod' else os.path.expanduser('~/Downloads/')
+class ConsumptionRecommend(object):
+    @classmethod
+    def insert_pq(cls, data, oss_object_key, title, tags, tag_transport_channel, channel_mark, task_mark):
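+        # A task row may carry several PQ account ids (comma separated); the same
+        # OSS object is published once per account and every video id is recorded.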
+        logger.info(f"[+] 开始写入票圈")
+        n_ids = str(data["pq_ids"])
+        if ',' in n_ids:
+            n_id_list = n_ids.split(',')
+        else:
+            n_id_list = [n_ids]
+        pq_list = []
+        for n_id in n_id_list:
+            code = PQ.insert_piaoquantv(oss_object_key, title, n_id, None)
+            if not code:
+                logger.error(f"[+] 写入票圈后台失败")
+                AliyunLogger.logging(data["name"], "效率工具", tag_transport_channel, data["video_url"],
+                                     "改造失败,写入票圈后台失败", "3001", str(data))
+                text = (
+                    f"**负责人**: {data['name']}\n"
+                    f"**内容**: {data}\n"
+                    f"**失败信息**: 视频写入票圈后台失败,视频ID{code}\n"
+                )
+                Feishu.finish_bot(text,
+                                  "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                                  "【 搬运&改造效率工具失败通知 】")
+                continue
+
+            pq_list.append(code)
+            logger.info(f"[+] 写入票圈成功,返回视频id{code}")
+            tag_status = Tag.video_tag(code, str(tags))
+            if tag_status == 0:
+                logger.info(f"[+] 写入标签成功,后台视频ID为{code}")
+            try:
+                current_time = datetime.now()
+                formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
+                sqlCollect.insert_machine_making_data(data["name"], task_mark, tag_transport_channel,
+                                                      data["video_url"], data["video_url"], data["pq_ids"],
+                                                      data["title_category"],
+                                                      code,
+                                                      formatted_time, data["title_category"], oss_object_key)
+                pq_url = f'https://admin.piaoquantv.com/cms/post-detail/{code}/detail'  # in-site video detail link
+
+                values = [
+                    [
+                        str(code),
+                        str(n_id),
+                        formatted_time,
+                        channel_mark,
+                        data["name"],
+                        data["pq_ids"],
+                        data["pq_label"],
+                        data["activate_data"],
+                        data["video_url"],
+                        data["title_category"],
+                        tag_transport_channel,
+                        data["tag_transport_scene"],
+                        data["tag_transport_keyword"],
+                        data["tag"],
+                        data["transform_rule"],
+                        data["video_share"],
+                        data["trailer_share"],
+                        data["trailer_share_audio"],
+                        data["video_clipping"],
+                        data["video_clipping_time"],
+                        data["title_transform"],
+                        pq_url
+                    ]
+                ]
+                name_to_sheet = {
+                    "范军": "276ffc",
+                    "鲁涛": "QqrKRY",
+                    "余海涛": "dTzUlI",
+                    "罗情": "8JPv9g",
+                    "刘诗雨": "HqwG0o",
+                    "王媛": "vtWvle",
+                    "周仙琴": "MWUqWt",
+                    "王雪珂": "xN1KrU",
+                    "信欣": "PtoeGT",
+                    "邓锋": "dgV2Af"
+                }
+                name = re.sub(r"\s+", "", data.get("name", ""))
+                sheet = name_to_sheet.get(name)
+                Feishu.insert_columns("R4dLsce8Jhz9oCtDMr9ccpFHnbI", sheet, "ROWS", 1, 2)
+                time.sleep(0.5)
+                Feishu.update_values("R4dLsce8Jhz9oCtDMr9ccpFHnbI", sheet, "A2:Z2", values)
+                logger.info(f"[处理] 写入飞书成功")
+            except Exception as e:
+                logger.error(f"[处理] 写入飞书失败{e}")
+                pass
+        AliyunLogger.logging(data["name"], "效率工具", tag_transport_channel, data["video_url"],
+                             "改造成功", "1000", str(data), str(pq_list))
+        return
+
+    @classmethod
+    def data_handle(cls, data, file_path, redis_name):
+        url, original_title, video_id, tag_transport_channel = Dy_KS.get_video_url(data, "效率工具")
+        if url == "重新处理" or not url:
+            RedisHelper().get_client().rpush(redis_name, json.dumps(data))
+            text = (
+                f"**负责人**: {data['name']}\n"
+                f"**内容**: {data}\n"
+                f"**失败信息**: 没有获取到视频链接,等待重新处理\n"
+            )
+            Feishu.finish_bot(text,
+                              "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                              f"【  搬运&改造效率工具失败通知 】")
+            return
+        elif url == "作品不存在" or url == "链接不是抖/快" or url == "note":
+            if url == "note":
+                url = "图文"
+            text = (
+                f"**负责人**: {data['name']}\n"
+                f"**内容**: {data}\n"
+                f"**失败信息**: {url},不做处理\n"
+            )
+            Feishu.finish_bot(text,
+                              "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                              f"【  搬运&改造效率工具失败通知 】")
+            return
+        AliyunLogger.logging(data["name"], "效率工具", tag_transport_channel, data["video_url"], "扫描到一条视频",
+                             "2001", str(data))
+        AliyunLogger.logging(data["name"], "效率工具", tag_transport_channel, data["video_url"], "符合规则等待改造",
+                             "2004", str(data))
+        logger.info(f"[处理] {url}开始下载视频")
+        video_path = DownLoad.download_video(url, file_path, tag_transport_channel, video_id)
+        if not os.path.exists(video_path) or os.path.getsize(video_path) == 0:
+            RedisHelper().get_client().rpush(redis_name, json.dumps(data))
+            logger.error(f"[处理] {url}下载失败")
+            AliyunLogger.logging(data["name"], "效率工具", tag_transport_channel, data["video_url"],
+                                 "视频下载失败等待重新处理", "3002", str(data))
+            text = (
+                f"**负责人**: {data['name']}\n"
+                f"**内容**: {data}\n"
+                f"**失败信息**: 视频下载失败等待重新处理,视频链接{url}\n"
+            )
+            Feishu.finish_bot(text,
+                              "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                              "【 搬运&改造效率工具失败通知 】")
+            return
+        logger.info(f"[处理] {url}视频下载成功")
+        if data["title_category"] == "AI标题" or data["trailer_share"] == "AI标题":
+            title = GPT4oMini.get_ai_mini_title(
+                original_title if data["title_category"] == "AI标题" else data["title_category"])
+        else:
+            title = original_title if data["title_category"] == "原标题" else data["title_category"]
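+        # Infer where the share link came from: Douyin app share texts contain
+        # "复制打开抖音", and Kuaishou links under /f are the PC share format.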
+        if tag_transport_channel == "抖音":
+            if "复制打开抖音" in data['video_url']:
+                channel_mark = "APP"
+            else:
+                channel_mark = "PC"
+        else:
+            if "https://www.kuaishou.com/f" in data['video_url']:
+                channel_mark = "PC"
+            else:
+                channel_mark = "APP"
+        if data["transform_rule"] == '否' or data["transform_rule"] == "是":
+            logger.info(f"[处理] 数据开始发送oss")
+            oss_object_key = Oss.stitching_sync_upload_oss(video_path, str(uuid.uuid4()))  # upload the video to OSS
+            oss_object_key = oss_object_key.get("oss_object_key")
+            tags = ','.join(filter(None, [
+                data['pq_label'],
+                channel_mark,
+                tag_transport_channel,
+                data['tag_transport_scene'],
+                data['tag_transport_keyword'],
+                "搬运工具",
+                data['tag']
+            ]))
+            cls.insert_pq(data, oss_object_key, title, tags, tag_transport_channel, channel_mark,
+                          "搬运工具")
+        if data["transform_rule"] == "仅改造" or data["transform_rule"] == "是":
+            try:
+                width, height = FFmpeg.get_w_h_size(video_path)
+                if width < height:  # check whether the orientation needs converting to portrait
+                    video_path = FFmpeg.update_video_h_w(video_path, file_path)
+                logger.info(f"[处理] 视频更改分辨率处理")
+                video_path = FFmpeg.video_640(video_path, file_path)
+                if not os.path.exists(video_path) or os.path.getsize(video_path) == 0:
+                    RedisHelper().get_client().rpush(redis_name, json.dumps(data))
+                    logger.error(f"[处理] 视频更改分辨率失败")
+                    AliyunLogger.logging(data["name"], "效率工具", tag_transport_channel, data["video_url"],
+                                         "改造失败,片尾拼接失败", "3001", str(data))
+                    text = (
+                        f"**负责人**: {data['name']}\n"
+                        f"**内容**: {data}\n"
+                        f"**失败信息**: 视频更改分辨率失败\n"
+                    )
+                    Feishu.finish_bot(text,
+                                      "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                                      "【 搬运&改造效率工具失败通知 】")
+                    return
+                logger.info(f"[处理] 视频更改分辨率处理成功")
+                if data["video_clipping"]:  # 判断是否需要裁剪
+                    video_path = FFmpeg.video_crop(video_path, file_path)
+                if data["video_clipping_time"]:  # 判断是否需要指定视频时长
+                    video_path = FFmpeg.video_ggduration(video_path, file_path, data["video_clipping_time"])
+                if data['trailer_share']:
+                    prompt = Material.get_propmt_data(data['trailer_share'])
+                    pw_srt_text = GPT4oMini.get_ai_mini_pw(title, prompt)
+                    voice = data['trailer_share_audio']
+                    if voice:
+                        if ',' in voice:
+                            voices = voice.split(',')
+                        else:
+                            voices = [voice]
+                        voice = random.choice(voices)
+                    else:
+                        voice = "zhifeng_emo"
+                    pw_url = TTS.get_pw_zm(pw_srt_text, voice)
+                    if not pw_url:
+                        logger.error(f"[处理] 数据片尾获取失败")
+                        data["transform_rule"] = "仅改造"
+                        RedisHelper().get_client().rpush(redis_name, json.dumps(data))
+                        AliyunLogger.logging(data["name"], "效率工具", tag_transport_channel, data["video_url"],
+                                             "改造失败,片尾获取失败", "3001", str(data))
+                        text = (
+                            f"**负责人**: {data['name']}\n"
+                            f"**内容**: {data}\n"
+                            f"**失败信息**: 获取片尾失败\n"
+                        )
+                        Feishu.finish_bot(text,
+                                          "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                                          "【 搬运&改造效率工具失败通知 】")
+                        return
+                    logger.info(f"[处理] 数据片尾获取成功")
+                    pw_srt = TTS.getSrt(pw_url)
+                    if not pw_srt:
+                        data["transform_rule"] = "仅改造"
+                        RedisHelper().get_client().rpush(redis_name, json.dumps(data))
+                        logger.error(f"[处理] 数据片尾音频srt获取失败")
+                        AliyunLogger.logging(data["name"], "效率工具", tag_transport_channel, data["video_url"],
+                                             "改造失败,片尾音频下载失败", "3001", str(data))
+                        text = (
+                            f"**负责人**: {data['name']}\n"
+                            f"**内容**: {data}\n"
+                            f"**失败信息**: 片尾音频下载失败\n"
+                        )
+                        Feishu.finish_bot(text,
+                                          "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                                          "【 搬运&改造效率工具失败通知 】")
+                        return
+                    pw_mp3_path = TTS.download_mp3(pw_url, file_path)
+                    if not pw_mp3_path:
+                        data["transform_rule"] = "仅改造"
+                        RedisHelper().get_client().rpush(redis_name, json.dumps(data))
+                        logger.error(f"[处理] 数据片尾音频下载失败")
+                        AliyunLogger.logging(data["name"], "效率工具", tag_transport_channel, data["video_url"],
+                                             "改造失败,片尾音频下载失败", "3001", str(data))
+                        text = (
+                            f"**负责人**: {data['name']}\n"
+                            f"**内容**: {data}\n"
+                            f"**失败信息**: 片尾音频下载失败\n"
+                        )
+                        Feishu.finish_bot(text,
+                                          "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                                          "【 搬运&改造效率工具失败通知 】")
+                        return
+                    logger.info(f"[处理] 数据片尾音频下载成功")
+                    if "AI片尾引导" in data['trailer_share']:
+                        jpg_path = FFmpeg.video_png(video_path, file_path)  # grab the video's last frame as a jpg
+                        if not os.path.exists(jpg_path) or os.path.getsize(jpg_path) == 0:
+                            data["transform_rule"] = "仅改造"
+                            RedisHelper().get_client().rpush(redis_name, json.dumps(data))
+                            logger.error(f"[处理] 数据片尾获取最后一帧失败")
+                            AliyunLogger.logging(data["name"], "效率工具", tag_transport_channel, data["video_url"],
+                                                 "改造失败,获取最后一帧失败", "3001", str(data))
+                            text = (
+                                f"**负责人**: {data['name']}\n"
+                                f"**内容**: {data}\n"
+                                f"**失败信息**: 获取视频最后一帧失败\n"
+                            )
+                            Feishu.finish_bot(text,
+                                              "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                                              "【 搬运&改造效率工具失败通知 】")
+                            return
+                        logger.info(f"[处理] 数据片尾获取最后一帧成功")
+                    else:
+                        rg_pw = str(data["trailer_share"])
+                        if ',' in rg_pw:
+                            rg_pw_list = rg_pw.split(',')
+                        else:
+                            rg_pw_list = [rg_pw]
+                        rg_pw_url_list = PQ.get_pq_oss(rg_pw_list)
+                        if not rg_pw_url_list:
+                            AliyunLogger.logging(data["name"], "效率工具", tag_transport_channel, data["video_url"],
+                                                 "无法获取站内视频链接", "3001", str(data))
+                            text = (
+                                f"**负责人**: {data['name']}\n"
+                                f"**内容**: {data}\n"
+                                f"**失败信息**: 无法获取站内视频链接\n"
+                            )
+                            Feishu.finish_bot(text,
+                                              "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                                              "【 搬运&改造效率工具失败通知 】")
+                            return
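+                        # Compare durations: if the stitched in-site tail videos are
+                        # shorter than the TTS narration, fall back to the last-frame
+                        # image as the tail visual.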
+                        pw_url_duration = FFmpeg.get_http_duration([pw_url])
+                        pw_videos_duration = FFmpeg.get_http_duration(rg_pw_url_list)
+                        if pw_videos_duration < pw_url_duration:
+                            jpg_path = FFmpeg.video_png(video_path, file_path)  # extract the video's last frame as a jpg
+                            if not os.path.exists(jpg_path) or os.path.getsize(jpg_path) == 0:
+                                data["transform_rule"] = "仅改造"
+                                RedisHelper().get_client().rpush(redis_name, json.dumps(data))
+                                logger.error(f"[处理] 数据片尾获取最后一帧失败")
+                                AliyunLogger.logging(data["name"], "效率工具", tag_transport_channel, data["video_url"],
+                                                     "改造失败,获取最后一帧失败", "3001", str(data))
+                                text = (
+                                    f"**负责人**: {data['name']}\n"
+                                    f"**内容**: {data}\n"
+                                    f"**失败信息**: 获取视频最后一帧失败\n"
+                                )
+                                Feishu.finish_bot(text,
+                                                  "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                                                  "【 搬运&改造效率工具失败通知 】")
+                                return
+                            logger.info(f"[处理] 数据片尾获取最后一帧成功")
+                        else:
+                            rg_pw_url = DownLoad.download_pq_video(file_path, rg_pw_url_list)
+                            rg_pw_list = FFmpeg.concatenate_videos(rg_pw_url, file_path)
+                            if not os.path.exists(rg_pw_list) or os.path.getsize(rg_pw_list) == 0:
+                                data["transform_rule"] = "仅改造"
+                                RedisHelper().get_client().rpush(redis_name, json.dumps(data))
+                                logger.error(f"[处理] 数据片尾拼接失败")
+                                AliyunLogger.logging(data["name"], "效率工具", tag_transport_channel, data["video_url"],
+                                                     "改造失败,片尾拼接失败", "3001", str(data))
+                                text = (
+                                    f"**负责人**: {data['name']}\n"
+                                    f"**内容**: {data}\n"
+                                    f"**失败信息**: 片尾拼接失败\n"
+                                )
+                                Feishu.finish_bot(text,
+                                                  "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                                                  "【 搬运&改造效率工具失败通知 】")
+                                return
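+                            # despite the name, jpg_path holds the re-encoded 640px tail
+                            # video here; FFmpeg.pw_video takes it as the tail visual source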
+                            jpg_path = FFmpeg.video_640(rg_pw_list, file_path + "rg_")
+                            logger.info(f"[处理] 生成人工片尾成功")
+                    pw_path = FFmpeg.pw_video(jpg_path, file_path, pw_mp3_path, pw_srt)  # render the tail segment from the visual source, narration audio and srt
+                    if not os.path.exists(pw_path) or os.path.getsize(pw_path) == 0:
+                        data["transform_rule"] = "仅改造"
+                        RedisHelper().get_client().rpush(redis_name, json.dumps(data))
+                        logger.error(f"[处理] 数据片尾拼接失败")
+                        AliyunLogger.logging(data["name"], "效率工具", tag_transport_channel, data["video_url"],
+                                             "改造失败,片尾拼接失败", "3001", str(data))
+                        text = (
+                            f"**负责人**: {data['name']}\n"
+                            f"**内容**: {data}\n"
+                            f"**失败信息**: 片尾拼接失败\n"
+                        )
+                        Feishu.finish_bot(text,
+                                          "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                                          "【 搬运&改造效率工具失败通知 】")
+                        return
+
+                    logger.info(f"[处理] 数据合并开始拼接")
+                    video_path = FFmpeg.h_b_video(video_path, pw_path, file_path)
+                    video_path = FFmpeg.single_video(video_path, file_path, data["video_share"])
+                    if not os.path.exists(video_path) or os.path.getsize(video_path) == 0:
+                        data["transform_rule"] = "仅改造"
+                        RedisHelper().get_client().rpush(redis_name, json.dumps(data))
+                        logger.error(f"[处理] 数据添加片中字幕失败")
+                        AliyunLogger.logging(data["name"], "效率工具", tag_transport_channel, data["video_url"],
+                                             "改造失败,添加片中字幕失败", "3001", str(data))
+                        text = (
+                            f"**负责人**: {data['name']}\n"
+                            f"**内容**: {data}\n"
+                            f"**失败信息**: 视频片中增加字幕失败\n"
+                        )
+                        Feishu.finish_bot(text,
+                                          "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                                          "【 搬运&改造效率工具失败通知 】")
+                        return
+                    logger.info(f"[处理] 数据添加片中字幕成功")
+                logger.info(f"[处理] 数据开始发送oss")
+                oss_object_key = Oss.stitching_sync_upload_oss(video_path, str(uuid.uuid4()))  # upload the finished video to OSS
+                logger.info(f"[处理] 数据发送oss成功")
+                oss_object_key = oss_object_key.get("oss_object_key")
+
+                tags = ','.join(filter(None, [
+                    data['pq_label'],
+                    channel_mark,
+                    tag_transport_channel,
+                    data['tag_transport_scene'],
+                    data['tag_transport_keyword'],
+                    "搬运改造",
+                    data['tag']
+                ]))
+                cls.insert_pq(data, oss_object_key, title, tags, tag_transport_channel, channel_mark,
+                              "搬运改造")
+                return
+            except Exception as e:
+                data["transform_rule"] = "仅改造"
+                RedisHelper().get_client().rpush(redis_name, json.dumps(data))
+                logger.error(f"[+] 视频改造失败{e}")
+                text = (
+                    f"**负责人**: {data['name']}\n"
+                    f"**内容**: {data}\n"
+                    f"**失败信息**: 视频改造失败\n"
+                )
+                Feishu.finish_bot(text,
+                                  "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                                  "【 搬运&改造效率工具失败通知 】")
+                return
+
+    @classmethod
+    def run(cls):
+        logger.info(f"[处理] 开始获取redis数据")
+        # fs_data = os.getenv("FS_DATA")
+        fs_data = '邓锋,DEpi6V,task:carry_data_redis_df'
+        fs_data_list = fs_data.split(',')
+        redis_name = fs_data_list[2]
+        data = RedisHelper().get_client().rpop(name = redis_name)
+        if not data:
+            logger.info('[处理] 无待执行的扫描任务')
+            return
+        data = orjson.loads(data)
+        uid = str(uuid.uuid4())
+        file_path = os.path.join(CACHE_DIR, uid)
+        cls.data_handle(data, file_path, redis_name)
+        for filename in os.listdir(CACHE_DIR):
+            # every artifact for this task carries the uuid in its file name
+            if uid in filename:
+                file_path = os.path.join(CACHE_DIR, filename)
+                try:
+                    os.remove(file_path)
+                    logger.info(f"已删除文件: {file_path}")
+                except Exception as e:
+                    logger.error(f"删除文件时出错: {file_path}, 错误: {e}")
+        return
+
+
+def run():
+    scheduler = BlockingScheduler()
+    try:
+        logger.info(f"[处理] 开始启动")
+        scheduler.add_job(ConsumptionRecommend.run, trigger=IntervalTrigger(minutes=5))  # poll every 5 minutes
+        scheduler.start()
+    except KeyboardInterrupt:
+        pass
+    except Exception as e:
+        logger.error(f"[处理] 启动异常,异常信息:{e}")
+    finally:
+        scheduler.shutdown()
+
+
+if __name__ == '__main__':
+    run()

+ 284 - 0
workers/consumption_work_studio.py

@@ -0,0 +1,284 @@
+import os
+
+import sys
+import time
+import uuid
+from datetime import datetime
+
+import orjson
+from apscheduler.schedulers.blocking import BlockingScheduler
+from apscheduler.triggers.interval import IntervalTrigger
+from loguru import logger
+sys.path.append('/app')
+from utils.redis import RedisHelper
+from utils.aliyun_log import AliyunLogger
+from utils.aliyun_oss import Oss
+from utils.download_video import DownLoad
+from utils.feishu_utils import Feishu
+from utils.ffmpeg import FFmpeg
+from utils.google_ai_studio import GoogleAI
+from utils.gpt4o_mini_help import GPT4oMini
+from utils.piaoquan import PQ
+from utils.sql_help import sqlCollect
+from utils.tag_video import Tag
+from utils.tts_help import TTS
+
+
+
+ENV = os.getenv('ENV', 'dev')
+CACHE_DIR = '/app/cache/' if ENV == 'prod' else os.path.expanduser('~/Downloads/')
+
+class ConsumptionRecommend(object):
+    @classmethod
+    def insert_pq(cls, data, oss_object_key, title, cover):
+        logger.info(f"[内容分析] 开始写入票圈")
+
+        code = PQ.insert_piaoquantv(oss_object_key, title, '50322062', cover)
+        if not code:
+            logger.error(f"[内容分析] 写入票圈后台失败")
+            text = (
+                f"**渠道**: {data['channel']}\n"
+                f"**内容**: {data}\n"
+                f"**失败信息**: 写入票圈后台失败\n"
+            )
+            Feishu.finish_bot(text,
+                              "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                              f"【 内容理解-{data['channel']}失败通知 】")
+            return
+        logger.info(f"[内容分析] 写入票圈成功,返回视频id{code}")
+        tag_status = Tag.video_tag(code, "lev-供给,rol-机器,#str-搬运改造内容理解引导语实验_60")
+        Tag.video_tag(data["videoid"], "lev-供给,rol-机器,#str-搬运改造内容理解引导语base_61")
+
+        if tag_status == 0:
+            logger.info(f"[内容分析] 写入标签成功,后台视频ID为{code}")
+        try:
+            current_time = datetime.now()
+            formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")
+            sqlCollect.insert_machine_making_data(data["channel"], data["channel"], data["channel"],
+                                                  data["videoid"], data["videoid"], "50322062",
+                                                  title,
+                                                  code,
+                                                  formatted_time, title, oss_object_key)
+            pq_url = f'https://admin.piaoquantv.com/cms/post-detail/{code}/detail'  # in-site admin detail link for the new video
+            values = [
+                [
+                    data["videoid"],
+                    code,
+                    data["channel"],
+                    data["dt"],
+                    formatted_time,
+                    pq_url
+                ]
+            ]
+            Feishu.insert_columns("R4dLsce8Jhz9oCtDMr9ccpFHnbI", '1Ycd37', "ROWS", 1, 2)
+            time.sleep(0.5)
+            Feishu.update_values("R4dLsce8Jhz9oCtDMr9ccpFHnbI", '1Ycd37', "A2:Z2", values)
+            logger.info(f"[内容分析] 写入飞书成功")
+            return
+        except Exception as e:
+            logger.error(f"[内容分析] 写入飞书失败{e}")
+            return
+
+
+
+    @classmethod
+    def data_handle(cls, data, file_path):
+        video_id = data["videoid"]
+        AliyunLogger.logging(data["type"], "片尾引导", data["channel"], video_id, "扫描到一条视频", "2001", str(data))
+        AliyunLogger.logging(data["type"], "片尾引导", data["channel"], video_id, "符合规则等待改造", "2004", str(data))
+        logger.info(f"[内容分析] 获取{video_id}的视频链接")
+        video_path, cover_path, old_title = PQ.get_pq_oss_path(video_id)
+        if not video_path:
+            AliyunLogger.logging(data["type"], "片尾引导", data["channel"], video_id, "没有获取到视频链接", "3001",
+                                 str(data))
+            text = (
+                f"**渠道**: {data['channel']}\n"
+                f"**内容**: {data}\n"
+                f"**失败信息**: 没有获取到视频链接\n"
+            )
+            Feishu.finish_bot(text,
+                              "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                              f"【 内容理解-{data['channel']}失败通知 】")
+
+            return
+        video_url = f"http://rescdn.yishihui.com/{video_path}"
+        video_path = DownLoad.download_video(video_url, file_path, '', video_id)
+        if not os.path.exists(video_path) or os.path.getsize(video_path) == 0:
+            logger.error(f"[内容分析] {video_url}下载失败")
+            AliyunLogger.logging(data["type"], "片尾引导", data["channel"], video_id, "视频下载失败等待重新处理",
+                                 "3002",
+                                 str(data))
+            text = (
+                f"**渠道**: {data['channel']}\n"
+                f"**内容**: {data}\n"
+                f"**失败信息**: 视频下载失败\n"
+            )
+            Feishu.finish_bot(text,
+                              "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                              f"【 内容理解-{data['channel']}失败通知 】")
+            return
+        logger.info(f"[内容分析] {video_url}视频下载成功")
+        logger.info(f"[内容分析] {video_url}开始处理标题")
+        video_path = FFmpeg.video_640(video_path, file_path)
+        logger.info(f"[内容分析] 视频更改分辨率处理成功")
+        logger.info(f"[内容分析] 片尾引导-开始获取视频口播内容")
+        video_text = GoogleAI.run("AIzaSyCor0q5w37Dy6fGxloLlCT7KqyEFU3PWP8", video_path)
+        if not video_text:
+            AliyunLogger.logging(data["type"], "片尾引导", data["channel"], video_id, "片尾引导,获取口播文案失败",
+                                 "3003",
+                                 str(data))
+            text = (
+                f"**渠道**: {data['channel']}\n"
+                f"**内容**: {data}\n"
+                f"**失败信息**: 获取口播文案失败\n"
+            )
+            Feishu.finish_bot(text,
+                              "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                              f"【 内容理解-{data['channel']}失败通知 】")
+            return
+        logger.info(f"[内容分析] 片尾引导-开始获取AI片尾")
+        pw_srt_text = GPT4oMini.get_content_understanding_pw(video_text)
+        pw_url = TTS.get_pw_zm(pw_srt_text, 'zhifeng_emo')
+        if not pw_url:
+            logger.error(f"[内容分析] 片尾引导-片尾获取失败")
+            data["transform_rule"] = "仅改造"
+            AliyunLogger.logging(data["type"], "片尾引导", data["channel"], video_id, "片尾引导,片尾获取失败",
+                                 "3003",
+                                 str(data))
+            text = (
+                f"**渠道**: {data['channel']}\n"
+                f"**内容**: {data}\n"
+                f"**失败信息**: 片尾获取失败\n"
+            )
+            Feishu.finish_bot(text,
+                              "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                              f"【 内容理解-{data['channel']}失败通知 】")
+
+            return
+        logger.info(f"[内容分析] 片尾引导-片尾获取成功")
+        pw_srt = TTS.getSrt(pw_url)
+        if not pw_srt:
+            AliyunLogger.logging(data["type"], "片尾引导", data["channel"], video_id, "片尾引导,片尾音频获取失败",
+                                 "3003",
+                                 str(data))
+            text = (
+                f"**渠道**: {data['channel']}\n"
+                f"**内容**: {data}\n"
+                f"**失败信息**: 片尾音频获取失败\n"
+            )
+            Feishu.finish_bot(text,
+                              "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                              f"【 内容理解-{data['channel']}失败通知 】")
+            return
+        pw_mp3_path = TTS.download_mp3(pw_url, file_path)
+        if not pw_mp3_path:
+            AliyunLogger.logging(data["type"], "片尾引导", data["channel"], video_id, "片尾引导,片尾音频下载失败",
+                                 "3003",
+                                 str(data))
+            text = (
+                f"**渠道**: {data['channel']}\n"
+                f"**内容**: {data}\n"
+                f"**失败信息**: 片尾音频下载失败\n"
+            )
+            Feishu.finish_bot(text,
+                              "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                              f"【 内容理解-{data['channel']}失败通知 】")
+            return
+        logger.info(f"[内容分析] 片尾引导-片尾音频下载成功")
+        logger.info(f"[内容分析] 片尾引导-片尾获取最后一帧成功")
+        jpg_path = FFmpeg.video_png(video_path, file_path)  # 生成视频最后一帧jpg
+        pw_path = FFmpeg.pw_video(jpg_path, file_path, pw_mp3_path, pw_srt)  # 生成片尾视频
+        if not os.path.exists(pw_path) or os.path.getsize(pw_path) == 0:
+            logger.error(f"[内容分析] 片尾引导-片尾拼接失败")
+            AliyunLogger.logging(data["type"], "片尾引导", "", data["video_url"],
+                                 "片尾引导,片尾拼接失败", "3003", str(data))
+            text = (
+                f"**渠道**: {data['channel']}\n"
+                f"**内容**: {data}\n"
+                f"**失败信息**: 片尾拼接失败\n"
+            )
+            Feishu.finish_bot(text,
+                              "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                              f"【 内容理解-{data['channel']}失败通知 】")
+            return
+        logger.info(f"[内容分析] 片尾引导-合并开始拼接")
+        video_path = FFmpeg.h_b_video(video_path, pw_path, file_path)
+        if not os.path.exists(video_path) or os.path.getsize(video_path) == 0:
+            logger.error(f"[内容分析] 片尾引导-添加片尾失败")
+            AliyunLogger.logging(data["type"], "片尾引导", data["channel"], video_id,
+                                 "片尾引导,添加片尾失败", "3003", str(data))
+            text = (
+                f"**渠道**: {data['channel']}\n"
+                f"**内容**: {data}\n"
+                f"**失败信息**: 添加片尾失败\n"
+            )
+            Feishu.finish_bot(text,
+                              "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                              f"【 内容理解-{data['channel']}失败通知 】")
+            return
+        logger.info(f"[内容分析] 片尾引导-开始发送oss")
+        oss_object_key = Oss.stitching_sync_upload_oss(video_path, str(uuid.uuid4()))  # upload the finished video to OSS
+        status = oss_object_key.get("status")
+        if status != 200:
+            logger.error(f"[内容分析] 片尾引导-发送oss失败")
+            AliyunLogger.logging(data["type"], "片尾引导", data["channel"], video_id, "片尾引导,发送oss失败",
+                                 "3003",
+                                 str(data))
+            text = (
+                f"**渠道**: {data['channel']}\n"
+                f"**内容**: {data}\n"
+                f"**失败信息**: 发送oss失败\n"
+            )
+            Feishu.finish_bot(text,
+                              "https://open.feishu.cn/open-apis/bot/v2/hook/65bc5463-dee9-46d0-bc2d-ec6c49a8f3cd",
+                              f"【 内容理解-{data['channel']}失败通知 】")
+            return
+        logger.info(f"[内容分析] 片尾引导-发送oss成功")
+        oss_object_key = oss_object_key.get("oss_object_key")
+        cls.insert_pq(data, oss_object_key, old_title, cover_path)
+        return
+
+
+    @classmethod
+    def run(cls):
+        logger.info(f"[处理] 开始获取redis数据")
+        data = RedisHelper().get_client().rpop(name="task:carry_redis_by_nrfx")
+        if not data:
+            logger.info('[处理] 无待执行的扫描任务')
+            return
+        data = orjson.loads(data)
+        uid = str(uuid.uuid4())
+        file_path = os.path.join(CACHE_DIR, uid)
+        cls.data_handle(data, file_path)
+        for filename in os.listdir(CACHE_DIR):
+            # every artifact for this task carries the uuid in its file name
+            if uid in filename:
+                file_path = os.path.join(CACHE_DIR, filename)
+                try:
+                    os.remove(file_path)
+                    logger.info(f"已删除文件: {file_path}")
+                except Exception as e:
+                    logger.error(f"删除文件时出错: {file_path}, 错误: {e}")
+        return
+
+
+def run():
+    scheduler = BlockingScheduler()
+    try:
+        logger.info(f"[内容分析] 开始启动")
+        scheduler.add_job(ConsumptionRecommend.run, trigger=IntervalTrigger(minutes=5))  # poll every 5 minutes
+        scheduler.start()
+    except KeyboardInterrupt:
+        pass
+    except Exception as e:
+        logger.error(f"[内容分析] 启动异常,异常信息:{e}")
+    finally:
+        scheduler.shutdown()
+
+
+if __name__ == '__main__':
+    run()

+ 45 - 0
workers/select_work.py

@@ -0,0 +1,45 @@
+import datetime
+import os
+import sys
+from apscheduler.schedulers.blocking import BlockingScheduler
+from apscheduler.triggers.cron import CronTrigger
+from loguru import logger
+sys.path.append('/app')
+from utils.feishu_form import Material
+from utils.redis import RedisHelper
+
+
+class StartGetRecommend(object):
+    @classmethod
+    def run(cls):
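+        # dt is an hour bucket such as 2025010109; Material.get_carry_data presumably
+        # uses it to select the sheet rows scheduled for the current hour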
+        dt = int(datetime.datetime.now().strftime('%Y%m%d%H'))
+        fs_data = os.getenv("FS_DATA")
+        fs_data_list = fs_data.split(',')
+        name = fs_data_list[0]
+        fs_sheet = fs_data_list[1]
+        redis_name = fs_data_list[2]
+        logger.info(f"[FS] 开始获取{name},时区为{dt}")
+        data = Material.get_carry_data(dt, fs_sheet, name)
+        if not data:
+            logger.info(f"[FS] {name},时区为{dt}没有获取到数据")
+            return
+        RedisHelper().get_client().rpush(redis_name, *data)
+        logger.info(f"[FS] {name},时区为{dt}共获取{len(data)}条,写入成功")
+
+def run():
+    scheduler = BlockingScheduler()
+    try:
+        scheduler.add_job(StartGetRecommend.run, trigger=CronTrigger(minute=55, second=0))  # fetch once per hour, at minute 55
+        scheduler.start()
+    except KeyboardInterrupt:
+        pass
+    except Exception as e:
+        logger.error(f"[FS] 调度异常,异常信息:{e}")
+    finally:
+        scheduler.shutdown()
+
+
+if __name__ == '__main__':
+    run()
+

+ 36 - 0
workers/select_work_studio.py

@@ -0,0 +1,36 @@
+import sys
+from apscheduler.schedulers.blocking import BlockingScheduler
+from apscheduler.triggers.cron import CronTrigger
+from loguru import logger
+sys.path.append('/app')
+from utils.odps_data import OdpsDataCount
+from utils.redis import RedisHelper
+
+
+class StartGetRecommend(object):
+    @classmethod
+    def run_nrfx(cls):
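+        # pull candidate videos from ODPS and queue them on the same redis key that
+        # consumption_work_studio pops from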
+        logger.info(f"[ODPS] 开始获取内容分析数据")
+        data = OdpsDataCount.main()
+        if not data:
+            logger.info(f"[ODPS] 片尾内容分析没有获取到数据")
+            return
+        RedisHelper().get_client().rpush("task:carry_redis_by_nrfx", *data)
+        logger.info(f"[ODPS] 片尾内容分析共获取{len(data)}条,写入成功")
+
+
+def run():
+    scheduler = BlockingScheduler()
+    try:
+        scheduler.add_job(StartGetRecommend.run_nrfx, trigger=CronTrigger(minute=55, second=0))  # fetch once per hour, at minute 55
+        scheduler.start()
+    except KeyboardInterrupt:
+        pass
+    except Exception as e:
+        logger.error(f"[ODPS] 调度异常,异常信息:{e}")
+    finally:
+        scheduler.shutdown()
+
+
+if __name__ == '__main__':
+    run()