wangkun, 2 years ago
Current commit
02d5acde79
16 files changed: 1213 insertions, 0 deletions
  1. 3 0
      .idea/.gitignore
  2. 27 0
      README.MD
  3. Binary
      logs/.DS_Store
  4. Binary
      main/.DS_Store
  5. 3 0
      main/__init__.py
  6. Binary
      main/__pycache__/__init__.cpython-310.pyc
  7. Binary
      main/__pycache__/common.cpython-310.pyc
  8. Binary
      main/__pycache__/feishu_lib.cpython-310.pyc
  9. 136 0
      main/common.py
  10. 53 0
      main/demo.py
  11. 315 0
      main/feishu_lib.py
  12. 25 0
      main/run_zongjiao.py
  13. 380 0
      main/zongjiao.py
  14. 255 0
      main/zongjiao_publish.py
  15. Binary
      videos/.DS_Store
  16. 16 0
      zongjiao.sh

+ 3 - 0
.idea/.gitignore

@@ -0,0 +1,3 @@
+# Default ignored files
+/shelf/
+/workspace.xml

+ 27 - 0
README.MD

@@ -0,0 +1,27 @@
+# Religion-themed official account crawler: crawler_zongjiao
+1. git: https://git.yishihui.com/Server/crawler_zongjiao.git
+2. feishu: https://w42nne6hzg.feishu.cn/sheets/shtcn73NW0CyoOeF21HWO15KBsb
+
+#### Dependencies
+1. loguru==0.6.0
+2. oss2==2.15.0
+3. requests==2.27.1
+4. urllib3==1.26.9
+5. python==3.10
+
+#### Usage
+1. cd ./crawler_zongjiao
+2. python3 main/run_zongjiao.py
+
+#### Requirements
+2022/11/28
+1. Video duration: at least 50 seconds
+2. Video metrics (likes + plays): no requirement
+3. Video resolution: no requirement
+4. Publish time: unrestricted
+5. Filter words: 毛泽东、毛主席、周恩来、林彪、习近平、习大大、彭丽媛、拜登、普京、佩洛西、蔡英文、邪教 (more to be added later); see the sketch after this list
+6. Schedule:
+   1. sleep 1 minute after each account has been crawled;
+   2. sleep 1 hour after all accounts have been crawled;
+   3. keep crawling 24 hours a day
+7. Upload accounts: [26117622, 26117623, 26117624, 26117625, 26117626, 26117627, 26117628, 26117629, 26117630, 26117631]
Binary
logs/.DS_Store


Binary
main/.DS_Store


+ 3 - 0
main/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/11/28

Binary
main/__pycache__/__init__.cpython-310.pyc


Binary
main/__pycache__/common.cpython-310.pyc


Binary
main/__pycache__/feishu_lib.cpython-310.pyc


+ 136 - 0
main/common.py

@@ -0,0 +1,136 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/11/28
+"""
+公共方法,包含:生成log / 删除log / 下载方法 / 读取文件 / 统计下载数
+"""
+from datetime import date, timedelta
+from loguru import logger
+import datetime
+import os
+import time
+import requests
+import urllib3
+proxies = {"http": None, "https": None}
+
+
+class Common:
+    # Unified accessor for the current time <class 'datetime.datetime'>  2022-04-14 20:13:51.244472
+    now = datetime.datetime.now()
+    # Yesterday <class 'str'>  2022-04-13
+    yesterday = (date.today() + timedelta(days=-1)).strftime("%Y-%m-%d")
+    # Today <class 'datetime.date'>  2022-04-14
+    today = date.today()
+    # Tomorrow <class 'str'>  2022-04-15
+    tomorrow = (date.today() + timedelta(days=1)).strftime("%Y-%m-%d")
+
+    # Build a logger with the loguru module
+    @staticmethod
+    def logger(log_type):
+        """
+        Build a logger with the loguru module
+        """
+        # Log directory
+        log_dir = "./logs/"
+        log_path = os.getcwd() + os.sep + log_dir
+        if not os.path.isdir(log_path):
+            os.makedirs(log_path)
+
+        # Log file name
+        log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + '-' + str(log_type) + '.log'
+
+        # Do not print logs to the console
+        logger.remove(handler_id=None)
+
+        # rotation="500 MB": start a new file every 500 MB
+        # rotation="12:00": start a new file every day at 12:00
+        # rotation="1 week": start a new file every week
+        # retention="10 days": clean up logs older than 10 days
+        # Initialize the logger
+        logger.add(log_dir + log_name, level="INFO", rotation='00:00')
+
+        return logger
+
+    # Clean up logs, keeping the 10 most recent files
+    @classmethod
+    def del_logs(cls, log_type):
+        """
+        Remove redundant log files
+        :param log_type: log name suffix, used for the cleanup log entry
+        :return: keeps the 10 most recent logs
+        """
+        logs_dir = "./logs/"
+        if not os.path.exists(logs_dir):
+            os.mkdir(logs_dir)
+
+        all_files = sorted(os.listdir(logs_dir))
+        all_logs = []
+        for log in all_files:
+            name = os.path.splitext(log)[-1]
+            if name == ".log":
+                all_logs.append(log)
+
+        if len(all_logs) <= 10:
+            pass
+        else:
+            for file in all_logs[:len(all_logs) - 10]:
+                os.remove(logs_dir + file)
+        cls.logger(log_type).info("Redundant logs removed\n")
+
+    # Download a video or its cover image
+    @classmethod
+    def download_method(cls, log_type, text, d_name, d_url):
+        """
+        Download the cover: text == "cover" ; download the video: text == "video"
+        d_name: title of the video to download
+        d_url: cover image URL, or video playback URL
+        Save path: "./videos/{d_name}/"
+        """
+        videos_dir = "./videos/"
+        if not os.path.exists(videos_dir):
+            os.mkdir(videos_dir)
+        # First create a folder holding everything related to this video
+        video_dir = "./videos/" + d_name + "/"
+        if not os.path.exists(video_dir):
+            os.mkdir(video_dir)
+
+        # Download the video
+        if text == "video":
+            # Video URL to download
+            video_url = d_url
+            # Video file name
+            video_name = "video.mp4"
+
+            # Download the video (the request lives inside the try so a network error is also caught)
+            urllib3.disable_warnings()
+            try:
+                response = requests.get(video_url, stream=True, proxies=proxies, verify=False)
+                with open(video_dir + video_name, "wb") as f:
+                    for chunk in response.iter_content(chunk_size=10240):
+                        f.write(chunk)
+                cls.logger(log_type).info("==========Video download complete==========")
+            except Exception as e:
+                cls.logger(log_type).exception("Video download failed: {}\n", e)
+
+        # Download the cover
+        elif text == "cover":
+            # Cover URL to download
+            cover_url = d_url
+            # Cover file name
+            cover_name = "image.jpg"
+
+            # Download the cover
+            urllib3.disable_warnings()
+            try:
+                response = requests.get(cover_url, proxies=proxies, verify=False)
+                with open(video_dir + cover_name, "wb") as f:
+                    f.write(response.content)
+                cls.logger(log_type).info("==========Cover download complete==========")
+            except Exception as e:
+                cls.logger(log_type).exception("Cover download failed: {}\n", e)
+
+if __name__ == "__main__":
+    common = Common()
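
A minimal usage sketch for the helpers above (names taken from this file; the title and URL are hypothetical):

```python
from main.common import Common

log = Common.logger("demo")
log.info("today = {}, yesterday = {}", Common.today, Common.yesterday)

# Hypothetical title/URL, for illustration only; files land in ./videos/<title>/
Common.download_method("demo", "video", "some-title", "https://example.com/v.mp4")
Common.del_logs("demo")  # keep the 10 most recent ./logs/*.log files
```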

+ 53 - 0
main/demo.py

@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/11/29
+import ffmpeg
+import os
+import sys
+sys.path.append(os.getcwd())
+from main.feishu_lib import Feishu
+
+
+class Demo:
+    @classmethod
+    def get_sheet(cls, log_type, crawler, sheetid):
+        sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+        print(sheet)
+
+    @classmethod
+    def ffmpeg_test(cls, video_path):
+        # Probe a downloaded file and print its width/height/duration
+        probe = ffmpeg.probe(video_path)
+        video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
+        if video_stream is None:
+            print('No video stream found!')
+            return
+        width = int(video_stream['width'])
+        height = int(video_stream['height'])
+        duration = float(video_stream['duration'])
+        print(f'width:{width}')
+        print(f'height:{height}')
+        print(f'duration:{duration}')
+
+
+if __name__ == '__main__':
+    # Demo.get_sheet('demo', 'zongjiao', 'TOoMpj')
+    Demo.ffmpeg_test('/root/piaoquan_crawler/crawler_zongjiao/videos/ ▶此视频非常难得,发出去,很多人都还不知道!/video.mp4')
+    print('\n')
+    Demo.ffmpeg_test('./crawler_zongjiao/videos/ ▶此视频非常难得,发出去,很多人都还不知道!/video.mp4')
+    # Demo.get_video_info('g0118uklv86')
+
+    pass
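
One caveat with `ffmpeg.probe` (relevant to this demo and to `get_video_info_from_ffmpeg` in main/zongjiao.py below): some containers report duration only at the format level, not on the video stream. A hedged fallback sketch:

```python
import ffmpeg


def probe_duration(video_path: str) -> float:
    """Prefer the per-stream duration; fall back to the container-level value."""
    probe = ffmpeg.probe(video_path)
    stream = next((s for s in probe['streams'] if s['codec_type'] == 'video'), None)
    if stream is not None and 'duration' in stream:
        return float(stream['duration'])
    # Container-level fallback; some formats omit per-stream duration
    return float(probe['format']['duration'])
```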

+ 315 - 0
main/feishu_lib.py

@@ -0,0 +1,315 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/11/28
+import json
+import requests
+import urllib3
+from main.common import Common
+proxies = {"http": None, "https": None}
+
+
+class Feishu:
+    """
+    Edit Feishu spreadsheet documents
+    """
+    # "Kanyikan" crawler data sheet
+    kanyikan_url = "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?"
+    # Kuaishou crawler data sheet
+    kuaishou_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnp4SaJt37q6OOOrYzPMjQkg?"
+    # Weishi crawler data sheet
+    weishi_url = "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?"
+    # Xiaoniangao crawler data sheet
+    xiaoniangao_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?"
+    # Data monitoring sheet
+    crawler_monitor = "https://w42nne6hzg.feishu.cn/sheets/shtcnlZWYazInhf7Z60jkbLRJyd?"
+    # "Benshan Zhufu" data sheet
+    crawler_benshanzhufu = "https://w42nne6hzg.feishu.cn/sheets/shtcnGh2rrsPYM4iVNEBO7OqWrb?"
+    # Official account crawler sheet
+    gzh_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnexNXnpDLHhARw0QdiwbYuA?"
+    # Religion crawler sheet
+    zongjiao = 'https://w42nne6hzg.feishu.cn/sheets/shtcn73NW0CyoOeF21HWO15KBsb?'
+
+    # Feishu spreadsheet token
+    @classmethod
+    def spreadsheettoken(cls, crawler):
+        """
+        :param crawler: which crawler
+        """
+        if crawler == "kanyikan":
+            return "shtcngRPoDYAi24x52j2nDuHMih"
+        elif crawler == "kuaishou":
+            return "shtcnp4SaJt37q6OOOrYzPMjQkg"
+        elif crawler == "weishi":
+            return "shtcn5YSWg91JfVGzj0SFZIRRPh"
+        elif crawler == "xiaoniangao":
+            return "shtcnYxiyQ1wLklo1W5Kdqc9cGh"
+        elif crawler == "monitor":
+            return "shtcnlZWYazInhf7Z60jkbLRJyd"
+        elif crawler == "bszf":
+            return "shtcnGh2rrsPYM4iVNEBO7OqWrb"
+        elif crawler == "gzh":
+            return "shtcnexNXnpDLHhARw0QdiwbYuA"
+        elif crawler == "zongjiao":
+            return "shtcn73NW0CyoOeF21HWO15KBsb"
+
+    # Get a Feishu API token
+    @classmethod
+    def get_token(cls, log_type):
+        """
+        Get a Feishu API token
+        :return:
+        """
+        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
+        post_data = {"app_id": "cli_a13ad2afa438d00b",  # app credentials of the publishing backend account
+                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
+
+        try:
+            urllib3.disable_warnings()
+            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
+            tenant_access_token = response.json()["tenant_access_token"]
+            return tenant_access_token
+        except Exception as e:
+            Common.logger(log_type).error("Failed to get Feishu API token: {}", e)
+
+    # Get spreadsheet metadata
+    @classmethod
+    def get_metainfo(cls, log_type, crawler):
+        """
+        Get spreadsheet metadata
+        :return:
+        """
+        get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                           + cls.spreadsheettoken(crawler) + "/metainfo"
+
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            "extFields": "protectedRange",  # extra fields to return; extFields=protectedRange returns protected row/column info
+            "user_id_type": "open_id"  # type of user id to return: open_id or union_id
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
+            response = json.loads(r.content.decode("utf8"))
+            return response
+        except Exception as e:
+            Common.logger(log_type).error("Failed to get spreadsheet metadata: {}", e)
+
+    # Read all values from a worksheet
+    @classmethod
+    def get_values_batch(cls, log_type, crawler, sheetid):
+        """
+        Read all values from a worksheet
+        :param log_type: which log to use
+        :param crawler: which crawler
+        :param sheetid: which worksheet
+        :return: all values
+        """
+        get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                               + cls.spreadsheettoken(crawler) + "/values_batch_get"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            # Multiple query ranges, e.g. url?ranges=range1,range2, where each range combines a sheetId with a cell range
+            "ranges": sheetid,
+
+            # valueRenderOption=ToString returns plain-text values (except numeric types);
+            # valueRenderOption=FormattedValue computes and formats cells;
+            # valueRenderOption=Formula returns the formula itself when a cell contains one;
+            # valueRenderOption=UnformattedValue computes but does not format cells
+            "valueRenderOption": "ToString",
+
+            # dateTimeRenderOption=FormattedString computes and formats datetimes according to their format (numbers excluded), returning the formatted string.
+            "dateTimeRenderOption": "",
+
+            # type of user id to return: open_id or union_id
+            "user_id_type": "open_id"
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
+            # print(r.text)
+            response = json.loads(r.content.decode("utf8"))
+            values = response["data"]["valueRanges"][0]["values"]
+            return values
+        except Exception as e:
+            Common.logger(log_type).error("Failed to read worksheet values: {}", e)
+
+    # Insert rows or columns into a worksheet
+    @classmethod
+    def insert_columns(cls, log_type, crawler, sheetid, majordimension, startindex, endindex):
+        """
+        Insert rows or columns into a worksheet
+        :param log_type: log path
+        :param crawler: which crawler's document
+        :param sheetid: which worksheet
+        :param majordimension: rows or columns, ROWS / COLUMNS
+        :param startindex: start position
+        :param endindex: end position
+        """
+        insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                             + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "dimension": {
+                "sheetId": sheetid,
+                "majorDimension": majordimension,  # default ROWS; ROWS or COLUMNS
+                "startIndex": startindex,  # start position
+                "endIndex": endindex  # end position
+            },
+            "inheritStyle": "AFTER"  # BEFORE or AFTER; omit to not inherit style
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("Insert rows/columns: {}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("Failed to insert rows/columns: {}", e)
+
+    # Write values
+    @classmethod
+    def update_values(cls, log_type, crawler, sheetid, ranges, values):
+        """
+        Write values
+        :param log_type: log path
+        :param crawler: which crawler's document
+        :param sheetid: which worksheet
+        :param ranges: cell range
+        :param values: the values to write, as a list
+        """
+        update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                            + cls.spreadsheettoken(crawler) + "/values_batch_update"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "valueRanges": [
+                {
+                    "range": sheetid + "!" + ranges,
+                    "values": values
+                },
+            ],
+        }
+
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("Write values: {}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("Failed to write values: {}", e)
+
+    # Merge cells
+    @classmethod
+    def merge_cells(cls, log_type, crawler, sheetid, ranges):
+        """
+        Merge cells
+        :param log_type: log path
+        :param crawler: which crawler
+        :param sheetid: which worksheet
+        :param ranges: cell range to merge
+        """
+        merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                          + cls.spreadsheettoken(crawler) + "/merge_cells"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+
+        body = {
+            "range": sheetid + "!" + ranges,
+            "mergeType": "MERGE_ROWS"
+        }
+
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("Merge cells: {}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("Failed to merge cells: {}", e)
+
+    # Read a single cell
+    @classmethod
+    def get_range_value(cls, log_type, crawler, sheetid, cell):
+        """
+        Read the contents of a cell
+        :param log_type: log path
+        :param crawler: which crawler
+        :param sheetid: which worksheet
+        :param cell: which cell
+        :return: cell contents
+        """
+        get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                              + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            # valueRenderOption=ToString returns plain-text values (except numeric types);
+            # valueRenderOption=FormattedValue computes and formats cells;
+            # valueRenderOption=Formula returns the formula itself when a cell contains one;
+            # valueRenderOption=UnformattedValue computes but does not format cells.
+            "valueRenderOption": "FormattedValue",
+
+            # dateTimeRenderOption=FormattedString computes and formats datetimes according to their format (numbers excluded), returning the formatted string.
+            "dateTimeRenderOption": "",
+
+            # type of user id to return: open_id or union_id
+            "user_id_type": "open_id"
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
+            # print(r.text)
+            return r.json()["data"]["valueRange"]["values"][0]
+        except Exception as e:
+            Common.logger(log_type).error("Failed to read cell: {}", e)
+
+    # Delete rows or columns; ROWS or COLUMNS
+    @classmethod
+    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
+        """
+        Delete rows or columns
+        :param log_type: log path
+        :param crawler: which crawler
+        :param sheetid: worksheet
+        :param major_dimension: default ROWS; ROWS or COLUMNS
+        :param startindex: start position
+        :param endindex: end position
+        :return:
+        """
+        dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                              + cls.spreadsheettoken(crawler) + "/dimension_range"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "dimension": {
+                "sheetId": sheetid,
+                "majorDimension": major_dimension,
+                "startIndex": startindex,
+                "endIndex": endindex
+                }
+            }
+        try:
+            urllib3.disable_warnings()
+            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("Delete rows/columns: {}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("Failed to delete rows/columns: {}", e)
+
+
+if __name__ == "__main__":
+    print(Feishu.get_token('dev'))
+    # print(Feishu.get_token('gzh'))
+
+    pass
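
A minimal usage sketch for this wrapper, mirroring the calls made from main/zongjiao.py (the sheet ids `7cac48` and `xf9wC2` and the range `F2:Z2` come from that file; the written values are placeholders):

```python
from main.feishu_lib import Feishu

# Read the account-list worksheet of the "zongjiao" spreadsheet
users = Feishu.get_values_batch('demo', 'zongjiao', '7cac48')
print(users)

# Prepend a row to the downloaded-videos worksheet, then fill it
Feishu.insert_columns('demo', 'zongjiao', 'xf9wC2', 'ROWS', 1, 2)
Feishu.update_values('demo', 'zongjiao', 'xf9wC2', 'F2:Z2', [['hello', 'world']])
```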

+ 25 - 0
main/run_zongjiao.py

@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/11/30
+import os
+import sys
+import time
+sys.path.append(os.getcwd())
+from main.common import Common
+from main.zongjiao import ZongJiao
+
+
+class Main:
+    @classmethod
+    def main(cls, log_type, env):
+        while True:
+            Common.logger(log_type).info('Start crawling religious official-account videos\n')
+            ZongJiao.get_all_videos(log_type, env)
+            Common.del_logs(log_type)
+            ZongJiao.begin = 0
+            Common.logger(log_type).info('Sleeping 1 hour\n')
+            time.sleep(3600)
+
+
+if __name__ == '__main__':
+    Main.main('zongjiao', 'prod')

+ 380 - 0
main/zongjiao.py

@@ -0,0 +1,380 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/11/28
+import json
+# import os
+# import sys
+import random
+import shutil
+import time
+import ffmpeg
+import requests
+import urllib3
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.common.by import By
+from selenium import webdriver
+from main.common import Common
+from main.feishu_lib import Feishu
+from main.zongjiao_publish import Publish
+
+
+class ZongJiao:
+    # Pagination offset
+    begin = 0
+
+    # Get width/height/duration of a downloaded video
+    @classmethod
+    def get_video_info_from_ffmpeg(cls, log_type, video_path):
+        probe = ffmpeg.probe(video_path)
+        video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
+        if video_stream is None:
+            Common.logger(log_type).info('No video stream found!')
+            return
+        width = int(video_stream['width'])
+        height = int(video_stream['height'])
+        duration = float(video_stream['duration'])
+        return width, height, duration
+
+    # Filter-word list
+    @classmethod
+    def filter_words(cls, log_type):
+        try:
+            filter_word_list = []
+            filter_sheet = Feishu.get_values_batch(log_type, 'zongjiao', 'KeAfT7')
+            for x in filter_sheet:
+                for y in x:
+                    if y is not None:
+                        filter_word_list.append(y)
+            return filter_word_list
+        except Exception as e:
+            Common.logger(log_type).info(f'filter_words error: {e}\n')
+
+    # Get token
+    @classmethod
+    def get_token(cls, log_type):
+        try:
+            sheet = Feishu.get_values_batch(log_type, "zongjiao", "LpKzTD")
+            token = sheet[0][1]
+            cookie = sheet[1][1]
+            token_dict = {'token': token, 'cookie': cookie}
+            return token_dict
+        except Exception as e:
+            Common.logger(log_type).error(f"get_cookie_token error: {e}\n")
+
+    # Get a user's fakeid
+    @classmethod
+    def get_fakeid(cls, log_type, user, index):
+        try:
+            url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
+            headers = {
+                "accept": "*/*",
+                "accept-encoding": "gzip, deflate, br",
+                "accept-language": "zh-CN,zh;q=0.9",
+                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                           "&type=77&createType=5&token=1011071554&lang=zh_CN",
+                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "sec-ch-ua-mobile": "?0",
+                "sec-ch-ua-platform": '"Windows"',
+                "sec-fetch-dest": "empty",
+                "sec-fetch-mode": "cors",
+                "sec-fetch-site": "same-origin",
+                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                "x-requested-with": "XMLHttpRequest",
+                'cookie': cls.get_token(log_type)['cookie'],
+            }
+            params = {
+                "action": "search_biz",
+                "begin": "0",
+                "count": "5",
+                "query": str(user),
+                "token": cls.get_token(log_type)['token'],
+                "lang": "zh_CN",
+                "f": "json",
+                "ajax": "1",
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, verify=False)
+            if "list" not in r.json() or len(r.json()["list"]) == 0:
+                Common.logger(log_type).warning(f"get_fakeid:{r.text}, sleeping 3-5 minutes at random\n")
+                time.sleep(random.randint(60 * 3, 60 * 5))
+            else:
+                fakeid = r.json()["list"][int(index) - 1]["fakeid"]
+                head_url = r.json()["list"][int(index) - 1]["round_head_img"]
+                fakeid_dict = {'fakeid': fakeid, 'head_url': head_url}
+                return fakeid_dict
+        except Exception as e:
+            Common.logger(log_type).error(f"get_fakeid异常:{e}\n")
+
+    # Get the Tencent Video download URL
+    @classmethod
+    def get_tencent_video_url(cls, log_type, video_id):
+        try:
+            url = 'https://vv.video.qq.com/getinfo?vids='+str(video_id)+'&platform=101001&charge=0&otype=json'
+            response = requests.get(url=url).text.replace('QZOutputJson=', '').replace('"};', '"}')
+            response = json.loads(response)
+            url = response['vl']['vi'][0]['ul']['ui'][0]['url']
+            fvkey = response['vl']['vi'][0]['fvkey']
+            video_url = url + str(video_id) + '.mp4?vkey=' + fvkey
+            return video_url
+        except Exception as e:
+            Common.logger(log_type).error(f"get_tencent_video_url异常:{e}\n")
+
+    @classmethod
+    def get_video_url(cls, log_type, article_url):
+        try:
+            # Enable performance logging for the Chrome session
+            ca = DesiredCapabilities.CHROME
+            ca["goog:loggingPrefs"] = {"performance": "ALL"}
+
+            # Run headless, without opening a browser window
+            chrome_options = webdriver.ChromeOptions()
+            chrome_options.add_argument("headless")
+            chrome_options.add_argument('user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
+            chrome_options.add_argument("--no-sandbox")
+
+            # Initialize the webdriver
+            # Common.logger(log_type).info('Initializing webdriver')
+            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options,
+                                      service=Service('/root/chrome/chromedriver'))
+            # driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service('/Users/wangkun/Downloads/chromedriver_v107/chromedriver'))
+
+            driver.implicitly_wait(10)
+            # Common.logger(log_type).info('Opening the article link')
+            driver.get(article_url)
+            time.sleep(5)
+
+            if len(driver.find_elements(By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]')) != 0:
+                video_url = driver.find_element(
+                    By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]').get_attribute('src')
+            elif len(driver.find_elements(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]')) != 0:
+                iframe = driver.find_element(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]').get_attribute('src')
+                video_id = iframe.split('vid=')[-1].split('&')[0]
+                video_url = cls.get_tencent_video_url(log_type, video_id)
+            else:
+                video_url = 0
+
+            # Quit the browser so repeated calls do not leak Chrome processes
+            driver.quit()
+            return video_url
+        except Exception as e:
+            Common.logger(log_type).info(f'get_video_url error: {e}\n')
+
+    # Get the article list
+    @classmethod
+    def get_articles(cls, log_type, user, index, env):
+        fakeid_dict = cls.get_fakeid(log_type, user, index)
+        if fakeid_dict is None:
+            # fakeid lookup failed (rate-limited or user not found); skip this user
+            return
+        while True:
+            # try:
+            url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+            headers = {
+                "accept": "*/*",
+                "accept-encoding": "gzip, deflate, br",
+                "accept-language": "zh-CN,zh;q=0.9",
+                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                           "&type=77&createType=5&token=" + str(cls.get_token(log_type)['token']) + "&lang=zh_CN",
+                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "sec-ch-ua-mobile": "?0",
+                "sec-ch-ua-platform": '"Windows"',
+                "sec-fetch-dest": "empty",
+                "sec-fetch-mode": "cors",
+                "sec-fetch-site": "same-origin",
+                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                "x-requested-with": "XMLHttpRequest",
+                'cookie': cls.get_token(log_type)['cookie'],
+            }
+            params = {
+                "action": "list_ex",
+                "begin": str(cls.begin),
+                "count": "5",
+                "fakeid": fakeid_dict['fakeid'],
+                "type": "9",
+                "query": "",
+                "token": str(cls.get_token(log_type)['token']),
+                "lang": "zh_CN",
+                "f": "json",
+                "ajax": "1",
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, verify=False)
+            cls.begin += 5
+            if 'app_msg_list' not in r.json():
+                Common.logger(log_type).warning(f"get_gzh_url:{r.text}\n")
+                break
+            elif len(r.json()['app_msg_list']) == 0:
+                Common.logger(log_type).info('No more videos\n')
+                # Stop paging once the account has no more articles
+                break
+            else:
+                app_msg_list = r.json()['app_msg_list']
+                for article in app_msg_list:
+                    # title
+                    if 'title' in article:
+                        title = article['title'].replace('/', '').replace('\n', '')\
+                            .replace('.', '').replace('“', '').replace('”', '').replace(' ', '')
+                    else:
+                        title = 0
+
+                    # aid
+                    if 'aid' in article:
+                        aid = article['aid']
+                    else:
+                        aid = 0
+
+                    # create_time
+                    if 'create_time' in article:
+                        create_time = article['create_time']
+                    else:
+                        create_time = 0
+
+                    head_url = fakeid_dict['head_url']
+
+                    # cover_url
+                    if 'cover' in article:
+                        cover_url = article['cover']
+                    else:
+                        cover_url = 0
+
+                    # article_url
+                    if 'link' in article:
+                        article_url = article['link']
+                    else:
+                        article_url = 0
+
+                    video_url = cls.get_video_url(log_type, article_url)
+
+                    Common.logger(log_type).info(f"title:{title}")
+                    # Common.logger(log_type).info(f"aid:{aid}, type{type(aid)}")
+                    # Common.logger(log_type).info("create_time:{}", create_time)
+                    # Common.logger(log_type).info("head_url:{}", head_url)
+                    # Common.logger(log_type).info("cover_url:{}", cover_url)
+                    Common.logger(log_type).info(f"article_url:{article_url}")
+                    Common.logger(log_type).info(f"video_url:{video_url}")
+
+                    video_dict = {
+                        'video_title': title,
+                        'aid': aid,
+                        'create_time': create_time,
+                        'user_name': user,
+                        'user_id': fakeid_dict['fakeid'],
+                        'head_url': head_url,
+                        'cover_url': cover_url,
+                        'article_url': article_url,
+                        'video_url': video_url
+                    }
+                    cls.download_publish(log_type, video_dict, env)
+
+                Common.logger(log_type).info('Sleeping 10 seconds\n')
+                time.sleep(10)
+            # except Exception as e:
+            #     Common.logger(log_type).error("get_gzh_url error: {}\n", e)
+
+    # Download / upload
+    @classmethod
+    def download_publish(cls, log_type, video_dict, env):
+        # try:
+        if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
+            Common.logger(log_type).info("Article flagged for violating relevant laws and policies\n")
+        # Title filter-word check
+        elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type)):
+            Common.logger(log_type).info("Title hit a filter word\n")
+        # Already-downloaded check
+        elif video_dict['aid'] in [x for y in Feishu.get_values_batch(log_type, 'zongjiao', 'xf9wC2') for x in y]:
+            Common.logger(log_type).info("Video already downloaded\n")
+        else:
+            # Download the video
+            Common.download_method(log_type, "video", video_dict['video_title'], video_dict['video_url'])
+            # Get the video's width/height/duration
+            video_info = cls.get_video_info_from_ffmpeg(log_type, "./videos/" + video_dict['video_title'] + "/video.mp4")
+            video_width = str(video_info[0])
+            video_height = str(video_info[1])
+            duration = video_info[2]
+            # Videos shorter than 50 s are deleted outright
+            if int(duration) < 50:
+                # Remove the video folder
+                shutil.rmtree("./videos/" + video_dict['video_title'] + "/")
+                Common.logger(log_type).info(f"Duration {int(duration)}s is under 50s, folder deleted\n")
+                return
+            else:
+                # Download the cover
+                Common.download_method(log_type, 'cover', video_dict['video_title'], video_dict['cover_url'])
+                # Save the video info to "./videos/{video_title}/info.txt"
+                with open("./videos/" + video_dict['video_title'] + "/" + "info.txt", "a", encoding="UTF-8") as f_a:
+                    f_a.write(str(video_dict['aid']) + "\n" +
+                              video_dict['video_title'] + "\n" +
+                              str(int(duration)) + "\n" +
+                              '0' + "\n" +
+                              '0' + "\n" +
+                              '0' + "\n" +
+                              '0' + "\n" +
+                              str(video_width)+'*'+str(video_height) + "\n" +
+                              str(video_dict['create_time']) + "\n" +
+                              video_dict['user_name'] + "\n" +
+                              video_dict['head_url'] + "\n" +
+                              video_dict['video_url'] + "\n" +
+                              video_dict['cover_url'] + "\n" +
+                              "zongjiao"+str(time.time()) + "\n")
+                Common.logger(log_type).info("==========Video info saved to info.txt==========")
+
+            # Upload the video
+            Common.logger(log_type).info("Start uploading the video")
+            our_video_id = Publish.upload_and_publish(log_type, env, "play")
+            if env == 'prod':
+                our_video_link = "https://admin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info"
+            else:
+                our_video_link = "https://testadmin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info"
+            Common.logger(log_type).info("Video upload complete")
+
+            # Save the video ID to the cloud document
+            Common.logger(log_type).info("Saving video info to the cloud document")
+            # Video-ID worksheet: insert a first row
+            Feishu.insert_columns(log_type, "zongjiao", "xf9wC2", "ROWS", 1, 2)
+            # Video-ID worksheet: write data into the first row
+            upload_time = int(time.time())
+            values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(upload_time)),
+                       "宗教公众号",
+                       video_dict['video_title'],
+                       video_dict['aid'],
+                       our_video_link,
+                       int(duration),
+                       str(video_width)+'*'+str(video_height),
+                       time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(video_dict['create_time'])),
+                       video_dict['user_name'],
+                       video_dict['user_id'],
+                       video_dict['head_url'],
+                       video_dict['cover_url'],
+                       video_dict['article_url'],
+                       video_dict['video_url']]]
+            time.sleep(1)
+            Feishu.update_values(log_type, "zongjiao", "xf9wC2", "F2:Z2", values)
+            Common.logger(log_type).info("Video downloaded/uploaded successfully\n")
+        # except Exception as e:
+        #     Common.logger(log_type).error(f"download_publish error: {e}\n")
+
+    @classmethod
+    def get_all_videos(cls, log_type, env):
+        # try:
+        user_sheet = Feishu.get_values_batch(log_type, 'zongjiao', '7cac48')
+        for i in range(2, len(user_sheet)):
+            user_name = user_sheet[i][2]
+            index = user_sheet[i][5]
+            Common.logger(log_type).info(f'Fetching videos from the official account {user_name}\n')
+            cls.get_articles(log_type, user_name, index, env)
+            cls.begin = 0
+            Common.logger(log_type).info('Sleeping 1 minute')
+            time.sleep(60)
+        # except Exception as e:
+        #     Common.logger(log_type).info(f'get_all_videos error: {e}\n')
+
+
+if __name__ == "__main__":
+    print(ZongJiao.get_video_info_from_ffmpeg('demo', ''))
+    # print(ZongJiao.filter_words('zongjiao'))
+    # print(ZongJiao.get_token('zongjiao'))
+    # print(ZongJiao.get_fakeid('zongjao', '圣经讲道集', 1))
+    # ZongJiao.get_articles('zongjao', '圣经讲道集', 1)
+    # ZongJiao.get_all_videos('zongjiao', 'dev')
+
+    pass
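
For reference, `download_publish` above and `upload_and_publish` in main/zongjiao_publish.py communicate through the 14-line info.txt written per video; the publisher only consumes lines 0, 1, 2 and 8. A sketch of that mapping (field names taken from the publish request built in zongjiao_publish.py):

```python
# info.txt line index -> publish request field (other lines are carried but unread)
INFO_TXT_LAYOUT = {
    0: 'crawlerSrcId',                # aid
    1: 'title',                       # cleaned video title
    2: 'totalTime',                   # duration in seconds
    # 3-6: '0' placeholders (unused counters)
    # 7: resolution "width*height"
    8: 'crawlerSrcPublishTimestamp',  # original create_time
    # 9-12: user_name / head_url / video_url / cover_url
    # 13: "zongjiao" + crawl timestamp
}
```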

+ 255 - 0
main/zongjiao_publish.py

@@ -0,0 +1,255 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/11/28
+import json
+import os
+import random
+import shutil
+import time
+import oss2
+import requests
+import urllib3
+from main.common import Common
+proxies = {"http": None, "https": None}
+
+
+class Publish:
+    @classmethod
+    def publish_video_dev(cls, log_type, request_data):
+        """
+        loginUid  in-site uid (random)
+        appType  default: 888888
+        crawlerSrcId  off-site video ID
+        crawlerSrcCode  channel (custom, e.g. KYK)
+        crawlerSrcPublishTimestamp  original publish time of the video
+        crawlerTaskTimestamp  crawler task creation time (may be the current time)
+        videoPath  OSS path of the video
+        coverImgPath  OSS path of the video cover
+        title  title
+        totalTime  video duration
+        viewStatus  validity status of the video, default 1
+        versionCode  version, default 1
+        :return:
+        """
+        # Common.logger().info('publish request data: {}'.format(request_data))
+        result = cls.request_post('https://videotest.yishihui.com/longvideoapi/crawler/video/send', request_data)
+        # Common.logger(log_type).info('publish result: {}'.format(result))
+        # Check the result code before reading data.id, which is absent on failure
+        if result['code'] != 0:
+            Common.logger(log_type).error('publish failure msg = {}'.format(result['msg']))
+            return None
+        video_id = result["data"]["id"]
+        # Common.logger(log_type).info('video_id: {}'.format(video_id))
+        Common.logger(log_type).info('publish success video_id = : {}'.format(request_data['crawlerSrcId']))
+        return video_id
+
+    @classmethod
+    def publish_video_prod(cls, log_type, request_data):
+        """
+        loginUid  in-site uid (random)
+        appType  default: 888888
+        crawlerSrcId  off-site video ID
+        crawlerSrcCode  channel (custom, e.g. KYK)
+        crawlerSrcPublishTimestamp  original publish time of the video
+        crawlerTaskTimestamp  crawler task creation time (may be the current time)
+        videoPath  OSS path of the video
+        coverImgPath  OSS path of the video cover
+        title  title
+        totalTime  video duration
+        viewStatus  validity status of the video, default 1
+        versionCode  version, default 1
+        :return:
+        """
+        result = cls.request_post('https://longvideoapi.piaoquantv.com/longvideoapi/crawler/video/send', request_data)
+        # Common.logger(log_type).info('publish result: {}'.format(result))
+        # Check the result code before reading data.id, which is absent on failure
+        if result['code'] != 0:
+            Common.logger(log_type).error('publish failure msg = {}'.format(result['msg']))
+            return None
+        video_id = result["data"]["id"]
+        # Common.logger(log_type).info('video_id: {}'.format(video_id))
+        Common.logger(log_type).info('publish success video_id = : {}'.format(request_data['crawlerSrcId']))
+        return video_id
+
+    @classmethod
+    def request_post(cls, request_url, request_data):
+        """
+        post 请求 HTTP接口
+        :param request_url: 接口URL
+        :param request_data: 请求参数
+        :return: res_data json格式
+        """
+        urllib3.disable_warnings()
+        response = requests.post(url=request_url, data=request_data, proxies=proxies, verify=False)
+        if response.status_code == 200:
+            res_data = json.loads(response.text)
+            return res_data
+
+    # The code below covers basic file upload, download, listing, and deletion.
+
+    # First initialize AccessKeyId, AccessKeySecret, Endpoint and so on,
+    # either from environment variables or by replacing placeholders such as "<your AccessKeyId>" with real values.
+    #
+    # Taking the Hangzhou region as an example, the Endpoint can be:
+    #   http://oss-cn-hangzhou.aliyuncs.com
+    #   https://oss-cn-hangzhou.aliyuncs.com
+    # for HTTP and HTTPS access respectively.
+    access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', 'LTAIP6x1l3DXfSxm')
+    access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', 'KbTaM9ars4OX3PMS6Xm7rtxGr1FLon')
+    bucket_name = os.getenv('OSS_TEST_BUCKET', 'art-pubbucket')
+    endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-hangzhou-internal.aliyuncs.com')
+    # endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-hangzhou.aliyuncs.com')
+
+    # Confirm all the parameters above are filled in correctly
+    for param in (access_key_id, access_key_secret, bucket_name, endpoint):
+        assert '<' not in param, 'Please set parameter: ' + param
+
+    # Create a Bucket object; all Object-related APIs go through it
+    bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
+
+    """
+    Processing flow:
+    1. Periodically (once a day at 1 a.m.) iterate over the videos directory. Layout: videos -> per-video folder -> video file + cover image + info file
+    2. Upload the video file and cover to OSS
+    - video OSS path:  longvideo/crawler_local/video/prod/<file name>
+    - cover OSS path:  longvideo/crawler_local/image/prod/<file name>
+    3. Publish the video
+    - read the info file and call the publish endpoint
+    """
+    # env / date, e.g. 20220225 / file name
+    oss_file_path_video = 'longvideo/crawler_local/video/{}/{}/{}'
+    oss_file_path_image = 'longvideo/crawler_local/image/{}/{}/{}'
+
+    @classmethod
+    def put_file(cls, log_type, oss_file, local_file):
+        cls.bucket.put_object_from_file(oss_file, local_file)
+        Common.logger(log_type).info("put oss file = {}, local file = {} success".format(oss_file, local_file))
+
+    # Remove a local file
+    @classmethod
+    def remove_local_file(cls, log_type, local_file):
+        os.remove(local_file)
+        Common.logger(log_type).info("remove local file = {} success".format(local_file))
+
+    # Remove a local directory
+    @classmethod
+    def remove_local_file_dir(cls, log_type, local_file):
+        os.rmdir(local_file)
+        Common.logger(log_type).info("remove local file dir = {} success".format(local_file))
+
+    local_file_path = './videos'
+    video_file = 'video'
+    image_file = 'image'
+    info_file = 'info'
+    uids_dev_up = [6267140, 6267141]
+    uids_dev_play = [6267140, 6267141]
+    uids_prod_up = [26117622, 26117623, 26117624, 26117625, 26117626, 26117627, 26117628, 26117629, 26117630, 26117631]
+    uids_prod_play = [26117622, 26117623, 26117624, 26117625, 26117626, 26117627, 26117628, 26117629, 26117630, 26117631]
+
+    @classmethod
+    def upload_and_publish(cls, log_type, env, job):
+        """
+        Upload videos to OSS and publish them
+        :param log_type: which log to use
+        :param env: test environment: dev; production: prod
+        :param job: trending board: up; play count: play
+        """
+        Common.logger(log_type).info("upload_and_publish starting...")
+        today = time.strftime("%Y%m%d", time.localtime())
+        # All video folders under the videos directory
+        files = os.listdir(cls.local_file_path)
+        for f in files:
+            try:
+                # A single video folder
+                fi_d = os.path.join(cls.local_file_path, f)
+                # Confirm it is a video folder
+                if os.path.isdir(fi_d):
+                    Common.logger(log_type).info('dir = {}'.format(fi_d))
+                    # List all files in the video folder
+                    dir_files = os.listdir(fi_d)
+                    data = {'appType': '888888',
+                            'crawlerSrcCode': 'ZONGJIAO',
+                            'viewStatus': '1',
+                            'versionCode': '1'}
+                    now_timestamp = int(round(time.time() * 1000))
+                    data['crawlerTaskTimestamp'] = str(now_timestamp)
+                    global uid
+                    if env == "dev" and job == "up":
+                        uid = str(random.choice(cls.uids_dev_up))
+                    elif env == "dev" and job == "play":
+                        uid = str(random.choice(cls.uids_dev_play))
+                    elif env == "prod" and job == "up":
+                        uid = str(random.choice(cls.uids_prod_up))
+                    elif env == "prod" and job == "play":
+                        uid = str(random.choice(cls.uids_prod_play))
+                    data['loginUid'] = uid
+                    # All files under the single video folder
+                    for fi in dir_files:
+                        # Full path of each file in the video folder
+                        fi_path = fi_d + '/' + fi
+                        Common.logger(log_type).info('dir fi_path = {}'.format(fi_path))
+                        # Read info.txt and populate data
+                        if cls.info_file in fi:
+                            f = open(fi_path, "r", encoding="UTF-8")
+                            # Correctness is guaranteed at write time; no extra validation on read
+                            for i in range(14):
+                                line = f.readline()
+                                line = line.replace('\n', '')
+                                if line is not None and len(line) != 0 and not line.isspace():
+                                    # Common.logger(log_type).info("line = {}".format(line))
+                                    if i == 0:
+                                        data['crawlerSrcId'] = line
+                                    elif i == 1:
+                                        data['title'] = line
+                                    elif i == 2:
+                                        data['totalTime'] = line
+                                    elif i == 8:
+                                        data['crawlerSrcPublishTimestamp'] = line
+                                else:
+                                    Common.logger(log_type).warning("{} line is None".format(fi_path))
+                            f.close()
+                            # remove info.txt
+                            cls.remove_local_file(log_type, fi_path)
+                    # Refresh the file list
+                    dir_files = os.listdir(fi_d)
+                    for fi in dir_files:
+                        fi_path = fi_d + '/' + fi
+                        # Common.logger(log_type).info('dir fi_path = {}'.format(fi_path))
+                        # Upload to OSS
+                        if cls.video_file in fi:
+                            global oss_video_file
+                            if env == "dev":
+                                oss_video_file = cls.oss_file_path_video.format("dev", today, data['crawlerSrcId'])
+                            elif env == "prod":
+                                oss_video_file = cls.oss_file_path_video.format("prod", today, data['crawlerSrcId'])
+                            Common.logger(log_type).info("oss_video_file = {}".format(oss_video_file))
+                            cls.put_file(log_type, oss_video_file, fi_path)
+                            data['videoPath'] = oss_video_file
+                            Common.logger(log_type).info("videoPath = {}".format(oss_video_file))
+                        elif cls.image_file in fi:
+                            global oss_image_file
+                            if env == "dev":
+                                oss_image_file = cls.oss_file_path_image.format("dev", today, data['crawlerSrcId'])
+                            elif env == "prod":
+                                oss_image_file = cls.oss_file_path_image.format("prod", today, data['crawlerSrcId'])
+                            Common.logger(log_type).info("oss_image_file = {}".format(oss_image_file))
+                            cls.put_file(log_type, oss_image_file, fi_path)
+                            data['coverImgPath'] = oss_image_file
+                            Common.logger(log_type).info("coverImgPath = {}".format(oss_image_file))
+                        # Remove everything locally
+                        cls.remove_local_file(log_type, fi_path)
+
+                    # Publish
+                    if env == "dev":
+                        video_id = cls.publish_video_dev(log_type, data)
+                    elif env == "prod":
+                        video_id = cls.publish_video_prod(log_type, data)
+                    else:
+                        video_id = cls.publish_video_dev(log_type, data)
+                    cls.remove_local_file_dir(log_type, fi_d)
+                    return video_id
+
+                else:
+                    Common.logger(log_type).error('file not a dir = {}'.format(fi_d))
+            except Exception as e:
+                # Remove the video folder
+                shutil.rmtree("./videos/" + f + "/")
+                Common.logger(log_type).exception('upload_and_publish error: {}', e)
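
A minimal sketch of the OSS upload step in isolation, using the same oss2 calls and path template as above (credentials are placeholders; the aid and local path are hypothetical):

```python
import time

import oss2

# Same auth/bucket pattern as Publish; real credentials come from env vars
auth = oss2.Auth('<AccessKeyId>', '<AccessKeySecret>')
bucket = oss2.Bucket(auth, 'oss-cn-hangzhou.aliyuncs.com', 'art-pubbucket')

today = time.strftime("%Y%m%d", time.localtime())
oss_key = 'longvideo/crawler_local/video/{}/{}/{}'.format('dev', today, 'some-aid')
bucket.put_object_from_file(oss_key, './videos/some-title/video.mp4')  # hypothetical local path
```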

Binary
videos/.DS_Store


+ 16 - 0
zongjiao.sh

@@ -0,0 +1,16 @@
+#!/bin/bash
+echo "Start"
+echo "$(date "+%Y-%m-%d %H:%M:%S") Killing the old process..."
+# shellcheck disable=SC2009
+# ps aux | grep run_zongjiao | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep run_zongjiao.py | grep -v grep | awk '{print $2}' | xargs kill -9
+echo "$(date "+%Y-%m-%d %H:%M:%S") Process killed!"
+
+echo "$(date "+%Y-%m-%d %H:%M:%S") Updating the code..."
+cd /root/piaoquan_crawler/crawler_zongjiao && git pull origin master --force
+echo "$(date "+%Y-%m-%d %H:%M:%S") Code updated!"
+
+echo "$(date "+%Y-%m-%d %H:%M:%S") Restarting the service..."
+nohup python3 -u ./main/run_zongjiao.py >>./nohup.log 2>&1 &
+echo "$(date "+%Y-%m-%d %H:%M:%S") Service restarted!"
+exit 0