wangkun 3 rokov pred
rodič
commit
641e884653
10 zmenil súbory, kde vykonal 1208 pridanie a 0 odobranie
  1. 1 0
      README.md
  2. 3 0
      logs/__init__.py
  3. 3 0
      main/__init__.py
  4. 129 0
      main/common.py
  5. 164 0
      main/download.py
  6. 275 0
      main/feishu_lib.py
  7. 333 0
      main/get_feeds.py
  8. 244 0
      main/publish.py
  9. 53 0
      main/run.py
  10. 3 0
      videos/__init__.py

+ 1 - 0
README.md

@@ -0,0 +1 @@
+小年糕爬虫 Mac 版本

+ 3 - 0
logs/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/5/11

+ 3 - 0
main/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/5/11

+ 129 - 0
main/common.py

@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/5/11
+"""
+公共方法,包含:生成log / 删除log / 下载方法 / 读取文件 / 统计下载数
+"""
+from datetime import date, timedelta
+from loguru import logger
+import datetime
+import os
+import time
+import requests
+import urllib3
+
+proxies = {"http": None, "https": None}
+
+
class Common:
    """
    Shared helpers: date stamps / log creation / log cleanup / media download.
    """
    # Snapshot of "now", evaluated once at class-definition time.
    now = datetime.datetime.now()
    # Yesterday as a "YYYY-MM-DD" string.
    yesterday = (date.today() + timedelta(days=-1)).strftime("%Y-%m-%d")
    # Today as a datetime.date object.
    today = date.today()
    # Tomorrow as a "YYYY-MM-DD" string.
    tomorrow = (date.today() + timedelta(days=1)).strftime("%Y-%m-%d")

    @staticmethod
    def logger():
        """
        Configure loguru to write to ./logs/<YYYY-MM-DD>.log and return it.
        """
        log_dir = "./logs/"
        log_path = os.getcwd() + os.sep + log_dir
        # exist_ok avoids the race inherent in the isdir-then-makedirs idiom.
        os.makedirs(log_path, exist_ok=True)

        # One log file per calendar day.
        log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + '.log'

        # Remove every existing sink so nothing is echoed to the console and
        # repeated calls do not accumulate duplicate file sinks.
        logger.remove(handler_id=None)

        # rotation='00:00' starts a fresh file at midnight;
        # other options: rotation="500 MB" / "1 week", retention="10 days".
        logger.add(log_dir + log_name, level="INFO", rotation='00:00')

        return logger

    @classmethod
    def del_logs(cls):
        """
        Delete redundant log files, keeping only the 7 most recent.
        """
        log_dir = "./logs/"
        all_logs = [f for f in sorted(os.listdir(log_dir))
                    if os.path.splitext(f)[-1] == ".log"]

        # sorted() on YYYY-MM-DD file names is chronological, so this slice
        # removes everything except the newest 7 files.
        if len(all_logs) > 7:
            for stale in all_logs[:len(all_logs) - 7]:
                os.remove(log_dir + stale)
        cls.logger().info("清除冗余日志成功")

    @classmethod
    def download_method(cls, text, d_name, d_url):
        """
        Download a video or its cover into ./videos/.
        :param text: "video" to fetch the video, "cover" to fetch the cover
        :param d_name: file name (without extension) to save under
        :param d_url: URL of the video or cover image
        """
        # Destination directory shared by all downloads.
        video_dir = "./videos/"
        os.makedirs(video_dir, exist_ok=True)

        if text == "video":
            video_name = d_name + ".mp4"
            urllib3.disable_warnings()
            try:
                # The request is inside the try so a network failure is logged
                # rather than propagating; timeout keeps a dead connection from
                # hanging forever; the with-block releases the streamed
                # connection deterministically.
                with requests.get(d_url, stream=True, proxies=proxies,
                                  verify=False, timeout=60) as response:
                    with open(video_dir + video_name, "wb") as f:
                        for chunk in response.iter_content(chunk_size=10240):
                            f.write(chunk)
                cls.logger().info("==========视频下载完成==========")
            except Exception as e:
                cls.logger().exception("视频下载失败:{}", e)

        elif text == "cover":
            cover_name = d_name + ".jpg"
            urllib3.disable_warnings()
            try:
                with requests.get(d_url, proxies=proxies,
                                  verify=False, timeout=60) as response:
                    with open(video_dir + cover_name, "wb") as f:
                        f.write(response.content)
                cls.logger().info("==========封面下载完成==========")
            except Exception as e:
                cls.logger().exception("封面下载失败:{}", e)
+
+
if __name__ == "__main__":
    # Smoke check: constructing Common must not raise.
    common = Common()

+ 164 - 0
main/download.py

@@ -0,0 +1,164 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/5/12
+import time
+
+from main.common import Common
+from main.feishu_lib import Feishu
+from main.publish import Publish
+
+
class Download:
    # Video ids downloaded in this run; used to report the batch total.
    download_video_list = []

    @staticmethod
    def download_rule(d_duration, d_width, d_height, d_play_cnt, d_like_cnt, d_share_cnt):
        """
        Basic eligibility rule for downloading a video.
        :param d_duration: duration in seconds
        :param d_width: width in pixels
        :param d_height: height in pixels
        :param d_play_cnt: play count
        :param d_like_cnt: like count
        :param d_share_cnt: share count
        :return: True when every threshold is met, otherwise False
        """
        # Duration must be 60-600 s with at least 10 000 plays; the remaining
        # thresholds (>= 0) currently accept any non-negative metric.
        return (60 <= int(float(d_duration)) <= 600
                and (int(d_width) >= 0 or int(d_height) >= 0)
                and int(d_play_cnt) >= 10000
                and int(d_like_cnt) >= 0
                and int(d_share_cnt) >= 0)

    @classmethod
    def download(cls, env):
        """
        1. Pull candidate videos from the cloud sheet:
           https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=S714lO
        2. Download (and eventually upload) every row that satisfies the rules.
        :param env: test environment: dev ; production environment: prod
        """
        try:
            if len(Feishu.get_values_batch("S714lO")) == 1:
                # Header row only - nothing queued.
                pass
            else:
                for i in range(len(Feishu.get_values_batch("S714lO"))):
                    time.sleep(1)
                    try:
                        # Fetch the sheet once per row: the per-cell fetches of
                        # the original issued ~15 identical API calls per row.
                        row = Feishu.get_values_batch("S714lO")[i + 1]
                        download_video_id = row[1]
                        download_video_play_cnt = row[2]
                        download_video_title = row[3]
                        download_video_duration = row[4]
                        download_video_comment_cnt = row[5]
                        download_video_like_cnt = row[6]
                        download_video_share_cnt = row[7]
                        download_video_resolution = row[8]
                        download_video_width = download_video_resolution.split("*")[0]
                        download_video_height = download_video_resolution.split("*")[-1]
                        download_video_send_time = row[9]
                        download_user_name = row[10]
                        download_head_url = row[11]
                        download_cover_url = row[12]
                        download_video_url = row[13]
                        download_video_session = row[14]

                        # 604800 s == 7 days (the old log message said 3 days).
                        # NOTE(review): send_time is divided by 1000 further
                        # down, which suggests milliseconds; if so, this
                        # seconds-based age check never fires - confirm units.
                        if int(time.time()) - int(download_video_send_time) > 604800:
                            Common.logger().info("发布时间大于7天,删除该视频:{}", download_video_title)
                            # Delete rows or columns; ROWS or COLUMNS.
                            Feishu.dimension_range("S714lO", "ROWS", i + 2, i + 2)
                        # Deduplicate against the sheet of already-downloaded ids.
                        elif download_video_id in [j for m in Feishu.get_values_batch("onyBDH") for j in m]:
                            Common.logger().info("视频已下载,删除该视频:{}", download_video_title)
                            Feishu.dimension_range("S714lO", "ROWS", i + 2, i + 2)
                        # Enforce the download rule.
                        elif cls.download_rule(
                                download_video_duration,
                                download_video_width,
                                download_video_height,
                                download_video_play_cnt,
                                download_video_like_cnt,
                                download_video_share_cnt) is False:
                            Common.logger().info("不满足下载规则,删除该视频:{}", download_video_title)
                            Feishu.dimension_range("S714lO", "ROWS", i + 2, i + 2)
                        else:
                            Common.logger().info("开始下载视频:{}".format(download_video_title))
                            # Fetch cover first, then the video itself.
                            Common.download_method(text="cover", d_name=download_video_title, d_url=download_cover_url)
                            Common.download_method(text="video", d_name=download_video_title, d_url=download_video_url)

                            # Track the id so the batch total can be reported.
                            cls.download_video_list.append(download_video_id)

                            # Upload step is currently disabled:
                            # Publish.upload_and_publish(env, "play")

                            # Persist the id to the dedup sheet:
                            # https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=onyBDH
                            Common.logger().info("保存视频ID至云文档:{}", download_video_title)
                            # Insert a blank first row, then write into it.
                            Feishu.insert_columns("onyBDH")
                            upload_time = int(time.time())
                            Feishu.update_values("onyBDH",
                                                 str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time))),
                                                 str(download_video_id),
                                                 str(download_video_play_cnt),
                                                 str(download_video_title),
                                                 str(download_video_duration),
                                                 str(download_video_comment_cnt),
                                                 str(download_video_like_cnt),
                                                 str(download_video_share_cnt),
                                                 str(download_video_resolution),
                                                 str(time.strftime("%Y-%m-%d %H:%M:%S",
                                                                   time.localtime(
                                                                       int(download_video_send_time)/1000))),
                                                 str(download_user_name),
                                                 str(download_head_url),
                                                 str(download_cover_url),
                                                 str(download_video_url),
                                                 str(download_video_session))

                            # Drop the processed row from the work sheet.
                            Common.logger().info("从云文档删除该视频信息:{}", download_video_title)
                            Feishu.dimension_range("S714lO", "ROWS", i + 2, i + 2)

                    except Exception as e:
                        # A malformed row is logged and removed so it cannot
                        # wedge the crawl loop.
                        Common.logger().error("视频 info 异常,删除该视频信息:{}", e)
                        Feishu.dimension_range("S714lO", "ROWS", i + 2, i + 2)
                    # Deleting rows shifts the remaining indices, so rescan
                    # from the top. Propagate env instead of the hard-coded
                    # "prod" of the original (a dev run must stay in dev).
                    cls.download(env)

        except Exception as e:
            Common.logger().error("获取视频数据异常:{}", e)

+ 275 - 0
main/feishu_lib.py

@@ -0,0 +1,275 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/5/11
+import json
+import requests
+import urllib3
+
+from main.common import Common
+
+proxies = {"http": None, "https": None}
+
+
class Feishu:
    """
    Helpers for reading and editing the Feishu cloud spreadsheet.
    """
    feishu_url = "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?"
    spreadsheetToken = "shtcngRPoDYAi24x52j2nDuHMih"

    @classmethod
    def get_token(cls):
        """
        Fetch a tenant_access_token for the Feishu open API.
        :return: token string, or None when the request fails
        """
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        # SECURITY(review): app credentials are hard-coded in source; move
        # them to configuration or a secret store.
        post_data = {"app_id": "cli_a13ad2afa438d00b",
                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}

        try:
            urllib3.disable_warnings()
            # timeout keeps a dead network from hanging the crawler forever.
            response = requests.post(url=url, data=post_data, proxies=proxies,
                                     verify=False, timeout=10)
            tenant_access_token = response.json()["tenant_access_token"]
            return tenant_access_token
        except Exception as e:
            Common.logger().error("获取飞书 api token 异常:{}", e)

    @classmethod
    def get_metainfo(cls):
        """
        Fetch the spreadsheet metadata.
        :return: decoded JSON response, or None on failure
        """
        url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" + cls.spreadsheetToken + "/metainfo"
        headers = {
            "Authorization": "Bearer " + cls.get_token(),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            "extFields": "protectedRange",  # also return protected-range info
            "user_id_type": "open_id"  # user id flavour: open_id or union_id
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=params,
                             proxies=proxies, verify=False, timeout=10)
            response = json.loads(r.content.decode("utf8"))
            return response
        except Exception as e:
            Common.logger().error("获取表格元数据异常:{}", e)

    @classmethod
    def get_values_batch(cls, sheetid):
        """
        Read every cell of one worksheet.
        :param sheetid: worksheet id
        :return: list of row lists, or None on failure
        """
        url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" + cls.spreadsheetToken + "/values_batch_get"
        headers = {
            "Authorization": "Bearer " + cls.get_token(),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # Ranges may combine a sheet id with a cell range; here we read
            # the whole sheet.
            "ranges": sheetid,

            # ToString returns plain-text values (numbers excepted);
            # alternatives: FormattedValue / Formula / UnformattedValue.
            "valueRenderOption": "ToString",

            # FormattedString would format date/time cells; empty keeps raw.
            "dateTimeRenderOption": "",

            # user id flavour: open_id or union_id
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=params,
                             proxies=proxies, verify=False, timeout=10)
            response = json.loads(r.content.decode("utf8"))
            values = response["data"]["valueRanges"][0]["values"]
            return values
        except Exception as e:
            Common.logger().error("读取工作表所有数据异常:{}", e)

    @classmethod
    def insert_columns(cls, sheetid):
        """
        Insert one blank row at the top of a worksheet (below the header).
        :param sheetid: worksheet id
        """
        url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/"\
              + cls.spreadsheetToken + "/insert_dimension_range"
        headers = {
            "Authorization": "Bearer " + cls.get_token(),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "dimension": {
                "sheetId": sheetid,
                "majorDimension": "ROWS",  # ROWS (default) or COLUMNS
                "startIndex": 1,  # insert after the header row
                "endIndex": 2
            },
            "inheritStyle": "AFTER"  # BEFORE / AFTER / omit for no style
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=url, headers=headers, json=body,
                              proxies=proxies, verify=False, timeout=10)
            Common.logger().info("插入空行:{}", r.json()["msg"])
        except Exception as e:
            Common.logger().error("插入空行异常:{}", e)

    @classmethod
    def update_values(cls, sheetid, a1, b1, c1, d1, e1, f1, g1, h1, i1, j1, k1, l1, m1, n1, o1):
        """
        Write one row of data into cells A2:O2 of a worksheet.
        :param sheetid: worksheet id
        :param a1: cell value for column A; b1..o1 likewise for columns B..O
        """
        url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" + cls.spreadsheetToken + "/values_batch_update"
        headers = {
            "Authorization": "Bearer " + cls.get_token(),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "valueRanges": [
                {
                    "range": sheetid + "!A2:O2",
                    "values": [
                        [a1, b1, c1, d1, e1, f1, g1, h1, i1, j1, k1, l1, m1, n1, o1]
                    ]
                },
            ],
        }
        try:
            urllib3.disable_warnings()
            r = requests.post(url=url, headers=headers, json=body,
                              proxies=proxies, verify=False, timeout=10)
            Common.logger().info("空行写入视频数据:{}", r.json()["msg"])
        except Exception as e:
            Common.logger().error("空行写入视频数据异常:{}", e)

    @classmethod
    def get_range_value(cls, sheetid, cell):
        """
        Read the contents of a single cell range.
        :param sheetid: worksheet id
        :param cell: cell range, e.g. "B8:C8"
        :return: first row of values, or None on failure
        """
        url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
              + cls.spreadsheetToken + "/values/" + sheetid + "!" + cell
        headers = {
            "Authorization": "Bearer " + cls.get_token(),
            "Content-Type": "application/json; charset=utf-8"
        }
        params = {
            # ToString returns plain-text values (numbers excepted);
            # alternatives: FormattedValue / Formula / UnformattedValue.
            "valueRenderOption": "ToString",

            # FormattedString would format date/time cells; empty keeps raw.
            "dateTimeRenderOption": "",

            # user id flavour: open_id or union_id
            "user_id_type": "open_id"
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=params,
                             proxies=proxies, verify=False, timeout=10)
            return r.json()["data"]["valueRange"]["values"][0]
        except Exception as e:
            Common.logger().error("读取单元格数据异常:{}", e)

    @classmethod
    def dimension_range(cls, sheetid, major_dimension, startindex, endindex):
        """
        Delete rows or columns from a worksheet.
        :param sheetid: worksheet id
        :param major_dimension: ROWS (default) or COLUMNS
        :param startindex: first index to delete (1-based, inclusive)
        :param endindex: last index to delete (inclusive)
        """
        url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" + cls.spreadsheetToken + "/dimension_range"
        headers = {
            "Authorization": "Bearer " + cls.get_token(),
            "Content-Type": "application/json; charset=utf-8"
        }
        body = {
            "dimension": {
                "sheetId": sheetid,
                "majorDimension": major_dimension,
                "startIndex": startindex,
                "endIndex": endindex
                }
            }
        try:
            urllib3.disable_warnings()
            r = requests.delete(url=url, headers=headers, json=body,
                                proxies=proxies, verify=False, timeout=10)
            Common.logger().info("删除视频数据:{}", r.json()["msg"])
        except Exception as e:
            Common.logger().error("删除视频数据异常:{}", e)
+
+
if __name__ == "__main__":
    # Ad-hoc scaffolding: the commented-out calls that previously lived here
    # (get_token / get_metainfo / get_values_batch / insert_columns /
    # update_values / get_range_value / dimension_range) exercised the API
    # one method at a time; only the construction check remains active.
    feishu = Feishu()
    pass

+ 333 - 0
main/get_feeds.py

@@ -0,0 +1,333 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/5/11
+import os
+import sys
+import time
+import requests
+import urllib3
+sys.path.append(os.getcwd())
+from main.common import Common
+from main.feishu_lib import Feishu
+
+proxies = {"http": None, "https": None}
+
+
+# 敏感词库
def sensitive_words():
    """Return the sensitive-word list read from Feishu worksheet "QQrfQ7"."""
    rows = Feishu.get_values_batch("QQrfQ7")
    # Flatten the sheet row-by-row, dropping empty (None) cells.
    return [cell for row in rows for cell in row if cell is not None]
+
+
+# 获取列表数据
+def get_feeds():
+    url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
+    headers = {
+        "x-b3-traceid": "17e832d1a42807",
+        "X-Token-Id": "4bff41a8c35f054fa915dc71b937ac70-1145266232",
+        "uid": "uid	250e6514-fd83-446c-a880-e274c7f17bce",
+        "content-type": "application/json",
+        "Accept-Encoding": "gzip,compress,br,deflate",
+        "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
+                      ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
+                      'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
+        "Referer": "https://servicewechat.com/wxd7911e4c177690e4/616/page-frame.html"
+    }
+    data = {
+        "log_params": {
+            "page": "discover_rec",
+            "common": {
+                "brand": "iPhone",
+                "device": "iPhone 11",
+                "os": "iOS 14.7.1",
+                "weixinver": "8.0.20",
+                "srcver": "2.24.2",
+                "net": "none"
+            }
+        },
+        "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!750x500r/crop/750x500/interlace/1/format/jpg",
+        "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!80x80r/crop/80x80/interlace/1/format/jpg",
+        "share_width": 625,
+        "share_height": 500,
+        "ext": {
+            "fmid": 0,
+            "items": {
+                "15408539": {
+                    "type": "rec",
+                    "pd": 328.31,
+                    "ct": 1649248176494,
+                    "ut": 1649248509951
+                },
+                "36776414": {
+                    "type": "nice",
+                    "pd": 33.501,
+                    "ct": 1648893152909,
+                    "ut": 1648893189506
+                },
+                "39467179": {
+                    "type": "rec",
+                    "pd": 49.343,
+                    "ct": 1649247992882,
+                    "ut": 1649248043566
+                },
+                "40347940": {
+                    "type": "rec",
+                    "pd": 3.681,
+                    "ct": 1649248080042,
+                    "ut": 1649248084907
+                },
+                "42285576": {
+                    "type": "rec",
+                    "pd": 0.66,
+                    "ct": 1649248087683,
+                    "ut": 1649248089445
+                },
+                "42300668": {
+                    "type": "rec",
+                    "pd": 13.5,
+                    "ct": 1649248059189,
+                    "ut": 1649248074218
+                },
+                "42306954": {
+                    "type": "rec",
+                    "pd": 4.918,
+                    "ct": 1649248091376,
+                    "ut": 1649248097663
+                },
+                "42328061": {
+                    "type": "reflux",
+                    "pd": 4.001,
+                    "ct": 1649248510102,
+                    "ut": 1649248516806
+                },
+                "90004473051301": {
+                    "type": "my",
+                    "pd": 0,
+                    "ct": 1649247069688,
+                    "ut": 1649247069688
+                },
+                "80004478730156": {
+                    "type": "other",
+                    "pd": 153,
+                    "ct": 1649247850322,
+                    "ut": 1649247919263
+                }
+            }
+        },
+        "app": "xng",
+        "rec_scene": "discover_rec",
+        "log_common_params": {
+            "e": [{
+                "data": {
+                    "page": "discoverIndexPage",
+                    "topic": "recommend"
+                },
+                "ab": {}
+            }],
+            "ext": {
+                "brand": "iPhone",
+                "device": "iPhone 11",
+                "os": "iOS 14.7.1",
+                "weixinver": "8.0.20",
+                "srcver": "2.24.2",
+                "net": "wifi",
+                "scene": "1089"
+            },
+            "pj": "1",
+            "pf": "2",
+            "session_id": "cd944ac9-f35f-47ac-9aa4-2f354afec0e2"
+        },
+        "refresh": False,
+        "token": "132645b6e2b996aaad1713a557456816",
+        "uid": "250e6514-fd83-446c-a880-e274c7f17bce",
+        "proj": "ma",
+        "wx_ver": "8.0.20",
+        "code_ver": "3.61.0"
+    }
+
+    try:
+        urllib3.disable_warnings()
+        r = requests.post(url=url, headers=headers, json=data, proxies=proxies, verify=False)
+        if "data" not in r.json():
+            Common.logger().warning("获取视频feeds错误:{}", r.text)
+        elif "list" not in r.json()["data"]:
+            Common.logger().warning("获取视频feeds无数据,休眠10s:{}", r.json()["data"])
+        else:
+            # 视频列表数据
+            feeds = r.json()["data"]["list"]
+            for i in range(len(feeds)):
+                # 标题
+                if "title" in feeds[i]:
+                    video_title = feeds[i]["title"].strip().replace("\n", "")\
+                        .replace("/", "").replace("\r", "").replace("#", "")\
+                        .replace(".", "。").replace("\\", "").replace("&NBSP", "")\
+                        .replace(":", "").replace("*", "").replace("?", "") \
+                        .replace("?", "").replace('"', "").replace("<", "") \
+                        .replace(">", "").replace("|", "").replace(" ", "")
+                    Common.logger().info("标题:{}", video_title)
+                else:
+                    video_title = ""
+                    Common.logger().info("当前视频无标题:{}", video_title)
+
+                # 视频 ID
+                if "vid" in feeds[i]:
+                    video_id = feeds[i]["vid"]
+                    Common.logger().info("视频ID:{}", video_id)
+                else:
+                    video_id = ""
+                    Common.logger().info("当前视频无ID:{}", video_id)
+
+                # 播放量
+                if "play_pv" in feeds[i]:
+                    video_play_cnt = feeds[i]["play_pv"]
+                    Common.logger().info("视频播放量:{}", video_play_cnt)
+                else:
+                    video_play_cnt = ""
+                    Common.logger().info("当前视频无播放量:{}", video_play_cnt)
+
+                # 点赞量
+                if "favor" in feeds[i]:
+                    video_like_cnt = feeds[i]["favor"]["total"]
+                    Common.logger().info("视频点赞量:{}", video_like_cnt)
+                else:
+                    video_like_cnt = ""
+                    Common.logger().info("当前视频无点赞量:{}", video_like_cnt)
+
+                # 分享量
+                if "share" in feeds[i]:
+                    video_share_cnt = feeds[i]["share"]
+                    Common.logger().info("视频分享量:{}", video_share_cnt)
+                else:
+                    video_share_cnt = ""
+                    Common.logger().info("当前视频无分享量:{}", video_share_cnt)
+
+                # 评论量
+                if "comment_count" in feeds[i]:
+                    video_comment_cnt = feeds[i]["comment_count"]
+                    Common.logger().info("视频评论数:{}", video_comment_cnt)
+                else:
+                    video_comment_cnt = ""
+                    Common.logger().info("当前视频无评论:{}", video_comment_cnt)
+
+                # 时长
+                if "du" in feeds[i]:
+                    video_duration = int(feeds[i]["du"] / 1000)
+                    Common.logger().info("视频时长:{}秒", video_duration)
+                else:
+                    video_duration = ""
+                    Common.logger().info("当前视频无时长:{}", video_duration)
+
+                # 宽和高
+                if "w" or "h" in feeds[i]:
+                    video_width = feeds[i]["w"]
+                    video_height = feeds[i]["h"]
+                    Common.logger().info("视频宽高:{}*{}", video_width, video_height)
+                else:
+                    video_width = ""
+                    video_height = ""
+                    Common.logger().info("当前视频无宽高:{}{}", video_width, video_height)
+
+                # 发布时间
+                if "t" in feeds[i]:
+                    video_send_time = feeds[i]["t"]
+                    Common.logger().info(
+                        "视频发布时间:{}", time.strftime(
+                            "%Y-%m-%d %H:%M:%S", time.localtime(int(video_send_time) / 1000)))
+                else:
+                    video_send_time = ""
+                    Common.logger().info("当前视频无发布时间:{}", video_send_time)
+
+                # 用户名 / 头像
+                if "user" in feeds[i]:
+                    user_name = feeds[i]["user"]["nick"].strip().replace("\n", "")\
+                        .replace("/", "").replace("快手", "").replace(" ", "") \
+                        .replace(" ", "").replace("&NBSP", "").replace("\r", "")
+                    head_url = feeds[i]["user"]["hurl"]
+                    Common.logger().info("用户名:{}", user_name)
+                    Common.logger().info("用户头像:{}", head_url)
+                else:
+                    user_name = ""
+                    head_url = ""
+                    Common.logger().info("当前视频无用户名:{}", user_name)
+                    Common.logger().info("当前视频无用户头像:{}", head_url)
+
+                # 视频封面
+                if "url" in feeds[i]:
+                    cover_url = feeds[i]["url"]
+                    Common.logger().info("视频封面:{}", cover_url)
+                else:
+                    cover_url = ""
+                    Common.logger().info("当前视频无视频封面:{}", cover_url)
+
+                # 视频播放地址
+                if "v_url" in feeds[i]:
+                    video_url = feeds[i]["v_url"]
+                    Common.logger().info("播放地址:{}", video_url)
+                else:
+                    video_url = ""
+                    Common.logger().info("当前视频无播放地址:{}", video_url)
+
+                # 视频水印:0 无 1 有
+                if "v_ort" in feeds[i]:
+                    video_logo = feeds[i]["v_ort"]
+                else:
+                    video_logo = "1"
+
+                # 过滤无效视频
+                if video_title == "" or video_id == "" or video_duration == ""\
+                        or video_send_time == "" or user_name == "" or head_url == ""\
+                        or cover_url == "" or video_url == "":
+                    Common.logger().warning("无效视频")
+                # 过滤敏感词
+                elif any(word if word in video_title else False for word in sensitive_words()) is True:
+                    Common.logger().info("视频已中敏感词:{}".format(video_title))
+                # 过滤水印视频
+                elif str(video_logo) == "1" and feeds[i]["tpl_id"] != 0:
+                    Common.logger().info("视频有水印:{}", video_title)
+                # 从云文档去重:https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=onyBDH
+                elif video_id in [j for i in Feishu.get_values_batch("onyBDH") for j in i]:
+                    Common.logger().info("该视频已下载:{}", video_title)
+                # 从 云文档 去重:https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=S714lO
+                elif video_id in [j for i in Feishu.get_values_batch("S714lO") for j in i]:
+                    Common.logger().info("该视频已在feeds中:{}", video_title)
+                else:
+                    Common.logger().info("该视频未下载,添加至feeds中:{}".format(video_title))
+                    # feeds工作表,插入首行
+                    Feishu.insert_columns("S714lO")
+
+                    # 获取当前时间
+                    get_feeds_time = int(time.time())
+                    # 看一看云文档,工作表 kanyikan_feeds 中写入数据
+                    Feishu.update_values("S714lO",
+                                         a1=str(get_feeds_time),
+                                         b1=str(video_id),
+                                         c1=str(video_play_cnt),
+                                         d1=str(video_title),
+                                         e1=str(video_duration),
+                                         f1=str(video_comment_cnt),
+                                         g1=str(video_like_cnt),
+                                         h1=str(video_share_cnt),
+                                         i1=str(video_width)+"*"+str(video_height),
+                                         j1=str(video_send_time),
+                                         k1=str(user_name),
+                                         l1=str(head_url),
+                                         m1=str(cover_url),
+                                         n1=str(video_url),
+                                         o1=str("132645b6e2b996aaad1713a557456816"))
+
+    except Exception as e:
+        Common.logger().error("获取视频列表异常:{}", e)
+
+
# Script entry point: fetch the feed list once when run directly.
if __name__ == "__main__":
    get_feeds()

+ 244 - 0
main/publish.py

@@ -0,0 +1,244 @@
+"""
+上传视频到阿里云 OSS
+上传视频到管理后台
+"""
+import json
+import os
+import random
+import time
+
+import oss2
+import requests
+import urllib3
+from main.common import Common
+
# Explicitly disable HTTP(S) proxies for `requests` calls (bypass any system proxy).
proxies = {"http": None, "https": None}
+
+
class Publish:
    """Upload crawled videos and cover images to Aliyun OSS, then publish
    them to the management backend."""

    @classmethod
    def publish_video_dev(cls, request_data):
        """
        Publish one video to the test (dev) backend.

        request_data fields:
            loginUid                    on-site uid (randomly chosen)
            appType                     default: 888888
            crawlerSrcId                off-site video ID
            crawlerSrcCode              channel code (custom, e.g. KYK)
            crawlerSrcPublishTimestamp  original publish time of the video
            crawlerTaskTimestamp        crawler task creation time (may be "now")
            videoPath                   OSS path of the video file
            coverImgPath                OSS path of the cover image
            title                       title
            totalTime                   video duration
            viewStatus                  validity flag, default 1
            versionCode                 version, default 1
        :return: None (the result is only logged)
        """
        Common.logger().info('publish request data: {}'.format(request_data))
        result = cls.request_post('https://videotest.yishihui.com/longvideoapi/crawler/video/send', request_data)
        Common.logger().info('publish result: {}'.format(result))
        if result is None:
            # request_post returns None on a non-200 response; the old code
            # crashed here with a TypeError when subscripting None.
            Common.logger().error('publish failure: no response data')
        elif result['code'] != 0:
            Common.logger().error('publish failure msg = {}'.format(result['msg']))
        else:
            Common.logger().info('publish success video_id = : {}'.format(request_data['crawlerSrcId']))

    @classmethod
    def publish_video_prod(cls, request_data):
        """
        Publish one video to the production backend.
        See publish_video_dev for the request_data field descriptions.
        :return: None (the result is only logged)
        """
        result = cls.request_post('https://longvideoapi.piaoquantv.com/longvideoapi/crawler/video/send', request_data)
        Common.logger().info('publish result: {}'.format(result))
        if result is None:
            Common.logger().error('publish failure: no response data')
        elif result['code'] != 0:
            Common.logger().error('publish failure msg = {}'.format(result['msg']))
        else:
            Common.logger().info('publish success video_id = : {}'.format(request_data['crawlerSrcId']))

    @classmethod
    def request_post(cls, request_url, request_data):
        """
        POST form data to an HTTP endpoint.
        :param request_url: endpoint URL
        :param request_data: request payload (form-encoded)
        :return: decoded JSON dict on HTTP 200, otherwise None
        """
        urllib3.disable_warnings()
        # NOTE(review): verify=False disables TLS certificate checking — kept
        # to preserve behavior, but worth revisiting.
        response = requests.post(url=request_url, data=request_data, proxies=proxies, verify=False)
        if response.status_code == 200:
            return json.loads(response.text)
        # Surface non-200 responses instead of silently returning None.
        Common.logger().error('request_post failed, url = {}, code = {}'.format(request_url, response.status_code))
        return None

    # OSS connection settings (Endpoint examples for the Hangzhou region:
    # http(s)://oss-cn-hangzhou.aliyuncs.com).
    # NOTE(review): hard-coded AccessKey credentials should live in environment
    # variables or a secret store, not in source control; kept here only as
    # fallback defaults to preserve existing behavior.
    access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', 'LTAIP6x1l3DXfSxm')
    access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', 'KbTaM9ars4OX3PMS6Xm7rtxGr1FLon')
    bucket_name = os.getenv('OSS_TEST_BUCKET', 'art-pubbucket')
    # endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-hangzhou-internal.aliyuncs.com')
    endpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-hangzhou.aliyuncs.com')

    # Sanity-check that placeholder values like "<your AccessKeyId>" were replaced.
    for param in (access_key_id, access_key_secret, bucket_name, endpoint):
        assert '<' not in param, '请设置参数:' + param

    # All object operations go through this Bucket handle.
    bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

    # Processing flow (scheduled, e.g. daily at 01:00):
    # 1. Iterate the ./videos directory: videos -> one folder per video,
    #    each holding the video file + cover image + info.txt.
    # 2. Upload video and cover to OSS:
    #    - video: longvideo/crawler_local/video/<env>/<date>/<name>
    #    - cover: longvideo/crawler_local/image/<env>/<date>/<name>
    # 3. Publish the video via the backend API using the info.txt fields.

    # OSS key templates: env / date (YYYYMMDD) / file name.
    oss_file_path_video = 'longvideo/crawler_local/video/{}/{}/{}'
    oss_file_path_image = 'longvideo/crawler_local/image/{}/{}/{}'

    @classmethod
    def put_file(cls, oss_file, local_file):
        """Upload a local file to the OSS bucket under key oss_file."""
        cls.bucket.put_object_from_file(oss_file, local_file)
        Common.logger().info("put oss file = {}, local file = {} success".format(oss_file, local_file))

    @classmethod
    def remove_local_file(cls, local_file):
        """Delete a local file once it has been processed/uploaded."""
        os.remove(local_file)
        Common.logger().info("remove local file = {} success".format(local_file))

    @classmethod
    def remove_local_file_dir(cls, local_file):
        """Delete an (empty) local video folder."""
        os.rmdir(local_file)
        Common.logger().info("remove local file dir = {} success".format(local_file))

    # Local layout and the uid pools used for publishing.
    local_file_path = './videos'
    video_file = 'video'
    image_file = 'image'
    info_file = 'info'
    uids_dev_up = [6267140]
    uids_dev_play = [6267141]
    uids_prod_up = [20631208, 20631209, 20631210, 20631211, 20631212,
                    20631213, 20631214, 20631215, 20631216, 20631217]
    uids_prod_play = [20631228, 20631229, 20631230, 20631231, 20631232,
                      20631233, 20631234, 20631235, 20631236, 20631237]

    @classmethod
    def upload_and_publish(cls, env, job):
        """
        Upload every pending video folder under ./videos to OSS and publish it.
        :param env: "dev" for the test environment, "prod" for production
        :param job: "up" for the rising list, "play" for the play-count list
        """
        Common.logger().info("upload_and_publish starting...")
        today = time.strftime("%Y%m%d", time.localtime())
        # Map (env, job) to a uid pool. Replaces the former `global uid`
        # chain, which leaked state across calls and raised NameError on an
        # unknown env/job combination.
        uid_pools = {
            ("dev", "up"): cls.uids_dev_up,
            ("dev", "play"): cls.uids_dev_play,
            ("prod", "up"): cls.uids_prod_up,
            ("prod", "play"): cls.uids_prod_play,
        }
        for folder in os.listdir(cls.local_file_path):
            try:
                # One folder per video.
                fi_d = os.path.join(cls.local_file_path, folder)
                if not os.path.isdir(fi_d):
                    Common.logger().error('file not a dir = {}'.format(fi_d))
                    continue
                Common.logger().info('dir = {}'.format(fi_d))
                pool = uid_pools.get((env, job))
                if pool is None:
                    Common.logger().error('unknown env/job combination: {}/{}'.format(env, job))
                    continue
                data = {'appType': '888888',
                        'crawlerSrcCode': 'XIAONIANGAO_XCX',
                        'viewStatus': '1',
                        'versionCode': '1',
                        'crawlerTaskTimestamp': str(int(round(time.time() * 1000))),
                        'loginUid': str(random.choice(pool))}
                # Pass 1: read info.txt into `data`, then delete it.
                for fi in os.listdir(fi_d):
                    fi_path = os.path.join(fi_d, fi)
                    Common.logger().info('dir fi_path = {}'.format(fi_path))
                    if cls.info_file not in fi:
                        continue
                    # Data accuracy is guaranteed at write time; reading does
                    # no extra validation. `with` replaces the old unclosed-on-
                    # exception open(); the handle no longer shadows the outer
                    # loop variable `f`.
                    with open(fi_path, "r", encoding="UTF-8") as info_fp:
                        for i in range(14):
                            line = info_fp.readline().replace('\n', '')
                            if line and not line.isspace():
                                Common.logger().info("line = {}".format(line))
                                if i == 0:
                                    data['crawlerSrcId'] = line
                                elif i == 1:
                                    data['title'] = line
                                elif i == 2:
                                    data['totalTime'] = line
                                elif i == 8:
                                    data['crawlerSrcPublishTimestamp'] = line
                            else:
                                Common.logger().warning("{} line is None".format(fi_path))
                    # remove info.txt
                    cls.remove_local_file(fi_path)
                # Pass 2 (fresh listing): upload video and cover, delete local copies.
                for fi in os.listdir(fi_d):
                    fi_path = os.path.join(fi_d, fi)
                    Common.logger().info('dir fi_path = {}'.format(fi_path))
                    if cls.video_file in fi:
                        # BUGFIX: format with `env` directly — the old code
                        # formatted the dev image path with the literal "env".
                        oss_video_file = cls.oss_file_path_video.format(env, today, data['crawlerSrcId'])
                        Common.logger().info("oss_video_file = {}".format(oss_video_file))
                        cls.put_file(oss_video_file, fi_path)
                        data['videoPath'] = oss_video_file
                        Common.logger().info("videoPath = {}".format(oss_video_file))
                    elif cls.image_file in fi:
                        oss_image_file = cls.oss_file_path_image.format(env, today, data['crawlerSrcId'])
                        Common.logger().info("oss_image_file = {}".format(oss_image_file))
                        cls.put_file(oss_image_file, fi_path)
                        data['coverImgPath'] = oss_image_file
                        Common.logger().info("coverImgPath = {}".format(oss_image_file))
                    # Remove every local file once processed.
                    cls.remove_local_file(fi_path)

                # Publish to the matching environment, then drop the empty dir.
                if env == "dev":
                    cls.publish_video_dev(data)
                elif env == "prod":
                    cls.publish_video_prod(data)
                cls.remove_local_file_dir(fi_d)
            except Exception as e:
                Common.logger().exception('upload_and_publish error: {}', e)

+ 53 - 0
main/run.py

@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/5/12
+import datetime
+import os
+import random
+import sys
+import time
+sys.path.append(os.getcwd())
+from main.common import Common
+from main.download import Download
+from main.get_feeds import get_feeds
+
+
def xiaoniangao_prod_job():
    """
    Production crawl loop for xiaoniangao: keep fetching the feed and
    downloading/uploading videos, throttling once the daily cap is reached.
    Runs forever; never returns.
    """
    while True:
        # Daily download/upload cap (code uses 100; the original comment
        # said 150 条 — TODO confirm the intended quota).
        downloaded = len(Download.download_video_list)
        if downloaded < 100:
            Common.logger().info("开始抓取小年糕视频")
            time.sleep(1)
            # Fetch the feed list, then download and upload the videos.
            get_feeds()
            Download.download("prod")
            # Random 1-3 s pause between rounds.
            time.sleep(random.randint(1, 3))
        else:
            Common.logger().info("已下载视频数:{}".format(downloaded))
            time.sleep(1800)
        # Prune stale log files after every round.
        Common.del_logs()
+
+
def main_prod():
    """
    Production entry point: start the crawl job during working hours
    (from 08:00), and idle outside that window.
    """
    while True:
        main_prod_time = datetime.datetime.now()
        if main_prod_time.hour >= 8:
            # NOTE(review): xiaoniangao_prod_job() loops forever, so once it
            # starts the hour check above is never re-evaluated — confirm
            # whether the job is meant to return at end of day.
            xiaoniangao_prod_job()
        else:
            # BUGFIX: the original inner `while True` broke out here and the
            # outer loop busy-spun at 100% CPU; sleep until the next check.
            time.sleep(600)
+
+
# Script entry point: start the production crawl loop.
if __name__ == "__main__":
    main_prod()

+ 3 - 0
videos/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/5/11