add feishu api

wangkun 3 years ago
parent
commit
b1161aec70

+ 40 - 49
main/common.py

@@ -5,6 +5,7 @@
 公共方法,包含:生成log / 删除log / 下载方法 / 读取文件 / 统计下载数
 """
 from datetime import date, timedelta
+from loguru import logger
 import datetime
 import logging
 import os
@@ -25,6 +26,7 @@ class Common:
     # 明天 <class 'str'>  2022-04-15
     tomorrow = (date.today() + timedelta(days=1)).strftime("%Y-%m-%d")
 
+    # 使用 logging 模块生成日志
     @staticmethod
     def crawler_log():
         """
@@ -47,6 +49,34 @@ class Common:
 
         return crawler_logger
 
+    # 使用 logger 模块生成日志
+    @staticmethod
+    def logger():
+        """
+        使用 logger 模块生成日志
+        """
+        # 日志路径
+        log_dir = r"./logs/"
+        log_path = os.getcwd() + os.sep + log_dir
+        if not os.path.isdir(log_path):
+            os.makedirs(log_path)
+
+        # 日志文件名
+        log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + '.log'
+
+        # 日志不打印到控制台
+        logger.remove(handler_id=None)
+
+        # rotation="500 MB",实现每 500MB 存储一个文件
+        # rotation="12:00",实现每天 12:00 创建一个文件
+        # rotation="1 week",每周创建一个文件
+        # retention="10 days",每隔10天之后就会清理旧的日志
+        # 初始化日志
+        logger.add(log_dir + log_name, level="INFO", rotation='00:00')
+
+        return logger
+
+    # 清除日志,保留最近 7 个文件
     @classmethod
     def del_logs(cls):
         """
@@ -66,8 +96,9 @@ class Common:
         else:
             for file in all_logs[:len(all_logs) - 7]:
                 os.remove(log_dir + file)
-        cls.crawler_log().info("清除冗余日志成功")
+        cls.logger().info("清除冗余日志成功")
 
+    # 封装下载视频或封面的方法
     @classmethod
     def download_method(cls, text, d_name, d_url):
         """
@@ -80,6 +111,7 @@ class Common:
         video_dir = "./videos/" + d_name + "/"
         if not os.path.exists(video_dir):
             os.mkdir(video_dir)
+        cls.logger().info("删除 charles 缓存文件成功")
 
         # 下载视频
         if text == "video":
@@ -95,11 +127,9 @@ class Common:
                 with open(video_dir + video_name, "wb") as f:
                     for chunk in response.iter_content(chunk_size=10240):
                         f.write(chunk)
-                cls.crawler_log().info("==========视频下载完成==========")
+                cls.logger().info("==========视频下载完成==========")
             except Exception as e:
-                cls.crawler_log().info("视频下载失败:{}".format(e))
-            # except FileNotFoundError:
-            #     cls.kuaishou_log().info("==========视频下载失败==========")
+                cls.logger().exception("视频下载失败:{}", e)
 
         # 下载封面
         elif text == "cover":
@@ -114,12 +144,11 @@ class Common:
             try:
                 with open(video_dir + cover_name, "wb") as f:
                     f.write(response.content)
-                cls.crawler_log().info("==========封面下载完成==========")
+                cls.logger().info("==========封面下载完成==========")
             except Exception as e:
-                cls.crawler_log().info("封面下载失败:{}".format(e))
-            # except FileNotFoundError:
-            #     cls.kuaishou_log().info("==========封面下载失败==========")
+                cls.logger().exception("封面下载失败:{}", e)
 
+    # 读取 txt 内容,返回 f.readlines()
     @staticmethod
     def read_txt(t_name):
         """
@@ -130,53 +159,15 @@ class Common:
         with open(r"./txt/" + t_name, "r", encoding="UTF-8") as f:
             return f.readlines()
 
+    # 统计 txt 内容数量
     @classmethod
     def kuaishou_download_count(cls):
         videoid_path = r"./txt/kuaishou_videoid.txt"
         count = 0
         for count, line in enumerate(open(videoid_path, "rb").readlines()):
             count += 1
-        cls.crawler_log().info('累计下载视频数: {}\n'.format(count))
-
-    @classmethod
-    def weishi_download_count(cls):
-        videoid_path = r"./txt/weishi_videoid.txt"
-        count = 0
-        for count, line in enumerate(open(videoid_path, "rb").readlines()):
-            count += 1
-        cls.crawler_log().info('累计下载视频数: {}\n'.format(count))
-
-    @classmethod
-    def kuaishou_today_download_count(cls):
-        """
-        统计快手渠道当日下载视频数
-        :return:
-        """
-        # 创建空文件
-        with open(r"./txt/" + str(cls.today) + "_kuaishou_videoid.txt", "a") as f:
-            f.write("")
-        videoid_path = r"./txt/" + str(cls.today) + "_kuaishou_videoid.txt"
-        count = 0
-        for count, line in enumerate(open(videoid_path, "rb").readlines()):
-            count += 1
-        return count
-
-    @classmethod
-    def del_yesterday_kuaishou_videoid_txt(cls):
-        """
-        删除快手渠道昨日下载视频数的 txt 文件
-        :return:
-        """
-        yesterday_kuaishou_videoid_txt_dir = r"./txt/"
-        all_files = sorted(os.listdir(yesterday_kuaishou_videoid_txt_dir))
-        for file in all_files:
-            name = os.path.splitext(file)[0]
-            if name == cls.yesterday + "_kuaishou_videoid":
-                os.remove(yesterday_kuaishou_videoid_txt_dir + file)
-        Common.crawler_log().info("删除快手昨天下载统计文件成功")
+        cls.logger().info('累计下载视频数: {}\n', count)
 
 
 if __name__ == "__main__":
     common = Common()
-    common.del_yesterday_kuaishou_videoid_txt()
-    print(common.kuaishou_today_download_count())
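
For reference, a minimal sketch of the loguru pattern this commit introduces in Common.logger(). The rotation and retention values are the ones named in the comments above; the log path and the retention option are illustrative assumptions, not part of the commit:

    from loguru import logger

    logger.remove()                        # drop the default stderr sink: log to file only
    logger.add("./logs/2022-05-09.log",    # hypothetical date-based file name
               level="INFO",
               rotation="00:00",           # start a new log file at midnight
               retention="10 days")        # clean up log files older than 10 days
    logger.info("hello")                   # extra args would be formatted via str.format

Note that Common.logger() runs remove()/add() on every call; configuring the sink once at startup would avoid re-creating it for each log line.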

+ 34 - 10
main/demo.py

@@ -1,10 +1,11 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
 # @Time: 2022/3/31
+import os
 from datetime import date, timedelta
+from loguru import logger
 import datetime
 import json
-import re
 import time
 
 import requests
@@ -246,15 +247,38 @@ class Demo:
                     print(f"正常视频:{title}")
                     cls.kuaishou_sensitive_words().remove(word)
 
+    @classmethod
+    def logger(cls):
+        # 日志路径
+        log_dir = r"./logs/"
+        log_path = os.getcwd() + os.sep + log_dir
+        if not os.path.isdir(log_path):
+            os.makedirs(log_path)
+
+        # 日志参数
+        log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + '.log'
+
+        # 日志不打印到控制台
+        logger.remove(handler_id=None)
+        # logger.add(log_dir+log_name, format="{time} {level} {message}",
+        # level="INFO", rotation='5 MB', encoding='utf-8')
+        logger.add(log_dir+log_name, level="INFO", rotation='13:56')
+        return logger
+
 
 if __name__ == "__main__":
-    demo = Demo()
-    # demo.demo1()
-    demo.time()
-    # demo.get_douyin_feeds()
-    # demo.demo2()
-    # demo.get_weishi_feeds()
-    # demo.edit_str()
-    # demo.sensitive_words()
+    while True:
+        demo = Demo()
+        # demo.demo1()
+        # demo.time()
+        # demo.get_douyin_feeds()
+        # demo.demo2()
+        # demo.get_weishi_feeds()
+        # demo.edit_str()
+        # demo.sensitive_words()
+        demo.logger().info("hello")
+        time.sleep(10)
+        a = "hahaha"
+        demo.logger().exception("what:{}", a)
 
-    pass
+    # pass
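
The commented-out logger.add() line above spells out the full option set. A sketch of that variant, assuming a date-based path like the one demo.logger() builds:

    from loguru import logger
    import time

    log_name = time.strftime("%Y-%m-%d", time.localtime()) + ".log"
    logger.remove()                                # don't print logs to the console
    logger.add("./logs/" + log_name,
               format="{time} {level} {message}",  # options from the commented-out line
               level="INFO",
               rotation="5 MB",                    # rotate once the file reaches 5 MB
               encoding="utf-8")
    logger.info("hello")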

+ 218 - 218
main/download_kuaishou.py

@@ -10,6 +10,7 @@ import time
 import requests
 import urllib3
 from main.common import Common
+from main.feishu_lib import Feishu
 from main.publish import Publish
 
 proxies = {"http": None, "https": None}
@@ -71,9 +72,9 @@ class KuaiShou:
         """
         if 600 >= int(float(d_duration)) >= 60:
             if int(d_width) >= 720 or int(d_height) >= 720:
-                if int(d_play_cnt) >= 50000:
-                    if int(d_like_cnt) >= 50000:
-                        if int(d_share_cnt) >= 2000:
+                if int(d_play_cnt) >= 5:
+                    if int(d_like_cnt) >= 30000:
+                        if int(d_share_cnt) >= 1000:
                             return True
                         else:
                             return False
@@ -87,10 +88,10 @@ class KuaiShou:
     @classmethod
     def kuaishou_get_recommend(cls):
         """
-        从快手小程序首页推荐获取视频list:
-            1.在 kuaishou_videoid.txt 中去重
-            2.在 kuaishou_feeds.txt 中去重
-            3.添加视频信息到 kuaishou_feeds.txt
+        1.从快手小程序首页推荐,获取视频列表
+        2.先在 https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=kCSk2e 中去重
+        3.再从 https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=YWeCh7 中去重
+        4.添加视频信息至 https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=YWeCh7
         """
         url = "https://wxmini-api.uyouqu.com/rest/wd/wechatApp/feed/recommend"
         params = {
@@ -165,100 +166,129 @@ class KuaiShou:
             r = requests.post(url=url, params=params, cookies=cookies, json=json_data, proxies=proxies, verify=False)
             response = json.loads(r.content.decode("utf8"))
             if "feeds" not in response:
-                Common.crawler_log().info("获取快手视频 list 出错:{},休眠 10s".format(response))
+                Common.logger().warning("获取快手视频 list 出错:{},休眠 10s".format(response))
                 time.sleep(10)
             else:
                 feeds = response["feeds"]
                 for i in range(len(feeds)):
+                    # 视频标题过滤话题及处理特殊字符
+                    kuaishou_title = feeds[i]["caption"]
+                    title_split1 = kuaishou_title.split(" #")
+                    if title_split1[0] != "":
+                        title1 = title_split1[0]
+                    else:
+                        title1 = title_split1[-1]
+
+                    title_split2 = title1.split(" #")
+                    if title_split2[0] != "":
+                        title2 = title_split2[0]
+                    else:
+                        title2 = title_split2[-1]
+
+                    title_split3 = title2.split("@")
+                    if title_split3[0] != "":
+                        title3 = title_split3[0]
+                    else:
+                        title3 = title_split3[-1]
+
+                    video_title = title3.strip().replace("\n", "") \
+                        .replace("/", "").replace("快手", "").replace(" ", "") \
+                        .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
+                        .replace("#", "").replace(".", "。").replace("\\", "") \
+                        .replace(":", "").replace("*", "").replace("?", "") \
+                        .replace("?", "").replace('"', "").replace("<", "") \
+                        .replace(">", "").replace("|", "")
+                    Common.logger().info("video_title:{}".format(video_title))
+
                     if "photoId" not in feeds[i]:
                         photo_id = "0"
-                        Common.crawler_log().info("photo_id:{}".format(photo_id))
+                        Common.logger().info("photo_id:{}".format(photo_id))
                     else:
                         photo_id = feeds[i]["photoId"]
-                        Common.crawler_log().info("photo_id:{}".format(photo_id))
+                        Common.logger().info("photo_id:{}".format(photo_id))
 
                     if "viewCount" not in feeds[i]:
                         video_play_cnt = "0"
-                        Common.crawler_log().info("video_play_cnt:0")
+                        Common.logger().info("video_play_cnt:0")
                     else:
                         video_play_cnt = feeds[i]["viewCount"]
-                        Common.crawler_log().info("video_play_cnt:{}".format(video_play_cnt))
+                        Common.logger().info("video_play_cnt:{}".format(video_play_cnt))
 
                     if "likeCount" not in feeds[i]:
                         video_like_cnt = "0"
-                        Common.crawler_log().info("video_like_cnt:0")
+                        Common.logger().info("video_like_cnt:0")
                     else:
                         video_like_cnt = feeds[i]["likeCount"]
-                        Common.crawler_log().info("video_like_cnt:{}".format(video_like_cnt))
-
-                    if "headUrl" not in feeds[i]:
-                        head_url = "0"
-                        Common.crawler_log().info("head_url:不存在")
-                    else:
-                        head_url = feeds[i]["headUrl"]
-                        Common.crawler_log().info("head_url:{}".format(head_url))
-
-                    if len(feeds[i]["coverUrls"]) == 0:
-                        cover_url = "0"
-                        Common.crawler_log().info("cover_url:不存在")
-                    else:
-                        cover_url = feeds[i]["coverUrls"][0]["url"]
-                        Common.crawler_log().info("cover_url:{}".format(cover_url))
-
-                    if len(feeds[i]["mainMvUrls"]) == 0:
-                        video_url = "0"
-                        Common.crawler_log().info("video_url:不存在")
-                    else:
-                        video_url = feeds[i]["mainMvUrls"][0]["url"]
-                        Common.crawler_log().info("video_url:{}".format(video_url))
+                        Common.logger().info("video_like_cnt:{}".format(video_like_cnt))
 
                     if "shareCount" not in feeds[i]:
                         video_share_cnt = "0"
-                        Common.crawler_log().info("video_share_cnt:0")
+                        Common.logger().info("video_share_cnt:0")
                     else:
                         video_share_cnt = feeds[i]["shareCount"]
-                        Common.crawler_log().info("video_share_cnt:{}".format(video_share_cnt))
-
-                    if "width" not in feeds[i] or "height"not in feeds[i]:
-                        video_width = "0"
-                        video_height = "0"
-                        video_resolution = str(video_width) + "*" + str(video_height)
-                        Common.crawler_log().info("无分辨率")
-                    else:
-                        video_width = feeds[i]["width"]
-                        video_height = feeds[i]["height"]
-                        video_resolution = str(video_width) + "*" + str(video_height)
-                        Common.crawler_log().info("video_resolution:{}".format(video_resolution))
+                        Common.logger().info("video_share_cnt:{}".format(video_share_cnt))
 
                     if "commentCount" not in feeds[i]:
                         video_comment_cnt = "0"
-                        Common.crawler_log().info("video_comment_cnt:0")
+                        Common.logger().info("video_comment_cnt:0")
                     else:
                         video_comment_cnt = feeds[i]["commentCount"]
-                        Common.crawler_log().info("video_comment_cnt:{}".format(video_comment_cnt))
+                        Common.logger().info("video_comment_cnt:{}".format(video_comment_cnt))
 
                     if "duration" not in feeds[i]:
                         video_duration = "0"
-                        Common.crawler_log().info("video_duration:不存在")
+                        Common.logger().info("video_duration:不存在")
                     else:
-                        video_duration = int(int(feeds[i]["duration"])/1000)
-                        Common.crawler_log().info("video_duration:{}秒".format(video_duration))
+                        video_duration = int(int(feeds[i]["duration"]) / 1000)
+                        Common.logger().info("video_duration:{}秒".format(video_duration))
+
+                    if "width" not in feeds[i] or "height" not in feeds[i]:
+                        video_width = "0"
+                        video_height = "0"
+                        video_resolution = str(video_width) + "*" + str(video_height)
+                        Common.logger().info("无分辨率")
+                    else:
+                        video_width = feeds[i]["width"]
+                        video_height = feeds[i]["height"]
+                        video_resolution = str(video_width) + "*" + str(video_height)
+                        Common.logger().info("video_resolution:{}".format(video_resolution))
 
                     if "timestamp" not in feeds[i]:
                         video_send_time = "0"
-                        Common.crawler_log().info("video_send_time:不存在")
+                        Common.logger().info("video_send_time:不存在")
                     else:
                         video_send_time = feeds[i]["timestamp"]
-                        Common.crawler_log().info("video_send_time:{}".format(
-                            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(video_send_time)/1000))))
+                        Common.logger().info("video_send_time:{}".format(
+                            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(video_send_time) / 1000))))
 
-                    user_name = feeds[i]["userName"].strip().replace("\n", "")\
-                        .replace("/", "").replace("快手", "").replace(" ", "")\
+                    user_name = feeds[i]["userName"].strip().replace("\n", "") \
+                        .replace("/", "").replace("快手", "").replace(" ", "") \
                         .replace(" ", "").replace("&NBSP", "").replace("\r", "")
-                    Common.crawler_log().info("user_name:{}".format(user_name))
+                    Common.logger().info("user_name:{}".format(user_name))
 
                     user_id = feeds[i]["userId"]
-                    Common.crawler_log().info("user_id:{}".format(user_id))
+                    Common.logger().info("user_id:{}".format(user_id))
+
+                    if "headUrl" not in feeds[i]:
+                        head_url = "0"
+                        Common.logger().info("head_url:不存在")
+                    else:
+                        head_url = feeds[i]["headUrl"]
+                        Common.logger().info("head_url:{}".format(head_url))
+
+                    if len(feeds[i]["coverUrls"]) == 0:
+                        cover_url = "0"
+                        Common.logger().info("cover_url:不存在")
+                    else:
+                        cover_url = feeds[i]["coverUrls"][0]["url"]
+                        Common.logger().info("cover_url:{}".format(cover_url))
+
+                    if len(feeds[i]["mainMvUrls"]) == 0:
+                        video_url = "0"
+                        Common.logger().info("video_url:不存在")
+                    else:
+                        video_url = feeds[i]["mainMvUrls"][0]["url"]
+                        Common.logger().info("video_url:{}".format(video_url))
 
                     # 视频标题过滤话题及处理特殊字符
                     kuaishou_title = feeds[i]["caption"]
@@ -288,173 +318,143 @@ class KuaiShou:
                         .replace("?", "").replace('"', "").replace("<", "")\
                         .replace(">", "").replace("|", "")
 
-                    Common.crawler_log().info("video_title:{}".format(video_title))
-
-                    # 从 kuaishou_videoid.txt 中去重
-                    photo_ids = Common.read_txt("kuaishou_videoid.txt")
-                    if photo_id in [p_id.strip() for p_id in photo_ids]:
-                        Common.crawler_log().info("该视频已下载:{}".format(video_title))
-                        pass
-                    else:
-                        Common.crawler_log().info("该视频未下载:{}".format(video_title))
-
-                        # 从 kuaishou_feeds.txt 中去重
-                        contents = Common.read_txt("kuaishou_feeds.txt")
-                        # kuaishou_feeds.txt 为空时,直接保存
-                        if len(contents) == 0 and head_url != "0" \
-                                and cover_url != "0" and video_url != "0" \
-                                and video_duration != "0" and photo_id != "0":
-                            # 判断敏感词
-                            if any(word if word in kuaishou_title else False
+                    Common.logger().info("video_title:{}".format(video_title))
+
+                    # 过滤无效视频
+                    if photo_id == "0" \
+                            or head_url == "0" \
+                            or cover_url == "0"\
+                            or video_url == "0"\
+                            or video_duration == "0"\
+                            or video_send_time == "0"\
+                            or user_name == ""\
+                            or video_title == "":
+                        Common.logger().info("无效视频")
+                    # 从 云文档 去重:https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=kCSk2e
+                    elif photo_id in [j for i in Feishu.get_values_batch("kCSk2e") for j in i]:
+                        Common.logger().info("该视频已下载:{}", video_title)
+                    # 从 云文档 去重:https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=YWeCh7
+                    elif photo_id in [j for i in Feishu.get_values_batch("YWeCh7") for j in i]:
+                        Common.logger().info("该视频已在feeds中:{}", video_title)
+                    # 判断敏感词
+                    elif any(word if word in kuaishou_title else False
                                    for word in cls.kuaishou_sensitive_words()) is True:
-                                Common.crawler_log().info("视频已中敏感词:{}".format(kuaishou_title))
-                            else:
-                                basic_time = int(time.time())
-                                Common.crawler_log().info("添加视频信息至kuaishou_feeds.txt:{}".format(video_title))
-                                with open(r"./txt/kuaishou_feeds.txt", "a", encoding="UTF-8") as f_a:
-                                    f_a.write(str(basic_time) + " + " +
-                                              str(photo_id) + " + " +
-                                              str(video_play_cnt) + " + " +
-                                              str(video_title) + " + " +
-                                              str(video_duration) + " + " +
-                                              str(video_comment_cnt) + " + " +
-                                              str(video_like_cnt) + " + " +
-                                              str(video_share_cnt) + " + " +
-                                              str(video_resolution) + " + " +
-                                              str(video_send_time) + " + " +
-                                              str(user_name) + " + " +
-                                              str(head_url) + " + " +
-                                              str(cover_url) + " + " +
-                                              str(video_url) + " + " +
-                                              str(user_id) + " + " +
-                                              str("wxo_b07ba02ad4340205d89b47c76030bb090977") + "\n")
-                        else:
-                            if photo_id in [content.split(" + ")[1] for content in contents]:
-                                Common.crawler_log().info("该视频已在 kuaishou_feeds.txt 中:{}".format(video_title))
-                            elif head_url == "0" or cover_url == "0" \
-                                    or video_url == "0" or video_duration == "0" or photo_id == "0":
-                                Common.crawler_log().info("视频封面/播放地址/播放时长/用户头像不存在")
-                            else:
-                                # 判断敏感词
-                                if any(word if word in kuaishou_title else False
-                                       for word in cls.kuaishou_sensitive_words()) is True:
-                                    Common.crawler_log().info("视频已中敏感词:{}".format(kuaishou_title))
-                                else:
-                                    basic_time = int(time.time())
-                                    Common.crawler_log().info("添加视频信息至kuaishou_feeds.txt:{}".format(video_title))
-                                    with open(r"./txt/kuaishou_feeds.txt", "a", encoding="UTF-8") as f_a:
-                                        f_a.write(str(basic_time) + " + " +
-                                                  str(photo_id) + " + " +
-                                                  str(video_play_cnt) + " + " +
-                                                  str(video_title) + " + " +
-                                                  str(video_duration) + " + " +
-                                                  str(video_comment_cnt) + " + " +
-                                                  str(video_like_cnt) + " + " +
-                                                  str(video_share_cnt) + " + " +
-                                                  str(video_resolution) + " + " +
-                                                  str(video_send_time) + " + " +
-                                                  str(user_name) + " + " +
-                                                  str(head_url) + " + " +
-                                                  str(cover_url) + " + " +
-                                                  str(video_url) + " + " +
-                                                  str(user_id) + " + " +
-                                                  str("wxo_b07ba02ad4340205d89b47c76030bb090977") + "\n")
+                        Common.logger().info("视频已中敏感词:{}".format(kuaishou_title))
+                    else:
+                        Common.logger().info("该视频未下载,添加至feeds中:{}".format(video_title))
+                        # feeds工作表,插入首行
+                        Feishu.insert_columns("YWeCh7")
+
+                        # 获取当前时间
+                        get_feeds_time = int(time.time())
+                        # 看一看云文档,工作表 kanyikan_feeds_1 中写入数据
+                        Feishu.update_values("YWeCh7",
+                                             a1=str(get_feeds_time),
+                                             b1=str(photo_id),
+                                             c1=str(video_play_cnt),
+                                             d1=str(video_title),
+                                             e1=str(video_duration),
+                                             f1=str(video_comment_cnt),
+                                             g1=str(video_like_cnt),
+                                             h1=str(video_share_cnt),
+                                             i1=str(video_resolution),
+                                             j1=str(video_send_time),
+                                             k1=str(user_name),
+                                             l1=str(head_url),
+                                             m1=str(cover_url),
+                                             n1=str(video_url),
+                                             o1=str("wxo_b07ba02ad4340205d89b47c76030bb090977"))
         except Exception as e:
-            Common.crawler_log().error("获取视频 list 异常:{}".format(e))
+            Common.logger().error("获取视频 list 异常:{}".format(e))
 
     @classmethod
     def kuaishou_download_play_video(cls, env):
         """
-        下载播放量视频
+        1.从 https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=YWeCh7 中读取视频信息
+        2.下载并上传符合规则的视频
         测试环境:env == dev
         正式环境:env == prod
         """
-        videos = Common.read_txt("kuaishou_feeds.txt")
-        for video in videos:
-            download_photo_id = video.strip().split(" + ")[1]
-            download_video_title = video.strip().split(" + ")[3]
-            download_video_duration = video.strip().split(" + ")[4]
-            download_video_play_cnt = video.strip().split(" + ")[2]
-            download_video_comment_cnt = video.strip().split(" + ")[5]
-            download_video_like_cnt = video.strip().split(" + ")[6]
-            download_video_share_cnt = video.strip().split(" + ")[7]
-            download_video_resolution = video.strip().split(" + ")[8]
-            download_video_width = download_video_resolution.split("*")[0]
-            download_video_height = download_video_resolution.split("*")[-1]
-            download_video_send_time = video.strip().split(" + ")[9]
-            download_user_name = video.strip().split(" + ")[10]
-            download_head_url = video.strip().split(" + ")[11]
-            download_cover_url = video.strip().split(" + ")[12]
-            download_video_url = video.strip().split(" + ")[13]
-            download_video_session = video.strip().split(" + ")[-1]
-
-            if cls.kuaishou_download_rule(download_video_duration,
-                                          download_video_width,
-                                          download_video_height,
-                                          download_video_play_cnt,
-                                          download_video_like_cnt,
-                                          download_video_share_cnt) is True:
-                Common.crawler_log().info("开始下载快手视频:{}".format(download_video_title))
-                # 下载封面
-                Common.download_method(text="cover", d_name=download_video_title, d_url=download_cover_url)
-                # 下载视频
-                Common.download_method(text="video", d_name=download_video_title, d_url=download_video_url)
-
-                # 保存视频信息至 kuaishou_videoid.txt
-                with open(r"./txt/kuaishou_videoid.txt", "a", encoding="UTF-8") as fa:
-                    fa.write(download_photo_id + "\n")
-
-                # 添加视频 ID 到 list,用于统计当次下载总数
-                cls.download_video_list.append(download_photo_id)
-
-                # # 保存视频信息至 {today}_kuaishou_videoid.txt
-                # with open("./txt/" + str(Common.today) + "_kuaishou_videoid.txt", "a", encoding="UTF-8") as fc:
-                #     fc.write(download_photo_id + "\n")
-
-                # 保存视频信息至 "./videos/{download_video_title}/info.txt"
-                with open(r"./videos/" + download_video_title + "/info.txt", "a", encoding="UTF-8") as f_a:
-                    f_a.write(str(download_photo_id) + "\n" +
-                              str(download_video_title) + "\n" +
-                              str(download_video_duration) + "\n" +
-                              str(download_video_play_cnt) + "\n" +
-                              str(download_video_comment_cnt) + "\n" +
-                              str(download_video_like_cnt) + "\n" +
-                              str(download_video_share_cnt) + "\n" +
-                              str(download_video_resolution) + "\n" +
-                              str(download_video_send_time) + "\n" +
-                              str(download_user_name) + "\n" +
-                              str(download_head_url) + "\n" +
-                              str(download_video_url) + "\n" +
-                              str(download_cover_url) + "\n" +
-                              str(download_video_session))
-
-                # 上传视频
-                if env == "dev":
-                    Common.crawler_log().info("开始上传视频:{}".format(download_video_title))
-                    Publish.upload_and_publish("dev", "play")
-                elif env == "prod":
-                    Common.crawler_log().info("开始上传视频:{}".format(download_video_title))
-                    Publish.upload_and_publish("prod", "play")
-
-                # 删除该视频在kuaishou_feeds.txt中的信息
-                Common.crawler_log().info("删除该视频在kuaishou_feeds.txt中的信息:{}".format(download_video_title))
-                with open(r"./txt/kuaishou_feeds.txt", "r", encoding="UTF-8") as f_r:
-                    lines = f_r.readlines()
-                with open(r"./txt/kuaishou_feeds.txt", "w", encoding="utf-8") as f_w:
-                    for line in lines:
-                        if download_photo_id in line.split(" + ")[1]:
-                            continue
-                        f_w.write(line)
-            else:
-                # 删除该视频在 recommend.txt中的信息
-                Common.crawler_log().info("该视频不满足下载规则,删除在kuaishou_feeds.txt中的信息:{}".format(download_video_title))
-                with open(r"./txt/kuaishou_feeds.txt", "r", encoding="UTF-8") as f_r:
-                    lines = f_r.readlines()
-                with open(r"./txt/kuaishou_feeds.txt", "w", encoding="utf-8") as f_w:
-                    for line in lines:
-                        if download_photo_id in line.split(" + ")[1]:
-                            continue
-                        f_w.write(line)
+        if len(Feishu.get_values_batch("YWeCh7")) == 1:
+            pass
+        else:
+            for i in range(len(Feishu.get_values_batch("YWeCh7"))):
+                try:
+                    download_photo_id = Feishu.get_values_batch("YWeCh7")[i + 1][1]
+                    download_video_play_cnt = Feishu.get_values_batch("YWeCh7")[i + 1][2]
+                    download_video_title = Feishu.get_values_batch("YWeCh7")[i + 1][3]
+                    download_video_duration = Feishu.get_values_batch("YWeCh7")[i + 1][4]
+                    download_video_comment_cnt = Feishu.get_values_batch("YWeCh7")[i + 1][5]
+                    download_video_like_cnt = Feishu.get_values_batch("YWeCh7")[i + 1][6]
+                    download_video_share_cnt = Feishu.get_values_batch("YWeCh7")[i + 1][7]
+                    download_video_resolution = Feishu.get_values_batch("YWeCh7")[i + 1][8]
+                    download_video_width = download_video_resolution.split("*")[0]
+                    download_video_height = download_video_resolution.split("*")[-1]
+                    download_video_send_time = Feishu.get_values_batch("YWeCh7")[i + 1][9]
+                    download_user_name = Feishu.get_values_batch("YWeCh7")[i + 1][10]
+                    download_head_url = Feishu.get_values_batch("YWeCh7")[i + 1][11]
+                    download_cover_url = Feishu.get_values_batch("YWeCh7")[i + 1][12]
+                    download_video_url = Feishu.get_values_batch("YWeCh7")[i + 1][13]
+                    download_video_session = Feishu.get_values_batch("YWeCh7")[i + 1][14]
+
+                    # 下载规则
+                    if cls.kuaishou_download_rule(download_video_duration,
+                                                  download_video_width,
+                                                  download_video_height,
+                                                  download_video_play_cnt,
+                                                  download_video_like_cnt,
+                                                  download_video_share_cnt) is True:
+                        Common.logger().info("开始下载快手视频:{}".format(download_video_title))
+                        # 下载封面
+                        Common.download_method(text="cover", d_name=download_video_title, d_url=download_cover_url)
+                        # 下载视频
+                        Common.download_method(text="video", d_name=download_video_title, d_url=download_video_url)
+                        # 保存视频信息至 "./videos/{download_video_title}/info.txt"
+                        with open(r"./videos/" + download_video_title + "/info.txt", "a", encoding="UTF-8") as f_a:
+                            f_a.write(str(download_photo_id) + "\n" +
+                                      str(download_video_title) + "\n" +
+                                      str(download_video_duration) + "\n" +
+                                      str(download_video_play_cnt) + "\n" +
+                                      str(download_video_comment_cnt) + "\n" +
+                                      str(download_video_like_cnt) + "\n" +
+                                      str(download_video_share_cnt) + "\n" +
+                                      str(download_video_resolution) + "\n" +
+                                      str(download_video_send_time) + "\n" +
+                                      str(download_user_name) + "\n" +
+                                      str(download_head_url) + "\n" +
+                                      str(download_video_url) + "\n" +
+                                      str(download_cover_url) + "\n" +
+                                      str(download_video_session))
+
+                        # 添加视频 ID 到 list,用于统计当次下载总数
+                        cls.download_video_list.append(download_photo_id)
+
+                        # 上传视频
+                        Common.logger().info("开始上传视频:{}".format(download_video_title))
+                        Publish.upload_and_publish(env, "play")
+
+                        # 保存视频 ID 到云文档:https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=kCSk2e
+                        Common.logger().info("保存视频ID至云文档:{}", download_video_title)
+                        # 视频ID工作表,插入首行
+                        Feishu.insert_columns("kCSk2e")
+                        # 视频ID工作表,首行写入数据
+                        Feishu.update_values("kCSk2e", download_photo_id, "", "", "",
+                                             "", "", "", "", "", "", "", "", "", "", "")
+
+                        # 从云文档删除该视频信息:https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=YWeCh7
+                        Common.logger().info("从云文档删除该视频信息:{}", download_video_title)
+                        # 删除行或列,可选 ROWS、COLUMNS
+                        Feishu.dimension_range("YWeCh7", "ROWS", i + 2, i + 2)
+                    else:
+                        # 从云文档删除该视频信息:https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?sheet=YWeCh7
+                        Common.logger().info("该视频不满足下载规则,删除在云文档中的信息:{}", download_video_title)
+                        # 删除行或列,可选 ROWS、COLUMNS
+                        Feishu.dimension_range("YWeCh7", "ROWS", i + 2, i + 2)
+                except Exception as e:
+                    Common.logger().error("视频 info 异常,删除该视频信息", e)
+                    # 删除行或列,可选 ROWS、COLUMNS
+                    Feishu.dimension_range("YWeCh7", "ROWS", i + 2, i + 2)
+                cls.kuaishou_download_play_video("prod")
 
 
 if __name__ == "__main__":
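
A sketch of the Feishu-sheet dedup pattern used in kuaishou_get_recommend() above. It assumes, based on the calls in this diff, that Feishu.get_values_batch() returns a list of rows with each row a list of cell values, which is why the result is flattened before the membership test:

    from main.common import Common
    from main.feishu_lib import Feishu

    photo_id = "abc123"  # hypothetical ID fetched from the recommend feed
    rows = Feishu.get_values_batch("kCSk2e")  # e.g. [["ts", "photo_id", ...], [...]]
    downloaded_ids = [cell for row in rows for cell in row]
    if photo_id in downloaded_ids:
        Common.logger().info("该视频已下载:{}", photo_id)

Reading the sheet once into a local variable like this, instead of one get_values_batch() call per field as in kuaishou_download_play_video(), would also cut the number of HTTP requests per run considerably.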

+ 0 - 346
main/download_weishi.py

@@ -1,346 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2022/4/8
-import json
-import time
-
-import requests
-import urllib3
-from main.common import Common
-from main.publish import Publish
-
-proxies = {"http": None, "https": None}
-
-
-class Weishi:
-    @staticmethod
-    def weishi_download_rule(d_duration, d_width, d_height, d_play_cnt):
-        """
-        下载视频的基本规则
-        :param d_duration: 时长
-        :param d_width: 宽
-        :param d_height: 高
-        :param d_play_cnt: 播放量
-        :return: 满足规则,返回 True;反之,返回 False
-        """
-        if 600 >= int(float(d_duration)) >= 60:
-            if int(d_width) >= 720 or int(d_height) >= 720:
-                if int(d_play_cnt) >= 100000:
-                    return True
-                else:
-                    return False
-            return False
-        return False
-
-    @classmethod
-    def get_weishi_recommend(cls):
-        """
-        从微视小程序首页推荐获取视频list:
-            1.在 weishi_videoid.txt 中去重
-            2.在 weishi_feeds.txt 中去重
-            3.添加视频信息到 weishi_feeds.txt
-        """
-        url = "https://api.weishi.qq.com/trpc.weishi.weishi_h5_proxy.weishi_h5_proxy/WxminiGetFeedList"
-        cookies = {
-            "wesee_authtype": "3",
-            "wesee_openid": "oWGa05FrwkuUvT-4n1qGeQuhVsc8",
-            "wesee_openkey": "8c3ec202f5d679fb5ee6d9f643640d9a2580ba504612e2d979a881d3169caf189e2a5c1d532eeff172bc21cf2"
-                             "6230941ccbc10243a7879e8165ca608c17060de606a6d08afe0a3abd5250629314f9a99e9d1003b201bf5ec",
-            "wesee_personid": "1593522421826902",
-            "wesee_refresh_token": "",
-            "wesee_access_token": "8c3ec202f5d679fb5ee6d9f643640d9a2580ba504612e2d979a881d3169caf18"
-                                  "9e2a5c1d532eeff172bc21cf26230941ccbc10243a7879e8165ca608c17060de6"
-                                  "06a6d08afe0a3abd5250629314f9a99e9d1003b201bf5ec",
-            "wesee_thr_appid": "wx75ee9f19b93e5c46",
-            "wesee_ichid": "8"
-        }
-        json_data = {
-            "req_body": {
-                "requestType": 16,
-                "isrefresh": 0,
-                "isfirst": 0,
-                "attachInfo": "",
-                "scene_id": 22,
-                "requestExt": {
-                    "mini_openid": "oWGa05FrwkuUvT-4n1qGeQuhVsc8",
-                    "notLogin-personid": "1593522421826902"
-                }
-            },
-            "req_header": {
-                "mapExt": "{\"imageSize\":\"480\",\"adaptScene\":\"PicHDWebpLimitScene\"}"
-            }
-        }
-
-        try:
-            urllib3.disable_warnings()
-            r = requests.post(url=url, cookies=cookies, json=json_data, proxies=proxies, verify=False)
-            response = json.loads(r.content.decode("utf8"))
-            if "rsp_body" not in response:
-                Common.crawler_log().info("获取微视视频 list 出错:{},休眠 10s".format(response))
-                time.sleep(10)
-            else:
-                feeds = response["rsp_body"]["feeds"]
-                for i in range(len(feeds)):
-                    if "video" not in feeds[i]:
-                        Common.crawler_log().info("无视频信息")
-                    else:
-                        # 视频 ID
-                        if "id" not in feeds[i]["video"]:
-                            video_id = "0"
-                            Common.crawler_log().info("video_id:{}".format(video_id))
-                        else:
-                            video_id = feeds[i]["video"]["id"]
-                            Common.crawler_log().info("video_id:{}".format(video_id))
-
-                        # 视频标题
-                        video_title = feeds[i]["desc"].strip().replace("\n", "") \
-                            .replace("/", "").replace("快手", "").replace(" ", "") \
-                            .replace(" ", "").replace("&NBSP", "").replace("\r", "")
-                        Common.crawler_log().info("video_title:{}".format(video_title))
-
-                        # 视频发布时间
-                        if "createTime" not in feeds[i]:
-                            video_send_time = "0"
-                            Common.crawler_log().info("video_send_time:不存在")
-                        else:
-                            video_send_time = int(feeds[i]["createTime"])*1000
-                            Common.crawler_log().info(
-                                "video_send_time:{}".format(time.strftime(
-                                    "%Y-%m-%d %H:%M:%S", time.localtime(int(video_send_time)/1000))))
-
-                        # 视频封面地址
-                        if len(feeds[i]["images"]) == 0:
-                            cover_url = "0"
-                            Common.crawler_log().info("cover_url:不存在")
-                        else:
-                            cover_url = feeds[i]["images"][0]["url"]
-                            Common.crawler_log().info("cover_url:{}".format(cover_url))
-
-                        # 视频播放地址
-                        if "url" not in feeds[i]["video"]:
-                            video_url = "0"
-                            Common.crawler_log().info("video_url:不存在")
-                        else:
-                            video_url = feeds[i]["video"]["url"]
-                            Common.crawler_log().info("video_url:{}".format(video_url))
-
-                        # 视频分辨率
-                        if "width" not in feeds[i]["video"] or "height" not in feeds[i]["video"]:
-                            video_width = "0"
-                            video_height = "0"
-                            video_resolution = str(video_width) + "*" + str(video_height)
-                            Common.crawler_log().info("无分辨率")
-                        else:
-                            video_width = feeds[i]["video"]["width"]
-                            video_height = feeds[i]["video"]["height"]
-                            video_resolution = str(video_width) + "*" + str(video_height)
-                            Common.crawler_log().info("video_resolution:{}".format(video_resolution))
-
-                        # 视频时长
-                        if "duration" not in feeds[i]["video"]:
-                            video_duration = "0"
-                            Common.crawler_log().info("video_duration:不存在")
-                        else:
-                            video_duration = int(int(feeds[i]["video"]["duration"]) / 1000)
-                            Common.crawler_log().info("video_duration:{}秒".format(video_duration))
-
-                        # 播放数
-                        if "playNum" not in feeds[i]["ugcData"]:
-                            video_play_cnt = "0"
-                            Common.crawler_log().info("video_play_cnt:{}".format(video_play_cnt))
-                        else:
-                            video_play_cnt = feeds[i]["ugcData"]["playNum"]
-                            Common.crawler_log().info("video_play_cnt:{}".format(video_play_cnt))
-
-                        # 点赞数
-                        if "dingCount" not in feeds[i]["ugcData"]:
-                            video_like_cnt = "0"
-                            Common.crawler_log().info("video_like_cnt:{}".format(video_like_cnt))
-                        else:
-                            video_like_cnt = feeds[i]["ugcData"]["dingCount"]
-                            Common.crawler_log().info("video_like_cnt:{}".format(video_like_cnt))
-
-                        # 分享数
-                        if "shareNum" not in feeds[i]["ugcData"]:
-                            video_share_cnt = "0"
-                            Common.crawler_log().info("video_share_cnt:{}".format(video_share_cnt))
-                        else:
-                            video_share_cnt = feeds[i]["ugcData"]["shareNum"]
-                            Common.crawler_log().info("video_share_cnt:{}".format(video_share_cnt))
-
-                        # 评论数
-                        if "totalCommentNum" not in feeds[i]["ugcData"]:
-                            video_comment_cnt = "0"
-                            Common.crawler_log().info("video_comment_cnt:{}".format(video_comment_cnt))
-                        else:
-                            video_comment_cnt = feeds[i]["ugcData"]["totalCommentNum"]
-                            Common.crawler_log().info("video_comment_cnt:{}".format(video_comment_cnt))
-
-                        # 用户 ID
-                        user_id = feeds[i]["poster"]["id"]
-                        Common.crawler_log().info("user_id:{}".format(user_id))
-
-                        # 用户昵称
-                        user_name = feeds[i]["poster"]["nick"].strip().replace("\n", "") \
-                            .replace("/", "").replace("快手", "").replace(" ", "") \
-                            .replace(" ", "").replace("&NBSP", "").replace("\r", "")
-                        Common.crawler_log().info("user_name:{}".format(user_name))
-
-                        # 用户头像地址
-                        if "thumbURL" not in feeds[i]["material"] and "avatar" not in feeds[i]["poster"]:
-                            head_url = "0"
-                            Common.crawler_log().info("head_url:不存在")
-                        elif "thumbURL" in feeds[i]["material"]:
-                            head_url = feeds[i]["material"]["thumbURL"]
-                            Common.crawler_log().info("head_url:{}".format(head_url))
-                        else:
-                            head_url = feeds[i]["poster"]["avatar"]
-                            Common.crawler_log().info("head_url:{}".format(head_url))
-
-                        # 从 weishi_videoid.txt 中去重
-                        videos_ids = Common.read_txt("weishi_videoid.txt")
-                        if video_id in [v_id.strip() for v_id in videos_ids]:
-                            Common.crawler_log().info("该视频已下载:{}".format(video_title))
-                            pass
-                        else:
-                            Common.crawler_log().info("该视频未下载:{}".format(video_title))
-
-                            # 从 weishi_feeds.txt 中去重
-                            contents = Common.read_txt("weishi_feeds.txt")
-                            # 若 weishi_feeds.txt 为空时,直接保存
-                            if len(contents) == 0 and head_url != "0" \
-                                    and cover_url != "0" and video_url != "0" \
-                                    and video_duration != "0" and video_id != "0":
-                                basic_time = int(time.time())
-                                Common.crawler_log().info("添加视频信息至weishi_feeds.txt:{}".format(video_title))
-                                with open(r"./txt/weishi_feeds.txt", "a", encoding="UTF-8") as f_a:
-                                    f_a.write(str(basic_time) + " + " +
-                                              str(video_id) + " + " +
-                                              str(video_play_cnt) + " + " +
-                                              str(video_title) + " + " +
-                                              str(video_duration) + " + " +
-                                              str(video_comment_cnt) + " + " +
-                                              str(video_like_cnt) + " + " +
-                                              str(video_share_cnt) + " + " +
-                                              str(video_resolution) + " + " +
-                                              str(video_send_time) + " + " +
-                                              str(user_name) + " + " +
-                                              str(head_url) + " + " +
-                                              str(cover_url) + " + " +
-                                              str(video_url) + " + " +
-                                              str(user_id) + " + " +
-                                              str("oWGa05FrwkuUvT-4n1qGeQuhVsc8") + "\n")
-                            else:
-                                if video_id in [content.split(" + ")[1] for content in contents]:
-                                    Common.crawler_log().info("该视频已在 weishi_feeds.txt 中:{}".format(video_title))
-                                elif head_url == "0" or cover_url == "0" \
-                                        or video_url == "0" or video_duration == "0" or video_id == "0":
-                                    Common.crawler_log().info("视频封面/播放地址/播放时长/用户头像不存在")
-                                else:
-                                    basic_time = int(time.time())
-                                    Common.crawler_log().info("添加视频信息至weishi_feeds.txt:{}".format(video_title))
-                                    with open(r"./txt/weishi_feeds.txt", "a", encoding="UTF-8") as f_a:
-                                        f_a.write(str(basic_time) + " + " +
-                                                  str(video_id) + " + " +
-                                                  str(video_play_cnt) + " + " +
-                                                  str(video_title) + " + " +
-                                                  str(video_duration) + " + " +
-                                                  str(video_comment_cnt) + " + " +
-                                                  str(video_like_cnt) + " + " +
-                                                  str(video_share_cnt) + " + " +
-                                                  str(video_resolution) + " + " +
-                                                  str(video_send_time) + " + " +
-                                                  str(user_name) + " + " +
-                                                  str(head_url) + " + " +
-                                                  str(cover_url) + " + " +
-                                                  str(video_url) + " + " +
-                                                  str(user_id) + " + " +
-                                                  str("oWGa05FrwkuUvT-4n1qGeQuhVsc8") + "\n")
-        except Exception as e:
-            Common.crawler_log().error("获取微视视频 list 异常:{}".format(e))
-
-    @classmethod
-    def download_weishi_play_video(cls, env):
-        """
-        下载播放量视频
-        测试环境:env == dev
-        正式环境:env == prod
-        """
-        videos = Common.read_txt("weishi_feeds.txt")
-        for video in videos:
-            download_video_id = video.strip().split(" + ")[1]
-            download_video_title = video.strip().split(" + ")[3]
-            download_video_duration = video.strip().split(" + ")[4]
-            download_video_play_cnt = video.strip().split(" + ")[2]
-            download_video_comment_cnt = video.strip().split(" + ")[5]
-            download_video_like_cnt = video.strip().split(" + ")[6]
-            download_video_share_cnt = video.strip().split(" + ")[7]
-            download_video_resolution = video.strip().split(" + ")[8]
-            download_video_width = download_video_resolution.split("*")[0]
-            download_video_height = download_video_resolution.split("*")[-1]
-            download_video_send_time = video.strip().split(" + ")[9]
-            download_user_name = video.strip().split(" + ")[10]
-            download_head_url = video.strip().split(" + ")[11]
-            download_cover_url = video.strip().split(" + ")[12]
-            download_video_url = video.strip().split(" + ")[13]
-            download_video_session = video.strip().split(" + ")[-1]
-
-            if cls.weishi_download_rule(download_video_duration, download_video_width,
-                                        download_video_height, download_video_play_cnt) is True:
-                Common.crawler_log().info("开始下载视频:{}".format(download_video_title))
-                # 下载封面
-                Common.download_method(text="cover", d_name=download_video_title, d_url=download_cover_url)
-                # 下载视频
-                Common.download_method(text="video", d_name=download_video_title, d_url=download_video_url)
-                # 保存视频信息至 weishi_videoid.txt
-                with open(r"./txt/weishi_videoid.txt", "a", encoding="UTF-8") as fa:
-                    fa.write(download_video_id + "\n")
-                # 保存视频信息至 "./videos/{download_video_title}/info.txt"
-                with open(r"./videos/" + download_video_title + "/info.txt", "a", encoding="UTF-8") as f_a:
-                    f_a.write(str(download_video_id) + "\n" +
-                              str(download_video_title) + "\n" +
-                              str(download_video_duration) + "\n" +
-                              str(download_video_play_cnt) + "\n" +
-                              str(download_video_comment_cnt) + "\n" +
-                              str(download_video_like_cnt) + "\n" +
-                              str(download_video_share_cnt) + "\n" +
-                              str(download_video_resolution) + "\n" +
-                              str(download_video_send_time) + "\n" +
-                              str(download_user_name) + "\n" +
-                              str(download_head_url) + "\n" +
-                              str(download_video_url) + "\n" +
-                              str(download_cover_url) + "\n" +
-                              str(download_video_session))
-
-                # 上传视频
-                if env == "dev":
-                    Common.crawler_log().info("开始上传视频:{}".format(download_video_title))
-                    Publish.upload_and_publish("dev", "play")
-                elif env == "prod":
-                    Common.crawler_log().info("开始上传视频:{}".format(download_video_title))
-                    Publish.upload_and_publish("prod", "play")
-
-                # 删除该视频在weishi_feeds.txt中的信息
-                Common.crawler_log().info("删除该视频在weishi_feeds.txt中的信息:{}".format(download_video_title))
-                with open(r"./txt/weishi_feeds.txt", "r", encoding="UTF-8") as f_r:
-                    lines = f_r.readlines()
-                with open(r"./txt/weishi_feeds.txt", "w", encoding="utf-8") as f_w:
-                    for line in lines:
-                        if download_video_id in line.split(" + ")[1]:
-                            continue
-                        f_w.write(line)
-            else:
-                # 删除该视频在weishi_feeds.txt中的信息
-                Common.crawler_log().info("该视频不满足下载规则,删除在weishi_feeds.txt中的信息:{}".format(download_video_title))
-                with open(r"./txt/weishi_feeds.txt", "r", encoding="UTF-8") as f_r:
-                    lines = f_r.readlines()
-                with open(r"./txt/weishi_feeds.txt", "w", encoding="utf-8") as f_w:
-                    for line in lines:
-                        if download_video_id in line.split(" + ")[1]:
-                            continue
-                        f_w.write(line)
-
-
-if __name__ == "__main__":
-    weishi = Weishi()
-    weishi.get_weishi_recommend()

+ 275 - 0
main/feishu_lib.py

@@ -0,0 +1,275 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/5/9
+import json
+import requests
+import urllib3
+
+from main.common import Common
+
+proxies = {"http": None, "https": None}
+
+
+class Feishu:
+    """
+    Edit the Feishu spreadsheet document
+    """
+    feishu_url = "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?"
+    spreadsheetToken = "shtcngRPoDYAi24x52j2nDuHMih"
+
+    # Get the Feishu API token
+    @classmethod
+    def get_token(cls):
+        """
+        Get the Feishu API token
+        :return:
+        """
+        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
+        post_data = {"app_id": "cli_a13ad2afa438d00b",  # 这里账号密码是发布应用的后台账号及密码
+                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
+
+        try:
+            urllib3.disable_warnings()
+            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
+            tenant_access_token = response.json()["tenant_access_token"]
+            return tenant_access_token
+        except Exception as e:
+            Common.logger().error("获取飞书 api token 异常:{}", e)
+
+    # Get spreadsheet metadata
+    @classmethod
+    def get_metainfo(cls):
+        """
+        Get spreadsheet metadata
+        :return:
+        """
+        url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" + cls.spreadsheetToken + "/metainfo"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            "extFields": "protectedRange",  # 额外返回的字段,extFields=protectedRange时返回保护行列信息
+            "user_id_type": "open_id"  # 返回的用户id类型,可选open_id,union_id
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, proxies=proxies, verify=False)
+            response = json.loads(r.content.decode("utf8"))
+            return response
+        except Exception as e:
+            Common.logger().error("获取表格元数据异常:{}", e)
+
+    # Read all data from a sheet
+    @classmethod
+    def get_values_batch(cls, sheetid):
+        """
+        Read all data from a sheet
+        :param sheetid: which sheet
+        :return: all values
+        """
+
+        url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" + cls.spreadsheetToken + "/values_batch_get"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            # multiple query ranges, e.g. url?ranges=range1,range2; each range consists of a sheetId plus a cell range
+            "ranges": sheetid,
+
+            # valueRenderOption=ToString returns plain-text values (except numeric types);
+            # valueRenderOption=FormattedValue computes and formats the cell;
+            # valueRenderOption=Formula returns the formula itself when the cell contains one;
+            # valueRenderOption=UnformattedValue computes but does not format the cell
+            "valueRenderOption": "ToString",
+
+            # dateTimeRenderOption=FormattedString formats date/time values by their cell format (numbers are left untouched) and returns the formatted string.
+            "dateTimeRenderOption": "",
+
+            # user ID type to return: open_id or union_id
+            "user_id_type": "open_id"
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, proxies=proxies, verify=False)
+            response = json.loads(r.content.decode("utf8"))
+            values = response["data"]["valueRanges"][0]["values"]
+            return values
+        except Exception as e:
+            Common.logger().error("读取工作表所有数据异常:{}", e)
+
+    # Insert a blank row below the header row of a sheet
+    @classmethod
+    def insert_columns(cls, sheetid):
+        """
+        Insert rows or columns
+        :return: inserts one blank row below the header row
+        """
+        url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/"\
+              + cls.spreadsheetToken + "/insert_dimension_range"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "dimension": {
+                "sheetId": sheetid,
+                "majorDimension": "ROWS",  # 默认 ROWS ,可选 ROWS、COLUMNS
+                "startIndex": 1,  # 开始的位置
+                "endIndex": 2  # 结束的位置
+            },
+            "inheritStyle": "AFTER"  # BEFORE 或 AFTER,不填为不继承 style
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger().info("插入空行:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger().error("插入空行异常:{}", e)
+
+    # Write data into the first data row of a sheet
+    @classmethod
+    def update_values(cls, sheetid, a1, b1, c1, d1, e1, f1, g1, h1, i1, j1, k1, l1, m1, n1, o1):
+        """
+        Write one row of data into the newly inserted row (cells A2:O2)
+        :param sheetid: which sheet
+        :param a1: value for column A; b1 through o1 likewise fill columns B through O
+        :return:
+        """
+
+        url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" + cls.spreadsheetToken + "/values_batch_update"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "valueRanges": [
+                {
+                    "range": sheetid + "!A2:O2",
+                    "values": [
+                        [a1, b1, c1, d1, e1, f1, g1, h1, i1, j1, k1, l1, m1, n1, o1]
+                    ]
+                },
+            ],
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger().info("空行写入视频数据:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger().error("空行写入视频数据异常:{}", e)
+
+    # Read the content of a cell range
+    @classmethod
+    def get_range_value(cls, sheetid, cell):
+        """
+        Read the content of a cell
+        :param sheetid: which sheet
+        :param cell: which cell
+        :return: cell content
+        """
+        url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+              + cls.spreadsheetToken + "/values/" + sheetid + "!" + cell
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            # valueRenderOption=ToString returns plain-text values (except numeric types);
+            # valueRenderOption=FormattedValue computes and formats the cell;
+            # valueRenderOption=Formula returns the formula itself when the cell contains one;
+            # valueRenderOption=UnformattedValue computes but does not format the cell.
+            "valueRenderOption": "ToString",
+
+            # dateTimeRenderOption=FormattedString formats date/time values by their cell format (numbers are left untouched) and returns the formatted string.
+            "dateTimeRenderOption": "",
+
+            # user ID type to return: open_id or union_id
+            "user_id_type": "open_id"
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, proxies=proxies, verify=False)
+            return r.json()["data"]["valueRange"]["values"][0]
+        except Exception as e:
+            Common.logger().error("读取单元格数据异常:{}", e)
+
+    # Delete rows or columns; options: ROWS, COLUMNS
+    @classmethod
+    def dimension_range(cls, sheetid, major_dimension, startindex, endindex):
+        """
+        Delete rows or columns
+        :param sheetid: which sheet
+        :param major_dimension: default ROWS; options: ROWS, COLUMNS
+        :param startindex: start position
+        :param endindex: end position
+        :return:
+        """
+        url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" + cls.spreadsheetToken + "/dimension_range"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "dimension": {
+                "sheetId": sheetid,
+                "majorDimension": major_dimension,
+                "startIndex": startindex,
+                "endIndex": endindex
+            }
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.delete(url=url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger().info("删除视频数据:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger().error("删除视频数据异常:{}", e)
+
+
+if __name__ == "__main__":
+    feishu = Feishu()
+
+    # # Get the Feishu API token
+    # feishu.get_token()
+    # # Get spreadsheet metadata
+    # feishu.get_metainfo()
+
+    # Read all data from a sheet
+    # print(feishu.get_values_batch("Y8N3Vl"))
+    # print(len(feishu.get_values_batch("SdCHOM")))
+    # for i in range(len(feishu.get_values_batch("Y8N3Vl"))):
+    #     videoid = feishu.get_values_batch("Y8N3Vl")[i][1]
+    #     if videoid == "b3":
+    #         # Delete rows or columns; options: ROWS, COLUMNS
+    #         feishu.dimension_range("Y8N3Vl", "ROWS", i+1, i+1)
+    #         print(videoid)
+
+    # # 看一看+ sheet: insert a blank first row
+    # print(feishu.insert_columns("Y8N3Vl"))
+    #
+    # # 看一看+ sheet: write data into the first data row
+    # print(feishu.update_values("Y8N3Vl", "a1", "b1", "c1", "d1", "e1", "f1", "g1",
+    #                            "h1", "i1", "j1", "k1", "l1", "m1", "n1", "o1"))
+
+    # # Read the content of a cell range
+    # print(feishu.get_range_value("Y8N3Vl", "B8:C8"))
+    #
+    # # Delete rows or columns; options: ROWS, COLUMNS
+    # feishu.dimension_range("Y8N3Vl", "ROWS")
+
+    pass
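
Taken together, the intended write path for this helper is: each method fetches a tenant token internally, insert_columns opens a blank row below the header, and update_values fills cells A2:O2. A minimal usage sketch, assuming the module is importable as main.feishu_lib; the sheet ID "Y8N3Vl" comes from the commented examples above, and the 15 field values are placeholders rather than the sheet's real column headers:

    from main.feishu_lib import Feishu

    # Push one record to the top of the sheet:
    # 1) insert a blank row below the header, 2) fill cells A2:O2
    Feishu.insert_columns("Y8N3Vl")
    Feishu.update_values("Y8N3Vl",
                         "video_id", "title", "duration", "play_cnt", "comment_cnt",
                         "like_cnt", "share_cnt", "resolution", "send_time", "user_name",
                         "head_url", "video_url", "cover_url", "session", "extra")

    # Read the row back, then delete it, using the same index
    # convention as the commented cleanup loop above
    print(Feishu.get_range_value("Y8N3Vl", "A2:O2"))
    Feishu.dimension_range("Y8N3Vl", "ROWS", 2, 2)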

+ 22 - 26
main/publish.py

@@ -36,13 +36,13 @@ class Publish:
         versionCode  version, default 1
         :return:
         """
-        # Common.crawler_log().info('publish request data: {}'.format(request_data))
+        Common.logger().info('publish request data: {}'.format(request_data))
         result = cls.request_post('https://videotest.yishihui.com/longvideoapi/crawler/video/send', request_data)
-        Common.crawler_log().info('publish result: {}'.format(result))
+        Common.logger().info('publish result: {}'.format(result))
         if result['code'] != 0:
-            Common.crawler_log().error('pushlish failure msg = {}'.format(result['msg']))
+            Common.logger().error('publish failure msg = {}'.format(result['msg']))
         else:
-            Common.crawler_log().info('publish success video_id = : {}'.format(request_data['crawlerSrcId']))
+            Common.logger().info('publish success video_id = {}'.format(request_data['crawlerSrcId']))
 
     @classmethod
     def publish_video_prod(cls, request_data):
@@ -61,13 +61,12 @@ class Publish:
         versionCode  version, default 1
         :return:
         """
-        # Common.crawler_log().info('publish request data: {}'.format(request_data))
         result = cls.request_post('https://longvideoapi.piaoquantv.com/longvideoapi/crawler/video/send', request_data)
-        Common.crawler_log().info('publish result: {}'.format(result))
+        Common.logger().info('publish result: {}'.format(result))
         if result['code'] != 0:
-            Common.crawler_log().error('pushlish failure msg = {}'.format(result['msg']))
+            Common.logger().error('publish failure msg = {}'.format(result['msg']))
         else:
-            Common.crawler_log().info('publish success video_id = : {}'.format(request_data['crawlerSrcId']))
+            Common.logger().info('publish success video_id = {}'.format(request_data['crawlerSrcId']))
 
     @classmethod
     def request_post(cls, request_url, request_data):
@@ -120,23 +119,20 @@ class Publish:
 
     @classmethod
     def put_file(cls, oss_file, local_file):
-        # Common.crawler_log().info("put oss file = {}, local file = {}".format(oss_file, local_file))
         cls.bucket.put_object_from_file(oss_file, local_file)
-        Common.crawler_log().info("put oss file = {}, local file = {} success".format(oss_file, local_file))
+        Common.logger().info("put oss file = {}, local file = {} success".format(oss_file, local_file))
 
     # Remove a local file
     @classmethod
     def remove_local_file(cls, local_file):
-        # Common.crawler_log().info("remove local file = {}".format(local_file))
         os.remove(local_file)
-        Common.crawler_log().info("remove local file = {} success".format(local_file))
+        Common.logger().info("remove local file = {} success".format(local_file))
 
     # Remove a local directory
     @classmethod
     def remove_local_file_dir(cls, local_file):
-        # Common.crawler_log().info("remove local file dir = {}".format(local_file))
         os.rmdir(local_file)
-        Common.crawler_log().info("remove local file dir = {} success".format(local_file))
+        Common.logger().info("remove local file dir = {} success".format(local_file))
 
     local_file_path = '.\\videos'
     video_file = 'video'
@@ -156,7 +152,7 @@ class Publish:
         :param env: test environment: dev; production environment: prod
         :param job: rising list: up; play count: play
         """
-        Common.crawler_log().info("upload_and_publish starting...")
+        Common.logger().info("upload_and_publish starting...")
         today = time.strftime("%Y%m%d", time.localtime())
         # All video folders under the videos directory
         files = os.listdir(cls.local_file_path)
@@ -166,7 +162,7 @@ class Publish:
                 fi_d = os.path.join(cls.local_file_path, f)
                 # Confirm this is a video folder
                 if os.path.isdir(fi_d):
-                    Common.crawler_log().info('dir = {}'.format(fi_d))
+                    Common.logger().info('dir = {}'.format(fi_d))
                     # List all files in the video folder
                     dir_files = os.listdir(fi_d)
                     data = {'appType': '888888', 'crawlerSrcCode': 'KANYIKAN', 'viewStatus': '1', 'versionCode': '1'}
@@ -186,7 +182,7 @@ class Publish:
                     for fi in dir_files:
                         # Full path of each file in the video folder
                         fi_path = fi_d + '\\' + fi
-                        Common.crawler_log().info('dir fi_path = {}'.format(fi_path))
+                        Common.logger().info('dir fi_path = {}'.format(fi_path))
                         # Read info.txt and populate data
                         if cls.info_file in fi:
                             f = open(fi_path, "r", encoding="UTF-8")
@@ -195,7 +191,7 @@ class Publish:
                                 line = f.readline()
                                 line = line.replace('\n', '')
                                 if line is not None and len(line) != 0 and not line.isspace():
-                                    Common.crawler_log().info("line = {}".format(line))
+                                    Common.logger().info("line = {}".format(line))
                                     if i == 0:
                                         data['crawlerSrcId'] = line
                                     elif i == 1:
@@ -205,7 +201,7 @@ class Publish:
                                     elif i == 8:
                                         data['crawlerSrcPublishTimestamp'] = line
                                 else:
-                                    Common.crawler_log().warning("{} line is None".format(fi_path))
+                                    Common.logger().warning("{} line is None".format(fi_path))
                             f.close()
                             # remove info.txt
                             cls.remove_local_file(fi_path)
@@ -213,7 +209,7 @@ class Publish:
                     dir_files = os.listdir(fi_d)
                     for fi in dir_files:
                         fi_path = fi_d + '\\' + fi
-                        Common.crawler_log().info('dir fi_path = {}'.format(fi_path))
+                        Common.logger().info('dir fi_path = {}'.format(fi_path))
                         # Upload to OSS
                         if cls.video_file in fi:
                             global oss_video_file
@@ -221,20 +217,20 @@ class Publish:
                                 oss_video_file = cls.oss_file_path_video.format("dev", today, data['crawlerSrcId'])
                             elif env == "prod":
                                 oss_video_file = cls.oss_file_path_video.format("prod", today, data['crawlerSrcId'])
-                            Common.crawler_log().info("oss_video_file = {}".format(oss_video_file))
+                            Common.logger().info("oss_video_file = {}".format(oss_video_file))
                             cls.put_file(oss_video_file, fi_path)
                             data['videoPath'] = oss_video_file
-                            Common.crawler_log().info("videoPath = {}".format(oss_video_file))
+                            Common.logger().info("videoPath = {}".format(oss_video_file))
                         elif cls.image_file in fi:
                             global oss_image_file
                             if env == "dev":
                                 oss_image_file = cls.oss_file_path_image.format("env", today, data['crawlerSrcId'])
                             elif env == "prod":
                                 oss_image_file = cls.oss_file_path_image.format("prod", today, data['crawlerSrcId'])
-                            Common.crawler_log().info("oss_image_file = {}".format(oss_image_file))
+                            Common.logger().info("oss_image_file = {}".format(oss_image_file))
                             cls.put_file(oss_image_file, fi_path)
                             data['coverImgPath'] = oss_image_file
-                            Common.crawler_log().info("coverImgPath = {}".format(oss_image_file))
+                            Common.logger().info("coverImgPath = {}".format(oss_image_file))
                         # Remove everything
                         cls.remove_local_file(fi_path)
 
@@ -246,6 +242,6 @@ class Publish:
                     cls.remove_local_file_dir(fi_d)
 
                 else:
-                    Common.crawler_log().error('file not a dir = {}'.format(fi_d))
+                    Common.logger().error('file not a dir = {}'.format(fi_d))
             except Exception as e:
-                Common.crawler_log().exception('upload_and_publish error', e)
+                Common.logger().exception('upload_and_publish error: {}', e)
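
For context, this uploader is driven per environment from the crawler code (see the calls to Publish.upload_and_publish in the removed Weishi module above). A minimal sketch of that call pattern, assuming ./videos already contains a folder with an info.txt, a video file, and a cover image:

    from main.common import Common
    from main.publish import Publish

    env = "prod"  # or "dev" for the test host
    Common.logger().info("Start uploading videos")
    # job="play" selects the play-count pipeline; "up" the rising-list one
    Publish.upload_and_publish(env, "play")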

+ 34 - 86
main/run.py

@@ -1,14 +1,14 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
 # @Time: 2022/3/30
+import datetime
 import os
 import random
 import sys
 import time
-from apscheduler.schedulers.blocking import BlockingScheduler
 sys.path.append(os.getcwd())
 from main.common import Common
-from main.download_weishi import Weishi
 from main.download_kuaishou import KuaiShou
 
 
@@ -18,13 +18,12 @@ def kuaishou_dev_job():
     """
     while True:
         # Daily download/upload cap: 10 videos
-        if len(KuaiShou.download_video_list) >= 20:
-            time.sleep(60)
-            break
+        if len(KuaiShou.download_video_list) >= 10:
+            Common.logger().info("Videos downloaded: {}".format(len(KuaiShou.download_video_list)))
+            time.sleep(1800)
         else:
-            Common.crawler_log().info("Start crawling Kuaishou videos")
+            Common.logger().info("Start crawling Kuaishou videos")
             time.sleep(1)
-
             # Crawl videos that match the rules and write them to kuaishou_feeds.txt
             KuaiShou.kuaishou_get_recommend()
             # Download and upload videos
@@ -32,91 +31,41 @@ def kuaishou_dev_job():
             # Sleep randomly for 1-3s
             time.sleep(random.randint(1, 3))
 
-    # Delete redundant logs
-    Common.del_logs()
-    # Count downloaded videos
-    Common.kuaishou_download_count()
-
-
-def weishi_dev_job():
-    """
-    Run the Weishi script in the test environment
-    """
-    while True:
-        if 14 >= Common.now.hour >= 5:
-            Common.crawler_log().info("Stop the crawl and upload task")
-            break
-        else:
-            # Crawl videos that match the rules and write them to weishi_feeds.txt
-            Weishi.get_weishi_recommend()
-            # Download and upload videos
-            Weishi.download_weishi_play_video("dev")
-            # Sleep randomly for 1-3s
-            time.sleep(random.randint(1, 3))
-
-    # Delete redundant logs
-    Common.del_logs()
-    # Count downloaded videos
-    Common.weishi_download_count()
+        # Delete redundant logs
+        Common.del_logs()
 
 
 def main_dev():
-    """
-    测试环境主函数
-    """
-    scheduler = BlockingScheduler(timezone="Asia/Shanghai")
-    # 抓取视频的定时任务,在每天10点的40分,运行一次 job 方法
-    scheduler.add_job(kuaishou_dev_job, 'cron', hour=19, minute=10, misfire_grace_time=60)
-    # 开始运行脚本
-    scheduler.start()
+    while True:
+        main_time = datetime.datetime.now()
+        if main_time.hour >= 10:
+            kuaishou_dev_job()
+        else:
+            # sleep instead of spinning while outside the run window
+            time.sleep(60)
 
 
-def weishi_prod_job():
+def kuaishou_prod_job():
     """
-    Run the Weishi script in the production environment
+    Run the Kuaishou script in the production environment
     """
     while True:
-        if 20 >= Common.now.hour >= 5:
-            Common.crawler_log().info("结束抓取微视视频任务")
-            break
+        # 当天下载及上传的视频数:150 条
+        if len(KuaiShou.download_video_list) >= 150:
+            Common.logger().info("已下载视频数:{}".format(len(KuaiShou.download_video_list)))
+            time.sleep(1800)
         else:
-            # 抓取符合规则的视频,写入 weishi_feeds.txt
-            Weishi.get_weishi_recommend()
+            Common.logger().info("开始抓取快手视频")
+            time.sleep(1)
+            # 抓取符合规则的视频,写入 kuaishou_feeds.txt
+            KuaiShou.kuaishou_get_recommend()
             # 下载视频,并上传
-            Weishi.download_weishi_play_video("prod")
+            KuaiShou.kuaishou_download_play_video("prod")
             # 随机睡眠1-3s
             time.sleep(random.randint(1, 3))
 
-    # Delete redundant logs
-    Common.del_logs()
-    # Count downloaded videos
-    Common.weishi_download_count()
-
-
-def kuaishou_prod_job():
-    """
-    Run the Kuaishou script in the production environment
-    """
-    # while True:
-    #     # Daily download/upload cap: 200 videos
-    #     if len(KuaiShou.download_video_list) >= 200:
-    #         time.sleep(60)
-    #         break
-    #     else:
-
-    Common.crawler_log().info("Start crawling Kuaishou videos")
-
-    # Crawl videos that match the rules and write them to kuaishou_feeds.txt
-    KuaiShou.kuaishou_get_recommend()
-    # Download and upload videos
-    KuaiShou.kuaishou_download_play_video("prod")
-    # Sleep randomly for 1-3s
-    time.sleep(random.randint(1, 3))
-
-    # Delete redundant logs
-    Common.del_logs()
-    # Count downloaded videos
-    Common.kuaishou_download_count()
+        # Delete redundant logs
+        Common.del_logs()
 
 
 def main_prod():
@@ -124,13 +73,12 @@ def main_prod():
     Main function for the production environment
     """
     while True:
-        kuaishou_prod_job()
-
-    # scheduler = BlockingScheduler(timezone="Asia/Shanghai")
-    # # Scheduled crawl task; runs the job method once a day
-    # scheduler.add_job(kuaishou_prod_job, 'cron', hour=8, minute=00, misfire_grace_time=60)
-    # # Start the scheduler
-    # scheduler.start()
+        main_time = datetime.datetime.now()
+        if main_time.hour >= 10:
+            kuaishou_prod_job()
+        else:
+            # sleep instead of spinning while outside the run window
+            time.sleep(60)
 
 
 if __name__ == "__main__":
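
With APScheduler removed, both entry points now share the same shape: an endless loop that runs the job whenever the local hour is 10 or later and sleeps otherwise. Reduced to its essentials, the pattern looks like the sketch below (function and parameter names here are illustrative, not from the repo):

    import datetime
    import time

    def run_window_loop(job, start_hour=10):
        # Run `job` repeatedly while the local hour is at or past
        # start_hour; outside the window, nap instead of spinning.
        while True:
            if datetime.datetime.now().hour >= start_hour:
                job()
            else:
                time.sleep(60)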

+ 0 - 3
txt/__init__.py

@@ -1,3 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2022/3/30

+ 0 - 0
txt/kuaishou_feeds.txt


+ 0 - 0
txt/kuaishou_videoid.txt


+ 0 - 0
txt/weishi_feeds.txt


+ 0 - 0
txt/weishi_videoid.txt