first push

wangkun committed 2 years ago · parent commit 4b53daadc9
8 changed files with 1180 additions and 683 deletions

  1. main/common.py (+37 −85)
  2. main/demo.py (+3 −206)
  3. main/feishu_lib.py (+310 −0)
  4. main/follow.py (+411 −0)
  5. main/publish.py (+56 −55)
  6. main/recommend.py (+298 −270)
  7. main/run_follow.py (+45 −0)
  8. main/run_recommend.py (+20 −67)

+ 37 - 85
main/common.py

@@ -6,12 +6,11 @@
 """
 from datetime import date, timedelta
 import datetime
-import logging
 import os
 import time
 import requests
 import urllib3
-
+from loguru import logger
 proxies = {"http": None, "https": None}
 
 
@@ -25,35 +24,46 @@ class Common:
     # Tomorrow <class 'str'>  2022-04-15
     tomorrow = (date.today() + timedelta(days=1)).strftime("%Y-%m-%d")
 
+    # Generate logs with the loguru module
     @staticmethod
-    def crawler_log():
+    def logger(log_type):
         """
-        Generate logs
+        Generate logs with the loguru module
         """
         # Log directory
-        log_dir = r"./logs/"
+        log_dir = "./logs/"
         log_path = os.getcwd() + os.sep + log_dir
         if not os.path.isdir(log_path):
             os.makedirs(log_path)
 
-        # Logging parameters
-        log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-        date_format = "%Y-%m-%d %p %H:%M:%S"
-        log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + '.log'
+        # Log file name
+        if log_type == "follow":
+            log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + '-weishi-follow.log'
+        elif log_type == "recommend":
+            log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + '-weishi-recommend.log'
+        else:
+            log_name = time.strftime("%Y-%m-%d", time.localtime(time.time())) + '-weishi.log'
+
+        # Remove the default handler so logs are not also printed to the console
+        logger.remove(handler_id=None)
 
-        # Initialize logging
-        logging.basicConfig(filename=log_path + log_name, level=logging.INFO, format=log_format, datefmt=date_format)
-        crawler_logger = logging.getLogger("crawler-log")
+        # rotation="500 MB": start a new log file every 500 MB
+        # rotation="12:00": start a new log file at 12:00 every day
+        # rotation="1 week": start a new log file every week
+        # retention="10 days": purge log files older than 10 days
+        # Initialize the logger
+        logger.add(log_dir + log_name, level="INFO", rotation='00:00')
 
-        return crawler_logger
+        return logger
 
+    # Purge logs, keeping the most recent 6 files
     @classmethod
-    def del_logs(cls):
+    def del_logs(cls, log_type):
         """
         清除冗余日志文件
-        :return: 保留最近 7 个日志
+        :return: 保留最近 6 个日志
         """
-        log_dir = r"./logs/"
+        log_dir = "./logs/"
         all_files = sorted(os.listdir(log_dir))
         all_logs = []
         for log in all_files:
@@ -61,21 +71,24 @@ class Common:
             if name == ".log":
                 all_logs.append(log)
 
-        if len(all_logs) <= 7:
+        if len(all_logs) <= 6:
             pass
         else:
-            for file in all_logs[:len(all_logs) - 7]:
+            for file in all_logs[:len(all_logs) - 6]:
                 os.remove(log_dir + file)
-        cls.crawler_log().info("清除冗余日志成功")
+        cls.logger(log_type).info("清除冗余日志成功\n")
 
     @classmethod
-    def download_method(cls, text, d_name, d_url):
+    def download_method(cls, log_type, text, d_name, d_url):
         """
         Download a cover: text == "cover"; download a video: text == "video"
         Title of the video to download: d_name
         Cover image or video playback URL: d_url
         Download/save path: "./videos/{d_name}/"
         """
+        videos_dir = "./videos/"
+        if not os.path.exists(videos_dir):
+            os.mkdir(videos_dir)
+        # First create a folder to hold this video's related files
         video_dir = "./videos/" + d_name + "/"
         if not os.path.exists(video_dir):
@@ -95,11 +108,9 @@ class Common:
                 with open(video_dir + video_name, "wb") as f:
                     for chunk in response.iter_content(chunk_size=10240):
                         f.write(chunk)
-                cls.crawler_log().info("==========视频下载完成==========")
+                cls.logger(log_type).info("==========视频下载完成==========")
             except Exception as e:
-                cls.crawler_log().info("视频下载失败:{}".format(e))
-            # except FileNotFoundError:
-            #     cls.kuaishou_log().info("==========视频下载失败==========")
+                cls.logger(log_type).error("视频下载失败:{}\n".format(e))
 
         # Download the cover
         elif text == "cover":
@@ -114,69 +125,10 @@ class Common:
             try:
                 with open(video_dir + cover_name, "wb") as f:
                     f.write(response.content)
-                cls.crawler_log().info("==========封面下载完成==========")
+                cls.logger(log_type).info("==========封面下载完成==========")
             except Exception as e:
-                cls.crawler_log().info("封面下载失败:{}".format(e))
-            # except FileNotFoundError:
-            #     cls.kuaishou_log().info("==========封面下载失败==========")
-
-    @staticmethod
-    def read_txt(t_name):
-        """
-        Read a txt file
-        :param t_name: file name
-        :return: file contents
-        """
-        with open(r"./txt/" + t_name, "r", encoding="UTF-8") as f:
-            return f.readlines()
-
-    @classmethod
-    def kuaishou_download_count(cls):
-        videoid_path = r"./txt/kuaishou_videoid.txt"
-        count = 0
-        for count, line in enumerate(open(videoid_path, "rb").readlines()):
-            count += 1
-        cls.crawler_log().info('累计下载视频数: {}\n'.format(count))
-
-    @classmethod
-    def weishi_download_count(cls):
-        videoid_path = r"./txt/weishi_videoid.txt"
-        count = 0
-        for count, line in enumerate(open(videoid_path, "rb").readlines()):
-            count += 1
-        cls.crawler_log().info('累计下载视频数: {}\n'.format(count))
-
-    @classmethod
-    def kuaishou_today_download_count(cls):
-        """
-        Count the number of videos downloaded today via the Kuaishou channel
-        :return:
-        """
-        # Create an empty file
-        with open(r"./txt/" + str(cls.today) + "_kuaishou_videoid.txt", "a") as f:
-            f.write("")
-        videoid_path = r"./txt/" + str(cls.today) + "_kuaishou_videoid.txt"
-        count = 0
-        for count, line in enumerate(open(videoid_path, "rb").readlines()):
-            count += 1
-        return count
-
-    @classmethod
-    def del_yesterday_kuaishou_videoid_txt(cls):
-        """
-        Delete yesterday's Kuaishou download-count txt file
-        :return:
-        """
-        yesterday_kuaishou_videoid_txt_dir = r"./txt/"
-        all_files = sorted(os.listdir(yesterday_kuaishou_videoid_txt_dir))
-        for file in all_files:
-            name = os.path.splitext(file)[0]
-            if name == cls.yesterday + "_kuaishou_videoid":
-                os.remove(yesterday_kuaishou_videoid_txt_dir + file)
-        Common.crawler_log().info("删除快手昨天下载统计文件成功")
+                cls.logger(log_type).error("封面下载失败:{}\n".format(e))
 
 
 if __name__ == "__main__":
     common = Common()
-    common.del_yesterday_kuaishou_videoid_txt()
-    print(common.kuaishou_today_download_count())
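
For orientation, a minimal usage sketch of the reworked Common helpers (illustrative only, not part of the commit; the "follow" log_type, the title, and the URLs are placeholders):

    from main.common import Common

    # Each call site now selects its own log file via log_type
    Common.logger("follow").info("crawler started")

    # Download a cover and a video into ./videos/<d_name>/
    Common.download_method("follow", "cover", "demo-title", "https://example.com/cover.jpg")
    Common.download_method("follow", "video", "demo-title", "https://example.com/video.mp4")

    # Keep only the most recent 6 log files
    Common.del_logs("follow")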

+ 3 - 206
main/demo.py

@@ -3,15 +3,11 @@
 # @Time: 2022/3/31
 from datetime import date, timedelta
 import datetime
-import json
-import re
 import time
 
-import requests
-import urllib3
-
 
 class Demo:
+
     @classmethod
     def demo1(cls):
         download_video_resolution = "720*1280"
@@ -24,10 +20,10 @@ class Demo:
     @classmethod
     def time(cls):
         # Recommended
-        time1 = int(time.time()*1000)
+        time1 = int(time.time() * 1000)
         print(time1)
         # Not recommended
-        time2 = round(time.time())*1000
+        time2 = round(time.time()) * 1000
         print(time2)
 
         # Get the current time consistently
@@ -47,214 +43,15 @@ class Demo:
         print(type(tomorrow))
         print(f"明天:{tomorrow}")
 
-    @classmethod
-    def get_douyin_feeds(cls):
-        """
-        Fetch Douyin feed videos https://www.douyin.com
-        """
-        url = "https://www.douyin.com/aweme/v1/web/tab/feed/?"
-        params = {
-            "device_platform": "webapp",
-            "aid": "6383",
-            "channel": "channel_pc_web",
-            "count": "10",
-            "refresh_index": "4",
-            "video_type_select": "0",
-            "version_code": "170400",
-            "version_name": "17.4.0",
-            "cookie_enabled": "true",
-            "screen_width": "1920",
-            "screen_height": "1080",
-            "browser_language": "zh-CN",
-            "browser_platform": "MacIntel",
-            "browser_name": "Chrome",
-            "browser_version": "99.0.4844.84",
-            "browser_online": "true",
-            "engine_name": "Blink",
-            "engine_version": "99.0.4844.84",
-            "os_name": "Mac OS",
-            "os_version": "10.15.7",
-            "platform": "PC",
-            "cpu_core_num": "8",
-            "device_memory": "8",
-            "downlink": "10",
-            "effective_type": "4g",
-            "round_trip_time": "50",
-            "msToken": "304uY1lV7HmHkR1G1QUaFqg0yrL5_WqrFOR8qCbl3hOsl8aSNI_18vIfpTGNhNRVZx7ysRiCHpcBKhpujTsbbC"
-                       "ZEDbG7pllZzlO3tlrBOs2TFYUgJdsvbw==",
-            "X-Bogus": "DFSzswVYPVsANat/Sl8eGc3WxM23",
-            "_signature": "qaJgTwAAy.aVqLslyfC7aKmiYF"
-        }
-        cookies = {
-            "_tea_utm_cache_6383": "undefined",
-            "ttwid": "1%7CETZk6sDMDSBgewWhKJXghFN4cwXTz0fLuhsLEngD_Nk%7C1648812136%7Cfa66fa81ccfe3f552f4"
-                     "e8b8327e72cbbc5e897141c25a5fcd32defaed1466d3e",
-            "passport_csrf_token": "e2d0f1ed9fd22463be9f389137a781ce",
-            "passport_csrf_token_default": "e2d0f1ed9fd22463be9f389137a781ce",
-            "s_v_web_id": "verify_l1h7nzwr_ABN0FA2f_BTrM_4zSH_8WPN_2KY2iZFmbhE2",
-            "_tea_utm_cache_1300": "undefined",
-            "_tea_utm_cache_2285": "undefined",
-            "ttcid": "3220eeda36a244beadd32a4b44d2044b31",
-            "douyin.com": "",
-            "__ac_nonce": "06247fb0f00f050ccc9b2",
-            "__ac_signature": "_02B4Z6wo00f01AN7DoAAAIDB5nv.qI7xGZQDWwoAAGKfo4rd5YCAYF8o5PyppIpsdKxV0k2NerO"
-                              "f1VEQr3eJftkpgon9tcveDVpmfY555vzTTvRznegS1ax3KJXnoav2ZdEoYzwR3wDszPCk5d",
-            "strategyABtestKey": "1648865029.449",
-            "AB_LOGIN_GUIDE_TIMESTAMP": "1648865029279",
-            "THEME_STAY_TIME": "299621",
-            "IS_HIDE_THEME_CHANGE": "1",
-            "home_can_add_dy_2_desktop": "0",
-            "tt_scid": "vUl8CBW1SMQp2l5GmUIja5A6ziY1LByrsoN.P-wvKuutiB8ftvlfK.9ZEeehNC5u821d",
-            "pwa_guide_count": "2",
-            "msToken": "EHCmp9Qw7PAChI3do-MQPjOR29hf4ZFLYNrGl89HkFKdO5Iwb8n7z5fpETrgim2zFTIkGT"
-                       "ObOxH7HCrHCLVEX5eAuwAS1A2sjKH4MHEfjfPqA06Lo4v9Pw==",
-        }
-        try:
-            urllib3.disable_warnings()
-            r = requests.get(url=url, params=params, cookies=cookies, verify=False)
-            # response = json.loads(r.content.decode("utf8"))
-            print(r)
-            print(type(r.text))
-            print(r.text)
-        except Exception as e:
-            print(e)
-
     @classmethod
     def demo2(cls):
         s = "0"
         print(int(int(s) / 10))
 
-    @classmethod
-    def get_weishi_feeds(cls):
-        url = "https://api.weishi.qq.com/trpc.weishi.weishi_h5_proxy.weishi_h5_proxy/WxminiGetFeedList"
-        cookies = {
-            "wesee_authtype": "3",
-            "wesee_openid":	"oWGa05FrwkuUvT-4n1qGeQuhVsc8",
-            "wesee_openkey": "8c3ec202f5d679fb5ee6d9f643640d9a2580ba504612e2d979a881d3169caf189e2a5c1d532eeff172bc21cf2"
-                             "6230941ccbc10243a7879e8165ca608c17060de606a6d08afe0a3abd5250629314f9a99e9d1003b201bf5ec",
-            "wesee_personid": "1593522421826902",
-            "wesee_refresh_token": "",
-            "wesee_access_token": "8c3ec202f5d679fb5ee6d9f643640d9a2580ba504612e2d979a881d3169caf18"
-                                  "9e2a5c1d532eeff172bc21cf26230941ccbc10243a7879e8165ca608c17060de6"
-                                  "06a6d08afe0a3abd5250629314f9a99e9d1003b201bf5ec",
-            "wesee_thr_appid": "wx75ee9f19b93e5c46",
-            "wesee_ichid": "8"
-        }
-        json_data = {
-            "req_body": {
-                "requestType": 16,
-                "isrefresh": 0,
-                "isfirst": 0,
-                "attachInfo": "",
-                "scene_id": 22,
-                "requestExt": {
-                    "mini_openid": "oWGa05FrwkuUvT-4n1qGeQuhVsc8",
-                    "notLogin-personid": "1593522421826902"
-                }
-            },
-            "req_header": {
-                "mapExt": "{\"imageSize\":\"480\",\"adaptScene\":\"PicHDWebpLimitScene\"}"
-            }
-        }
-        try:
-            urllib3.disable_warnings()
-            r = requests.post(url=url, cookies=cookies, json=json_data, verify=False)
-            response = json.loads(r.content.decode("utf8"))
-            feeds = response["rsp_body"]["feeds"]
-            for feed in feeds:
-                print(feed)
-        except Exception as e:
-            print(e)
-
-    @classmethod
-    def edit_str(cls):
-        title_list = ["#上海战疫 上海累计感染超20万!这条被淹没的热搜,令全网泪目… 疫情一定要攻克,但所有人都不该遗忘这些弱者。#上海累计报告本土阳性感染者超20万例 #农民工",
-                      "#重庆地火村 #地火村 #旅行",
-                      "第79集 | 湖南最值得去的六个景区,每一个都是绝色…… #快手带你去旅行 #旅游胜地 #旅游",
-                      "霸王条款不废除,断供有多可怕。 #涨知识 #生活小常识 # 生活常识",
-                      "秦始皇还活着?地宫中有不明物体缓缓移动 #历史 #秦始皇 #新春寄语  @快手热点(O40300129)",
-                      "#夏日荷花  #国花牡丹 #昙花一现",
-                      "国内最良心的8个景区,这才是景区最该有的样子,看看你去过几个? #旅行  #旅游 ",
-                      "狗子呆在水里三天三夜,终于练成捕鱼神功,一口一个大鲶鱼 #狗狗  #神奇动物  #快手放映室  @快手热点(O40300129) ",
-                      "#集结吧光合创作者  养鸡小伙:喂鸡摆出各种造型,被称为鸡司令。",
-                      "89岁农民老艺人自食其力,街头卖艺表演“捏碎碗片”绝技,现场听到咔吱咔吱响,人狠功夫硬!这功夫已失传,以后再看不到了!#集结吧光合创作者 #农民 #街头表演  @快手光合作者助手(O40300118)  @快手热点(O40300129)  @我要上热门(O1907752910)",
-                      "我国最贵最有名的三棵树,你知道哪三棵吗?#旅游 #旅行攻略 #黄山迎客松",
-                      "潘长江带来热舞,蔡明 郭达也来了!太嗨了!歌词太棒了! @快手涨粉助手(O1815060199)  @快手热点(O40300129)  @快手平台帐号(O90041) #潘长江 #搞笑 #集结吧光合创作者",
-                      "#带你看世界 给大家带来一期烟花盛宴,希望大家能够喜欢,带上你的那个她一起来看吧 #烟花 #视觉震撼"
-                      ]
-        for title in title_list:
-            title_split1 = title.split(" #")
-            if title_split1[0] != "":
-                title1 = title_split1[0]
-            else:
-                title1 = title_split1[0]
-
-            title_split2 = title1.split(" #")
-            if title_split2[0] != "":
-                title2 = title_split2[0]
-            else:
-                title2 = title_split2[-1]
-
-            title_split3 = title2.split("@")
-            if title_split3[0] != "":
-                title3 = title_split3[0]
-            else:
-                title3 = title_split3[-1]
-
-            print(title3)
-            title = title3.replace("\n", "").replace("#", "").replace("/", "").replace("\r", "")
-            print(title)
-
-        # new_title = re.compile(r'(#)(.*)(#)')
-        # print(new_title.sub(r'', title))
-
-    @classmethod
-    def kuaishou_sensitive_words(cls):
-        sensitive_words = [
-            "汽车",
-            "电影解说",
-            "放映室",
-            "解说电影",
-            "断供",
-        ]
-        return sensitive_words
-
-    @classmethod
-    def sensitive_words(cls):
-        title_list = ["#上海战疫 上海累计感染超20万!这条被淹没的热搜,令全网泪目… 疫情一定要攻克,但所有人都不该遗忘这些弱者。#上海累计报告本土阳性感染者超20万例 #农民工",
-                      "#重庆地火村 #地火村 #旅行",
-                      "第79集 | 湖南最值得去的六个景区,每一个都是绝色…… #快手带你去旅行 #旅游胜地 #旅游",
-                      "霸王条款不废除,断供有多可怕。 #涨知识 #生活小常识 # 生活常识",
-                      "秦始皇还活着?地宫中有不明物体缓缓移动 #历史 #秦始皇 #新春寄语  @快手热点(O40300129)",
-                      "#夏日荷花  #国花牡丹 #昙花一现",
-                      "国内最良心的8个景区,这才是景区最该有的样子,看看你去过几个? #旅行  #旅游 ",
-                      "狗子呆在水里三天三夜,终于练成捕鱼神功,一口一个大鲶鱼 #狗狗  #神奇动物  #快手放映室  @快手热点(O40300129) ",
-                      "#集结吧光合创作者  养鸡小伙:喂鸡摆出各种造型,被称为鸡司令。",
-                      "89岁农民老艺人自食其力,街头卖艺表演“捏碎碗片”绝技,现场听到咔吱咔吱响,人狠功夫硬!这功夫已失传,以后再看不到了!#集结吧光合创作者 #农民 #街头表演  @快手光合作者助手(O40300118)  @快手热点(O40300129)  @我要上热门(O1907752910)",
-                      "我国最贵最有名的三棵树,你知道哪三棵吗?#旅游 #旅行攻略 #黄山迎客松",
-                      "潘长江带来热舞,蔡明 郭达也来了!太嗨了!歌词太棒了! @快手涨粉助手(O1815060199)  @快手热点(O40300129)  @快手平台帐号(O90041) #潘长江 #搞笑 #集结吧光合创作者",
-                      "#带你看世界 给大家带来一期烟花盛宴,希望大家能够喜欢,带上你的那个她一起来看吧 #烟花 #视觉震撼"
-                      ]
-        print(cls.kuaishou_sensitive_words())
-        for title in title_list:
-            for word in cls.kuaishou_sensitive_words():
-                if word in title:
-                    print(f"敏感词:{word}")
-                    print(f"敏感词视频:{title}")
-                    cls.kuaishou_sensitive_words().remove(word)
-                else:
-                    print(f"正常视频:{title}")
-                    cls.kuaishou_sensitive_words().remove(word)
-
 
 if __name__ == "__main__":
     demo = Demo()
     # demo.demo1()
     demo.time()
-    # demo.get_douyin_feeds()
-    # demo.demo2()
-    # demo.get_weishi_feeds()
-    # demo.edit_str()
-    # demo.sensitive_words()
 
     pass
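
A note on the two timestamp forms compared in demo.time(): rounding before multiplying discards sub-second precision, which is why the first form is the recommended one:

    import time

    ms_precise = int(time.time() * 1000)    # e.g. 1652843123456, keeps milliseconds
    ms_rounded = round(time.time()) * 1000  # e.g. 1652843123000, milliseconds always zero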

+ 310 - 0
main/feishu_lib.py

@@ -0,0 +1,310 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/5/18
+import json
+import requests
+import urllib3
+from main.common import Common
+proxies = {"http": None, "https": None}
+
+
+class Feishu:
+    """
+    编辑飞书云文档
+    """
+    # 看一看爬虫数据表
+    kanyikan_url = "https://w42nne6hzg.feishu.cn/sheets/shtcngRPoDYAi24x52j2nDuHMih?"
+    # 快手爬虫数据表
+    kuaishou_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnp4SaJt37q6OOOrYzPMjQkg?"
+    # 微视爬虫数据表
+    weishi_url = "https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?"
+    # 小年糕爬虫数据表
+    xiaoniangao_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnYxiyQ1wLklo1W5Kdqc9cGh?"
+    # 数据监控表
+    crawler_monitor = "https://w42nne6hzg.feishu.cn/sheets/shtcnlZWYazInhf7Z60jkbLRJyd?"
+    # 本山祝福数据表
+    crawler_benshanzhufu = "https://w42nne6hzg.feishu.cn/sheets/shtcnGh2rrsPYM4iVNEBO7OqWrb?"
+    # 公众号爬虫表
+    gzh_url = "https://w42nne6hzg.feishu.cn/sheets/shtcnexNXnpDLHhARw0QdiwbYuA?"
+
+    # Feishu spreadsheet tokens
+    @classmethod
+    def spreadsheettoken(cls, crawler):
+        """
+        :param crawler: which crawler
+        """
+        if crawler == "kanyikan":
+            return "shtcngRPoDYAi24x52j2nDuHMih"
+        elif crawler == "kuaishou":
+            return "shtcnp4SaJt37q6OOOrYzPMjQkg"
+        elif crawler == "weishi":
+            return "shtcn5YSWg91JfVGzj0SFZIRRPh"
+        elif crawler == "xiaoniangao":
+            return "shtcnYxiyQ1wLklo1W5Kdqc9cGh"
+        elif crawler == "monitor":
+            return "shtcnlZWYazInhf7Z60jkbLRJyd"
+        elif crawler == "bszf":
+            return "shtcnGh2rrsPYM4iVNEBO7OqWrb"
+        elif crawler == "gzh":
+            return "shtcnexNXnpDLHhARw0QdiwbYuA"
+
+    # Get the Feishu API token
+    @classmethod
+    def get_token(cls, log_type):
+        """
+        Get the Feishu API token
+        :return:
+        """
+        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
+        post_data = {"app_id": "cli_a13ad2afa438d00b",  # credentials of the publishing app's backend account
+                     "app_secret": "4tK9LY9VbiQlY5umhE42dclBFo6t4p5O"}
+
+        try:
+            urllib3.disable_warnings()
+            response = requests.post(url=url, data=post_data, proxies=proxies, verify=False)
+            tenant_access_token = response.json()["tenant_access_token"]
+            return tenant_access_token
+        except Exception as e:
+            Common.logger(log_type).error("获取飞书 api token 异常:{}", e)
+
+    # Get spreadsheet metadata
+    @classmethod
+    def get_metainfo(cls, log_type, crawler):
+        """
+        Get spreadsheet metadata
+        :return:
+        """
+        get_metainfo_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                           + cls.spreadsheettoken(crawler) + "/metainfo"
+
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            "extFields": "protectedRange",  # 额外返回的字段,extFields=protectedRange时返回保护行列信息
+            "user_id_type": "open_id"  # 返回的用户id类型,可选open_id,union_id
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=get_metainfo_url, headers=headers, params=params, proxies=proxies, verify=False)
+            response = json.loads(r.content.decode("utf8"))
+            return response
+        except Exception as e:
+            Common.logger(log_type).error("获取表格元数据异常:{}", e)
+
+    # Read all data from a worksheet
+    @classmethod
+    def get_values_batch(cls, log_type, crawler, sheetid):
+        """
+        Read all data from a worksheet
+        :param log_type: which log to use
+        :param crawler: which crawler
+        :param sheetid: which sheet
+        :return: all values
+        """
+        get_values_batch_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                               + cls.spreadsheettoken(crawler) + "/values_batch_get"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            # multiple query ranges, e.g. url?ranges=range1,range2, where each range is a sheetId plus a cell range
+            "ranges": sheetid,
+
+            # valueRenderOption=ToString returns plain-text values (except numeric types);
+            # valueRenderOption=FormattedValue computes and formats the cells;
+            # valueRenderOption=Formula returns the formula itself when a cell contains one;
+            # valueRenderOption=UnformattedValue computes but does not format the cells
+            "valueRenderOption": "ToString",
+
+            # dateTimeRenderOption=FormattedString computes and formats dates/times per their format (numbers are not formatted) and returns the formatted string
+            "dateTimeRenderOption": "",
+
+            # type of user id to return; open_id or union_id
+            "user_id_type": "open_id"
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=get_values_batch_url, headers=headers, params=params, proxies=proxies, verify=False)
+            # print(r.text)
+            response = json.loads(r.content.decode("utf8"))
+            values = response["data"]["valueRanges"][0]["values"]
+            return values
+        except Exception as e:
+            Common.logger(log_type).error("读取工作表所有数据异常:{}", e)
+
+    # Insert rows or columns into a worksheet
+    @classmethod
+    def insert_columns(cls, log_type, crawler, sheetid, majordimension, startindex, endindex):
+        """
+        Insert rows or columns into a worksheet
+        :param log_type: which log to use
+        :param crawler: which crawler's cloud document
+        :param sheetid: which worksheet
+        :param majordimension: rows or columns, ROWS / COLUMNS
+        :param startindex: start position
+        :param endindex: end position
+        """
+        insert_columns_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                             + cls.spreadsheettoken(crawler) + "/insert_dimension_range"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "dimension": {
+                "sheetId": sheetid,
+                "majorDimension": majordimension,  # 默认 ROWS ,可选 ROWS、COLUMNS
+                "startIndex": startindex,  # 开始的位置
+                "endIndex": endindex  # 结束的位置
+            },
+            "inheritStyle": "AFTER"  # BEFORE 或 AFTER,不填为不继承 style
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=insert_columns_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("插入行或列:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("插入行或列异常:{}", e)
+
+    # Write data
+    @classmethod
+    def update_values(cls, log_type, crawler, sheetid, ranges, values):
+        """
+        Write data
+        :param log_type: which log to use
+        :param crawler: which crawler's cloud document
+        :param sheetid: which worksheet
+        :param ranges: cell range
+        :param values: the data to write, a list
+        """
+        update_values_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                            + cls.spreadsheettoken(crawler) + "/values_batch_update"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "valueRanges": [
+                {
+                    "range": sheetid + "!" + ranges,
+                    "values": values
+                },
+            ],
+        }
+
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=update_values_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("写入数据:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("写入数据异常:{}", e)
+
+    # Merge cells
+    @classmethod
+    def merge_cells(cls, log_type, crawler, sheetid, ranges):
+        """
+        Merge cells
+        :param log_type: which log to use
+        :param crawler: which crawler
+        :param sheetid: which worksheet
+        :param ranges: the cell range to merge
+        """
+        merge_cells_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                          + cls.spreadsheettoken(crawler) + "/merge_cells"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+
+        body = {
+            "range": sheetid + "!" + ranges,
+            "mergeType": "MERGE_ROWS"
+        }
+
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(url=merge_cells_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("合并单元格:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("合并单元格异常:{}", e)
+
+    # Read cell data
+    @classmethod
+    def get_range_value(cls, log_type, crawler, sheetid, cell):
+        """
+        Read the contents of a cell
+        :param log_type: which log to use
+        :param crawler: which crawler
+        :param sheetid: which worksheet
+        :param cell: which cell
+        :return: cell contents
+        """
+        get_range_value_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                              + cls.spreadsheettoken(crawler) + "/values/" + sheetid + "!" + cell
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        params = {
+            # valueRenderOption=ToString returns plain-text values (except numeric types);
+            # valueRenderOption=FormattedValue computes and formats the cells;
+            # valueRenderOption=Formula returns the formula itself when a cell contains one;
+            # valueRenderOption=UnformattedValue computes but does not format the cells.
+            "valueRenderOption": "FormattedValue",
+
+            # dateTimeRenderOption=FormattedString computes and formats dates/times per their format (numbers are not formatted) and returns the formatted string.
+            "dateTimeRenderOption": "",
+
+            # type of user id to return; open_id or union_id
+            "user_id_type": "open_id"
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.get(url=get_range_value_url, headers=headers, params=params, proxies=proxies, verify=False)
+            # print(r.text)
+            return r.json()["data"]["valueRange"]["values"][0]
+        except Exception as e:
+            Common.logger(log_type).error("读取单元格数据异常:{}", e)
+
+    # Delete rows or columns; ROWS or COLUMNS
+    @classmethod
+    def dimension_range(cls, log_type, crawler, sheetid, major_dimension, startindex, endindex):
+        """
+        Delete rows or columns
+        :param log_type: which log to use
+        :param crawler: which crawler
+        :param sheetid: worksheet
+        :param major_dimension: defaults to ROWS; ROWS or COLUMNS
+        :param startindex: start position
+        :param endindex: end position
+        :return:
+        """
+        dimension_range_url = "https://open.feishu.cn/open-apis/sheets/v2/spreadsheets/" \
+                              + cls.spreadsheettoken(crawler) + "/dimension_range"
+        headers = {
+            "Authorization": "Bearer " + cls.get_token(log_type),
+            "Content-Type": "application/json; charset=utf-8"
+        }
+        body = {
+            "dimension": {
+                "sheetId": sheetid,
+                "majorDimension": major_dimension,
+                "startIndex": startindex,
+                "endIndex": endindex
+            }
+        }
+        try:
+            urllib3.disable_warnings()
+            r = requests.delete(url=dimension_range_url, headers=headers, json=body, proxies=proxies, verify=False)
+            Common.logger(log_type).info("删除视频数据:{}", r.json()["msg"])
+        except Exception as e:
+            Common.logger(log_type).error("删除视频数据异常:{}", e)
+
+
+if __name__ == "__main__":
+    print(Feishu.get_token('gzh'))
+
+    pass
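
For orientation, a minimal sketch of how the new Feishu wrapper is used elsewhere in this commit (the "weishi" crawler key and the sheet ids "9fTK1f" and "PamIy1" are taken from follow.py; the written values are placeholders):

    from main.feishu_lib import Feishu

    # Read every row of a worksheet in the Weishi spreadsheet
    rows = Feishu.get_values_batch("follow", "weishi", "9fTK1f")

    # Insert an empty first row, write placeholder data into it, then delete it again
    Feishu.insert_columns("follow", "weishi", "PamIy1", "ROWS", 1, 2)
    Feishu.update_values("follow", "weishi", "PamIy1", "A2:B2", [["2022/06/15 12:00:00", "demo"]])
    Feishu.dimension_range("follow", "weishi", "PamIy1", "ROWS", 2, 2)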

+ 411 - 0
main/follow.py

@@ -0,0 +1,411 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/6/15
+import os
+import sys
+import time
+import requests
+import urllib3
+sys.path.append(os.getcwd())
+from main.publish import Publish
+from main.common import Common
+from main.feishu_lib import Feishu
+proxies = {"http": None, "https": None}
+
+
+class DownloadFollow:
+    # WeChat account configuration, read from the Weishi config sheet
+    Referer = Feishu.get_range_value("follow", "weishi", "9fTK1f", "C3:C3")[0]
+    wesee_openid = Feishu.get_range_value("follow", "weishi", "9fTK1f", "C4:C4")[0]
+    wesee_openkey = Feishu.get_range_value("follow", "weishi", "9fTK1f", "C5:C5")[0]
+    wesee_personid = Feishu.get_range_value("follow", "weishi", "9fTK1f", "C6:C6")[0]
+    wesee_access_token = Feishu.get_range_value("follow", "weishi", "9fTK1f", "C7:C7")[0]
+    wesee_thr_appid = Feishu.get_range_value("follow", "weishi", "9fTK1f", "C8:C8")[0]
+    # Pagination parameter
+    attachInfo = ""
+
+    # Filter word list
+    @classmethod
+    def sensitive_words(cls):
+        # Word list
+        word_list = []
+        # Read every word from the cloud document and add it to the list
+        lists = Feishu.get_values_batch("follow", "weishi", "2Oxf8C")
+        for i in lists:
+            for j in i:
+                # Skip empty cells
+                if j is None:
+                    pass
+                else:
+                    word_list.append(j)
+        return word_list
+
+    # Download rule
+    @staticmethod
+    def download_rule(d_duration, d_width, d_height, d_play_cnt, d_like_cnt, d_share_cnt):
+        """
+        Basic rules for downloading a video
+        :param d_duration: duration
+        :param d_width: width
+        :param d_height: height
+        :param d_play_cnt: play count
+        :param d_like_cnt: like count
+        :param d_share_cnt: share count
+        :return: True if the rules are met, otherwise False
+        """
+        if int(float(d_duration)) >= 20:
+            if int(d_width) >= 0 or int(d_height) >= 0:
+                if int(d_play_cnt) >= 0:
+                    if int(d_like_cnt) >= 0 or int(d_share_cnt) >= 0:
+                        return True
+                    else:
+                        return False
+                else:
+                    return False
+            return False
+        return False
+
+    # Fetch the feed list
+    @classmethod
+    def get_feeds(cls):
+        """
+        1. Get the video list from the Weishi mini-program follow feed
+        2. Deduplicate first against https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?sheet=caa3fa
+        3. Then deduplicate against https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?sheet=O7fCzr
+        4. Append the video info to https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?sheet=O7fCzr
+        """
+        url = "https://api.weishi.qq.com/trpc.weishi.weishi_h5_proxy.weishi_h5_proxy/WxminiGetFollowFeedList"
+        headers = {
+            "content-type": "application/json",
+            "Accept-Encoding": "gzip,compress,br,deflate",
+            "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)"
+                          " AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148"
+                          " MicroMessenger/8.0.20(0x18001442) NetType/WIFI Language/zh_CN",
+            "Referer": str(cls.Referer)
+        }
+        cookies = {
+            "wesee_authtype": "3",
+            "wesee_openid": str(cls.wesee_openid),
+            "wesee_openkey": str(cls.wesee_openkey),
+            "wesee_personid": str(cls.wesee_personid),
+            "wesee_refresh_token": "",
+            "wesee_access_token": str(cls.wesee_access_token),
+            "wesee_thr_appid": str(cls.wesee_thr_appid),
+            "wesee_ichid": "8"
+        }
+        json_data = {
+            "req_body": {
+                "attachInfo": str(cls.attachInfo)
+            },
+            "req_header": {
+                "mapExt": "{\"imageSize\":\"480\",\"adaptScene\":\"PicHDWebpLimitScene\","
+                          "\"weseeCostTag\":\"WxMiniProgram\"}"
+            }
+        }
+
+        try:
+            urllib3.disable_warnings()
+            r = requests.post(headers=headers, url=url, cookies=cookies, json=json_data, proxies=proxies,
+                              verify=False)
+            if r.json()["rsp_header"]["errMsg"] != "":
+                Common.logger("follow").error("errMsg:{}", r.json()["rsp_header"]["errMsg"])
+            cls.attachInfo = r.json()["rsp_body"]["attachInfo"]
+            feeds = r.json()["rsp_body"]["feeds"]
+            for i in range(len(feeds)):
+                # Strip topic tags and special characters from the video title
+                weishi_title = feeds[i]["desc"]
+                title_split1 = weishi_title.split(" #")
+                if title_split1[0] != "":
+                    title1 = title_split1[0]
+                else:
+                    title1 = title_split1[-1]
+
+                title_split2 = title1.split(" #")
+                if title_split2[0] != "":
+                    title2 = title_split2[0]
+                else:
+                    title2 = title_split2[-1]
+
+                title_split3 = title2.split("@")
+                if title_split3[0] != "":
+                    title3 = title_split3[0]
+                else:
+                    title3 = title_split3[-1]
+                # Video title
+                video_title = title3.strip().replace("\n", "") \
+                    .replace("/", "").replace("快手", "").replace(" ", "") \
+                    .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
+                    .replace("#", "").replace(".", "。").replace("\\", "") \
+                    .replace(":", "").replace("*", "").replace("?", "") \
+                    .replace("?", "").replace('"', "").replace("<", "") \
+                    .replace(">", "").replace("|", "").replace("微视", "")
+
+                # Video ID
+                if "id" not in feeds[i]["video"]:
+                    video_id = 0
+                else:
+                    video_id = feeds[i]["video"]["id"]
+
+                # Play count
+                if "playNum" not in feeds[i]["ugcData"]:
+                    video_play_cnt = 0
+                else:
+                    video_play_cnt = feeds[i]["ugcData"]["playNum"]
+
+                # Like count
+                if "dingCount" not in feeds[i]["ugcData"]:
+                    video_like_cnt = 0
+                else:
+                    video_like_cnt = feeds[i]["ugcData"]["dingCount"]
+
+                # Share count
+                if "shareNum" not in feeds[i]["ugcData"]:
+                    video_share_cnt = 0
+                else:
+                    video_share_cnt = feeds[i]["ugcData"]["shareNum"]
+
+                # Comment count
+                if "totalCommentNum" not in feeds[i]["ugcData"]:
+                    video_comment_cnt = 0
+                else:
+                    video_comment_cnt = feeds[i]["ugcData"]["totalCommentNum"]
+
+                # Video duration
+                if "duration" not in feeds[i]["video"]:
+                    video_duration = 0
+                else:
+                    video_duration = int(int(feeds[i]["video"]["duration"]) / 1000)
+
+                # Video width and height
+                if "width" not in feeds[i]["video"] or "height" not in feeds[i]["video"]:
+                    video_width = 0
+                    video_height = 0
+                    video_resolution = str(video_width) + "*" + str(video_height)
+                else:
+                    video_width = feeds[i]["video"]["width"]
+                    video_height = feeds[i]["video"]["height"]
+                    video_resolution = str(video_width) + "*" + str(video_height)
+
+                # Video publish time
+                if "createTime" not in feeds[i]:
+                    video_send_time = 0
+                else:
+                    video_send_time = int(feeds[i]["createTime"]) * 1000
+
+                # User nickname
+                user_name = feeds[i]["poster"]["nick"].strip().replace("\n", "") \
+                    .replace("/", "").replace("快手", "").replace(" ", "") \
+                    .replace(" ", "").replace("&NBSP", "").replace("\r", "").replace("微视", "")
+
+                # User ID
+                user_id = feeds[i]["poster"]["id"]
+
+                # User avatar URL
+                if "thumbURL" not in feeds[i]["material"] and "avatar" not in feeds[i]["poster"]:
+                    head_url = 0
+                elif "thumbURL" in feeds[i]["material"]:
+                    head_url = feeds[i]["material"]["thumbURL"]
+                else:
+                    head_url = feeds[i]["poster"]["avatar"]
+
+                # Video cover URL
+                if len(feeds[i]["images"]) == 0:
+                    cover_url = 0
+                else:
+                    cover_url = feeds[i]["images"][0]["url"]
+
+                # Video playback URL
+                if "url" not in feeds[i]["video"]:
+                    video_url = 0
+                else:
+                    video_url = feeds[i]["video"]["url"]
+
+                Common.logger("follow").info("video_title:{}".format(video_title))
+                Common.logger("follow").info("video_id:{}".format(video_id))
+                Common.logger("follow").info("video_play_cnt:{}".format(video_play_cnt))
+                Common.logger("follow").info("video_like_cnt:{}".format(video_like_cnt))
+                Common.logger("follow").info("video_share_cnt:{}".format(video_share_cnt))
+                # Common.logger("follow").info("video_comment_cnt:{}".format(video_comment_cnt))
+                Common.logger("follow").info("video_duration:{}秒".format(video_duration))
+                # Common.logger("follow").info("video_resolution:{}".format(video_resolution))
+                Common.logger("follow").info(
+                    "video_send_time:{}".format(time.strftime(
+                        "%Y/%m/%d %H:%M:%S", time.localtime(int(video_send_time) / 1000))))
+                Common.logger("follow").info("user_name:{}".format(user_name))
+                # Common.logger("follow").info("user_id:{}".format(user_id))
+                # Common.logger("follow").info("head_url:{}".format(head_url))
+                # Common.logger("follow").info("cover_url:{}".format(cover_url))
+                Common.logger("follow").info("video_url:{}".format(video_url))
+
+                # Filter out invalid videos
+                if video_id == 0 or video_duration == 0 or video_send_time == 0 or head_url == 0 \
+                        or cover_url == 0 or video_url == 0:
+                    Common.logger("follow").info("无效视频")
+                # Check the basic rules
+                elif cls.download_rule(video_duration, video_width, video_height,
+                                       video_play_cnt, video_like_cnt, video_share_cnt) is False:
+                    Common.logger("follow").info("不满足基础规则")
+                # Check for sensitive words
+                elif any(word in weishi_title for word in cls.sensitive_words()):
+                    Common.logger("follow").info("视频已中敏感词:{}".format(weishi_title))
+                # Deduplicate against the cloud document: https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?sheet=caa3fa
+                elif video_id in [j for m in Feishu.get_values_batch("follow", "weishi", "caa3fa") for j in m]:
+                    Common.logger("follow").info("该视频已下载:{}", video_title)
+                # Deduplicate against the cloud document: https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?sheet=O7fCzr
+                elif video_id in [j for n in Feishu.get_values_batch("follow", "weishi", "PamIy1") for j in n]:
+                    Common.logger("follow").info("该视频已在feeds中:{}", video_title)
+                else:
+                    Common.logger("follow").info("该视频未下载,添加至feeds中:{}".format(video_title))
+                    # Insert a new first row into the feeds worksheet
+                    time.sleep(1)
+                    Feishu.insert_columns("follow", "weishi", "PamIy1", "ROWS", 1, 2)
+
+                    # Get the current time
+                    get_feeds_time = int(time.time())
+                    # Write the data into the cloud-document worksheet
+                    values = [[str(time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(get_feeds_time))),
+                               "关注榜",
+                               video_id,
+                               video_title,
+                               video_play_cnt,
+                               video_comment_cnt,
+                               video_like_cnt,
+                               video_share_cnt,
+                               video_duration,
+                               video_resolution,
+                               time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(int(video_send_time) / 1000)),
+                               user_name,
+                               user_id,
+                               head_url,
+                               cover_url,
+                               video_url]]
+                    # Wait 1s to avoid hitting the cloud document too frequently, which causes errors
+                    time.sleep(1)
+                    Feishu.update_values("follow", "weishi", "PamIy1", "A2:P2", values)
+        except Exception as e:
+            Common.logger("follow").error("抓取关注列表异常:{}", e)
+
+    # Download and publish
+    @classmethod
+    def download_publish(cls):
+        try:
+            for i in range(1, len(Feishu.get_values_batch("follow", "weishi", "PamIy1"))):
+                time.sleep(1)
+                # Fetch the row once instead of re-reading the sheet for every field
+                feeds_row = Feishu.get_values_batch("follow", "weishi", "PamIy1")[i]
+                download_video_id = feeds_row[2]
+                download_video_title = feeds_row[3]
+                download_video_play_cnt = feeds_row[4]
+                download_video_comment_cnt = feeds_row[5]
+                download_video_like_cnt = feeds_row[6]
+                download_video_share_cnt = feeds_row[7]
+                download_video_duration = feeds_row[8]
+                download_video_resolution = feeds_row[9]
+                # download_video_width = download_video_resolution.split("*")[0]
+                # download_video_height = download_video_resolution.split("*")[-1]
+                download_video_send_time = feeds_row[10]
+                download_user_name = feeds_row[11]
+                download_user_id = feeds_row[12]
+                download_head_url = feeds_row[13]
+                download_cover_url = feeds_row[14]
+                download_video_url = feeds_row[15]
+
+                # Common.logger("follow").info("download_video_id:{}", download_video_id)
+                # Common.logger("follow").info("download_video_title:{}", download_video_title)
+                # Common.logger("follow").info("download_video_play_cnt:{}", download_video_play_cnt)
+                # Common.logger("follow").info("download_video_comment_cnt:{}", download_video_comment_cnt)
+                # Common.logger("follow").info("download_video_like_cnt:{}", download_video_like_cnt)
+                # Common.logger("follow").info("download_video_share_cnt:{}", download_video_share_cnt)
+                # Common.logger("follow").info("download_video_duration:{}", download_video_duration)
+                # Common.logger("follow").info("download_video_resolution:{}", download_video_resolution)
+                # Common.logger("follow").info("download_video_send_time:{}", download_video_send_time)
+                # Common.logger("follow").info("download_user_name:{}", download_user_name)
+                # Common.logger("follow").info("download_user_id:{}", download_user_id)
+                # Common.logger("follow").info("download_head_url:{}", download_head_url)
+                # Common.logger("follow").info("download_cover_url:{}", download_cover_url)
+                # Common.logger("follow").info("download_video_url:{}", download_video_url)
+
+                Common.logger("follow").info("正在判断第{}行,视频:{}", i, download_video_title)
+
+                # Filter out empty rows
+                if download_video_id is None \
+                        or download_video_id == "" \
+                        or download_video_title is None \
+                        or download_video_title == "":
+                    Common.logger("follow").warning("空行,删除")
+                    # Delete rows or columns; ROWS or COLUMNS
+                    Feishu.dimension_range("follow", "weishi", "PamIy1", "ROWS", i + 1, i + 1)
+                    return
+                # Deduplicate
+                elif download_video_id in [j for m in Feishu.get_values_batch("follow", "weishi", "caa3fa") for j in m]:
+                    Common.logger("follow").info("该视频已下载:{}", download_video_title)
+                    # Delete rows or columns; ROWS or COLUMNS
+                    Feishu.dimension_range("follow", "weishi", "PamIy1", "ROWS", i + 1, i + 1)
+                    return
+                else:
+                    Common.logger("follow").info("开始下载视频:{}", download_video_title)
+                    # Download the cover
+                    Common.download_method(log_type="follow", text="cover",
+                                           d_name=str(download_video_title), d_url=str(download_cover_url))
+                    # Download the video
+                    Common.download_method(log_type="follow", text="video",
+                                           d_name=str(download_video_title), d_url=str(download_video_url))
+                    # Save the video info to "./videos/{download_video_title}/info.txt"
+                    with open("./videos/" + download_video_title
+                              + "/" + "info.txt", "a", encoding="UTF-8") as f_a:
+                        f_a.write(str(download_video_id) + "\n" +
+                                  str(download_video_title) + "\n" +
+                                  str(download_video_duration) + "\n" +
+                                  str(download_video_play_cnt) + "\n" +
+                                  str(download_video_comment_cnt) + "\n" +
+                                  str(download_video_like_cnt) + "\n" +
+                                  str(download_video_share_cnt) + "\n" +
+                                  str(download_video_resolution) + "\n" +
+                                  str(int(time.mktime(
+                                      time.strptime(download_video_send_time, "%Y/%m/%d %H:%M:%S")))) + "\n" +
+                                  str(download_user_name) + "\n" +
+                                  str(download_head_url) + "\n" +
+                                  str(download_video_url) + "\n" +
+                                  str(download_cover_url) + "\n" +
+                                  str(cls.wesee_access_token))
+                    Common.logger("follow").info("==========视频信息已保存至info.txt==========")
+
+                    # Upload the video
+                    Common.logger("follow").info("开始上传视频:{}".format(download_video_title))
+                    Publish.upload_and_publish("follow", "prod", "play")
+
+                    # Save the video ID to the cloud document: https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?sheet=caa3fa
+                    Common.logger("follow").info("保存视频ID至云文档:{}", download_video_title)
+                    # Insert a new first row into the video-ID worksheet
+                    Feishu.insert_columns("follow", "weishi", "caa3fa", "ROWS", 1, 2)
+                    # Write the data into the first row of the video-ID worksheet
+                    upload_time = int(time.time())
+                    values = [[str(time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(upload_time))),
+                               "关注榜",
+                               str(download_video_id),
+                               str(download_video_title),
+                               download_video_play_cnt,
+                               download_video_comment_cnt,
+                               download_video_like_cnt,
+                               download_video_share_cnt,
+                               download_video_duration,
+                               str(download_video_resolution),
+                               str(download_video_send_time),
+                               str(download_user_name),
+                               str(download_user_id),
+                               str(download_head_url),
+                               str(download_cover_url),
+                               str(download_video_url)]]
+                    time.sleep(1)
+                    Feishu.update_values("follow", "caa3fa", "A2:Q2", values)
+
+                    # Delete rows or columns; ROWS or COLUMNS
+                    Feishu.dimension_range("follow", "weishi", "PamIy1", "ROWS", i + 1, i + 1)
+                    return
+        except Exception as e:
+            Common.logger("follow").error("下载/上传视频异常:{}", e)
+            Feishu.dimension_range("follow", "weishi", "PamIy1", "ROWS", 2, 2)
+
+
+if __name__ == "__main__":
+    download_follow = DownloadFollow()
+    download_follow.get_feeds()
+    download_follow.download_publish()
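
DownloadFollow.download_rule nests four levels of if/else; a behavior-equivalent flat form (a sketch, not part of the commit) makes it easier to see that only the 20-second duration check is effectively restrictive, since the remaining thresholds are all >= 0:

    def download_rule(d_duration, d_width, d_height, d_play_cnt, d_like_cnt, d_share_cnt):
        # Behavior-equivalent rewrite of DownloadFollow.download_rule
        return (int(float(d_duration)) >= 20
                and (int(d_width) >= 0 or int(d_height) >= 0)
                and int(d_play_cnt) >= 0
                and (int(d_like_cnt) >= 0 or int(d_share_cnt) >= 0))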

+ 56 - 55
main/publish.py

@@ -1,26 +1,20 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
 # @Time: 2022/3/30
-"""
-上传视频到阿里云 OSS
-上传视频到管理后台
-"""
 import json
 import os
 import random
 import time
-
 import oss2
 import requests
 import urllib3
 from main.common import Common
-
 proxies = {"http": None, "https": None}
 
 
 class Publish:
     @classmethod
-    def publish_video_dev(cls, request_data):
+    def publish_video_dev(cls, log_type, request_data):
         """
         loginUid  on-site uid (random)
         appType  default: 888888
@@ -36,16 +30,19 @@ class Publish:
         versionCode  version, default 1
         :return:
         """
-        # Common.crawler_log().info('publish request data: {}'.format(request_data))
+        # Common.logger().info('publish request data: {}'.format(request_data))
         result = cls.request_post('https://videotest.yishihui.com/longvideoapi/crawler/video/send', request_data)
-        Common.crawler_log().info('publish result: {}'.format(result))
+        # Common.logger(log_type).info('publish result: {}'.format(result))
+        # "data" may be absent when the request fails, so extract the id defensively
+        video_id = (result.get("data") or {}).get("id")
+        # Common.logger(log_type).info('video_id: {}'.format(video_id))
         if result['code'] != 0:
-            Common.crawler_log().error('pushlish failure msg = {}'.format(result['msg']))
+            Common.logger(log_type).error('publish failure msg = {}'.format(result['msg']))
         else:
-            Common.crawler_log().info('publish success video_id = : {}'.format(request_data['crawlerSrcId']))
+            Common.logger(log_type).info('publish success video_id = : {}'.format(request_data['crawlerSrcId']))
+        return video_id
 
     @classmethod
-    def publish_video_prod(cls, request_data):
+    def publish_video_prod(cls, log_type, request_data):
         """
         loginUid  on-site uid (random)
         appType  default: 888888
@@ -61,13 +58,15 @@ class Publish:
         versionCode  version, default 1
         :return:
         """
-        # Common.crawler_log().info('publish request data: {}'.format(request_data))
         result = cls.request_post('https://longvideoapi.piaoquantv.com/longvideoapi/crawler/video/send', request_data)
-        Common.crawler_log().info('publish result: {}'.format(result))
+        # Common.logger(log_type).info('publish result: {}'.format(result))
+        # "data" may be absent when the request fails, so extract the id defensively
+        video_id = (result.get("data") or {}).get("id")
+        # Common.logger(log_type).info('video_id: {}'.format(video_id))
         if result['code'] != 0:
-            Common.crawler_log().error('pushlish failure msg = {}'.format(result['msg']))
+            Common.logger(log_type).error('publish failure msg = {}'.format(result['msg']))
         else:
-            Common.crawler_log().info('publish success video_id = : {}'.format(request_data['crawlerSrcId']))
+            Common.logger(log_type).info('publish success video_id = : {}'.format(request_data['crawlerSrcId']))
+        return video_id
 
     @classmethod
     def request_post(cls, request_url, request_data):
@@ -115,48 +114,44 @@ class Publish:
     - read the basic info and call the publish API
     """
     # env / date 20220225 / file name
-    oss_file_path_video = r'longvideo/crawler_local/video/{}/{}/{}'
-    oss_file_path_image = r'longvideo/crawler_local/image/{}/{}/{}'
+    oss_file_path_video = 'longvideo/crawler_local/video/{}/{}/{}'
+    oss_file_path_image = 'longvideo/crawler_local/image/{}/{}/{}'
 
     @classmethod
-    def put_file(cls, oss_file, local_file):
-        # Common.crawler_log().info("put oss file = {}, local file = {}".format(oss_file, local_file))
+    def put_file(cls, log_type, oss_file, local_file):
         cls.bucket.put_object_from_file(oss_file, local_file)
-        Common.crawler_log().info("put oss file = {}, local file = {} success".format(oss_file, local_file))
+        Common.logger(log_type).info("put oss file = {}, local file = {} success".format(oss_file, local_file))
 
     # Remove a local file
     @classmethod
-    def remove_local_file(cls, local_file):
-        # Common.crawler_log().info("remove local file = {}".format(local_file))
+    def remove_local_file(cls, log_type, local_file):
         os.remove(local_file)
-        Common.crawler_log().info("remove local file = {} success".format(local_file))
+        Common.logger(log_type).info("remove local file = {} success".format(local_file))
 
     # Remove a local directory
     @classmethod
-    def remove_local_file_dir(cls, local_file):
-        # Common.crawler_log().info("remove local file dir = {}".format(local_file))
+    def remove_local_file_dir(cls, log_type, local_file):
         os.rmdir(local_file)
-        Common.crawler_log().info("remove local file dir = {} success".format(local_file))
+        Common.logger(log_type).info("remove local file dir = {} success".format(local_file))
 
-    local_file_path = '.\\videos'
+    local_file_path = './videos'
     video_file = 'video'
     image_file = 'image'
     info_file = 'info'
     uids_dev_up = [6267140]
     uids_dev_play = [6267141]
-    uids_prod_up = [20631208, 20631209, 20631210, 20631211, 20631212,
-                    20631213, 20631214, 20631215, 20631216, 20631217]
-    uids_prod_play = [20631228, 20631229, 20631230, 20631231, 20631232,
-                      20631233, 20631234, 20631235, 20631236, 20631237]
+    uids_prod_up = [20631248, 20631249, 20631250, 20631251, 20631252]
+    uids_prod_play = [20631248, 20631249, 20631250, 20631251, 20631252]
 
     @classmethod
-    def upload_and_publish(cls, env, job):
+    def upload_and_publish(cls, log_type, env, job):
         """
         Upload videos to OSS
+        :param log_type: which log to use
         :param env: test environment: dev; production environment: prod
         :param job: rising list: up; play count: play
         """
-        Common.crawler_log().info("upload_and_publish starting...")
+        Common.logger(log_type).info("upload_and_publish starting...")
         today = time.strftime("%Y%m%d", time.localtime())
         # All video folders under the videos directory
         files = os.listdir(cls.local_file_path)
@@ -166,10 +161,13 @@ class Publish:
                 fi_d = os.path.join(cls.local_file_path, f)
                 # Confirm it is a video folder
                 if os.path.isdir(fi_d):
-                    Common.crawler_log().info('dir = {}'.format(fi_d))
+                    Common.logger(log_type).info('dir = {}'.format(fi_d))
                     # List everything in the video folder
                     dir_files = os.listdir(fi_d)
-                    data = {'appType': '888888', 'crawlerSrcCode': 'KANYIKAN', 'viewStatus': '1', 'versionCode': '1'}
+                    data = {'appType': '888888',
+                            'crawlerSrcCode': 'WEISHI',
+                            'viewStatus': '1',
+                            'versionCode': '1'}
                     now_timestamp = int(round(time.time() * 1000))
                     data['crawlerTaskTimestamp'] = str(now_timestamp)
                     global uid
@@ -185,8 +183,8 @@ class Publish:
                     # All video files under a single video folder
                     for fi in dir_files:
                         # Paths of every file under the video folder
-                        fi_path = fi_d + '\\' + fi
-                        Common.crawler_log().info('dir fi_path = {}'.format(fi_path))
+                        fi_path = fi_d + '/' + fi
+                        Common.logger(log_type).info('dir fi_path = {}'.format(fi_path))
                         # 读取 info.txt,赋值给 data
                         if cls.info_file in fi:
                             f = open(fi_path, "r", encoding="UTF-8")
@@ -195,7 +193,7 @@ class Publish:
                                 line = f.readline()
                                 line = line.replace('\n', '')
                                 if line is not None and len(line) != 0 and not line.isspace():
-                                    Common.crawler_log().info("line = {}".format(line))
+                                    # Common.logger(log_type).info("line = {}".format(line))
                                     if i == 0:
                                         data['crawlerSrcId'] = line
                                     elif i == 1:
@@ -205,15 +203,15 @@ class Publish:
                                     elif i == 8:
                                         data['crawlerSrcPublishTimestamp'] = line
                                 else:
-                                    Common.crawler_log().warning("{} line is None".format(fi_path))
+                                    Common.logger(log_type).warning("{} line is None".format(fi_path))
                             f.close()
                             # remove info.txt
-                            cls.remove_local_file(fi_path)
+                            cls.remove_local_file(log_type, fi_path)
                     # 刷新数据
                     dir_files = os.listdir(fi_d)
                     for fi in dir_files:
-                        fi_path = fi_d + '\\' + fi
-                        Common.crawler_log().info('dir fi_path = {}'.format(fi_path))
+                        fi_path = fi_d + '/' + fi
+                        # Common.logger(log_type).info('dir fi_path = {}'.format(fi_path))
                         # 上传oss
                         if cls.video_file in fi:
                             global oss_video_file
@@ -221,31 +219,34 @@ class Publish:
                                 oss_video_file = cls.oss_file_path_video.format("dev", today, data['crawlerSrcId'])
                             elif env == "prod":
                                 oss_video_file = cls.oss_file_path_video.format("prod", today, data['crawlerSrcId'])
-                            Common.crawler_log().info("oss_video_file = {}".format(oss_video_file))
-                            cls.put_file(oss_video_file, fi_path)
+                            Common.logger(log_type).info("oss_video_file = {}".format(oss_video_file))
+                            cls.put_file(log_type, oss_video_file, fi_path)
                             data['videoPath'] = oss_video_file
-                            Common.crawler_log().info("videoPath = {}".format(oss_video_file))
+                            Common.logger(log_type).info("videoPath = {}".format(oss_video_file))
                         elif cls.image_file in fi:
                             global oss_image_file
                             if env == "dev":
                                 oss_image_file = cls.oss_file_path_image.format("env", today, data['crawlerSrcId'])
                             elif env == "prod":
                                 oss_image_file = cls.oss_file_path_image.format("prod", today, data['crawlerSrcId'])
-                            Common.crawler_log().info("oss_image_file = {}".format(oss_image_file))
-                            cls.put_file(oss_image_file, fi_path)
+                            Common.logger(log_type).info("oss_image_file = {}".format(oss_image_file))
+                            cls.put_file(log_type, oss_image_file, fi_path)
                             data['coverImgPath'] = oss_image_file
-                            Common.crawler_log().info("coverImgPath = {}".format(oss_image_file))
+                            Common.logger(log_type).info("coverImgPath = {}".format(oss_image_file))
                         # 全部remove
-                        cls.remove_local_file(fi_path)
+                        cls.remove_local_file(log_type, fi_path)
 
                     # 发布
                     if env == "dev":
-                        cls.publish_video_dev(data)
+                        video_id = cls.publish_video_dev(log_type, data)
                     elif env == "prod":
-                        cls.publish_video_prod(data)
-                    cls.remove_local_file_dir(fi_d)
+                        video_id = cls.publish_video_prod(log_type, data)
+                    else:
+                        video_id = cls.publish_video_dev(log_type, data)
+                    cls.remove_local_file_dir(log_type, fi_d)
+                    return video_id
 
                 else:
-                    Common.crawler_log().error('file not a dir = {}'.format(fi_d))
+                    Common.logger(log_type).error('file not a dir = {}'.format(fi_d))
             except Exception as e:
-                Common.crawler_log().exception('upload_and_publish error', e)
+                Common.logger(log_type).exception('upload_and_publish error:{}', e)
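
For context, upload_and_publish fills data from a per-video info.txt that is read back field-by-field by line index (only indices 0, 1 and 8 are visible in this diff; the field written at index 1 is elided by the hunk). A minimal sketch of that reader under those assumptions — read_info is a hypothetical helper, not part of this commit:

    # Hypothetical sketch of the info.txt contract assumed by upload_and_publish.
    # One field per line; only indices 0 and 8 are confirmed by this diff.
    def read_info(path):
        data = {}
        with open(path, "r", encoding="UTF-8") as f:
            for i, raw in enumerate(f):
                line = raw.strip()
                if not line:
                    continue
                if i == 0:
                    data['crawlerSrcId'] = line                # 视频 ID
                elif i == 8:
                    data['crawlerSrcPublishTimestamp'] = line  # 发布时间戳
        return data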

+ 298 - 270
main/recommend.py

@@ -3,6 +3,7 @@
 # @Time: 2022/4/8
 import json
 import os
+import random
 import sys
 import time
 import requests
@@ -14,23 +15,43 @@ from main.publish import Publish
 proxies = {"http": None, "https": None}
 
 
-class DownloadRecommend:
-
+class Recommend:
     # 配置微信号
-    Referer = Feishu.get_range_value("recommend", "9fTK1f", "C3:C3")[0]
-    wesee_openid = Feishu.get_range_value("recommend", "9fTK1f", "C4:C4")[0]
-    wesee_openkey = Feishu.get_range_value("recommend", "9fTK1f", "C5:C5")[0]
-    wesee_personid = Feishu.get_range_value("recommend", "9fTK1f", "C6:C6")[0]
-    wesee_access_token = Feishu.get_range_value("recommend", "9fTK1f", "C7:C7")[0]
-    wesee_thr_appid = Feishu.get_range_value("recommend", "9fTK1f", "C8:C8")[0]
-
-    # 过滤词库
+    wechat_sheet = Feishu.get_values_batch('recommend', 'weishi', '9fTK1f')
+    Referer = wechat_sheet[2][2]
+    wesee_openid = wechat_sheet[3][2]
+    wesee_openkey = wechat_sheet[4][2]
+    wesee_personid = wechat_sheet[5][2]
+    wesee_access_token = wechat_sheet[6][2]
+    wesee_thr_appid = wechat_sheet[7][2]
+
+    # 已抓取视频 ID 列表;crawler_count 为单日抓取上限
+    video_count = []
+    crawler_count = 50
+
+    # 标题过滤词库
     @classmethod
-    def sensitive_words(cls):
+    def video_title_sensitive_words(cls, log_type):
         # 敏感词库列表
         word_list = []
         # 从云文档读取所有敏感词,添加到词库列表
-        lists = Feishu.get_values_batch("recommend", "2Oxf8C")
+        lists = Feishu.get_values_batch(log_type, 'weishi', "2Oxf8C")
+        for a in lists:
+            for j in a:
+                # 过滤空的单元格内容
+                if j is None:
+                    pass
+                else:
+                    word_list.append(j)
+        return word_list
+
+    # 用户名过滤词库
+    @classmethod
+    def username_sensitive_words(cls, log_type):
+        # 敏感词库列表
+        word_list = []
+        # 从云文档读取所有敏感词,添加到词库列表
+        lists = Feishu.get_values_batch(log_type, 'weishi', "KnVAc2")
         for a in lists:
             for j in a:
                 # 过滤空的单元格内容
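
video_title_sensitive_words and username_sensitive_words above differ only in the sheet id they read (2Oxf8C vs KnVAc2). A possible shared reader, sketched under that assumption (sensitive_words_from_sheet is a hypothetical name, not in this commit):

    from main.feishu_lib import Feishu

    # Hypothetical shared reader for both sensitive-word sheets:
    # 摊平所有行、过滤空单元格,返回一维词表
    def sensitive_words_from_sheet(log_type, sheet_id):
        rows = Feishu.get_values_batch(log_type, 'weishi', sheet_id)
        return [cell for row in rows for cell in row if cell is not None]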
@@ -42,27 +63,19 @@ class DownloadRecommend:
 
     # 抓取基础规则
     @staticmethod
-    def download_rule(d_duration, d_width, d_height, d_play_cnt, d_like_cnt, d_share_cnt):
+    def download_rule(duration, width, height, like_cnt):
         """
         下载视频的基本规则
-        :param d_duration: 时长
-        :param d_width: 宽
-        :param d_height: 高
-        :param d_play_cnt: 播放量
-        :param d_like_cnt: 点赞量
-        :param d_share_cnt: 分享量
+        :param duration: 时长
+        :param width: 宽
+        :param height: 高
+        :param like_cnt: 点赞量
         :return: 满足规则,返回 True;反之,返回 False
         """
-        if int(float(d_duration)) >= 30:
-            if int(d_width) >= 720 or int(d_height) >= 720:
-                if int(d_play_cnt) >= 0:
-                    if int(d_like_cnt) >= 0:
-                        if int(d_share_cnt) >= 0:
-                            return True
-                        else:
-                            return False
-                    else:
-                        return False
+        if int(float(duration)) >= 60:
+            if int(width) >= 720 or int(height) >= 720:
+                if int(like_cnt) >= 1000:
+                    return True
                 else:
                     return False
             return False
+        return False
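
The nested rule above collapses to a single boolean expression; an equivalent sketch with the same thresholds (duration >= 60s, either side >= 720px, likes >= 1000), shown with two sample calls:

    # Equivalent single-expression form of download_rule (same thresholds).
    def download_rule(duration, width, height, like_cnt):
        return (int(float(duration)) >= 60
                and (int(width) >= 720 or int(height) >= 720)
                and int(like_cnt) >= 1000)

    # download_rule("65.2", 720, 1280, 1500) -> True
    # download_rule("45", 1080, 1920, 5000)  -> False(时长不足 60 秒)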
@@ -70,7 +83,7 @@ class DownloadRecommend:
 
     # 抓取列表
     @classmethod
-    def get_feeds(cls):
+    def get_feeds(cls, log_type):
         """
         1.从微视小程序首页推荐,获取视频列表
         2.先在 https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?sheet=caa3fa 中去重
@@ -112,253 +125,253 @@ class DownloadRecommend:
                 "mapExt": "{\"imageSize\":\"480\",\"adaptScene\":\"PicHDWebpLimitScene\"}"
             }
         }
-
         try:
-            urllib3.disable_warnings()
-            r = requests.post(headers=headers, url=url, cookies=cookies, json=json_data, proxies=proxies, verify=False)
-            response = json.loads(r.content.decode("utf8"))
-            feeds = response["rsp_body"]["feeds"]
-            for i in range(len(feeds)):
-                # 视频标题过滤话题及处理特殊字符
-                weishi_title = feeds[i]["desc"]
-                title_split1 = weishi_title.split(" #")
-                if title_split1[0] != "":
-                    title1 = title_split1[0]
-                else:
-                    title1 = title_split1[-1]
+            while True:
+                urllib3.disable_warnings()
+                r = requests.post(headers=headers, url=url, cookies=cookies, json=json_data, proxies=proxies,
+                                  verify=False)
+                response = json.loads(r.content.decode("utf8"))
+                feeds = response["rsp_body"]["feeds"]
+                for i in range(len(feeds)):
+                    # 视频标题过滤话题及处理特殊字符
+                    weishi_title = feeds[i]["desc"]
+                    title_split1 = weishi_title.split(" #")
+                    if title_split1[0] != "":
+                        title1 = title_split1[0]
+                    else:
+                        title1 = title_split1[-1]
 
-                title_split2 = title1.split(" #")
-                if title_split2[0] != "":
-                    title2 = title_split2[0]
-                else:
-                    title2 = title_split2[-1]
+                    title_split2 = title1.split(" #")
+                    if title_split2[0] != "":
+                        title2 = title_split2[0]
+                    else:
+                        title2 = title_split2[-1]
 
-                title_split3 = title2.split("@")
-                if title_split3[0] != "":
-                    title3 = title_split3[0]
-                else:
-                    title3 = title_split3[-1]
-                # 视频标题
-                video_title = title3.strip().replace("\n", "") \
-                    .replace("/", "").replace("快手", "").replace(" ", "") \
-                    .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
-                    .replace("#", "").replace(".", "。").replace("\\", "") \
-                    .replace(":", "").replace("*", "").replace("?", "") \
-                    .replace("?", "").replace('"', "").replace("<", "") \
-                    .replace(">", "").replace("|", "").replace("微视", "")
-
-                # 视频 ID
-                if "id" not in feeds[i]["video"]:
-                    video_id = 0
-                else:
-                    video_id = feeds[i]["video"]["id"]
+                    title_split3 = title2.split("@")
+                    if title_split3[0] != "":
+                        title3 = title_split3[0]
+                    else:
+                        title3 = title_split3[-1]
+                    # 视频标题
+                    video_title = title3.strip().replace("\n", "").replace("/", "")\
+                        .replace("快手", "").replace(" ", "").replace(" ", "").replace("&NBSP", "")\
+                        .replace("\r", "").replace("#", "").replace(".", "。").replace("\\", "").replace(":", "")\
+                        .replace("*", "").replace("?", "").replace("?", "").replace('"', "").replace("<", "")\
+                        .replace(">", "").replace("|", "").replace("微视", "")[:40]
+
+                    # 视频 ID
+                    if "id" not in feeds[i]["video"]:
+                        video_id = 0
+                    else:
+                        video_id = feeds[i]["video"]["id"]
 
-                # 播放数
-                if "playNum" not in feeds[i]["ugcData"]:
-                    video_play_cnt = 0
-                else:
-                    video_play_cnt = feeds[i]["ugcData"]["playNum"]
+                    # 播放数
+                    if "playNum" not in feeds[i]["ugcData"]:
+                        video_play_cnt = 0
+                    else:
+                        video_play_cnt = feeds[i]["ugcData"]["playNum"]
 
-                # 点赞数
-                if "dingCount" not in feeds[i]["ugcData"]:
-                    video_like_cnt = 0
-                else:
-                    video_like_cnt = feeds[i]["ugcData"]["dingCount"]
+                    # 点赞数
+                    if "dingCount" not in feeds[i]["ugcData"]:
+                        video_like_cnt = 0
+                    else:
+                        video_like_cnt = feeds[i]["ugcData"]["dingCount"]
 
-                # 分享数
-                if "shareNum" not in feeds[i]["ugcData"]:
-                    video_share_cnt = 0
-                else:
-                    video_share_cnt = feeds[i]["ugcData"]["shareNum"]
+                    # 分享数
+                    if "shareNum" not in feeds[i]["ugcData"]:
+                        video_share_cnt = 0
+                    else:
+                        video_share_cnt = feeds[i]["ugcData"]["shareNum"]
 
-                # 评论数
-                if "totalCommentNum" not in feeds[i]["ugcData"]:
-                    video_comment_cnt = 0
-                else:
-                    video_comment_cnt = feeds[i]["ugcData"]["totalCommentNum"]
+                    # 评论数
+                    if "totalCommentNum" not in feeds[i]["ugcData"]:
+                        video_comment_cnt = 0
+                    else:
+                        video_comment_cnt = feeds[i]["ugcData"]["totalCommentNum"]
 
-                # 视频时长
-                if "duration" not in feeds[i]["video"]:
-                    video_duration = 0
-                else:
-                    video_duration = int(int(feeds[i]["video"]["duration"]) / 1000)
+                    # 视频时长
+                    if "duration" not in feeds[i]["video"]:
+                        video_duration = 0
+                    else:
+                        video_duration = int(int(feeds[i]["video"]["duration"]) / 1000)
 
-                # 视频宽高
-                if "width" not in feeds[i]["video"] or "height" not in feeds[i]["video"]:
-                    video_width = 0
-                    video_height = 0
-                    video_resolution = str(video_width) + "*" + str(video_height)
-                else:
-                    video_width = feeds[i]["video"]["width"]
-                    video_height = feeds[i]["video"]["height"]
-                    video_resolution = str(video_width) + "*" + str(video_height)
+                    # 视频宽高
+                    if "width" not in feeds[i]["video"] or "height" not in feeds[i]["video"]:
+                        video_width = 0
+                        video_height = 0
+                        video_resolution = str(video_width) + "*" + str(video_height)
+                    else:
+                        video_width = feeds[i]["video"]["width"]
+                        video_height = feeds[i]["video"]["height"]
+                        video_resolution = str(video_width) + "*" + str(video_height)
 
-                # 视频发布时间
-                if "createTime" not in feeds[i]:
-                    video_send_time = 0
-                else:
-                    video_send_time = int(feeds[i]["createTime"]) * 1000
+                    # 视频发布时间
+                    if "createTime" not in feeds[i]:
+                        video_send_time = 0
+                    else:
+                        video_send_time = int(feeds[i]["createTime"]) * 1000
 
-                # 用户昵称
-                user_name = feeds[i]["poster"]["nick"].strip().replace("\n", "") \
-                    .replace("/", "").replace("快手", "").replace(" ", "") \
-                    .replace(" ", "").replace("&NBSP", "").replace("\r", "").replace("微视", "")
+                    # 用户昵称
+                    user_name = feeds[i]["poster"]["nick"].strip().replace("\n", "") \
+                        .replace("/", "").replace("快手", "").replace(" ", "") \
+                        .replace(" ", "").replace("&NBSP", "").replace("\r", "").replace("微视", "")
 
-                # 用户 ID
-                user_id = feeds[i]["poster"]["id"]
+                    # 用户 ID
+                    user_id = feeds[i]["poster"]["id"]
 
-                # 用户头像地址
-                if "thumbURL" not in feeds[i]["material"] and "avatar" not in feeds[i]["poster"]:
-                    head_url = 0
-                elif "thumbURL" in feeds[i]["material"]:
-                    head_url = feeds[i]["material"]["thumbURL"]
-                else:
-                    head_url = feeds[i]["poster"]["avatar"]
+                    # 用户头像地址
+                    if "thumbURL" not in feeds[i]["material"] and "avatar" not in feeds[i]["poster"]:
+                        head_url = 0
+                    elif "thumbURL" in feeds[i]["material"]:
+                        head_url = feeds[i]["material"]["thumbURL"]
+                    else:
+                        head_url = feeds[i]["poster"]["avatar"]
 
-                # 视频封面地址
-                if len(feeds[i]["images"]) == 0:
-                    cover_url = 0
-                else:
-                    cover_url = feeds[i]["images"][0]["url"]
+                    # 视频封面地址
+                    if len(feeds[i]["images"]) == 0:
+                        cover_url = 0
+                    else:
+                        cover_url = feeds[i]["images"][0]["url"]
 
-                # 视频播放地址
-                if "url" not in feeds[i]["video"]:
-                    video_url = 0
-                else:
-                    video_url = feeds[i]["video"]["url"]
-
-                Common.logger("recommend").info("video_title:{}".format(video_title))
-                Common.logger("recommend").info("video_id:{}".format(video_id))
-                Common.logger("recommend").info("video_play_cnt:{}".format(video_play_cnt))
-                Common.logger("recommend").info("video_like_cnt:{}".format(video_like_cnt))
-                Common.logger("recommend").info("video_share_cnt:{}".format(video_share_cnt))
-                # Common.logger("recommend").info("video_comment_cnt:{}".format(video_comment_cnt))
-                Common.logger("recommend").info("video_duration:{}秒".format(video_duration))
-                # Common.logger("recommend").info("video_resolution:{}".format(video_resolution))
-                Common.logger("recommend").info(
-                    "video_send_time:{}".format(time.strftime(
-                        "%Y/%m/%d %H:%M:%S", time.localtime(int(video_send_time) / 1000))))
-                Common.logger("recommend").info("user_name:{}".format(user_name))
-                # Common.logger("recommend").info("user_id:{}".format(user_id))
-                # Common.logger("recommend").info("head_url:{}".format(head_url))
-                # Common.logger("recommend").info("cover_url:{}".format(cover_url))
-                Common.logger("recommend").info("video_url:{}".format(video_url))
-
-                # 过滤无效视频
-                if video_id == 0 or video_duration == 0 or video_send_time == 0 or head_url == 0 \
-                        or cover_url == 0 or video_url == 0:
-                    Common.logger("recommend").info("无效视频")
-                # 判断基础规则
-                elif cls.download_rule(video_duration, video_width, video_height,
-                                       video_play_cnt, video_like_cnt, video_share_cnt) is False:
-                    Common.logger("recommend").info("不满足基础规则")
-                # 判断敏感词
-                elif any(word if word in weishi_title else False for word in cls.sensitive_words()) is True:
-                    Common.logger("recommend").info("视频已中敏感词:{}".format(weishi_title))
-                # 从 云文档 去重:https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?sheet=caa3fa
-                elif video_id in [j for m in Feishu.get_values_batch("recommend", "caa3fa") for j in m]:
-                    Common.logger("recommend").info("该视频已下载:{}", video_title)
-                # 从 云文档 去重:https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?sheet=O7fCzr
-                elif video_id in [j for n in Feishu.get_values_batch("recommend", "O7fCzr") for j in n]:
-                    Common.logger("recommend").info("该视频已在feeds中:{}", video_title)
-                else:
-                    Common.logger("recommend").info("该视频未下载,添加至feeds中:{}".format(video_title))
-                    # feeds工作表,插入首行
-                    time.sleep(1)
-                    Feishu.insert_columns("recommend", "O7fCzr", "ROWS", 1, 2)
-                    # 获取当前时间
-                    get_feeds_time = int(time.time())
-                    # 工作表 feeds 中写入数据
-                    values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(int(get_feeds_time))),
-                               "推荐榜",
-                               video_id,
-                               video_title,
-                               video_play_cnt,
-                               video_comment_cnt,
-                               video_like_cnt,
-                               video_share_cnt,
-                               video_duration,
-                               video_resolution,
-                               time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(int(video_send_time / 1000))),
-                               user_name,
-                               user_id,
-                               head_url,
-                               cover_url,
-                               video_url]]
-                    # 等待 1s,防止操作云文档太频繁,导致报错
-                    time.sleep(1)
-                    Feishu.update_values("recommend", "O7fCzr", "A2:P2", values)
+                    # 视频播放地址
+                    if "url" not in feeds[i]["video"]:
+                        video_url = 0
+                    else:
+                        video_url = feeds[i]["video"]["url"]
+
+                    Common.logger(log_type).info("video_title:{}".format(video_title))
+                    Common.logger(log_type).info("video_id:{}".format(video_id))
+                    Common.logger(log_type).info("video_like_cnt:{}".format(video_like_cnt))
+                    Common.logger(log_type).info("video_share_cnt:{}".format(video_share_cnt))
+                    Common.logger(log_type).info("video_comment_cnt:{}".format(video_comment_cnt))
+                    Common.logger(log_type).info("video_duration:{}秒".format(video_duration))
+                    Common.logger(log_type).info(
+                        "video_send_time:{}".format(time.strftime(
+                            "%Y/%m/%d %H:%M:%S", time.localtime(int(video_send_time) / 1000))))
+                    Common.logger(log_type).info("user_name:{}".format(user_name))
+                    Common.logger(log_type).info("video_url:{}".format(video_url))
+                    # Common.logger(log_type).info("video_play_cnt:{}".format(video_play_cnt))
+                    # Common.logger(log_type).info("video_resolution:{}".format(video_resolution))
+                    # Common.logger(log_type).info("user_id:{}".format(user_id))
+                    # Common.logger(log_type).info("head_url:{}".format(head_url))
+                    # Common.logger(log_type).info("cover_url:{}".format(cover_url))
+
+                    # 过滤无效视频
+                    if video_id == 0 or video_duration == 0 or video_send_time == 0 or head_url == 0 \
+                            or cover_url == 0 or video_url == 0:
+                        Common.logger(log_type).info("无效视频\n")
+                    # 判断基础规则
+                    elif cls.download_rule(video_duration, video_width, video_height, video_like_cnt) is False:
+                        Common.logger(log_type).info("不满足基础规则\n")
+                    # 标题敏感词过滤
+                    elif any(word and word in weishi_title
+                             for word in cls.video_title_sensitive_words(log_type)):
+                        Common.logger(log_type).info("标题已中敏感词:{}\n".format(weishi_title))
+                    # 用户名敏感词过滤
+                    elif any(word and word in user_name
+                             for word in cls.username_sensitive_words(log_type)):
+                        Common.logger(log_type).info("用户名已中敏感词:{}\n".format(user_name))
+                    # 从已下载云文档去重
+                    elif str(video_id) in [j for m in Feishu.get_values_batch(log_type, 'weishi', "caa3fa") for j in m]:
+                        Common.logger(log_type).info("视频已下载:{}\n", video_title)
+                    # 从 云文档 去重:https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?sheet=O7fCzr
+                    elif str(video_id) in [j for n in Feishu.get_values_batch(log_type, 'weishi', "O7fCzr") for j in n]:
+                        Common.logger(log_type).info("视频已存在:{}\n", video_title)
+                    else:
+                        # 添加到已下载视频列表
+                        cls.video_count.append(video_id)
+
+                        # feeds工作表,插入首行
+                        Feishu.insert_columns(log_type, 'weishi', "O7fCzr", "ROWS", 1, 2)
+                        # 获取当前时间
+                        get_feeds_time = int(time.time())
+                        # 工作表 feeds 中写入数据
+                        values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(int(get_feeds_time))),
+                                   "推荐榜",
+                                   str(video_id),
+                                   video_title,
+                                   int(video_play_cnt),
+                                   int(video_comment_cnt),
+                                   int(video_like_cnt),
+                                   int(video_share_cnt),
+                                   video_duration,
+                                   video_resolution,
+                                   time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(int(video_send_time / 1000))),
+                                   user_name,
+                                   user_id,
+                                   head_url,
+                                   cover_url,
+                                   video_url]]
+                        # 等待 1s,防止操作云文档太频繁,导致报错
+                        time.sleep(1)
+                        Feishu.update_values(log_type, 'weishi', "O7fCzr", "A2:T2", values)
+                        Common.logger(log_type).info("视频保存至云文档成功\n")
+                        time.sleep(random.randint(3, 5))
+
+                        # 每天抓取 50 条
+                        if len(cls.video_count) >= cls.crawler_count:
+                            Common.logger(log_type).info("已抓取{}条数据\n", len(cls.video_count))
+                            cls.video_count = []
+                            return
         except Exception as e:
-            Common.logger("recommend").error("获取微视视频list异常:{}".format(e))
+            Common.logger(log_type).error("get_feeds异常:{}\n".format(e))
 
-    # 下载/上传视频
+    # 下载/上传
     @classmethod
-    def download_publish(cls):
+    def download_publish(cls, log_type, env):
         try:
-            for i in range(1, len(Feishu.get_values_batch("recommend", "O7fCzr")) + 1):
-                time.sleep(1)
-                download_video_id = Feishu.get_values_batch("recommend", "O7fCzr")[i][2]
-                download_video_title = Feishu.get_values_batch("recommend", "O7fCzr")[i][3]
-                download_video_play_cnt = Feishu.get_values_batch("recommend", "O7fCzr")[i][4]
-                download_video_comment_cnt = Feishu.get_values_batch("recommend", "O7fCzr")[i][5]
-                download_video_like_cnt = Feishu.get_values_batch("recommend", "O7fCzr")[i][6]
-                download_video_share_cnt = Feishu.get_values_batch("recommend", "O7fCzr")[i][7]
-                download_video_duration = Feishu.get_values_batch("recommend", "O7fCzr")[i][8]
-                download_video_resolution = Feishu.get_values_batch("recommend", "O7fCzr")[i][9]
-                # download_video_width = download_video_resolution.split("*")[0]
-                # download_video_height = download_video_resolution.split("*")[-1]
-                download_video_send_time = Feishu.get_values_batch("recommend", "O7fCzr")[i][10]
-                download_user_name = Feishu.get_values_batch("recommend", "O7fCzr")[i][11]
-                download_user_id = Feishu.get_values_batch("recommend", "O7fCzr")[i][12]
-                download_head_url = Feishu.get_values_batch("recommend", "O7fCzr")[i][13]
-                download_cover_url = Feishu.get_values_batch("recommend", "O7fCzr")[i][14]
-                download_video_url = Feishu.get_values_batch("recommend", "O7fCzr")[i][15]
-
-                # Common.logger("recommend").info("download_video_id:{}", download_video_id)
-                # Common.logger("recommend").info("download_video_title:{}", download_video_title)
-                # Common.logger("recommend").info("download_video_play_cnt:{}", download_video_play_cnt)
-                # Common.logger("recommend").info("download_video_comment_cnt:{}", download_video_comment_cnt)
-                # Common.logger("recommend").info("download_video_like_cnt:{}", download_video_like_cnt)
-                # Common.logger("recommend").info("download_video_share_cnt:{}", download_video_share_cnt)
-                # Common.logger("recommend").info("download_video_duration:{}", download_video_duration)
-                # Common.logger("recommend").info("download_video_resolution:{}", download_video_resolution)
-                # Common.logger("recommend").info("download_video_send_time:{}", download_video_send_time)
-                # Common.logger("recommend").info("download_user_name:{}", download_user_name)
-                # Common.logger("recommend").info("download_user_id:{}", download_user_id)
-                # Common.logger("recommend").info("download_head_url:{}", download_head_url)
-                # Common.logger("recommend").info("download_cover_url:{}", download_cover_url)
-                # Common.logger("recommend").info("download_video_url:{}", download_video_url)
-
-                Common.logger("recommend").info("正在判断第{}行,视频:{}", i, download_video_title)
+            recommend_sheet = Feishu.get_values_batch(log_type, 'weishi', "O7fCzr")
+            for i in range(1, len(recommend_sheet)):
+                download_video_id = recommend_sheet[i][2]
+                download_video_title = recommend_sheet[i][3]
+                download_video_play_cnt = recommend_sheet[i][4]
+                download_video_comment_cnt = recommend_sheet[i][5]
+                download_video_like_cnt = recommend_sheet[i][6]
+                download_video_share_cnt = recommend_sheet[i][7]
+                download_video_duration = recommend_sheet[i][8]
+                download_video_resolution = recommend_sheet[i][9]
+                download_video_send_time = recommend_sheet[i][10]
+                download_user_name = recommend_sheet[i][11]
+                download_user_id = recommend_sheet[i][12]
+                download_head_url = recommend_sheet[i][13]
+                download_cover_url = recommend_sheet[i][14]
+                download_video_url = recommend_sheet[i][15]
+
+                # Common.logger(log_type).info("download_video_title:{}", download_video_title)
+                # Common.logger(log_type).info("download_video_id:{}", download_video_id)
+                # Common.logger(log_type).info("download_video_play_cnt:{}", download_video_play_cnt)
+                # Common.logger(log_type).info("download_video_comment_cnt:{}", download_video_comment_cnt)
+                # Common.logger(log_type).info("download_video_share_cnt:{}", download_video_share_cnt)
+                # Common.logger(log_type).info("download_user_name:{}", download_user_name)
+                # Common.logger(log_type).info("download_user_id:{}", download_user_id)
+                # Common.logger(log_type).info("download_head_url:{}", download_head_url)
+                # Common.logger(log_type).info("download_cover_url:{}", download_cover_url)
+
+                Common.logger(log_type).info("正在判断第{}行:{}", i+1, download_video_title)
+                Common.logger(log_type).info("like_cnt:{}", download_video_like_cnt)
+                Common.logger(log_type).info("duration:{}", download_video_duration)
+                Common.logger(log_type).info("resolution:{}", download_video_resolution)
+                Common.logger(log_type).info("send_time:{}", download_video_send_time)
+                Common.logger(log_type).info("video_url:{}", download_video_url)
 
                 # 过滤空行
-                if download_video_id is None \
-                        or download_video_id == "" \
-                        or download_video_title is None \
-                        or download_video_title == "":
-                    Common.logger("recommend").warning("空行,删除")
-                    # 删除行或列,可选 ROWS、COLUMNS
-                    Feishu.dimension_range("recommend", "O7fCzr", "ROWS", i + 1, i + 1)
-                    return
-                # 分享量>=1000
-                elif int(download_video_share_cnt) < 1000:
-                    Common.logger("recommend").info("分享量:{} < 1000", download_video_share_cnt)
+                if download_video_id is None or download_video_title is None:
                     # 删除行或列,可选 ROWS、COLUMNS
-                    Feishu.dimension_range("recommend", "O7fCzr", "ROWS", i + 1, i + 1)
+                    Feishu.dimension_range(log_type, 'weishi', "O7fCzr", "ROWS", i + 1, i + 1)
+                    Common.logger(log_type).warning("空行,已删除\n")
                     return
                 # 去重
-                elif download_video_id in [j for m in Feishu.get_values_batch("recommend", "caa3fa") for j in m]:
-                    Common.logger("recommend").info("该视频已下载:{}", download_video_title)
+                elif download_video_id in [j for m in Feishu.get_values_batch(log_type, 'weishi', "caa3fa") for j in m]:
                     # 删除行或列,可选 ROWS、COLUMNS
-                    Feishu.dimension_range("recommend", "O7fCzr", "ROWS", i + 1, i + 1)
+                    Feishu.dimension_range(log_type, 'weishi', "O7fCzr", "ROWS", i + 1, i + 1)
+                    Common.logger(log_type).info("视频已下载:{}\n", download_video_title)
                     return
                 else:
-                    Common.logger("recommend").info("开始下载视频:{}", download_video_title)
                     # 下载封面
-                    Common.download_method(job="recommend", text="cover",
+                    Common.download_method(log_type, text="cover",
                                            d_name=str(download_video_title), d_url=str(download_cover_url))
                     # 下载视频
-                    Common.download_method(job="recommend", text="video",
+                    Common.download_method(log_type, text="video",
                                            d_name=str(download_video_title), d_url=str(download_video_url))
                     # 保存视频信息至 "./videos/{download_video_title}/info.txt"
                     with open("./videos/" + download_video_title
@@ -378,22 +391,23 @@ class DownloadRecommend:
                                   str(download_video_url) + "\n" +
                                   str(download_cover_url) + "\n" +
                                   str(cls.wesee_access_token))
-                    Common.logger("recommend").info("==========视频信息已保存至info.txt==========")
+                    Common.logger(log_type).info("视频信息已保存至info.txt")
 
                     # 上传视频
-                    Common.logger("recommend").info("开始上传视频:{}".format(download_video_title))
-                    Publish.upload_and_publish("recommend", "prod", "play")
+                    Common.logger(log_type).info("开始上传视频:{}".format(download_video_title))
+                    our_video_id = Publish.upload_and_publish(log_type, env, "play")
+                    our_video_link = "https://admin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info"
+                    Common.logger(log_type).info("视频上传完成:{}", download_video_title)
 
-                    # 保存视频 ID 到云文档:https://w42nne6hzg.feishu.cn/sheets/shtcn5YSWg91JfVGzj0SFZIRRPh?sheet=caa3fa
-                    Common.logger("recommend").info("保存视频ID至云文档:{}", download_video_title)
                     # 视频ID工作表,插入首行
-                    Feishu.insert_columns("recommend", "caa3fa", "ROWS", 1, 2)
+                    Feishu.insert_columns(log_type, 'weishi', "caa3fa", "ROWS", 1, 2)
                     # 视频ID工作表,首行写入数据
                     upload_time = int(time.time())
                     values = [[str(time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(upload_time))),
                                "推荐榜",
-                               str(download_video_id),
                                str(download_video_title),
+                               str(download_video_id),
+                               our_video_link,
                                download_video_play_cnt,
                                download_video_comment_cnt,
                                download_video_like_cnt,
@@ -407,26 +421,40 @@ class DownloadRecommend:
                                str(download_cover_url),
                                str(download_video_url)]]
                     time.sleep(1)
-                    Feishu.update_values("recommend", "caa3fa", "A2:Q2", values)
+                    Feishu.update_values(log_type, 'weishi', "caa3fa", "F2:W2", values)
+                    Common.logger(log_type).info("视频已保存至云文档:{}", download_video_title)
 
                     # 删除行或列,可选 ROWS、COLUMNS
-                    Feishu.dimension_range("recommend", "O7fCzr", "ROWS", i + 1, i + 1)
+                    Feishu.dimension_range(log_type, 'weishi', "O7fCzr", "ROWS", i + 1, i + 1)
+                    Common.logger(log_type).info("视频:{},下载/上传成功\n", download_video_title)
                     return
         except Exception as e:
-            Common.logger("recommend").error("下载/上传视频异常:{}", e)
-            Feishu.dimension_range("recommend", "O7fCzr", "ROWS", 2, 2)
+            Feishu.dimension_range(log_type, 'weishi', "O7fCzr", "ROWS", 2, 2)
+            Common.logger(log_type).error("download_publish异常,已删除该条数据:{}\n", e)
+
+    # 执行 下载/上传
+    @classmethod
+    def run_download_publish(cls, log_type, env):
+        try:
+            while True:
+                if len(Feishu.get_values_batch(log_type, 'weishi', 'O7fCzr')) == 1:
+                    Common.logger(log_type).info("下载/上传完成\n")
+                    break
+                else:
+                    cls.download_publish(log_type, env)
+                    time.sleep(random.randint(1, 3))
+        except Exception as e:
+            Common.logger(log_type).error("run_download_publish异常:{}", e)
 
 
 if __name__ == "__main__":
-    weishi = DownloadRecommend()
-    for n in range(2):
-        Common.logger("recommend").info("正在抓取第{}页视频", n + 1)
-        weishi.get_feeds()
-
-    # print(weishi.Referer)
-    # print(weishi.wesee_openid)
-    # print(weishi.wesee_openkey)
-    # print(weishi.wesee_personid)
-    # print(weishi.wesee_access_token)
-    # print(weishi.wesee_thr_appid)
-    # print(weishi.json_text)
+    # Recommend.get_feeds('weishi')
+    Recommend.download_publish('weishi', 'dev')
+
+    # print(Recommend.Referer)
+    # print(Recommend.wesee_openid)
+    # print(Recommend.wesee_openkey)
+    # print(Recommend.wesee_personid)
+    # print(Recommend.wesee_access_token)
+    # print(Recommend.wesee_thr_appid)
+    pass
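
One design note on the dedup checks in get_feeds and download_publish: the pattern str(video_id) in [j for m in Feishu.get_values_batch(...) for j in m] re-fetches and flattens an entire sheet for every candidate video. A cheaper variant, sketched with a hypothetical helper (load_seen_ids is not part of this commit), fetches each sheet once per crawl pass and tests membership in O(1):

    from main.feishu_lib import Feishu

    # Hypothetical helper: fetch both dedup sheets once per crawl pass.
    def load_seen_ids(log_type):
        seen = set()
        for sheet in ("caa3fa", "O7fCzr"):   # 已下载表、feeds 表
            for row in Feishu.get_values_batch(log_type, 'weishi', sheet):
                seen.update(str(cell) for cell in row if cell is not None)
        return seen

    # 在 get_feeds 中:seen = load_seen_ids(log_type),之后用 str(video_id) in seen 判断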

+ 45 - 0
main/run_follow.py

@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2022/6/15
+import os
+import random
+import sys
+import time
+
+sys.path.append(os.getcwd())
+from main.common import Common
+from main.feishu_lib import Feishu
+from main.follow import DownloadFollow
+
+
+class RunFollow:
+    # 抓取及上传 job
+    @classmethod
+    def run_follow_job(cls):
+        Common.logger("follow").info("开始抓取关注榜")
+        while True:
+            # 抓取 100 条视频
+            if len(Feishu.get_values_batch("follow", "PamIy1")) >= 30:
+                Common.logger("follow").info("当前共抓取{}条视频", len(Feishu.get_values_batch("follow", "PamIy1"))-1)
+                break
+            else:
+                # for i in range(30):
+                # Common.logger().info("正在请求第{}页视频", i+1)
+                DownloadFollow.get_feeds()
+                Common.logger("follow").info("随机休眠 1-5s")
+                time.sleep(random.randint(1, 5))
+        while True:
+            # 分析下载/上传视频
+            if len(Feishu.get_values_batch("follow", "PamIy1")) == 1:
+                Common.logger("follow").info("没有可分析的视频")
+                break
+            else:
+                Common.logger("follow").info("开始分析并下载/上传视频")
+                DownloadFollow.download_publish()
+        # 清除日志
+        Common.del_logs("follow")
+
+
+if __name__ == "__main__":
+    RunFollow.run_follow_job()

+ 20 - 67
main/run_recommend.py

@@ -3,77 +3,30 @@
 # @Time: 2022/3/30
 import datetime
 import os
-import random
 import sys
 import time
-from apscheduler.schedulers.blocking import BlockingScheduler
 sys.path.append(os.getcwd())
 from main.common import Common
-from main.download_recommend import DownloadRecommend
-
-
-def weishi_dev_job():
-    """
-    执行测试环境微视脚本
-    """
-    while True:
-        weishi_dev_time = datetime.datetime.now()
-        if weishi_dev_time.hour >= 20 or weishi_dev_time.hour <= 10:
-            # 抓取符合规则的视频,写入 weishi_feeds.txt
-            DownloadRecommend.get_weishi_recommend()
-            # 下载视频,并上传
-            DownloadRecommend.download_weishi_play_video("dev")
-            # 随机睡眠1-3s
-            time.sleep(random.randint(1, 3))
-        else:
-            Common.crawler_log().info("结束抓取及上传任务")
-            break
-
-    # 删除冗余日志
-    Common.del_logs()
-    # 统计下载视频数
-    Common.weishi_download_count()
-
-
-def main_dev():
-    """
-    测试环境主函数
-    """
-    while True:
-        # Common.crawler_log().info("开始抓取微视视频")
-        weishi_dev_job()
-
-
-def weishi_prod_job():
-    """
-    执行正式环境微视脚本
-    """
-    while True:
-        if 20 >= Common.now.hour >= 5:
-            Common.crawler_log().info("结束抓取微视视频任务")
-            break
-        else:
-            # 抓取符合规则的视频,写入 weishi_feeds.txt
-            DownloadRecommend.get_weishi_recommend()
-            # 下载视频,并上传
-            DownloadRecommend.download_weishi_play_video("prod")
-            # 随机睡眠1-3s
-            time.sleep(random.randint(1, 3))
-
-    # 删除冗余日志
-    Common.del_logs()
-    # 统计下载视频数
-    Common.weishi_download_count()
-
-
-def main_prod():
-    """
-    正式环境主函数
-    """
-    while True:
-        weishi_prod_job()
+from main.recommend import Recommend
+
+
+class Main:
+    # 抓取及上传 job
+    @classmethod
+    def main(cls, env):
+        while True:
+            if 21 >= datetime.datetime.now().hour >= 8:
+                # 抓取视频列表
+                Common.logger('recommend').info('开始抓取微视视频\n')
+                Recommend.get_feeds('recommend')
+                # 下载/上传
+                Common.logger('recommend').info('开始下载/上传视频\n')
+                Recommend.run_download_publish('recommend', env)
+                # 清除日志
+                Common.del_logs('recommend')
+                Common.logger('recommend').info('今日抓取任务结束,休眠{}小时', 24-datetime.datetime.now().hour)
+                time.sleep(3600*(24-datetime.datetime.now().hour))
+            else:
+                # 未到抓取时间段(8-21 点),休眠 10 分钟后重新检查,避免空转
+                time.sleep(600)
 
 
 if __name__ == "__main__":
-    main_dev()
-    # main_prod()
+    Main.main('prod')
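
Note: both entry scripts (run_follow.py and run_recommend.py) call sys.path.append(os.getcwd()) before importing from main.*, so they are expected to be started from the repository root — for illustration, something like python3 main/run_recommend.py; the actual deployment command is not part of this commit.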