wangkun 1 year ago
parent
commit
c77d76a008
27 changed files with 225 additions and 898 deletions
  1. + 16 - 0    douyin/douyin_author/douyin_author_scheduling.py
  2. + 0 - 0     douyin/douyin_follow/__init__.py
  3. + 0 - 356   douyin/douyin_follow/follow_dy.py
  4. + 0 - 43    douyin/douyin_main/run_douyin_author_scheduling.py
  5. + 0 - 45    douyin/douyin_main/run_douyin_follow.py
  6. + 0 - 46    douyin/douyin_main/run_douyin_recommend.py
  7. + 0 - 50    douyin/douyin_main/run_douyin_recommend_scheduling.py
  8. + 20 - 0    douyin/douyin_main/run_dy_author.py
  9. + 25 - 0    douyin/douyin_main/run_dy_author_dev.py
  10. + 20 - 0   douyin/douyin_main/run_dy_recommend.py
  11. + 24 - 0   douyin/douyin_main/run_dy_recommend_dev.py
  12. + 15 - 0   douyin/douyin_recommend/douyin_recommend_scheduling.py
  13. + 1 - 0    gongzhonghao/gongzhonghao_author/gongzhonghao3_author.py
  14. + 0 - 46   gongzhonghao/gongzhonghao_main/run_gongzhonghao1_author_scheduling.py
  15. + 0 - 37   gongzhonghao/gongzhonghao_main/run_gongzhonghao2_author_scheduling.py
  16. + 0 - 46   gongzhonghao/gongzhonghao_main/run_gongzhonghao3_author_scheduling.py
  17. + 0 - 46   gongzhonghao/gongzhonghao_main/run_gongzhonghao4_author_scheduling.py
  18. + 0 - 46   gongzhonghao/gongzhonghao_main/run_gongzhonghao5_author_scheduling.py
  19. + 0 - 43   gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow.py
  20. + 0 - 43   gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow_2.py
  21. + 0 - 43   gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow_3.py
  22. + 21 - 2   kuaishou/kuaishou_author/kuaishou_author_scheduling.py
  23. + 20 - 0   kuaishou/kuaishou_main/run_ks_author.py
  24. + 20 - 0   kuaishou/kuaishou_main/run_ks_recommend.py
  25. + 2 - 0    kuaishou/kuaishou_main/run_ks_recommend_dev.py
  26. + 21 - 3   kuaishou/kuaishou_recommend/kuaishou_recommend_cut_title.py
  27. + 20 - 3   kuaishou/kuaishou_recommend/kuaishou_recommend_shceduling.py

+ 16 - 0
douyin/douyin_author/douyin_author_scheduling.py

@@ -59,6 +59,7 @@ class DouyinauthorScheduling:
         url = "https://www.douyin.com/aweme/v1/web/aweme/post/?device_platform=webapp&aid=6383&channel=channel_pc_web&sec_user_id={sec_user_id}&max_cursor={max_cursor}&show_live_replay_strategy=1&count=10&publish_video_strategy_type=2&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1440&screen_height=900&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=112.0.0.0&browser_online=true&engine_name=Blink&engine_version=112.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=50".format(
             sec_user_id=user_dict["link"].replace("https://www.douyin.com/user/", ""), max_cursor=max_cursor)
         Common.logger(log_type, crawler).info(f"url:{url}")
+        Common.logging(log_type, crawler, env, f"url:{url}")
         headers = {
             'authority': 'www.douyin.com',
             'accept': 'application/json, text/plain, */*',
@@ -83,6 +84,7 @@ class DouyinauthorScheduling:
         # max_cursor = res.json().get("max_cursor", "")
         if not aweme_list:
             Common.logger(log_type, crawler).info(f"没有更多数据啦~:{res.text}\n")
+            Common.logging(log_type, crawler, env, f"没有更多数据啦~:{res.text}\n")
             return
         for info in aweme_list:
             try:
@@ -121,14 +123,17 @@ class DouyinauthorScheduling:
 
                 for k, v in video_dict.items():
                     Common.logger(log_type, crawler).info(f"{k}:{v}")
+                Common.logging(log_type, crawler, env, f"{video_dict}")
 
                 if int((int(time.time()) - int(video_dict["publish_time_stamp"])) / (3600 * 24)) > int(rule_dict.get("period", {}).get("max", 1000)):
                     Common.logger(log_type, crawler).info(f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n')
+                    Common.logging(log_type, crawler, env, f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n')
                     return
 
                 if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict,
                                  rule_dict=rule_dict) is False:
                     Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                    Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                 elif any(str(word) if str(word) in video_dict["video_title"] else False
                          for word in get_config_from_mysql(log_type=log_type,
                                                            source=crawler,
@@ -136,8 +141,10 @@ class DouyinauthorScheduling:
                                                            text="filter",
                                                            action="")) is True:
                     Common.logger(log_type, crawler).info('已中过滤词\n')
+                    Common.logging(log_type, crawler, env, '已中过滤词\n')
                 elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                     Common.logger(log_type, crawler).info('视频已下载\n')
+                    Common.logging(log_type, crawler, env, '视频已下载\n')
                 else:
                     cls.download_publish(log_type=log_type,
                                          crawler=crawler,
@@ -147,6 +154,7 @@ class DouyinauthorScheduling:
                                          env=env)
             except Exception as e:
                 Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+                Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
 
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
@@ -165,11 +173,13 @@ class DouyinauthorScheduling:
                 # 删除视频文件夹
                 shutil.rmtree(f"./{crawler}/videos/{md_title}")
                 Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                Common.logging(log_type, crawler, env, "视频size=0,删除成功\n")
                 return
         except FileNotFoundError:
             # 删除视频文件夹
             shutil.rmtree(f"./{crawler}/videos/{md_title}")
             Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
+            Common.logging(log_type, crawler, env, "视频文件不存在,删除文件夹成功\n")
             return
         # 下载封面
         Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
@@ -178,6 +188,7 @@ class DouyinauthorScheduling:
 
         # 上传视频
         Common.logger(log_type, crawler).info("开始上传视频...")
+        Common.logging(log_type, crawler, env, "开始上传视频...")
         if env == "dev":
             oss_endpoint = "out"
             our_video_id = Publish.upload_and_publish(log_type=log_type,
@@ -244,8 +255,10 @@ class DouyinauthorScheduling:
                         {int(video_dict['video_width'])},
                         {int(video_dict['video_height'])}) """
         Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
         MysqlHelper.update_values(log_type, crawler, insert_sql, env)
         Common.logger(log_type, crawler).info('视频信息写入数据库成功')
+        Common.logging(log_type, crawler, env, '视频信息写入数据库成功')
 
         # 视频写入飞书
         upload_time = int(time.time())
@@ -274,12 +287,14 @@ class DouyinauthorScheduling:
         time.sleep(0.5)
         Feishu.update_values(log_type, 'douyin', "qV9VC0", "A2:Z2", values)
         Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+        Common.logging(log_type, crawler, env, f"视频已保存至云文档\n")
 
     @classmethod
     def get_author_videos(cls, log_type, crawler, rule_dict, user_list, env):
         for user_dict in user_list:
             try:
                 Common.logger(log_type, crawler).info(f"开始抓取 {user_dict['nick_name']} 用户主页视频\n")
+                Common.logging(log_type, crawler, env, f"开始抓取 {user_dict['nick_name']} 用户主页视频\n")
                 cls.get_videoList(log_type=log_type,
                                   crawler=crawler,
                                   rule_dict=rule_dict,
@@ -287,6 +302,7 @@ class DouyinauthorScheduling:
                                   env=env)
             except Exception as e:
                 Common.logger(log_type, crawler).error(f"抓取用户{user_dict['nick_name']}主页视频时异常:{e}\n")
+                Common.logging(log_type, crawler, env, f"抓取用户{user_dict['nick_name']}主页视频时异常:{e}\n")
 
 
 if __name__ == '__main__':
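The recurring change in this file (and in douyin_recommend_scheduling.py, the kuaishou crawlers, and the new run_dy_*.py entry points below) is a Common.logging(log_type, crawler, env, message) call added next to each existing Common.logger(...).info/error call. The helper itself lives in common/common.py and is not part of this diff; the sketch below is only an assumption about its shape, showing one way a (log_type, crawler, env)-tagged record could be emitted to a centralized sink.

    # Hypothetical sketch -- the real Common.logging is defined in common/common.py
    # and is not shown in this diff. Only the call signature used throughout the
    # commit, logging(log_type, crawler, env, message), is taken from the source.
    import datetime
    import json


    class Common:
        @classmethod
        def logging(cls, log_type, crawler, env, message):
            """Build one structured record and hand it to a centralized sink."""
            record = {
                "timestamp": datetime.datetime.now().isoformat(),
                "log_type": log_type,   # e.g. "author" / "recommend"
                "crawler": crawler,     # e.g. "douyin"
                "env": env,             # "dev" / "prod"
                "message": str(message).strip(),
            }
            # Placeholder sink: print as JSON. The real helper presumably forwards
            # the record to a remote log service rather than stdout.
            print(json.dumps(record, ensure_ascii=False))


    # Example call, matching the pattern added throughout the diff:
    # Common.logging("author", "douyin", "dev", "开始上传视频...")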

+ 0 - 0
douyin/douyin_follow/__init__.py


+ 0 - 356
douyin/douyin_follow/follow_dy.py

@@ -1,356 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: lierqiang
-# @Time: 2023/4/12
-import json
-import os
-import random
-import shutil
-import sys
-import time
-import requests
-from hashlib import md5
-sys.path.append(os.getcwd())
-from douyin.douyin_recommend import get_xb
-from common.common import Common
-from common.db import MysqlHelper
-from common.feishu import Feishu
-from common.publish import Publish
-from common.public import random_title
-from common.userAgent import get_random_user_agent
-from common.public import get_user_from_mysql, get_config_from_mysql
-
-
-class DyFollow(object):
-    platform = "抖音"
-    tag = "抖音定向爬虫策略"
-
-    @classmethod
-    def get_rule(cls, log_type, crawler):
-        try:
-            while True:
-                rule_sheet = Feishu.get_values_batch(log_type, crawler, "fn2hEO")
-                if rule_sheet is None:
-                    Common.logger(log_type, crawler).warning("rule_sheet is None! 10秒后重新获取")
-                    time.sleep(10)
-                    continue
-                rule_dict = {
-                    "video_width": int(rule_sheet[0][2]),
-                    "video_height": int(rule_sheet[1][2]),
-                    "like_cnt": int(rule_sheet[2][2]),
-                    "duration": int(rule_sheet[3][2]),
-                    "publish_time": int(rule_sheet[4][2]),
-
-                }
-                return rule_dict
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
-
-    # 下载规则
-    @classmethod
-    def download_rule(cls, video_info_dict, rule_dict):
-        # if video_info_dict['play_cnt'] >= rule_dict['play_cnt']:
-        #     if video_info_dict['comment_cnt'] >= rule_dict['comment_cnt']:
-        if video_info_dict['like_cnt'] >= rule_dict['like_cnt']:
-            if video_info_dict['duration'] >= rule_dict['duration']:
-                if video_info_dict['video_width'] >= rule_dict['video_width'] \
-                        or video_info_dict['video_height'] >= rule_dict['video_height']:
-                    return True
-                else:
-                    return False
-            else:
-                return False
-        else:
-            return False
-
-    # else:
-    #     return False
-    # else:
-    #     return False
-
-    # 过滤词库
-    @classmethod
-    def filter_words(cls, log_type, crawler):
-        try:
-            while True:
-                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, '6BS2RR')
-                if filter_words_sheet is None:
-                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
-                    continue
-                filter_words_list = []
-                for x in filter_words_sheet:
-                    for y in x:
-                        if y is None:
-                            pass
-                        else:
-                            filter_words_list.append(y)
-                return filter_words_list
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')
-
-    @classmethod
-    def video_title(cls, log_type, env, crawler, title):
-        title_split1 = title.split(" #")
-        if title_split1[0] != "":
-            title1 = title_split1[0]
-        else:
-            title1 = title_split1[-1]
-
-        title_split2 = title1.split(" #")
-        if title_split2[0] != "":
-            title2 = title_split2[0]
-        else:
-            title2 = title_split2[-1]
-
-        title_split3 = title2.split("@")
-        if title_split3[0] != "":
-            title3 = title_split3[0]
-        else:
-            title3 = title_split3[-1]
-
-        video_title = title3.strip().split('#')[0].replace("\n", "") \
-                          .replace("/", "").replace("抖音", "").replace(" ", "") \
-                          .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
-                          .replace(".", "。").replace("\\", "") \
-                          .replace(":", "").replace("*", "").replace("?", "") \
-                          .replace("?", "").replace('"', "").replace("<", "") \
-                          .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
-        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
-            return random_title(log_type, crawler, env, text='title')
-        else:
-            return video_title
-
-    @classmethod
-    def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine, rule_dict):
-
-        try:
-            max_cursor = ''
-            url = "https://www.douyin.com/aweme/v1/web/aweme/post/?device_platform=webapp&aid=6383&channel=channel_pc_web&sec_user_id={sec_user_id}&max_cursor={max_cursor}&show_live_replay_strategy=1&count=10&publish_video_strategy_type=2&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1440&screen_height=900&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=112.0.0.0&browser_online=true&engine_name=Blink&engine_version=112.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=50".format(
-                sec_user_id=out_uid, max_cursor=max_cursor)
-            headers = {
-                'authority': 'www.douyin.com',
-                'accept': 'application/json, text/plain, */*',
-                'accept-language': 'zh-CN,zh;q=0.9',
-                # 'cookie': '__ac_nonce=06437a18000f23ad954f0; __ac_signature=_02B4Z6wo00f01Sb71TAAAIDCsi2OPpjonN0m29GAAC2M85; s_v_web_id=verify_lgeqr3uq_3aDaqQXf_juHS_40Yi_BE8b_tI8FCILZQXPK; _tea_utm_cache_2018=undefined; ttwid=1%7Cq_IBs6hbBUOIEcRR1gxtgY6GiTbTE3U1XhJNLL_9BZA%7C1681367431%7Cf77b36ae4721884fec1c3fa9d6a08c29e308236ae13df58d1be3b0d1f82f8668; strategyABtestKey=%221681367433.454%22; passport_csrf_token=bff0289a5846e58b4b9db6e1f64665f4; passport_csrf_token_default=bff0289a5846e58b4b9db6e1f64665f4; bd_ticket_guard_client_data=eyJiZC10aWNrZXQtZ3VhcmQtdmVyc2lvbiI6MiwiYmQtdGlja2V0LWd1YXJkLWl0ZXJhdGlvbi12ZXJzaW9uIjoxLCJiZC10aWNrZXQtZ3VhcmQtY2xpZW50LWNzciI6Ii0tLS0tQkVHSU4gQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbk1JSUJEVENCdFFJQkFEQW5NUXN3Q1FZRFZRUUdFd0pEVGpFWU1CWUdBMVVFQXd3UFltUmZkR2xqYTJWMFgyZDFcclxuWVhKa01Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRVhzcHJ5TElFT3E4Z2tPc2l5MTdSS1dEcVxyXG5xTXZkWEt5Y1V5NStiL3JpSmJ6VkMwMlYrU1dNaWtZTlNOK29IU2g2WVVTTmdUUjJrZEhvRUxISmxGdU9scUFzXHJcbk1Db0dDU3FHU0liM0RRRUpEakVkTUJzd0dRWURWUjBSQkJJd0VJSU9kM2QzTG1SdmRYbHBiaTVqYjIwd0NnWUlcclxuS29aSXpqMEVBd0lEUndBd1JBSWdIeW9SblFNK0h0Z3ZOU2dFMVpHdGpmYWQxT3BuWHJRdVFSNXNSaDkwakRJQ1xyXG5JRG1tVkthRkN5djBLemtpZ0J0RExaTVJSNndURzRBWUVoNUlWUmlZUU9UVVxyXG4tLS0tLUVORCBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS1cclxuIn0=; msToken=ZPkeAqCnLbjDWCkWcWf36ZZIoZTl07X33ca0xcNGk3tZPoMvgx-lo28oNb5JhbCKsXLgLNIoojAbocBrjdAv54Hwf-Tk3_yAjLW7WJxxSa0=; ttcid=54b98e03a03e43e09211ee28db90fdd126; home_can_add_dy_2_desktop=%221%22; msToken=nqMSwn8xJAXLZk2AikdOeJ6P3JvoCsbYjHROoR55KXMDvWs8auYCMpZFGQSClHhitgB0l_vwg8m0-lE-aWQovfN7Ga1QupM3SpdNktiamFRiHMdJExREc9_uxS1ln8E=; tt_scid=DnO5GFg1oLONpPMFuFeL-OveKBn43mRynRVTvHsh1KUQm283ocN6JX6qPKDDrFHbfdf1; download_guide=%222%2F20230413%22; msToken=b9haS5RjLaFgqVDoGp5xSqc8B4kl-miQB5Nku0BSIvHVutKT81Nzk_pPb0wm7xYlAp_nz1gytQng5gYeIRNxcMgZJ_MB7lhejt_093miXlHtvqAaxL0FNg==',
-                'referer': 'https://www.douyin.com/user/{}?showTab=post'.format(out_uid),
-                'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
-                'sec-ch-ua-mobile': '?0',
-                'sec-ch-ua-platform': '"macOS"',
-                'sec-fetch-dest': 'empty',
-                'sec-fetch-mode': 'cors',
-                'sec-fetch-site': 'same-origin',
-                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
-            }
-            x_bogus = get_xb(url, headers['user-agent'])
-            url = url + '&X-Bogus={}'.format(x_bogus)
-            if not x_bogus:
-                return
-            res = requests.get(url=url, headers=headers, data={}, proxies=Common.tunnel_proxies(), timeout=10).json()
-            aweme_list = res.get('aweme_list', [])
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f"获取抖音作者:{out_uid},视频列表失败:{e}")
-            return
-        if not aweme_list:
-            Common.logger(log_type, crawler).warning(f"抖音作者没有获取到更多数据")
-            return
-        for info in aweme_list:
-            if info.get('is_ads'):
-                continue
-            publish_time = info.get('create_time')
-            if not publish_time:
-                continue
-            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time))
-            publish_day = int((int(time.time()) - publish_time) / (3600 * 24))
-
-            video_title = cls.video_title(log_type, env, crawler, info['desc'])
-            if not video_title:
-                video_title = random_title(log_type, crawler, env, text='title')
-
-            video_dict = {'video_title': video_title,
-                          'video_id': info['aweme_id'],
-                          'play_cnt': info['statistics']['play_count'],
-                          'comment_cnt': info['statistics']['comment_count'],
-                          'like_cnt': info['statistics']['digg_count'],
-                          'share_cnt': info['statistics']['share_count'],
-                          'video_width': info['video']['width'],
-                          'video_height': info['video']['height'],
-                          'duration': round(info['video']['duration'] / 1000),
-                          'publish_time': publish_day,
-                          'publish_time_stamp': publish_time * 1000,
-                          'publish_time_str': publish_time_str,
-                          'user_name': info['author']['nickname'],
-                          'user_id': info['author_user_id'],
-                          'user_sec_id': info['author']['sec_uid'],
-                          'avatar_url': info['author']['avatar_thumb']['url_list'][0],
-                          'cover_url': info['video']['origin_cover']['url_list'][0].replace('\u0026', '&'),
-                          'video_url': info['video']['play_addr']['url_list'][0],
-                          'session': f"douyin{int(time.time())}"
-                          }
-
-            for k, v in video_dict.items():
-                Common.logger(log_type, crawler).info(f"{k}:{v}")
-            cls.download_publish(log_type=log_type,
-                                 crawler=crawler,
-                                 video_dict=video_dict,
-                                 rule_dict=rule_dict,
-                                 strategy=strategy,
-                                 our_uid=our_uid,
-                                 oss_endpoint=oss_endpoint,
-                                 env=env,
-                                 machine=machine)
-
-    @classmethod
-    def repeat_video(cls, log_type, crawler, video_id, env, machine):
-        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
-        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
-        return len(repeat_video)
-
-    # 下载 / 上传
-    @classmethod
-    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
-        try:
-            filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
-            for filter_word in filter_words:
-                if filter_word in video_dict['video_title']:
-                    Common.logger(log_type, crawler).info('标题已中过滤词:{}\n', video_dict['video_title'])
-                    return
-            if cls.download_rule(video_dict, rule_dict) is False:
-                Common.logger(log_type, crawler).info('不满足抓取规则\n')
-            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
-                Common.logger(log_type, crawler).info('视频已下载\n')
-            else:
-                # 下载视频
-                Common.download_method(log_type=log_type, crawler=crawler, text='video',
-                                       title=video_dict['video_title'], url=video_dict['video_url'])
-                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
-                    # 删除视频文件夹
-                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
-                    return
-                # ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
-                # if ffmpeg_dict is None or ffmpeg_dict['size'] == 0:
-                #     Common.logger(log_type, crawler).warning(f"下载的视频无效,已删除\n")
-                #     # 删除视频文件夹
-                #     shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-                #     return
-                # 下载封面
-                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
-                                       title=video_dict['video_title'], url=video_dict['cover_url'])
-                # 保存视频信息至txt
-                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
-
-                # 上传视频
-                Common.logger(log_type, crawler).info("开始上传视频...")
-                our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                          crawler=crawler,
-                                                          strategy=strategy,
-                                                          our_uid=our_uid,
-                                                          env=env,
-                                                          oss_endpoint=oss_endpoint)
-                if env == 'dev':
-                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-                else:
-                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-                Common.logger(log_type, crawler).info("视频上传完成")
-
-                if our_video_id is None:
-                    # 删除视频文件夹
-                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-                    return
-
-                # 视频写入飞书
-                upload_time = int(time.time())
-                values = [[
-                    our_video_id,
-                    time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
-                    strategy,
-                    str(video_dict['video_id']),
-                    video_dict['video_title'],
-                    our_video_link,
-                    # video_dict['gid'],
-                    video_dict['play_cnt'],
-                    video_dict['comment_cnt'],
-                    video_dict['like_cnt'],
-                    video_dict['share_cnt'],
-                    video_dict['duration'],
-                    str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
-                    video_dict['publish_time_str'],
-                    video_dict['user_name'],
-                    video_dict['user_id'],
-                    video_dict['avatar_url'],
-                    video_dict['cover_url'],
-                    video_dict['video_url']
-                ]]
-                # time.sleep(1)
-                Feishu.insert_columns(log_type, 'douyin', "qV9VC0", "ROWS", 1, 2)
-                Feishu.update_values(log_type, 'douyin', "qV9VC0", "A2:Z2", values)
-                Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
-
-                # 视频信息保存数据库
-                insert_sql = f""" insert into crawler_video(video_id,
-                                user_id,
-                                out_user_id,
-                                platform,
-                                strategy,
-                                out_video_id,
-                                video_title,
-                                cover_url,
-                                video_url,
-                                duration,
-                                publish_time,
-                                play_cnt,
-                                comment_cnt,
-                                like_cnt,
-                                share_cnt,
-                                crawler_rule,
-                                width,
-                                height)
-                                values({our_video_id},
-                                {our_uid},
-                                "{video_dict['user_id']}",
-                                "{cls.platform}",
-                                "{strategy}",
-                                "{video_dict['video_id']}",
-                                "{video_dict['video_title']}",
-                                "{video_dict['cover_url']}",
-                                "{video_dict['video_url']}",
-                                {int(video_dict['duration'])},
-                                "{video_dict['publish_time_str']}",
-                                {int(video_dict['play_cnt'])},
-                                {int(video_dict['comment_cnt'])},
-                                {int(video_dict['like_cnt'])},
-                                {int(video_dict['share_cnt'])},
-                                '{json.dumps(rule_dict)}',
-                                {int(video_dict['video_width'])},
-                                {int(video_dict['video_height'])}) """
-                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
-                Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f'download_publish异常:{e}\n')
-
-    @classmethod
-    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
-        user_list = get_user_from_mysql(log_type, crawler, crawler, env)
-        rule_dict = cls.get_rule(log_type, crawler)
-
-        for user in user_list:
-            spider_link = user["link"]
-            out_uid = spider_link
-            user_name = user["nick_name"]
-            our_uid = user["uid"]
-            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
-            cls.get_videoList(log_type=log_type,
-                              crawler=crawler,
-                              strategy=strategy,
-                              our_uid=our_uid,
-                              out_uid=out_uid,
-                              oss_endpoint=oss_endpoint,
-                              env=env,
-                              machine=machine,
-                              rule_dict=rule_dict,
-                              )
-
-
-if __name__ == '__main__':
-    DyFollow.get_follow_videos('author', 'douyin', '定向抓取策略', 'outer', 'prod', 'aliyun')

+ 0 - 43
douyin/douyin_main/run_douyin_author_scheduling.py

@@ -1,43 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/5/26
-import argparse
-import os
-import sys
-sys.path.append(os.getcwd())
-from common.common import Common
-from common.public import task_fun
-from common.scheduling_db import MysqlHelper
-from douyin.douyin_author.douyin_author_scheduling import DouyinauthorScheduling
-
-
-def main(log_type, crawler, task, env):
-    task_dict = task_fun(task)['task_dict']
-    rule_dict = task_fun(task)['rule_dict']
-    task_id = task_dict['task_id']
-    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
-    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
-    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
-    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
-    # Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
-    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]}\n')
-    DouyinauthorScheduling.get_author_videos(log_type=log_type,
-                                            crawler=crawler,
-                                            rule_dict=rule_dict,
-                                            user_list=user_list,
-                                            env=env)
-    Common.del_logs(log_type, crawler)
-    Common.logger(log_type, crawler).info('抓取完一轮\n')
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', default='recommend')  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler', default='kuaishou')  ## 添加参数
-    parser.add_argument('--task')  ## 添加参数
-    parser.add_argument('--env', default='prod')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
-    main(log_type=args.log_type,
-         crawler=args.crawler,
-         task=args.task,
-         env=args.env)

+ 0 - 45
douyin/douyin_main/run_douyin_follow.py

@@ -1,45 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: lierqiang
-# @Time: 2023/4/07
-import argparse
-import os
-import sys
-
-sys.path.append(os.getcwd())
-from common.common import Common
-from douyin.douyin_follow.follow_dy import DyFollow
-
-
-def main(log_type, crawler, strategy, oss_endpoint, env, machine):
-    try:
-        Common.logger(log_type, crawler).info('开始抓取 抖音 作者\n')
-        DyFollow.get_follow_videos(
-            log_type=log_type,
-            crawler=crawler,
-            strategy=strategy,
-            oss_endpoint=oss_endpoint,
-            env=env,
-            machine=machine)
-        Common.del_logs(log_type, crawler)
-        Common.logger(log_type, crawler).info('抓取完一轮\n')
-    except Exception as e:
-        Common.logger(log_type, crawler).info(f"抖音作者异常,触发报警:{e}\n")
-        # Feishu.bot(log_type, crawler, f"快手定向榜异常,触发报警:{e}")
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', default='author', type=str)  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler', default='douyin')  ## 添加参数
-    parser.add_argument('--strategy', default='定向抓取策略')  ## 添加参数
-    parser.add_argument('--oss_endpoint', default='inner')  ## 添加参数
-    parser.add_argument('--env', default='prod')  ## 添加参数
-    parser.add_argument('--machine', default='aliyun')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
-    # print(args)
-    main(log_type=args.log_type,
-         crawler=args.crawler,
-         strategy=args.strategy,
-         oss_endpoint=args.oss_endpoint,
-         env=args.env,
-         machine=args.machine)

+ 0 - 46
douyin/douyin_main/run_douyin_recommend.py

@@ -1,46 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: lierqiang
-# @Time: 2023/4/07
-import argparse
-import os
-import sys
-
-# import time
-
-sys.path.append(os.getcwd())
-from common.common import Common
-# from common.feishu import Feishu
-from douyin.douyin_recommend.recommend_dy import DyRecommend
-
-
-def main(log_type, crawler, strategy, oss_endpoint, env, machine):
-    our_id = 55252589
-    Common.logger(log_type, crawler).info('开始抓取 抖音 推荐\n')
-    DyRecommend.get_videolist(
-        log_type=log_type,
-        crawler=crawler,
-        strategy=strategy,
-        our_id=our_id,
-        oss_endpoint=oss_endpoint,
-        env=env,
-        machine=machine)
-    Common.del_logs(log_type, crawler)
-    Common.logger(log_type, crawler).info('抓取完一轮\n')
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', default='recommend', type=str)  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler', default='douyin')  ## 添加参数
-    parser.add_argument('--strategy', default='推荐')  ## 添加参数
-    parser.add_argument('--oss_endpoint', default='inner')  ## 添加参数
-    parser.add_argument('--env', default='prod')  ## 添加参数
-    parser.add_argument('--machine', default='aliyun')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
-    # print(args)
-    main(log_type=args.log_type,
-         crawler=args.crawler,
-         strategy=args.strategy,
-         oss_endpoint=args.oss_endpoint,
-         env=args.env,
-         machine=args.machine)

+ 0 - 50
douyin/douyin_main/run_douyin_recommend_scheduling.py

@@ -1,50 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/5/26
-import argparse
-import os
-import random
-import sys
-sys.path.append(os.getcwd())
-from common.common import Common
-from common.public import task_fun
-from common.scheduling_db import MysqlHelper
-from douyin.douyin_recommend.douyin_recommend_scheduling import DouyinrecommendScheduling
-
-
-def main(log_type, crawler, task, env):
-    task_dict = task_fun(task)['task_dict']
-    rule_dict = task_fun(task)['rule_dict']
-    task_id = task_dict['task_id']
-    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
-    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
-    our_uid_list = []
-    for user in user_list:
-        our_uid_list.append(user["uid"])
-    our_uid = random.choice(our_uid_list)
-    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
-    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
-    # Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
-    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]}\n')
-    DouyinrecommendScheduling.get_videoList(log_type=log_type,
-                                            crawler=crawler,
-                                            rule_dict=rule_dict,
-                                            our_uid=our_uid,
-                                            env=env)
-    Common.del_logs(log_type, crawler)
-    Common.logger(log_type, crawler).info('抓取任务结束\n')
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', default='recommend')  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler', default='kuaishou')  ## 添加参数
-    parser.add_argument('--task')  ## 添加参数
-    parser.add_argument('--env', default='prod')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
-    main(log_type=args.log_type,
-         crawler=args.crawler,
-         task=args.task,
-         env=args.env)
-
-

+ 20 - 0
douyin/douyin_main/run_dy_author.py

@@ -23,6 +23,10 @@ def main(log_type, crawler, topic_name, group_id, env):
                                           f'WaitSeconds:{wait_seconds}\n'
                                           f'TopicName:{topic_name}\n'
                                           f'MQConsumer:{group_id}')
+    Common.logging(log_type, crawler, env, f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                           f'WaitSeconds:{wait_seconds}\n'
+                                           f'TopicName:{topic_name}\n'
+                                           f'MQConsumer:{group_id}')
     while True:
         try:
             # 长轮询消费消息。
@@ -38,6 +42,16 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                       f"NextConsumeTime:{msg.next_consume_time}\n"
                                                       f"ReceiptHandle:{msg.receipt_handle}\n"
                                                       f"Properties:{msg.properties}")
+                Common.logging(log_type, crawler, env, f"Receive\n"
+                                                       f"MessageId:{msg.message_id}\n"
+                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                       f"MessageTag:{msg.message_tag}\n"
+                                                       f"ConsumedTimes:{msg.consumed_times}\n"
+                                                       f"PublishTime:{msg.publish_time}\n"
+                                                       f"Body:{msg.message_body}\n"
+                                                       f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                       f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                       f"Properties:{msg.properties}")
                 # ack_mq_message
                 ack_message(log_type=log_type, crawler=crawler, recv_msgs=recv_msgs, consumer=consumer)
 
@@ -48,9 +62,12 @@ def main(log_type, crawler, topic_name, group_id, env):
                 select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
                 user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
                 Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+                Common.logging(log_type, crawler, env, f"调度任务:{task_dict}")
                 Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
+                Common.logging(log_type, crawler, env, f"抓取规则:{rule_dict}")
                 # Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
                 Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["taskName"]}\n')
+                Common.logging(log_type, crawler, env, f'开始抓取 {task_dict["taskName"]}\n')
                 DouyinauthorScheduling.get_author_videos(log_type=log_type,
                                                          crawler=crawler,
                                                          rule_dict=rule_dict,
@@ -58,14 +75,17 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                          env=env)
                 Common.del_logs(log_type, crawler)
                 Common.logger(log_type, crawler).info('抓取一轮结束\n')
+                Common.logging(log_type, crawler, env, '抓取一轮结束\n')
 
         except MQExceptionBase as err:
             # Topic中没有消息可消费。
             if err.type == "MessageNotExist":
                 Common.logger(log_type, crawler).info(f"No new message! RequestId:{err.req_id}\n")
+                Common.logging(log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n")
                 continue
 
             Common.logger(log_type, crawler).info(f"Consume Message Fail! Exception:{err}\n")
+            Common.logging(log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n")
             time.sleep(2)
             continue
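run_dy_author.py and run_dy_recommend.py replace the deleted argparse-based runners with a long-poll loop over an Alibaba Cloud MQ topic: consume a task message, ack it, parse the task into task_dict/rule_dict, load the user list, then call the corresponding *Scheduling class. The hunks above show only fragments of that loop, so the following is a condensed, self-contained sketch of the same consume-and-ack skeleton based on the standard mq_http_sdk sample; the endpoint, credentials, and the json.loads() task parsing are assumptions, not taken from this diff (the repo wraps the ack in its own ack_message helper).

    # Condensed sketch of the MQ-driven entry point; placeholders and the body
    # parsing are assumptions, the real runner uses helpers from common/ that
    # are not shown in this diff.
    import json
    import time

    from mq_http_sdk.mq_client import MQClient
    from mq_http_sdk.mq_exception import MQExceptionBase


    def consume_tasks(topic_name, group_id):
        mq_client = MQClient("${HTTP_ENDPOINT}", "${ACCESS_KEY}", "${SECRET_KEY}")
        consumer = mq_client.get_consumer("${INSTANCE_ID}", topic_name, group_id)
        batch, wait_seconds = 1, 30
        while True:
            try:
                # Long poll: blocks up to wait_seconds when the topic is empty.
                recv_msgs = consumer.consume_message(batch, wait_seconds)
                for msg in recv_msgs:
                    print(f"Receive MessageId:{msg.message_id} Body:{msg.message_body}")
                    # Ack before running the (potentially long) crawl, as the diff does.
                    consumer.ack_message([msg.receipt_handle])
                    task_dict = json.loads(msg.message_body)  # assumed body format
                    # ... fetch crawler_user_v3 rows for this task, then call
                    # DouyinauthorScheduling.get_author_videos(...) or
                    # DouyinrecommendScheduling.get_videoList(...)
            except MQExceptionBase as err:
                if err.type == "MessageNotExist":
                    continue  # topic empty, poll again
                time.sleep(2)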
 

+ 25 - 0
douyin/douyin_main/run_dy_author_dev.py

@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/6/14
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.common import Common
+from douyin.douyin_author.douyin_author_scheduling import DouyinauthorScheduling
+
+
+def xigua_search_main(log_type, crawler, env):
+    Common.logger(log_type, crawler).info("开始抓取抖音账号\n")
+    Common.logging(log_type, crawler, env, "开始抓取抖音账号\n")
+    DouyinauthorScheduling.get_author_videos(log_type=log_type,
+                                             crawler=crawler,
+                                             rule_dict={"like_cnt":{"min":30000,"max":0},"duration":{"min":50,"max":1800},"period":{"min":15,"max":15}},
+                                             user_list=[{"uid": 6267140, "source": "douyin", "link": "MS4wLjABAAAAqCDcUgcHVoOMoi_0u-1b9M303FJR9qXKdtitf80nt4g", "nick_name": "一乐店长", "avatar_url": "http://rescdn.yishihui.com/user/default/avatar/live/1616555584754_u=3828798179,534417403&fm=26&gp=0.jpg", "mode": "author"}],
+                                             env=env)
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info("抓取一轮结束\n")
+    Common.logging(log_type, crawler, env, "抓取一轮结束\n")
+
+
+if __name__ == "__main__":
+    xigua_search_main("author", "douyin", "dev")

+ 20 - 0
douyin/douyin_main/run_dy_recommend.py

@@ -24,6 +24,10 @@ def main(log_type, crawler, topic_name, group_id, env):
                                           f'WaitSeconds:{wait_seconds}\n'
                                           f'TopicName:{topic_name}\n'
                                           f'MQConsumer:{group_id}')
+    Common.logging(log_type, crawler, env, f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                           f'WaitSeconds:{wait_seconds}\n'
+                                           f'TopicName:{topic_name}\n'
+                                           f'MQConsumer:{group_id}')
     while True:
         try:
             # 长轮询消费消息。
@@ -39,6 +43,16 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                       f"NextConsumeTime:{msg.next_consume_time}\n"
                                                       f"ReceiptHandle:{msg.receipt_handle}\n"
                                                       f"Properties:{msg.properties}")
+                Common.logging(log_type, crawler, env, f"Receive\n"
+                                                       f"MessageId:{msg.message_id}\n"
+                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                       f"MessageTag:{msg.message_tag}\n"
+                                                       f"ConsumedTimes:{msg.consumed_times}\n"
+                                                       f"PublishTime:{msg.publish_time}\n"
+                                                       f"Body:{msg.message_body}\n"
+                                                       f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                       f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                       f"Properties:{msg.properties}")
                 # ack_mq_message
                 ack_message(log_type=log_type, crawler=crawler, recv_msgs=recv_msgs, consumer=consumer)
 
@@ -53,9 +67,12 @@ def main(log_type, crawler, topic_name, group_id, env):
                     our_uid_list.append(user["uid"])
                 our_uid = random.choice(our_uid_list)
                 Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+                Common.logging(log_type, crawler, env, f"调度任务:{task_dict}")
                 Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
+                Common.logging(log_type, crawler, env, f"抓取规则:{rule_dict}")
                 # Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
                 Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["taskName"]}\n')
+                Common.logging(log_type, crawler, env, f'开始抓取 {task_dict["taskName"]}\n')
                 DouyinrecommendScheduling.get_videoList(log_type=log_type,
                                                         crawler=crawler,
                                                         rule_dict=rule_dict,
@@ -63,14 +80,17 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                         env=env)
                 Common.del_logs(log_type, crawler)
                 Common.logger(log_type, crawler).info('抓取一轮结束\n')
+                Common.logging(log_type, crawler, env, '抓取一轮结束\n')
 
         except MQExceptionBase as err:
             # Topic中没有消息可消费。
             if err.type == "MessageNotExist":
                 Common.logger(log_type, crawler).info(f"No new message! RequestId:{err.req_id}\n")
+                Common.logging(log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n")
                 continue
 
             Common.logger(log_type, crawler).info(f"Consume Message Fail! Exception:{err}\n")
+            Common.logging(log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n")
             time.sleep(2)
             continue
 

+ 24 - 0
douyin/douyin_main/run_dy_recommend_dev.py

@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/6/14
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.common import Common
+from douyin.douyin_recommend.douyin_recommend_scheduling import DouyinrecommendScheduling
+
+
+def main(log_type, crawler, env):
+    Common.logger(log_type, crawler).info(f'开始抓取 西瓜推荐\n')
+    Common.logging(log_type, crawler, env, "开始抓取 西瓜推荐\n")
+    DouyinrecommendScheduling.get_videoList(log_type=log_type,
+                                            crawler=crawler,
+                                            rule_dict={"like_cnt":{"min":50000,"max":0},"duration":{"min":60,"max":0},"period":{"min":60,"max":60},"share_cnt":{"min":2000,"max":0}},
+                                            our_uid=6267140,
+                                            env=env)
+    Common.logger(log_type, crawler).info("抓取一轮结束\n")
+    Common.logging(log_type, crawler, env, "抓取一轮结束\n")
+
+
+if __name__ == "__main__":
+    main("recommend", "douyin", "dev")

+ 15 - 0
douyin/douyin_recommend/douyin_recommend_scheduling.py

@@ -57,6 +57,7 @@ class DouyinrecommendScheduling:
     def get_videoList(cls, log_type, crawler, rule_dict, our_uid, env):
         for page in range(1, 101):
             Common.logger(log_type, crawler).info(f"正在抓取第{page}页\n")
+            Common.logging(log_type, crawler, env, f"正在抓取第{page}页\n")
             try:
                 aweme_pc_rec_raw_data = '%7B%22videoPrefer%22:%7B%22fsn%22:%5B%5D,%22like%22:%5B%5D,%22halfMin%22:%5B%227188684310696742200%22,%224380080926896941%22%5D,%22min%22:%5B%5D%7D,%22seo_info%22:%22https:%2F%2Fwww.douyin.com%2F%22,%22is_client%22:false,%22ff_danmaku_status%22:1,%22danmaku_switch_status%22:1%7D'
                 f_url = 'https://www.douyin.com/aweme/v1/web/tab/feed/?device_platform=webapp&aid=6383&channel=channel_pc_web&tag_id=&share_aweme_id=&count=10&refresh_index={page}&video_type_select=1&aweme_pc_rec_raw_data={aweme_pc_rec_raw_data}&globalwid=&pull_type=2&min_window=0&ug_source=&creative_id=&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1440&screen_height=900&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=109.0.0.0&browser_online=true&engine_name=Blink&engine_version=109.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=50&webid=7219223873342260736&msToken=Sh6bVLWZUEZ3ruIHq1L3iUXnr1GT5yklyo-XZRO7lNgsFvpYq0C7tcu5Z4Jv9DrMESZ9kGVhwKT4ftFDkBL11ZGPUxth2ToA4M4q-qs2MK9ctR7GhwFkGw=='.format(
@@ -75,9 +76,11 @@ class DouyinrecommendScheduling:
                     aweme_list = res.get('aweme_list', [])
                 except Exception as e:
                     Common.logger(log_type, crawler).error(f"获取抖音推荐失败:{e}")
+                    Common.logging(log_type, crawler, env, f"获取抖音推荐失败:{e}")
                     continue
                 if not aweme_list:
                     Common.logger(log_type, crawler).warning(f"抖音推荐没有获取到更多数据,页数:{page}")
+                    Common.logging(log_type, crawler, env, f"抖音推荐没有获取到更多数据,页数:{page}")
                     continue
                 for info in aweme_list:
                     try:
@@ -116,9 +119,11 @@ class DouyinrecommendScheduling:
 
                         for k, v in video_dict.items():
                             Common.logger(log_type, crawler).info(f"{k}:{v}")
+                        Common.logging(log_type, crawler, env, f"{video_dict}")
                         if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict,
                                          rule_dict=rule_dict) is False:
                             Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                            Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                         elif any(str(word) if str(word) in video_dict["video_title"] else False
                                  for word in get_config_from_mysql(log_type=log_type,
                                                                    source=crawler,
@@ -126,8 +131,10 @@ class DouyinrecommendScheduling:
                                                                    text="filter",
                                                                    action="")) is True:
                             Common.logger(log_type, crawler).info('已中过滤词\n')
+                            Common.logging(log_type, crawler, env, '已中过滤词\n')
                         elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                             Common.logger(log_type, crawler).info('视频已下载\n')
+                            Common.logging(log_type, crawler, env, '视频已下载\n')
                         else:
                             cls.download_publish(log_type=log_type,
                                                  crawler=crawler,
@@ -137,8 +144,10 @@ class DouyinrecommendScheduling:
                                                  env=env)
                     except Exception as e:
                         Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+                        Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
             except Exception as e:
                 Common.logger(log_type, crawler).error(f"抓取第{page}页时异常:{e}\n")
+                Common.logging(log_type, crawler, env, f"抓取第{page}页时异常:{e}\n")
 
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
@@ -157,11 +166,13 @@ class DouyinrecommendScheduling:
                 # 删除视频文件夹
                 shutil.rmtree(f"./{crawler}/videos/{md_title}")
                 Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                Common.logging(log_type, crawler, env, "视频size=0,删除成功\n")
                 return
         except FileNotFoundError:
             # 删除视频文件夹
             shutil.rmtree(f"./{crawler}/videos/{md_title}")
             Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
+            Common.logging(log_type, crawler, env, "视频文件不存在,删除文件夹成功\n")
             return
         # 下载封面
         Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
@@ -170,6 +181,7 @@ class DouyinrecommendScheduling:
 
         # 上传视频
         Common.logger(log_type, crawler).info("开始上传视频...")
+        Common.logging(log_type, crawler, env, "开始上传视频...")
         if env == "dev":
             oss_endpoint = "out"
             our_video_id = Publish.upload_and_publish(log_type=log_type,
@@ -236,8 +248,10 @@ class DouyinrecommendScheduling:
                         {int(video_dict['video_width'])},
                         {int(video_dict['video_height'])}) """
         Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
         MysqlHelper.update_values(log_type, crawler, insert_sql, env)
         Common.logger(log_type, crawler).info('视频信息写入数据库成功')
+        Common.logging(log_type, crawler, env, '视频信息写入数据库成功')
 
         # 视频写入飞书
         Feishu.insert_columns(log_type, crawler, "82c8d9", "ROWS", 1, 2)
@@ -265,6 +279,7 @@ class DouyinrecommendScheduling:
         time.sleep(0.5)
         Feishu.update_values(log_type, crawler, "82c8d9", "A2:Z2", values)
         Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+        Common.logging(log_type, crawler, env, f"视频已保存至云文档\n")
 
 
 if __name__ == '__main__':

+ 1 - 0
gongzhonghao/gongzhonghao_author/gongzhonghao3_author.py

@@ -456,6 +456,7 @@ class GongzhonghaoAuthor3:
                 time.sleep(2)
                 continue
             len_sheet = len(user_sheet)
+            Common.logger(log_type, crawler).info(f"len_sheet:{len_sheet}")
             if len_sheet <= 201:
                 Common.logger(log_type, crawler).info("抓取用户数<=200,无需启动第三套抓取脚本\n")
                 return

+ 0 - 46
gongzhonghao/gongzhonghao_main/run_gongzhonghao1_author_scheduling.py

@@ -1,46 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/4/23
-import argparse
-import os
-import sys
-sys.path.append(os.getcwd())
-from common.public import task_fun
-from common.common import Common
-from gongzhonghao.gongzhonghao_author.gongzhonghao1_author import GongzhonghaoAuthor1
-
-
-def main(log_type, crawler, task, env):
-    # while True:
-    #     cmd = 'ps -ef | grep "run_gongzhonghao1_author_scheduling.py" | grep -v "grep"'
-    #     result = os.popen(cmd).read()
-    #     Common.logger(log_type, crawler).info(f"len_result:{len(result)}")
-    #     if len(result) > 573:
-    #         Common.logger(log_type, crawler).info("公众号_1抓取未完成,无需启动新进程")
-    #         time.sleep(1)
-    #     else:
-    #         break
-    task_dict = task_fun(task)['task_dict']
-    rule_dict = task_fun(task)['rule_dict']
-    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
-    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}\n")
-    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]} 定向榜\n')
-    GongzhonghaoAuthor1.get_all_videos(log_type=log_type,
-                                        crawler=crawler,
-                                        rule_dict=rule_dict,
-                                        env=env)
-    Common.del_logs(log_type, crawler)
-    Common.logger(log_type, crawler).info('抓取完一轮\n')
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler')  ## 添加参数
-    parser.add_argument('--task')  ## 添加参数
-    parser.add_argument('--env')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
-    main(log_type=args.log_type,
-         crawler=args.crawler,
-         task=args.task,
-         env=args.env)

+ 0 - 37
gongzhonghao/gongzhonghao_main/run_gongzhonghao2_author_scheduling.py

@@ -1,37 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/4/23
-import argparse
-import os
-import sys
-sys.path.append(os.getcwd())
-from common.public import task_fun
-from common.common import Common
-from gongzhonghao.gongzhonghao_author.gongzhonghao2_author import GongzhonghaoAuthor2
-
-
-def main(log_type, crawler, task, env):
-    task_dict = task_fun(task)['task_dict']
-    rule_dict = task_fun(task)['rule_dict']
-    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
-    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}\n")
-    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]} 定向榜\n')
-    GongzhonghaoAuthor2.get_all_videos(log_type=log_type,
-                                        crawler=crawler,
-                                        rule_dict=rule_dict,
-                                        env=env)
-    Common.del_logs(log_type, crawler)
-    Common.logger(log_type, crawler).info('抓取完一轮\n')
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler')  ## 添加参数
-    parser.add_argument('--task')  ## 添加参数
-    parser.add_argument('--env')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
-    main(log_type=args.log_type,
-         crawler=args.crawler,
-         task=args.task,
-         env=args.env)

+ 0 - 46
gongzhonghao/gongzhonghao_main/run_gongzhonghao3_author_scheduling.py

@@ -1,46 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/4/23
-import argparse
-import os
-import sys
-import time
-sys.path.append(os.getcwd())
-from common.public import task_fun
-from common.common import Common
-from gongzhonghao.gongzhonghao_author.gongzhonghao3_author import GongzhonghaoAuthor3
-
-
-def main(log_type, crawler, task, env):
-    # while True:
-    #     cmd = 'ps -ef | grep "run_gongzhonghao3_author_scheduling.py" | grep -v "grep"'
-    #     result = os.popen(cmd).read()
-    #     if len(result) > 573:
-    #         Common.logger(log_type, crawler).info("公众号_3抓取未完成,无需启动新进程")
-    #         time.sleep(1)
-    #     else:
-    #         break
-    task_dict = task_fun(task)['task_dict']
-    rule_dict = task_fun(task)['rule_dict']
-    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
-    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}\n")
-    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]} 定向榜\n')
-    GongzhonghaoAuthor3.get_all_videos(log_type=log_type,
-                                       crawler=crawler,
-                                       rule_dict=rule_dict,
-                                       env=env)
-    Common.del_logs(log_type, crawler)
-    Common.logger(log_type, crawler).info('抓取完一轮\n')
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler')  ## 添加参数
-    parser.add_argument('--task')  ## 添加参数
-    parser.add_argument('--env')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
-    main(log_type=args.log_type,
-         crawler=args.crawler,
-         task=args.task,
-         env=args.env)

+ 0 - 46
gongzhonghao/gongzhonghao_main/run_gongzhonghao4_author_scheduling.py

@@ -1,46 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/4/23
-import argparse
-import os
-import sys
-import time
-sys.path.append(os.getcwd())
-from common.public import task_fun
-from common.common import Common
-from gongzhonghao.gongzhonghao_author.gongzhonghao4_author import GongzhonghaoAuthor4
-
-
-def main(log_type, crawler, task, env):
-    # while True:
-    #     cmd = 'ps -ef | grep "run_gongzhonghao2_author_scheduling.py" | grep -v "grep"'
-    #     result = os.popen(cmd).read()
-    #     if len(result) > 573:
-    #         Common.logger(log_type, crawler).info("公众号_4抓取未完成,无需启动新进程")
-    #         time.sleep(1)
-    #     else:
-    #         break
-    task_dict = task_fun(task)['task_dict']
-    rule_dict = task_fun(task)['rule_dict']
-    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
-    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}\n")
-    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]} 定向榜\n')
-    GongzhonghaoAuthor4.get_all_videos(log_type=log_type,
-                                       crawler=crawler,
-                                       rule_dict=rule_dict,
-                                       env=env)
-    Common.del_logs(log_type, crawler)
-    Common.logger(log_type, crawler).info('抓取完一轮\n')
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler')  ## 添加参数
-    parser.add_argument('--task')  ## 添加参数
-    parser.add_argument('--env')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
-    main(log_type=args.log_type,
-         crawler=args.crawler,
-         task=args.task,
-         env=args.env)

+ 0 - 46
gongzhonghao/gongzhonghao_main/run_gongzhonghao5_author_scheduling.py

@@ -1,46 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/4/23
-import argparse
-import os
-import sys
-import time
-sys.path.append(os.getcwd())
-from common.public import task_fun
-from common.common import Common
-from gongzhonghao.gongzhonghao_author.gongzhonghao5_author import GongzhonghaoAuthor5
-
-
-def main(log_type, crawler, task, env):
-    # while True:
-    #     cmd = 'ps -ef | grep "run_gongzhonghao5_author_scheduling.py" | grep -v "grep"'
-    #     result = os.popen(cmd).read()
-    #     if len(result) > 573:
-    #         Common.logger(log_type, crawler).info("公众号_5抓取未完成,无需启动新进程")
-    #         time.sleep(1)
-    #     else:
-    #         break
-    task_dict = task_fun(task)['task_dict']
-    rule_dict = task_fun(task)['rule_dict']
-    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
-    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}\n")
-    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]} 定向榜\n')
-    GongzhonghaoAuthor5.get_all_videos(log_type=log_type,
-                                       crawler=crawler,
-                                       rule_dict=rule_dict,
-                                       env=env)
-    Common.del_logs(log_type, crawler)
-    Common.logger(log_type, crawler).info('抓取完一轮\n')
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler')  ## 添加参数
-    parser.add_argument('--task')  ## 添加参数
-    parser.add_argument('--env')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
-    main(log_type=args.log_type,
-         crawler=args.crawler,
-         task=args.task,
-         env=args.env)

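Note: the five run_gongzhonghaoN_author_scheduling.py files removed in this commit differ only in which GongzhonghaoAuthorN class they import and in the log label. Below is a hedged sketch of how one runner could select the class by index instead; the module path pattern follows the imports visible above, but the dynamic lookup itself is an assumption, not code from the repo.

```python
# Hedged sketch: resolve GongzhonghaoAuthor1..5 by index instead of keeping five
# near-identical runner files. The module/class naming pattern is taken from the
# imports in the deleted files; the importlib dispatch is an assumption.
import importlib


def load_author_class(index: int):
    module_name = f"gongzhonghao.gongzhonghao_author.gongzhonghao{index}_author"
    class_name = f"GongzhonghaoAuthor{index}"
    module = importlib.import_module(module_name)
    return getattr(module, class_name)


def run_author(index: int, log_type: str, crawler: str, rule_dict: dict, env: str) -> None:
    author_cls = load_author_class(index)
    author_cls.get_all_videos(log_type=log_type, crawler=crawler,
                              rule_dict=rule_dict, env=env)
```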
+ 0 - 43
gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow.py

@@ -1,43 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/3/28
-import argparse
-import os
-import sys
-import time
-
-sys.path.append(os.getcwd())
-from common.common import Common
-from gongzhonghao.gongzhonghao_follow.gongzhonghao_follow import GongzhonghaoFollow
-
-
-class Main:
-    @classmethod
-    def main(cls, log_type, crawler, env):
-        try:
-            if env == "dev":
-                oss_endpoint = "out"
-            else:
-                oss_endpoint = "inner"
-            Common.logger(log_type, crawler).info('开始抓取公众号视频\n')
-            GongzhonghaoFollow.get_all_videos(log_type=log_type,
-                                              crawler=crawler,
-                                              oss_endpoint=oss_endpoint,
-                                              env=env)
-            Common.del_logs(log_type, crawler)
-            GongzhonghaoFollow.begin = 0
-            Common.logger(log_type, crawler).info('公众号抓取一轮完毕,休眠 8 小时\n')
-            time.sleep(3600*8)
-        except Exception as e:
-            Common.logger(log_type, crawler).info(f"公众号抓取异常:{e}\n")
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler')  ## 添加参数
-    parser.add_argument('--env')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
-    Main.main(log_type=args.log_type,
-              crawler=args.crawler,
-              env=args.env)

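Note: run_gongzhonghao_follow.py maps the environment to an OSS endpoint ("out" for dev, "inner" otherwise) and sleeps eight hours after a round, but the sleep sits inside a single call with no loop around it, so it only delays process exit unless an outer scheduler restarts the script. Below is a hedged sketch of the same round logic inside an explicit loop; the crawler call is left commented because `GongzhonghaoFollow` is not defined here.

```python
# Hedged sketch of the follow runner's round loop: env picks the OSS endpoint as
# in the deleted file, and the 8-hour sleep sits inside an explicit while-loop so
# it separates rounds rather than just delaying exit.
import time


def run_follow_rounds(log_type: str, crawler: str, env: str) -> None:
    oss_endpoint = "out" if env == "dev" else "inner"
    while True:
        try:
            print(f"开始抓取公众号视频 (oss_endpoint={oss_endpoint})")
            # GongzhonghaoFollow.get_all_videos(log_type=log_type, crawler=crawler,
            #                                   oss_endpoint=oss_endpoint, env=env)
            # GongzhonghaoFollow.begin = 0
            print("公众号抓取一轮完毕,休眠 8 小时")
        except Exception as e:  # broad catch mirrors the deleted runner
            print(f"公众号抓取异常:{e}")
        time.sleep(3600 * 8)
```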
+ 0 - 43
gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow_2.py

@@ -1,43 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/3/28
-import argparse
-import os
-import sys
-import time
-
-sys.path.append(os.getcwd())
-from common.common import Common
-from gongzhonghao.gongzhonghao_follow.gongzhonghao_follow_2 import GongzhonghaoFollow2
-
-
-class Main:
-    @classmethod
-    def main(cls, log_type, crawler, env):
-        # try:
-        if env == "dev":
-            oss_endpoint = "out"
-        else:
-            oss_endpoint = "inner"
-        Common.logger(log_type, crawler).info('开始抓取公众号视频\n')
-        GongzhonghaoFollow2.get_all_videos(log_type=log_type,
-                                           crawler=crawler,
-                                           oss_endpoint=oss_endpoint,
-                                           env=env)
-        Common.del_logs(log_type, crawler)
-        GongzhonghaoFollow2.begin = 0
-        Common.logger(log_type, crawler).info('公众号抓取一轮完毕,休眠 8 小时\n')
-        time.sleep(3600*8)
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).info(f"公众号抓取异常:{e}\n")
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler')  ## 添加参数
-    parser.add_argument('--env')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
-    Main.main(log_type=args.log_type,
-              crawler=args.crawler,
-              env=args.env)

+ 0 - 43
gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow_3.py

@@ -1,43 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/3/28
-import argparse
-import os
-import sys
-import time
-
-sys.path.append(os.getcwd())
-from common.common import Common
-from gongzhonghao.gongzhonghao_follow.gongzhonghao_follow_3 import GongzhonghaoFollow3
-
-
-class Main:
-    @classmethod
-    def main(cls, log_type, crawler, env):
-        # try:
-        if env == "dev":
-            oss_endpoint = "out"
-        else:
-            oss_endpoint = "inner"
-        Common.logger(log_type, crawler).info('开始抓取公众号视频\n')
-        GongzhonghaoFollow3.get_all_videos(log_type=log_type,
-                                           crawler=crawler,
-                                           oss_endpoint=oss_endpoint,
-                                           env=env)
-        Common.del_logs(log_type, crawler)
-        GongzhonghaoFollow3.begin = 0
-        Common.logger(log_type, crawler).info('公众号抓取一轮完毕,休眠 8 小时\n')
-        time.sleep(3600*8)
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).info(f"公众号抓取异常:{e}\n")
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler')  ## 添加参数
-    parser.add_argument('--env')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
-    Main.main(log_type=args.log_type,
-              crawler=args.crawler,
-              env=args.env)

+ 21 - 2
kuaishou/kuaishou_author/kuaishou_author_scheduling.py

@@ -82,7 +82,6 @@ class KuaishouauthorScheduling:
     def get_videoList(cls, log_type, crawler, user_dict, rule_dict, env):
         pcursor = ""
         while True:
-            # Common.logger(log_type, crawler).info(f'cookie:{cls.get_cookie(log_type, crawler, env)["cookie"]}')
             url = "https://www.kuaishou.com/graphql"
             payload = json.dumps({
                 "operationName": "visionProfilePhotoList",
@@ -116,18 +115,23 @@ class KuaishouauthorScheduling:
             # Common.logger(log_type, crawler).info(f"response:{response.text}\n")
             if response.status_code != 200:
                 Common.logger(log_type, crawler).warning(f"response:{response.text}\n")
+                Common.logging(log_type, crawler, env, f"response:{response.text}\n")
                 return
             elif "data" not in response.json():
                 Common.logger(log_type, crawler).warning(f"response:{response.json()}\n")
+                Common.logging(log_type, crawler, env, f"response:{response.json()}\n")
                 return
             elif "visionProfilePhotoList" not in response.json()["data"]:
                 Common.logger(log_type, crawler).warning(f"response:{response.json()}\n")
+                Common.logging(log_type, crawler, env, f"response:{response.json()}\n")
                 return
             elif "feeds" not in response.json()["data"]["visionProfilePhotoList"]:
                 Common.logger(log_type, crawler).warning(f"response:{response.json()}\n")
+                Common.logging(log_type, crawler, env, f"response:{response.json()}\n")
                 return
             elif len(response.json()["data"]["visionProfilePhotoList"]["feeds"]) == 0:
                 Common.logger(log_type, crawler).warning(f"没有更多视频啦 ~\n")
+                Common.logging(log_type, crawler, env, f"没有更多视频啦 ~\n")
                 return
             pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
             feeds = response.json()['data']['visionProfilePhotoList']['feeds']
@@ -136,6 +140,7 @@ class KuaishouauthorScheduling:
                     if cls.download_cnt >= cls.videos_cnt(rule_dict):
                     # if cls.download_cnt >= 2:
                         Common.logger(log_type, crawler).info(f"已下载视频数:{cls.download_cnt}\n")
+                        Common.logging(log_type, crawler, env, f"已下载视频数:{cls.download_cnt}\n")
                         return
                     video_title = feeds[i].get("photo", {}).get("caption", random_title(log_type, crawler, env, text='title'))
                     video_title = cls.video_title(log_type, crawler, env, video_title)
@@ -169,14 +174,18 @@ class KuaishouauthorScheduling:
                                   'session': f"kuaishou-{int(time.time())}"}
                     for k, v in video_dict.items():
                         Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    Common.logging(log_type, crawler, env, f"{video_dict}")
 
                     if int((int(time.time()) - int(publish_time_stamp)) / (3600*24)) > int(rule_dict.get("period", {}).get("max", 1000)):
                         Common.logger(log_type, crawler).info(f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n')
+                        Common.logging(log_type, crawler, env, f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n')
                         return
                     if video_dict["video_id"] == '' or video_dict["cover_url"] == '' or video_dict["video_url"] == '':
                         Common.logger(log_type, crawler).info('无效视频\n')
+                        Common.logging(log_type, crawler, env, '无效视频\n')
                     elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                         Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                        Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                     elif any(str(word) if str(word) in video_dict["video_title"] else False
                              for word in get_config_from_mysql(log_type=log_type,
                                                                source=crawler,
@@ -184,8 +193,10 @@ class KuaishouauthorScheduling:
                                                                text="filter",
                                                                action="")) is True:
                         Common.logger(log_type, crawler).info('已中过滤词\n')
+                        Common.logging(log_type, crawler, env, '已中过滤词\n')
                     elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                         Common.logger(log_type, crawler).info('视频已下载\n')
+                        Common.logging(log_type, crawler, env, '视频已下载\n')
                     else:
                         cls.download_publish(log_type=log_type,
                                              crawler=crawler,
@@ -195,6 +206,7 @@ class KuaishouauthorScheduling:
                                              env=env)
                 except Exception as e:
                     Common.logger(log_type, crawler).warning(f"抓取单条视频异常:{e}\n")
+                    Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
 
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
@@ -212,11 +224,13 @@ class KuaishouauthorScheduling:
                 # 删除视频文件夹
                 shutil.rmtree(f"./{crawler}/videos/{md_title}")
                 Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                Common.logging(log_type, crawler, env, "视频size=0,删除成功\n")
                 return
         except FileNotFoundError:
             # 删除视频文件夹
             shutil.rmtree(f"./{crawler}/videos/{md_title}")
             Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
+            Common.logging(log_type, crawler, env, "视频文件不存在,删除文件夹成功\n")
             return
         # 下载封面
         Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
@@ -225,6 +239,7 @@ class KuaishouauthorScheduling:
 
         # 上传视频
         Common.logger(log_type, crawler).info("开始上传视频...")
+        Common.logging(log_type, crawler, env, "开始上传视频...")
         if env == "dev":
             oss_endpoint = "out"
             our_video_id = Publish.upload_and_publish(log_type=log_type,
@@ -285,8 +300,10 @@ class KuaishouauthorScheduling:
                                                 {int(video_dict['video_width'])},
                                                 {int(video_dict['video_height'])}) """
         Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
         MysqlHelper.update_values(log_type, crawler, insert_sql, env, action="")
         Common.logger(log_type, crawler).info('视频信息写入数据库成功')
+        Common.logging(log_type, crawler, env, '视频信息写入数据库成功')
 
         # 视频写入飞书
         Feishu.insert_columns(log_type, crawler, "fYdA8F", "ROWS", 1, 2)
@@ -312,6 +329,7 @@ class KuaishouauthorScheduling:
         time.sleep(1)
         Feishu.update_values(log_type, crawler, "fYdA8F", "E2:Z2", values)
         Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+        Common.logging(log_type, crawler, env, f"视频已保存至云文档\n")
         cls.download_cnt += 1
 
     @classmethod
@@ -319,6 +337,7 @@ class KuaishouauthorScheduling:
         for user_dict in user_list:
             try:
                 Common.logger(log_type, crawler).info(f"开始抓取 {user_dict['nick_name']} 主页视频")
+                Common.logging(log_type, crawler, env, f"开始抓取 {user_dict['nick_name']} 主页视频")
                 cls.download_cnt = 0
                 cls.get_videoList(log_type=log_type,
                                   crawler=crawler,
@@ -327,9 +346,9 @@ class KuaishouauthorScheduling:
                                   env=env)
             except Exception as e:
                 Common.logger(log_type, crawler).warning(f"抓取用户{user_dict['nick_name']}主页视频时异常:{e}\n")
+                Common.logging(log_type, crawler, env, f"抓取用户{user_dict['nick_name']}主页视频时异常:{e}\n")
 
 
 if __name__ == "__main__":
     print(KuaishouauthorScheduling.get_cookie("author", "kuaishou", "prod")["cookie"])
-    # print(int((int(time.time())-1681340400)/(3600*24)))
     pass

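Note: the bulk of this commit pairs every existing `Common.logger(log_type, crawler).info/warning(...)` call with a `Common.logging(log_type, crawler, env, ...)` call carrying the same message, which doubles the code at each log site. Below is a hedged sketch of a small helper that fans one message out to both sinks; the `Common` class in the sketch is a stand-in with only the call shapes visible in the diff.

```python
# Hedged sketch of a fan-out helper for the dual logging pattern added in this
# commit. The Common class is a stand-in that only mirrors the call shapes seen
# in the diff: Common.logger(log_type, crawler) and
# Common.logging(log_type, crawler, env, message).
import logging


class Common:  # stand-in for common.common.Common
    @staticmethod
    def logger(log_type: str, crawler: str) -> logging.Logger:
        return logging.getLogger(f"{crawler}.{log_type}")

    @staticmethod
    def logging(log_type: str, crawler: str, env: str, message: str) -> None:
        logging.getLogger(f"{crawler}.{log_type}.{env}").info(message)


def log_both(log_type: str, crawler: str, env: str, message: str, level: str = "info") -> None:
    """Write `message` to the local file logger and to Common.logging in one call."""
    getattr(Common.logger(log_type, crawler), level)(message)
    Common.logging(log_type, crawler, env, message)


# Usage (hypothetical): log_both("author", "kuaishou", "prod", "开始上传视频...")
```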
+ 20 - 0
kuaishou/kuaishou_main/run_ks_author.py

@@ -23,6 +23,10 @@ def main(log_type, crawler, topic_name, group_id, env):
                                           f'WaitSeconds:{wait_seconds}\n'
                                           f'TopicName:{topic_name}\n'
                                           f'MQConsumer:{group_id}')
+    Common.logging(log_type, crawler, env, f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                           f'WaitSeconds:{wait_seconds}\n'
+                                           f'TopicName:{topic_name}\n'
+                                           f'MQConsumer:{group_id}')
     while True:
         try:
             # 长轮询消费消息。
@@ -38,6 +42,16 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                       f"NextConsumeTime:{msg.next_consume_time}\n"
                                                       f"ReceiptHandle:{msg.receipt_handle}\n"
                                                       f"Properties:{msg.properties}")
+                Common.logging(log_type, crawler, env, f"Receive\n"
+                                                       f"MessageId:{msg.message_id}\n"
+                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                       f"MessageTag:{msg.message_tag}\n"
+                                                       f"ConsumedTimes:{msg.consumed_times}\n"
+                                                       f"PublishTime:{msg.publish_time}\n"
+                                                       f"Body:{msg.message_body}\n"
+                                                       f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                       f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                       f"Properties:{msg.properties}")
                 # ack_mq_message
                 ack_message(log_type=log_type, crawler=crawler, recv_msgs=recv_msgs, consumer=consumer)
 
@@ -48,9 +62,12 @@ def main(log_type, crawler, topic_name, group_id, env):
                 select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
                 user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
                 Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+                Common.logging(log_type, crawler, env, f"调度任务:{task_dict}")
                 Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
+                Common.logging(log_type, crawler, env, f"抓取规则:{rule_dict}")
                 # Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
                 Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["taskName"]}\n')
+                Common.logging(log_type, crawler, env, f'开始抓取 {task_dict["taskName"]}\n')
                 KuaishouauthorScheduling.get_author_videos(log_type=log_type,
                                                            crawler=crawler,
                                                            rule_dict=rule_dict,
@@ -58,14 +75,17 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                            env=env)
                 Common.del_logs(log_type, crawler)
                 Common.logger(log_type, crawler).info('抓取一轮结束\n')
+                Common.logging(log_type, crawler, env, '抓取一轮结束\n')
 
         except MQExceptionBase as err:
             # Topic中没有消息可消费。
             if err.type == "MessageNotExist":
                 Common.logger(log_type, crawler).info(f"No new message! RequestId:{err.req_id}\n")
+                Common.logging(log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n")
                 continue
 
             Common.logger(log_type, crawler).info(f"Consume Message Fail! Exception:{err}\n")
+            Common.logging(log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n")
             time.sleep(2)
             continue
 

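Note: run_ks_author.py wraps the crawler in a long-poll loop: consume a batch from the MQ topic, log the message metadata (now to both sinks), ack the batch, parse the task payload, look up the task's users in MySQL, and hand off to KuaishouauthorScheduling. The sketch below keeps only the consume/ack skeleton; `consumer`, `ack_message`, and `MQExceptionBase` stand in for the project's MQ wrapper, and since the task/rule parsing is not visible in this hunk it is reduced to a `handle_task` callback.

```python
# Hedged sketch of the long-poll / ack skeleton behind run_ks_author.py. The
# consumer is expected to expose consume_message(batch, wait_seconds); ack_message
# and MQExceptionBase stand in for the project's MQ wrapper. Task parsing and the
# MySQL user lookup are delegated to handle_task, since they are not shown here.
import time


class MQExceptionBase(Exception):
    """Stand-in for the MQ SDK exception type used in the runner."""
    type = "Unknown"
    req_id = ""


def consume_loop(consumer, ack_message, handle_task, batch: int = 1, wait_seconds: int = 30) -> None:
    while True:
        try:
            recv_msgs = consumer.consume_message(batch, wait_seconds)   # long poll
            for msg in recv_msgs:
                print(f"Receive MessageId:{msg.message_id} Body:{msg.message_body}")
            ack_message(recv_msgs=recv_msgs, consumer=consumer)          # ack the batch
            for msg in recv_msgs:
                handle_task(msg.message_body)                            # crawl one task
        except MQExceptionBase as err:
            if err.type == "MessageNotExist":                            # topic empty
                print(f"No new message! RequestId:{err.req_id}")
                continue
            print(f"Consume Message Fail! Exception:{err}")
            time.sleep(2)
```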
+ 20 - 0
kuaishou/kuaishou_main/run_ks_recommend.py

@@ -24,6 +24,10 @@ def main(log_type, crawler, topic_name, group_id, env):
                                           f'WaitSeconds:{wait_seconds}\n'
                                           f'TopicName:{topic_name}\n'
                                           f'MQConsumer:{group_id}')
+    Common.logging(log_type, crawler, env, f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                           f'WaitSeconds:{wait_seconds}\n'
+                                           f'TopicName:{topic_name}\n'
+                                           f'MQConsumer:{group_id}')
     while True:
         try:
             # 长轮询消费消息。
@@ -39,6 +43,16 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                       f"NextConsumeTime:{msg.next_consume_time}\n"
                                                       f"ReceiptHandle:{msg.receipt_handle}\n"
                                                       f"Properties:{msg.properties}")
+                Common.logging(log_type, crawler, env, f"Receive\n"
+                                                       f"MessageId:{msg.message_id}\n"
+                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                       f"MessageTag:{msg.message_tag}\n"
+                                                       f"ConsumedTimes:{msg.consumed_times}\n"
+                                                       f"PublishTime:{msg.publish_time}\n"
+                                                       f"Body:{msg.message_body}\n"
+                                                       f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                       f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                       f"Properties:{msg.properties}")
                 # ack_mq_message
                 ack_message(log_type=log_type, crawler=crawler, recv_msgs=recv_msgs, consumer=consumer)
 
@@ -53,9 +67,12 @@ def main(log_type, crawler, topic_name, group_id, env):
                     our_uid_list.append(user["uid"])
                 our_uid = random.choice(our_uid_list)
                 Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+                Common.logging(log_type, crawler, env, f"调度任务:{task_dict}")
                 Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
+                Common.logging(log_type, crawler, env, f"抓取规则:{rule_dict}")
                 # Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
                 Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["taskName"]}\n')
+                Common.logging(log_type, crawler, env, f'开始抓取 {task_dict["taskName"]}\n')
                 KuaiShouRecommendScheduling.get_videoList(log_type=log_type,
                                                           crawler=crawler,
                                                           rule_dict=rule_dict,
@@ -63,14 +80,17 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                           env=env)
                 Common.del_logs(log_type, crawler)
                 Common.logger(log_type, crawler).info('抓取一轮结束\n')
+                Common.logging(log_type, crawler, env, '抓取一轮结束\n')
 
         except MQExceptionBase as err:
             # Topic中没有消息可消费。
             if err.type == "MessageNotExist":
                 Common.logger(log_type, crawler).info(f"No new message! RequestId:{err.req_id}\n")
+                Common.logging(log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n")
                 continue
 
             Common.logger(log_type, crawler).info(f"Consume Message Fail! Exception:{err}\n")
+            Common.logging(log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n")
             time.sleep(2)
             continue
 

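Note: run_ks_recommend.py follows the same consume/ack pattern but additionally collects every `uid` from the task's user list and picks one at random as `our_uid` for the round. Below is a small hedged sketch with an empty-list guard, which the loop above does not have (`random.choice` raises IndexError on an empty sequence); the helper name is illustrative.

```python
# Hedged sketch of the our_uid selection in run_ks_recommend.py, plus a guard for
# an empty user list (random.choice raises IndexError on an empty sequence).
import random
from typing import Optional


def pick_our_uid(user_list: list) -> Optional[int]:
    our_uid_list = [user["uid"] for user in user_list if "uid" in user]
    return random.choice(our_uid_list) if our_uid_list else None


# Usage (hypothetical): our_uid = pick_our_uid([{"uid": 6267140}, {"uid": 6267141}])
```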
+ 2 - 0
kuaishou/kuaishou_main/run_ks_recommend_dev.py

@@ -10,6 +10,7 @@ from kuaishou.kuaishou_recommend.kuaishou_recommend_cut_title import KuaiShouRec
 
 def kuaishou_recommend_main(log_type, crawler, env):
     Common.logger(log_type, crawler).info("开始抓取快手推荐\n")
+    Common.logging(log_type, crawler, env, "开始抓取快手推荐\n")
     KuaiShouRecommendScheduling.get_videoList(log_type=log_type,
                                               crawler=crawler,
                                               our_uid=6267140,
@@ -17,6 +18,7 @@ def kuaishou_recommend_main(log_type, crawler, env):
                                               env=env)
     Common.del_logs(log_type, crawler)
     Common.logger(log_type, crawler).info("抓取一轮结束\n")
+    Common.logging(log_type, crawler, env, "抓取一轮结束\n")
 
 
 if __name__ == "__main__":

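Note: run_ks_recommend_dev.py hard-codes `our_uid=6267140` for local runs. Below is a hedged sketch of the same dev entrypoint taking the uid (and env) from the command line so different test accounts can be used without editing the file; the crawler call stays commented because `KuaiShouRecommendScheduling` is not defined here.

```python
# Hedged sketch: the dev runner with our_uid supplied on the CLI instead of being
# hard-coded. KuaiShouRecommendScheduling is only referenced, not defined here.
import argparse


def kuaishou_recommend_dev(log_type: str, crawler: str, env: str, our_uid: int) -> None:
    print("开始抓取快手推荐")
    # KuaiShouRecommendScheduling.get_videoList(log_type=log_type, crawler=crawler,
    #                                           our_uid=our_uid, rule_dict={}, env=env)
    print("抓取一轮结束")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--log_type", default="recommend")
    parser.add_argument("--crawler", default="kuaishou")
    parser.add_argument("--env", default="dev")
    parser.add_argument("--our_uid", type=int, default=6267140)
    args = parser.parse_args()
    kuaishou_recommend_dev(args.log_type, args.crawler, args.env, args.our_uid)
```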
+ 21 - 3
kuaishou/kuaishou_recommend/kuaishou_recommend_cut_title.py

@@ -62,6 +62,7 @@ class KuaiShouRecommendScheduling:
         for page in range(1, 101):
             try:
                 Common.logger(log_type, crawler).info(f"正在抓取第{page}页")
+                Common.logging(log_type, crawler, env, f"正在抓取第{page}页")
                 url = "https://www.kuaishou.com/graphql"
                 payload = json.dumps({
                     "operationName": "visionNewRecoFeed",
@@ -95,23 +96,26 @@ class KuaiShouRecommendScheduling:
                 s.mount('http://', HTTPAdapter(max_retries=3))
                 s.mount('https://', HTTPAdapter(max_retries=3))
                 response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False, timeout=10)
-                # Common.logger(log_type, crawler).info(f"response:{response.text}")
                 response.close()
                 if response.status_code != 200:
                     Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.status_code}, {response.text}\n")
+                    Common.logging(log_type, crawler, env, f"get_videoList_response:{response.status_code}, {response.text}\n")
                     continue
                 elif 'data' not in response.json():
                     Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
+                    Common.logging(log_type, crawler, env, f"get_videoList_response:{response.json()}\n")
                     continue
                 elif 'visionNewRecoFeed' not in response.json()['data']:
                     Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
+                    Common.logging(log_type, crawler, env, f"get_videoList_response:{response.json()['data']}\n")
                     continue
                 elif 'feeds' not in response.json()['data']['visionNewRecoFeed']:
-                    Common.logger(log_type, crawler).warning(
-                        f"get_videoList_response:{response.json()['data']['visionNewRecoFeed']}\n")
+                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']['visionNewRecoFeed']}\n")
+                    Common.logging(log_type, crawler, env, f"get_videoList_response:{response.json()['data']['visionNewRecoFeed']}\n")
                     continue
                 elif len(response.json()['data']['visionNewRecoFeed']['feeds']) == 0:
                     Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
+                    Common.logging(log_type, crawler, env, "没有更多视频啦 ~\n")
                     continue
                 else:
                     feeds = response.json()['data']['visionNewRecoFeed']['feeds']
@@ -149,11 +153,14 @@ class KuaiShouRecommendScheduling:
                                           'session': f"kuaishou-{int(time.time())}"}
                             for k, v in video_dict.items():
                                 Common.logger(log_type, crawler).info(f"{k}:{v}")
+                            Common.logging(log_type, crawler, env, f"{video_dict}")
 
                             if video_dict["video_id"] == '' or video_dict["cover_url"] == '' or video_dict["video_url"] == '':
                                 Common.logger(log_type, crawler).info('无效视频\n')
+                                Common.logging(log_type, crawler, env, '无效视频\n')
                             elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                                 Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                                Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                             elif any(str(word) if str(word) in video_dict["video_title"] else False
                                      for word in get_config_from_mysql(log_type=log_type,
                                                                        source=crawler,
@@ -161,12 +168,15 @@ class KuaiShouRecommendScheduling:
                                                                        text="filter",
                                                                        action="")) is True:
                                 Common.logger(log_type, crawler).info('已中过滤词\n')
+                                Common.logging(log_type, crawler, env, '已中过滤词\n')
                             elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                                 Common.logger(log_type, crawler).info('视频已下载\n')
+                                Common.logging(log_type, crawler, env, '视频已下载\n')
                             else:
                                 title_score = get_title_score(log_type, crawler, "16QspO", "0usaDk", video_title)
                                 if title_score <= 0.3:
                                     Common.logger(log_type, crawler).info(f"权重分:{title_score}<=0.3\n")
+                                    Common.logging(log_type, crawler, env, f"权重分:{title_score}<=0.3\n")
                                     continue
                                 cls.download_publish(log_type=log_type,
                                                      crawler=crawler,
@@ -177,8 +187,10 @@ class KuaiShouRecommendScheduling:
                                                      env=env)
                         except Exception as e:
                             Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+                            Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
             except Exception as e:
                 Common.logger(log_type, crawler).error(f"抓取第{page}页时异常:{e}\n")
+                Common.logging(log_type, crawler, env, f"抓取第{page}页时异常:{e}\n")
 
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
@@ -197,11 +209,13 @@ class KuaiShouRecommendScheduling:
                 # 删除视频文件夹
                 shutil.rmtree(f"./{crawler}/videos/{md_title}")
                 Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                Common.logging(log_type, crawler, env, "视频size=0,删除成功\n")
                 return
         except FileNotFoundError:
             # 删除视频文件夹
             shutil.rmtree(f"./{crawler}/videos/{md_title}")
             Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
+            Common.logging(log_type, crawler, env, "视频文件不存在,删除文件夹成功\n")
             return
         # 下载封面
         Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
@@ -210,6 +224,7 @@ class KuaiShouRecommendScheduling:
 
         # 上传视频
         Common.logger(log_type, crawler).info("开始上传视频...")
+        Common.logging(log_type, crawler, env, "开始上传视频...")
         if env == "dev":
             oss_endpoint = "out"
             our_video_id = Publish.upload_and_publish(log_type=log_type,
@@ -270,8 +285,10 @@ class KuaiShouRecommendScheduling:
                                                 {int(video_dict['video_width'])},
                                                 {int(video_dict['video_height'])}) """
         Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
         MysqlHelper.update_values(log_type, crawler, insert_sql, env, action="")
         Common.logger(log_type, crawler).info('视频信息写入数据库成功')
+        Common.logging(log_type, crawler, env, '视频信息写入数据库成功')
 
         # 视频写入飞书
         Feishu.insert_columns(log_type, crawler, "Aps2BI", "ROWS", 1, 2)
@@ -298,6 +315,7 @@ class KuaiShouRecommendScheduling:
         time.sleep(0.5)
         Feishu.update_values(log_type, crawler, "Aps2BI", "D2:Z2", values)
         Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+        Common.logging(log_type, crawler, env, f"视频已保存至云文档\n")
 
 
 if __name__ == "__main__":

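Note: kuaishou_recommend_cut_title.py gates each candidate through field checks, the download rule, a filter-word scan, a repeat check, and finally a title weight score that must exceed 0.3. The filter-word scan is written as `any(str(word) if str(word) in title else False for word in ...) is True`; the generator yields either the matching word (truthy) or False, so it works, but a plain membership test reads more clearly. A hedged sketch of the same gate follows; `get_filter_words` and `get_title_score` are fixed stand-ins for the project's MySQL and Feishu helpers, and the download-rule step is omitted.

```python
# Hedged sketch of the per-video gate in kuaishou_recommend_cut_title.py. The
# filter-word test is rewritten as a plain membership check, equivalent to the
# any(...) is True form in the diff. get_filter_words/get_title_score are fixed
# stand-ins for the MySQL and Feishu lookups; the download rule step is omitted.
from typing import List


def get_filter_words() -> List[str]:
    return ["广告", "抽奖"]            # stand-in for get_config_from_mysql(text="filter")


def get_title_score(title: str) -> float:
    return 0.5                          # stand-in for the Feishu word-weight lookup


def should_download(video_dict: dict, already_downloaded: bool) -> bool:
    if not video_dict.get("video_id") or not video_dict.get("cover_url") or not video_dict.get("video_url"):
        return False                    # 无效视频
    if any(word in video_dict["video_title"] for word in get_filter_words()):
        return False                    # 已中过滤词
    if already_downloaded:
        return False                    # 视频已下载
    if get_title_score(video_dict["video_title"]) <= 0.3:
        return False                    # 权重分 <= 0.3
    return True
```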
+ 20 - 3
kuaishou/kuaishou_recommend/kuaishou_recommend_shceduling.py

@@ -62,6 +62,7 @@ class KuaiShouRecommendScheduling:
         for page in range(1, 101):
             try:
                 Common.logger(log_type, crawler).info(f"正在抓取第{page}页")
+                Common.logging(log_type, crawler, env, f"正在抓取第{page}页")
                 url = "https://www.kuaishou.com/graphql"
                 payload = json.dumps({
                     "operationName": "visionNewRecoFeed",
@@ -95,23 +96,26 @@ class KuaiShouRecommendScheduling:
                 s.mount('http://', HTTPAdapter(max_retries=3))
                 s.mount('https://', HTTPAdapter(max_retries=3))
                 response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False, timeout=10)
-                # Common.logger(log_type, crawler).info(f"response:{response.text}")
                 response.close()
                 if response.status_code != 200:
                     Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.status_code}, {response.text}\n")
+                    Common.logging(log_type, crawler, env, f"get_videoList_response:{response.status_code}, {response.text}\n")
                     continue
                 elif 'data' not in response.json():
                     Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
+                    Common.logging(log_type, crawler, env, f"get_videoList_response:{response.json()}\n")
                     continue
                 elif 'visionNewRecoFeed' not in response.json()['data']:
                     Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
+                    Common.logging(log_type, crawler, env, f"get_videoList_response:{response.json()['data']}\n")
                     continue
                 elif 'feeds' not in response.json()['data']['visionNewRecoFeed']:
-                    Common.logger(log_type, crawler).warning(
-                        f"get_videoList_response:{response.json()['data']['visionNewRecoFeed']}\n")
+                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']['visionNewRecoFeed']}\n")
+                    Common.logging(log_type, crawler, env, f"get_videoList_response:{response.json()['data']['visionNewRecoFeed']}\n")
                     continue
                 elif len(response.json()['data']['visionNewRecoFeed']['feeds']) == 0:
                     Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
+                    Common.logging(log_type, crawler, env, "没有更多视频啦 ~\n")
                     continue
                 else:
                     feeds = response.json()['data']['visionNewRecoFeed']['feeds']
@@ -149,11 +153,14 @@ class KuaiShouRecommendScheduling:
                                           'session': f"kuaishou-{int(time.time())}"}
                             for k, v in video_dict.items():
                                 Common.logger(log_type, crawler).info(f"{k}:{v}")
+                            Common.logging(log_type, crawler, env, f"{video_dict}")
 
                             if video_dict["video_id"] == '' or video_dict["cover_url"] == '' or video_dict["video_url"] == '':
                                 Common.logger(log_type, crawler).info('无效视频\n')
+                                Common.logging(log_type, crawler, env, '无效视频\n')
                             elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                                 Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                                Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                             elif any(str(word) if str(word) in video_dict["video_title"] else False
                                      for word in get_config_from_mysql(log_type=log_type,
                                                                        source=crawler,
@@ -161,8 +168,10 @@ class KuaiShouRecommendScheduling:
                                                                        text="filter",
                                                                        action="")) is True:
                                 Common.logger(log_type, crawler).info('已中过滤词\n')
+                                Common.logging(log_type, crawler, env, '已中过滤词\n')
                             elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                                 Common.logger(log_type, crawler).info('视频已下载\n')
+                                Common.logging(log_type, crawler, env, '视频已下载\n')
                             else:
                                 cls.download_publish(log_type=log_type,
                                                      crawler=crawler,
@@ -172,8 +181,10 @@ class KuaiShouRecommendScheduling:
                                                      env=env)
                         except Exception as e:
                             Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+                            Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
             except Exception as e:
                 Common.logger(log_type, crawler).error(f"抓取第{page}页时异常:{e}\n")
+                Common.logging(log_type, crawler, env, f"抓取第{page}页时异常:{e}\n")
 
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
@@ -192,11 +203,13 @@ class KuaiShouRecommendScheduling:
                 # 删除视频文件夹
                 shutil.rmtree(f"./{crawler}/videos/{md_title}")
                 Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                Common.logging(log_type, crawler, env, "视频size=0,删除成功\n")
                 return
         except FileNotFoundError:
             # 删除视频文件夹
             shutil.rmtree(f"./{crawler}/videos/{md_title}")
             Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
+            Common.logging(log_type, crawler, env, "视频文件不存在,删除文件夹成功\n")
             return
         # 下载封面
         Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
@@ -205,6 +218,7 @@ class KuaiShouRecommendScheduling:
 
         # 上传视频
         Common.logger(log_type, crawler).info("开始上传视频...")
+        Common.logging(log_type, crawler, env, "开始上传视频...")
         if env == "dev":
             oss_endpoint = "out"
             our_video_id = Publish.upload_and_publish(log_type=log_type,
@@ -265,8 +279,10 @@ class KuaiShouRecommendScheduling:
                                                 {int(video_dict['video_width'])},
                                                 {int(video_dict['video_height'])}) """
         Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
         MysqlHelper.update_values(log_type, crawler, insert_sql, env, action="")
         Common.logger(log_type, crawler).info('视频信息写入数据库成功')
+        Common.logging(log_type, crawler, env, '视频信息写入数据库成功')
 
         # 视频写入飞书
         Feishu.insert_columns(log_type, crawler, "Aps2BI", "ROWS", 1, 2)
@@ -292,6 +308,7 @@ class KuaiShouRecommendScheduling:
         time.sleep(0.5)
         Feishu.update_values(log_type, crawler, "Aps2BI", "E2:Z2", values)
         Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+        Common.logging(log_type, crawler, env, f"视频已保存至云文档\n")
 
 
 if __name__ == "__main__":