wangkun, 1 year ago
Parent commit: 743cb2e073

douyin/douyin_author/douyin_author_scheduling.py  (+179, -216)

@@ -3,63 +3,22 @@
 # @Time: 2023/5/26
 import json
 import os
-import random
 import shutil
 import sys
 import time
 import requests
 from hashlib import md5
-
-from douyin.douyin_recommend import get_xb
-
 sys.path.append(os.getcwd())
 from common.common import Common
-# from common.db import MysqlHelper
 from common.scheduling_db import MysqlHelper
+from douyin.douyin_recommend import get_xb
 from common.feishu import Feishu
 from common.publish import Publish
-from common.public import random_title
-from common.userAgent import get_random_user_agent
-from common.public import get_user_from_mysql, get_config_from_mysql
+from common.public import random_title, get_config_from_mysql, download_rule
 
 
-class DyAuthorScheduling(object):
+class DouyinauthorScheduling:
     platform = "抖音"
-    tag = "抖音定向爬虫策略"
-
-    @classmethod
-    def download_rule(cls, video_info_dict, rule_dict):
-        if video_info_dict['like_cnt'] >= rule_dict['like_cnt']['min']:
-            if video_info_dict['duration'] >= rule_dict['duration']['min']:
-                if video_info_dict['video_width'] >= rule_dict['width']['min'] \
-                        or video_info_dict['video_height'] >= rule_dict['height']['min']:
-                    return True
-                else:
-                    return False
-            else:
-                return False
-        else:
-            return False
-
-    # 过滤词库
-    @classmethod
-    def filter_words(cls, log_type, crawler):
-        try:
-            while True:
-                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, '6BS2RR')
-                if filter_words_sheet is None:
-                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
-                    continue
-                filter_words_list = []
-                for x in filter_words_sheet:
-                    for y in x:
-                        if y is None:
-                            pass
-                        else:
-                            filter_words_list.append(y)
-                return filter_words_list
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')
 
     @classmethod
     def video_title(cls, log_type, env, crawler, title):
@@ -94,36 +53,36 @@ class DyAuthorScheduling(object):
             return video_title
 
     @classmethod
-    def get_videoList(cls, log_type, crawler, strategy, task, our_uid, out_uid, oss_endpoint, env):
-        try:
-            max_cursor = ''
-            url = "https://www.douyin.com/aweme/v1/web/aweme/post/?device_platform=webapp&aid=6383&channel=channel_pc_web&sec_user_id={sec_user_id}&max_cursor={max_cursor}&show_live_replay_strategy=1&count=10&publish_video_strategy_type=2&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1440&screen_height=900&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=112.0.0.0&browser_online=true&engine_name=Blink&engine_version=112.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=50".format(
-                sec_user_id=out_uid, max_cursor=max_cursor)
-            headers = {
-                'authority': 'www.douyin.com',
-                'accept': 'application/json, text/plain, */*',
-                'accept-language': 'zh-CN,zh;q=0.9',
-                # 'cookie': '__ac_nonce=06437a18000f23ad954f0; __ac_signature=_02B4Z6wo00f01Sb71TAAAIDCsi2OPpjonN0m29GAAC2M85; s_v_web_id=verify_lgeqr3uq_3aDaqQXf_juHS_40Yi_BE8b_tI8FCILZQXPK; _tea_utm_cache_2018=undefined; ttwid=1%7Cq_IBs6hbBUOIEcRR1gxtgY6GiTbTE3U1XhJNLL_9BZA%7C1681367431%7Cf77b36ae4721884fec1c3fa9d6a08c29e308236ae13df58d1be3b0d1f82f8668; strategyABtestKey=%221681367433.454%22; passport_csrf_token=bff0289a5846e58b4b9db6e1f64665f4; passport_csrf_token_default=bff0289a5846e58b4b9db6e1f64665f4; bd_ticket_guard_client_data=eyJiZC10aWNrZXQtZ3VhcmQtdmVyc2lvbiI6MiwiYmQtdGlja2V0LWd1YXJkLWl0ZXJhdGlvbi12ZXJzaW9uIjoxLCJiZC10aWNrZXQtZ3VhcmQtY2xpZW50LWNzciI6Ii0tLS0tQkVHSU4gQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbk1JSUJEVENCdFFJQkFEQW5NUXN3Q1FZRFZRUUdFd0pEVGpFWU1CWUdBMVVFQXd3UFltUmZkR2xqYTJWMFgyZDFcclxuWVhKa01Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRVhzcHJ5TElFT3E4Z2tPc2l5MTdSS1dEcVxyXG5xTXZkWEt5Y1V5NStiL3JpSmJ6VkMwMlYrU1dNaWtZTlNOK29IU2g2WVVTTmdUUjJrZEhvRUxISmxGdU9scUFzXHJcbk1Db0dDU3FHU0liM0RRRUpEakVkTUJzd0dRWURWUjBSQkJJd0VJSU9kM2QzTG1SdmRYbHBiaTVqYjIwd0NnWUlcclxuS29aSXpqMEVBd0lEUndBd1JBSWdIeW9SblFNK0h0Z3ZOU2dFMVpHdGpmYWQxT3BuWHJRdVFSNXNSaDkwakRJQ1xyXG5JRG1tVkthRkN5djBLemtpZ0J0RExaTVJSNndURzRBWUVoNUlWUmlZUU9UVVxyXG4tLS0tLUVORCBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS1cclxuIn0=; msToken=ZPkeAqCnLbjDWCkWcWf36ZZIoZTl07X33ca0xcNGk3tZPoMvgx-lo28oNb5JhbCKsXLgLNIoojAbocBrjdAv54Hwf-Tk3_yAjLW7WJxxSa0=; ttcid=54b98e03a03e43e09211ee28db90fdd126; home_can_add_dy_2_desktop=%221%22; msToken=nqMSwn8xJAXLZk2AikdOeJ6P3JvoCsbYjHROoR55KXMDvWs8auYCMpZFGQSClHhitgB0l_vwg8m0-lE-aWQovfN7Ga1QupM3SpdNktiamFRiHMdJExREc9_uxS1ln8E=; tt_scid=DnO5GFg1oLONpPMFuFeL-OveKBn43mRynRVTvHsh1KUQm283ocN6JX6qPKDDrFHbfdf1; download_guide=%222%2F20230413%22; msToken=b9haS5RjLaFgqVDoGp5xSqc8B4kl-miQB5Nku0BSIvHVutKT81Nzk_pPb0wm7xYlAp_nz1gytQng5gYeIRNxcMgZJ_MB7lhejt_093miXlHtvqAaxL0FNg==',
-                'referer': 'https://www.douyin.com/user/{}?showTab=post'.format(out_uid),
-                'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
-                'sec-ch-ua-mobile': '?0',
-                'sec-ch-ua-platform': '"macOS"',
-                'sec-fetch-dest': 'empty',
-                'sec-fetch-mode': 'cors',
-                'sec-fetch-site': 'same-origin',
-                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
-            }
-            x_bogus = get_xb(url, headers['user-agent'])
-            url = url + '&X-Bogus={}'.format(x_bogus)
-            if not x_bogus:
-                return
-            res = requests.get(url=url, headers=headers, data={}, proxies=Common.tunnel_proxies(), timeout=10).json()
-            aweme_list = res.get('aweme_list', [])
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f"获取抖音作者:{out_uid},视频列表失败:{e}")
+    def get_videoList(cls, log_type, crawler, user_dict, rule_dict, env):
+        max_cursor = ""
+        # while True:
+        url = "https://www.douyin.com/aweme/v1/web/aweme/post/?device_platform=webapp&aid=6383&channel=channel_pc_web&sec_user_id={sec_user_id}&max_cursor={max_cursor}&show_live_replay_strategy=1&count=10&publish_video_strategy_type=2&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1440&screen_height=900&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=112.0.0.0&browser_online=true&engine_name=Blink&engine_version=112.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=50".format(
+            sec_user_id=user_dict["link"].replace("https://www.douyin.com/user/", ""), max_cursor=max_cursor)
+        Common.logger(log_type, crawler).info(f"url:{url}")
+        headers = {
+            'authority': 'www.douyin.com',
+            'accept': 'application/json, text/plain, */*',
+            'accept-language': 'zh-CN,zh;q=0.9',
+            # 'cookie': '__ac_nonce=06437a18000f23ad954f0; __ac_signature=_02B4Z6wo00f01Sb71TAAAIDCsi2OPpjonN0m29GAAC2M85; s_v_web_id=verify_lgeqr3uq_3aDaqQXf_juHS_40Yi_BE8b_tI8FCILZQXPK; _tea_utm_cache_2018=undefined; ttwid=1%7Cq_IBs6hbBUOIEcRR1gxtgY6GiTbTE3U1XhJNLL_9BZA%7C1681367431%7Cf77b36ae4721884fec1c3fa9d6a08c29e308236ae13df58d1be3b0d1f82f8668; strategyABtestKey=%221681367433.454%22; passport_csrf_token=bff0289a5846e58b4b9db6e1f64665f4; passport_csrf_token_default=bff0289a5846e58b4b9db6e1f64665f4; bd_ticket_guard_client_data=eyJiZC10aWNrZXQtZ3VhcmQtdmVyc2lvbiI6MiwiYmQtdGlja2V0LWd1YXJkLWl0ZXJhdGlvbi12ZXJzaW9uIjoxLCJiZC10aWNrZXQtZ3VhcmQtY2xpZW50LWNzciI6Ii0tLS0tQkVHSU4gQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbk1JSUJEVENCdFFJQkFEQW5NUXN3Q1FZRFZRUUdFd0pEVGpFWU1CWUdBMVVFQXd3UFltUmZkR2xqYTJWMFgyZDFcclxuWVhKa01Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRVhzcHJ5TElFT3E4Z2tPc2l5MTdSS1dEcVxyXG5xTXZkWEt5Y1V5NStiL3JpSmJ6VkMwMlYrU1dNaWtZTlNOK29IU2g2WVVTTmdUUjJrZEhvRUxISmxGdU9scUFzXHJcbk1Db0dDU3FHU0liM0RRRUpEakVkTUJzd0dRWURWUjBSQkJJd0VJSU9kM2QzTG1SdmRYbHBiaTVqYjIwd0NnWUlcclxuS29aSXpqMEVBd0lEUndBd1JBSWdIeW9SblFNK0h0Z3ZOU2dFMVpHdGpmYWQxT3BuWHJRdVFSNXNSaDkwakRJQ1xyXG5JRG1tVkthRkN5djBLemtpZ0J0RExaTVJSNndURzRBWUVoNUlWUmlZUU9UVVxyXG4tLS0tLUVORCBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS1cclxuIn0=; msToken=ZPkeAqCnLbjDWCkWcWf36ZZIoZTl07X33ca0xcNGk3tZPoMvgx-lo28oNb5JhbCKsXLgLNIoojAbocBrjdAv54Hwf-Tk3_yAjLW7WJxxSa0=; ttcid=54b98e03a03e43e09211ee28db90fdd126; home_can_add_dy_2_desktop=%221%22; msToken=nqMSwn8xJAXLZk2AikdOeJ6P3JvoCsbYjHROoR55KXMDvWs8auYCMpZFGQSClHhitgB0l_vwg8m0-lE-aWQovfN7Ga1QupM3SpdNktiamFRiHMdJExREc9_uxS1ln8E=; tt_scid=DnO5GFg1oLONpPMFuFeL-OveKBn43mRynRVTvHsh1KUQm283ocN6JX6qPKDDrFHbfdf1; download_guide=%222%2F20230413%22; msToken=b9haS5RjLaFgqVDoGp5xSqc8B4kl-miQB5Nku0BSIvHVutKT81Nzk_pPb0wm7xYlAp_nz1gytQng5gYeIRNxcMgZJ_MB7lhejt_093miXlHtvqAaxL0FNg==',
+            'referer': 'https://www.douyin.com/user/{}?showTab=post'.format(user_dict["link"].replace("https://www.douyin.com/user/", "")),
+            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
+        }
+        x_bogus = get_xb(url, headers['user-agent'])
+        url = url + '&X-Bogus={}'.format(x_bogus)
+        if not x_bogus:
             return
+        res = requests.get(url=url, headers=headers, data={}, proxies=Common.tunnel_proxies(), timeout=10)
+        Common.logger(log_type, crawler).info(f"res:{res.text}\n")
+        aweme_list = res.json().get('aweme_list', [])
+        # max_cursor = res.json().get("max_cursor", "")
         if not aweme_list:
-            Common.logger(log_type, crawler).warning(f"抖音作者没有获取到更多数据")
+            Common.logger(log_type, crawler).info(f"没有更多数据啦~\n")
             return
         for info in aweme_list:
             if info.get('is_ads'):
@@ -148,7 +107,7 @@ class DyAuthorScheduling(object):
                           'video_height': info['video']['height'],
                           'duration': round(info['video']['duration'] / 1000),
                           'publish_time': publish_day,
-                          'publish_time_stamp': publish_time * 1000,
+                          'publish_time_stamp': publish_time,
                           'publish_time_str': publish_time_str,
                           'user_name': info['author']['nickname'],
                           'user_id': info['author_user_id'],
@@ -161,16 +120,30 @@ class DyAuthorScheduling(object):
 
             for k, v in video_dict.items():
                 Common.logger(log_type, crawler).info(f"{k}:{v}")
-                # log_type, crawler, strategy, task, video_dict, rule_dict, our_uid, oss_endpoint, env
-            cls.download_publish(log_type=log_type,
-                                 crawler=crawler,
-                                 strategy=strategy,
-                                 task=task,
-                                 video_dict=video_dict,
-                                 our_uid=our_uid,
-                                 oss_endpoint=oss_endpoint,
-                                 env=env,
-                                 )
+
+            if int((int(time.time()) - int(video_dict["publish_time_stamp"])) / (3600 * 24)) > int(rule_dict.get("period", {}).get("max", 1000)):
+                Common.logger(log_type, crawler).info(f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n')
+                return
+
+            if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict,
+                             rule_dict=rule_dict) is False:
+                Common.logger(log_type, crawler).info("不满足抓取规则\n")
+            elif any(str(word) if str(word) in video_dict["video_title"] else False
+                     for word in get_config_from_mysql(log_type=log_type,
+                                                       source=crawler,
+                                                       env=env,
+                                                       text="filter",
+                                                       action="")) is True:
+                Common.logger(log_type, crawler).info('已中过滤词\n')
+            elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                Common.logger(log_type, crawler).info('视频已下载\n')
+            else:
+                cls.download_publish(log_type=log_type,
+                                     crawler=crawler,
+                                     user_dict=user_dict,
+                                     video_dict=video_dict,
+                                     rule_dict=rule_dict,
+                                     env=env)
 
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
@@ -180,149 +153,139 @@ class DyAuthorScheduling(object):
 
     # 下载 / 上传
     @classmethod
-    def download_publish(cls, log_type, crawler, strategy, task, video_dict, our_uid, oss_endpoint, env):
+    def download_publish(cls, log_type, crawler, user_dict, video_dict, rule_dict, env):
+        # 下载视频
+        Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
         try:
-            filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
-            for filter_word in filter_words:
-                if filter_word in video_dict['video_title']:
-                    Common.logger(log_type, crawler).info('标题已中过滤词:{}\n', video_dict['video_title'])
-                    return
-            if cls.download_rule(video_dict, task['rule_dict']) is False:
-                Common.logger(log_type, crawler).info('不满足抓取规则\n')
-            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
-                Common.logger(log_type, crawler).info('视频已下载\n')
-            else:
-                # 下载视频
-                Common.download_method(log_type=log_type, crawler=crawler, text='video',
-                                       title=video_dict['video_title'], url=video_dict['video_url'])
-                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
-                    # 删除视频文件夹
-                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
-                    return
-                # ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
-                # if ffmpeg_dict is None or ffmpeg_dict['size'] == 0:
-                #     Common.logger(log_type, crawler).warning(f"下载的视频无效,已删除\n")
-                #     # 删除视频文件夹
-                #     shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-                #     return
-                # 下载封面
-                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
-                                       title=video_dict['video_title'], url=video_dict['cover_url'])
-                # 保存视频信息至txt
-                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+        except FileNotFoundError:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
+            return
+        # 下载封面
+        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
+        # 保存视频信息至txt
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
 
-                # 上传视频
-                Common.logger(log_type, crawler).info("开始上传视频...")
-                our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                          crawler=crawler,
-                                                          strategy=strategy,
-                                                          our_uid=our_uid,
-                                                          env=env,
-                                                          oss_endpoint=oss_endpoint)
-                if env == 'dev':
-                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-                else:
-                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-                Common.logger(log_type, crawler).info("视频上传完成")
+        # 上传视频
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        if env == "dev":
+            oss_endpoint = "out"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy="定向抓取策略",
+                                                      our_uid=user_dict["uid"],
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        else:
+            oss_endpoint = "inner"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy="定向抓取策略",
+                                                      our_uid=user_dict["uid"],
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
 
-                if our_video_id is None:
-                    # 删除视频文件夹
-                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-                    return
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
 
-                # 视频写入飞书
-                upload_time = int(time.time())
-                values = [[
-                    our_video_id,
-                    time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
-                    strategy,
-                    str(video_dict['video_id']),
-                    video_dict['video_title'],
-                    our_video_link,
-                    # video_dict['gid'],
-                    video_dict['play_cnt'],
-                    video_dict['comment_cnt'],
-                    video_dict['like_cnt'],
-                    video_dict['share_cnt'],
-                    video_dict['duration'],
-                    str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
-                    video_dict['publish_time_str'],
-                    video_dict['user_name'],
-                    video_dict['user_id'],
-                    video_dict['avatar_url'],
-                    video_dict['cover_url'],
-                    video_dict['video_url']
-                ]]
-                # time.sleep(1)
-                Feishu.insert_columns(log_type, 'douyin', "qV9VC0", "ROWS", 1, 2)
-                Feishu.update_values(log_type, 'douyin', "qV9VC0", "A2:Z2", values)
-                Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+        if our_video_id is None:
+            try:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
 
-                # 视频信息保存数据库
-                insert_sql = f""" insert into crawler_video(video_id,
-                                user_id,
-                                out_user_id,
-                                platform,
-                                strategy,
-                                out_video_id,
-                                video_title,
-                                cover_url,
-                                video_url,
-                                duration,
-                                publish_time,
-                                play_cnt,
-                                comment_cnt,
-                                like_cnt,
-                                share_cnt,
-                                crawler_rule,
-                                width,
-                                height)
-                                values({our_video_id},
-                                {our_uid},
-                                "{video_dict['user_id']}",
-                                "{cls.platform}",
-                                "{strategy}",
-                                "{video_dict['video_id']}",
-                                "{video_dict['video_title']}",
-                                "{video_dict['cover_url']}",
-                                "{video_dict['video_url']}",
-                                {int(video_dict['duration'])},
-                                "{video_dict['publish_time_str']}",
-                                {int(video_dict['play_cnt'])},
-                                {int(video_dict['comment_cnt'])},
-                                {int(video_dict['like_cnt'])},
-                                {int(video_dict['share_cnt'])},
-                                '{json.dumps(task["rule_dict"])}',
-                                {int(video_dict['video_width'])},
-                                {int(video_dict['video_height'])}) """
-                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-                MysqlHelper.update_values(log_type, crawler, insert_sql, env)
-                Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f'download_publish异常:{e}\n')
+        # 视频信息保存数据库
+        insert_sql = f""" insert into crawler_video(video_id,
+                        user_id,
+                        out_user_id,
+                        platform,
+                        strategy,
+                        out_video_id,
+                        video_title,
+                        cover_url,
+                        video_url,
+                        duration,
+                        publish_time,
+                        play_cnt,
+                        comment_cnt,
+                        like_cnt,
+                        share_cnt,
+                        crawler_rule,
+                        width,
+                        height)
+                        values({our_video_id},
+                        {user_dict["uid"]},
+                        "{video_dict['user_id']}",
+                        "{cls.platform}",
+                        "定向抓取策略",
+                        "{video_dict['video_id']}",
+                        "{video_dict['video_title']}",
+                        "{video_dict['cover_url']}",
+                        "{video_dict['video_url']}",
+                        {int(video_dict['duration'])},
+                        "{video_dict['publish_time_str']}",
+                        {int(video_dict['play_cnt'])},
+                        {int(video_dict['comment_cnt'])},
+                        {int(video_dict['like_cnt'])},
+                        {int(video_dict['share_cnt'])},
+                        '{json.dumps(rule_dict)}',
+                        {int(video_dict['video_width'])},
+                        {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
 
-    @classmethod
-    def get_follow_videos(cls, log_type, crawler, task, oss_endpoint, env):
-        user_list = get_user_from_mysql(log_type, crawler, crawler, env)
-        strategy = '定向抓取策略'
+        # 视频写入飞书
+        upload_time = int(time.time())
+        values = [[
+            our_video_id,
+            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+            "定向抓取策略",
+            str(video_dict['video_id']),
+            video_dict['video_title'],
+            our_video_link,
+            # video_dict['gid'],
+            video_dict['play_cnt'],
+            video_dict['comment_cnt'],
+            video_dict['like_cnt'],
+            video_dict['share_cnt'],
+            video_dict['duration'],
+            str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
+            video_dict['publish_time_str'],
+            video_dict['nick_name'],
+            video_dict['user_id'],
+            video_dict['avatar_url'],
+            video_dict['cover_url'],
+            video_dict['video_url']
+        ]]
+        Feishu.insert_columns(log_type, 'douyin', "qV9VC0", "ROWS", 1, 2)
+        time.sleep(0.5)
+        Feishu.update_values(log_type, 'douyin', "qV9VC0", "A2:Z2", values)
+        Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
 
-        for user in user_list:
-            spider_link = user["link"]
-            out_uid = spider_link
-            user_name = user["nick_name"]
-            our_uid = user["uid"]
-            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
+    @classmethod
+    def get_author_videos(cls, log_type, crawler, rule_dict, user_list, env):
+        for user_dict in user_list:
+            # try:
+            Common.logger(log_type, crawler).info(f"开始抓取 {user_dict['nick_name']} 用户主页视频\n")
             cls.get_videoList(log_type=log_type,
                               crawler=crawler,
-                              strategy=strategy,
-                              task=task,
-                              our_uid=our_uid,
-                              out_uid=out_uid,
-                              oss_endpoint=oss_endpoint,
+                              rule_dict=rule_dict,
+                              user_dict=user_dict,
                               env=env)
+            # except Exception as e:
+            #     Common.logger(log_type, crawler).error(f"抓取用户{user_dict['nick_name']}主页视频时异常:{e}\n")
 
 
 if __name__ == '__main__':
-    DyAuthorScheduling.get_follow_videos('author', 'douyin', '定向抓取策略', 'outer', 'prod')
+
+    pass
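
For context on the refactored gating flow above: get_videoList() now stops paging once a video is older than rule_dict["period"]["max"] days, then skips videos that fail download_rule(), hit a filter word, or are already downloaded. Below is a minimal, self-contained sketch of the first two checks; the helper names older_than_period and hits_filter_word are illustrative only, while the real code calls download_rule() and get_config_from_mysql() from common/public.py.

```python
# Hedged sketch of the per-video gating in the new get_videoList().
# older_than_period / hits_filter_word are illustrative helpers, not repo code.
import time


def older_than_period(publish_time_stamp: int, rule_dict: dict) -> bool:
    """True if the video was published more than rule_dict["period"]["max"] days ago."""
    max_days = int(rule_dict.get("period", {}).get("max", 1000))
    age_days = int((int(time.time()) - int(publish_time_stamp)) / (3600 * 24))
    return age_days > max_days


def hits_filter_word(video_title: str, filter_words: list) -> bool:
    """True if the title contains any configured filter word, i.e. the video is skipped."""
    return any(str(word) in video_title for word in filter_words if str(word))


if __name__ == "__main__":
    rule = {"period": {"max": 30}}
    print(older_than_period(int(time.time()) - 40 * 86400, rule))  # True: older than 30 days
    print(hits_filter_word("某广告视频", ["广告"]))  # True: title contains the filter word
```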

douyin/douyin_follow/follow_dy.py  (+4, -6)

@@ -9,10 +9,8 @@ import sys
 import time
 import requests
 from hashlib import md5
-
-from douyin.douyin_recommend import get_xb
-
 sys.path.append(os.getcwd())
+from douyin.douyin_recommend import get_xb
 from common.common import Common
 from common.db import MysqlHelper
 from common.feishu import Feishu
@@ -97,7 +95,7 @@ class DyFollow(object):
         else:
             title1 = title_split1[-1]
 
-        title_split2 = title1.split(" #")
+        title_split2 = title1.split(" #")
         if title_split2[0] != "":
             title2 = title_split2[0]
         else:
@@ -110,7 +108,7 @@ class DyFollow(object):
             title3 = title_split3[-1]
 
         video_title = title3.strip().split('#')[0].replace("\n", "") \
-                          .replace("/", "").replace("抖音", "").replace(" ", "") \
+                          .replace("/", "").replace("抖音", "").replace(" ", "") \
                           .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
                           .replace(".", "。").replace("\\", "") \
                           .replace(":", "").replace("*", "").replace("?", "") \
@@ -355,4 +353,4 @@ class DyFollow(object):
 
 
 if __name__ == '__main__':
-    DyFollow.get_follow_videos('author', 'douyin', '定向抓取策略', 'outer', 'prod', 'aliyun')
+    DyFollow.get_follow_videos('author', 'douyin', '定向抓取策略', 'outer', 'prod', 'aliyun')

douyin/douyin_main/run_douyin_author_scheduling.py  (+40, -0)

@@ -1,3 +1,43 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
 # @Time: 2023/5/26
+import argparse
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.public import task_fun
+from common.scheduling_db import MysqlHelper
+from douyin.douyin_author.douyin_author_scheduling import DouyinauthorScheduling
+
+
+def main(log_type, crawler, task, env):
+    task_dict = task_fun(task)['task_dict']
+    rule_dict = task_fun(task)['rule_dict']
+    task_id = task_dict['task_id']
+    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
+    # Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
+    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]}\n')
+    DouyinauthorScheduling.get_author_videos(log_type=log_type,
+                                            crawler=crawler,
+                                            rule_dict=rule_dict,
+                                            user_list=user_list,
+                                            env=env)
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info('抓取完一轮\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
+    parser.add_argument('--log_type', default='recommend')  ## 添加参数,注明参数类型
+    parser.add_argument('--crawler', default='kuaishou')  ## 添加参数
+    parser.add_argument('--task')  ## 添加参数
+    parser.add_argument('--env', default='prod')  ## 添加参数
+    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         task=args.task,
+         env=args.env)
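
The new runner selects users from crawler_user_v3 by task_id and hands them to DouyinauthorScheduling.get_author_videos(). A hedged usage sketch follows; the --task payload shape is an assumption (task_fun() in common/public.py is not shown here), and only the argument names come from the script above.

```python
# Hedged usage sketch for run_douyin_author_scheduling.py; requires the repo's
# MySQL/Feishu configuration. The task payload keys below are assumptions.
import json

from douyin.douyin_main.run_douyin_author_scheduling import main

task = json.dumps({
    "task_dict": {"task_id": 1, "task_name": "抖音定向抓取"},  # assumed keys read by main()
    "rule_dict": {"period": {"min": 0, "max": 30}},            # assumed rule shape
}, ensure_ascii=False)

# Roughly equivalent CLI call:
#   python douyin/douyin_main/run_douyin_author_scheduling.py \
#       --log_type=author --crawler=douyin --task='<task json>' --env=dev
main(log_type="author", crawler="douyin", task=task, env="dev")
```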

main/process.sh  (+14, -14)

@@ -83,20 +83,20 @@ else
   echo "$(date "+%Y-%m-%d %H:%M:%S") 小年糕小时榜爬虫策略 进程状态正常" >> ${log_path}
 fi
 
-# 小年糕播放量榜爬虫策略
-echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 播放量榜爬虫策略 进程状态" >> ${log_path}
-ps -ef | grep "run_xiaoniangao_play.py" | grep -v "grep"
-if [ "$?" -eq 1 ];then
-  echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
-  if [ ${env} = "dev" ];then
-    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_play.py --log_type="play" --crawler="xiaoniangao" --env="dev" xiaoniangao/logs/nohup-play.log
-  else
-    cd ${piaoquan_crawler_dir} && /usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_play.py --log_type="play" --crawler="xiaoniangao" --env="prod" xiaoniangao/logs/nohup-play.log
-  fi
-  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
-else
-  echo "$(date "+%Y-%m-%d %H:%M:%S") 播放量榜爬虫策略 进程状态正常" >> ${log_path}
-fi
+## 小年糕播放量榜爬虫策略
+#echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 播放量榜爬虫策略 进程状态" >> ${log_path}
+#ps -ef | grep "run_xiaoniangao_play.py" | grep -v "grep"
+#if [ "$?" -eq 1 ];then
+#  echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
+#  if [ ${env} = "dev" ];then
+#    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_play.py --log_type="play" --crawler="xiaoniangao" --env="dev" xiaoniangao/logs/nohup-play.log
+#  else
+#    cd ${piaoquan_crawler_dir} && /usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_play.py --log_type="play" --crawler="xiaoniangao" --env="prod" xiaoniangao/logs/nohup-play.log
+#  fi
+#  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
+#else
+#  echo "$(date "+%Y-%m-%d %H:%M:%S") 播放量榜爬虫策略 进程状态正常" >> ${log_path}
+#fi
 
 
 ## 快手定向爬虫策略

xiaoniangao/xiaoniangao_main/run_xiaoniangao_play_scheduling.py  (+21, -18)

@@ -2,44 +2,47 @@
 # @Author: wangkun
 # @Time: 2023/4/21
 import argparse
-import datetime
 import os
+import random
 import sys
-
 sys.path.append(os.getcwd())
 from common.common import Common
 from common.public import task_fun
-
-from xiaoniangao.xiaoniangao_play.xiaoniangao_play_scheduling import XiaoniangaoPlayScheduling
+from common.scheduling_db import MysqlHelper
+from xiaoniangao.xiaoniangao_play.xiaoniangao_play_scheduling import XiaoniangaoplayScheduling
 
 
-def main(log_type, crawler, task, oss_endpoint, env):
+def main(log_type, crawler, task, env):
     task_dict = task_fun(task)['task_dict']
     rule_dict = task_fun(task)['rule_dict']
-    Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
-    Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
-    # 获取符合规则的视频,写入小时级数据_feeds
-    XiaoniangaoPlayScheduling.get_videoList(log_type=log_type,
+    task_id = task_dict['task_id']
+    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+    our_uid_list = []
+    for user in user_list:
+        our_uid_list.append(user["uid"])
+    our_uid = random.choice(our_uid_list)
+    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
+    # Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
+    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]}\n')
+    XiaoniangaoplayScheduling.get_videoList(log_type=log_type,
                                             crawler=crawler,
                                             rule_dict=rule_dict,
-                                            strategy="小时榜爬虫策略",
-                                            oss_endpoint=oss_endpoint,
+                                            our_uid=our_uid,
                                             env=env)
-
     Common.del_logs(log_type, crawler)
-    Common.logger(log_type, crawler).info('抓取完一轮\n')
+    Common.logger(log_type, crawler).info('抓取任务结束\n')
 
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler')  ## 添加参数
+    parser.add_argument('--log_type', default='recommend')  ## 添加参数,注明参数类型
+    parser.add_argument('--crawler', default='kuaishou')  ## 添加参数
     parser.add_argument('--task')  ## 添加参数
-    parser.add_argument('--oss_endpoint')  ## 添加参数
-    parser.add_argument('--env')  ## 添加参数
+    parser.add_argument('--env', default='prod')  ## 添加参数
     args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
     main(log_type=args.log_type,
          crawler=args.crawler,
          task=args.task,
-         oss_endpoint=args.oss_endpoint,
          env=args.env)

xiaoniangao/xiaoniangao_play/xiaoniangao_play_scheduling.py  (+198, -301)

@@ -7,6 +7,7 @@ import random
 import shutil
 import sys
 import time
+from hashlib import md5
 import requests
 import urllib3
 sys.path.append(os.getcwd())
@@ -14,339 +15,235 @@ from common.common import Common
 from common.feishu import Feishu
 from common.publish import Publish
 from common.scheduling_db import MysqlHelper
-from common.public import get_config_from_mysql
+from common.public import get_config_from_mysql, download_rule
 proxies = {"http": None, "https": None}
 
 
-class XiaoniangaoPlayScheduling:
+class XiaoniangaoplayScheduling:
     platform = "小年糕"
     words = "abcdefghijklmnopqrstuvwxyz0123456789"
-    uid = f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}"""
-    token = "".join(random.sample(words, 32))
     uid_token_dict = {
-        "uid": uid,
-        "token": token
-    }
-
-    # 生成 uid、token
-    @classmethod
-    def get_uid_token(cls):
-        words = "abcdefghijklmnopqrstuvwxyz0123456789"
-        uid = f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}"""
-        token = "".join(random.sample(words, 32))
-        uid_token_dict = {
-            "uid": uid,
-            "token": token
-        }
-        return uid_token_dict
-
-    # 基础门槛规则
-    @staticmethod
-    def download_rule(log_type, crawler, video_dict, rule_dict):
-        """
-        下载视频的基本规则
-        :param log_type: 日志
-        :param crawler: 哪款爬虫
-        :param video_dict: 视频信息,字典格式
-        :param rule_dict: 规则信息,字典格式
-        :return: 满足规则,返回 True;反之,返回 False
-        """
-        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
-        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
-        if rule_play_cnt_max == 0:
-            rule_play_cnt_max = 100000000
-
-        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
-        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
-        if rule_duration_max == 0:
-            rule_duration_max = 100000000
-
-        rule_period_min = rule_dict.get('period', {}).get('min', 0)
-        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
-        # if rule_period_max == 0:
-        #     rule_period_max = 100000000
-
-        rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
-        rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
-        if rule_fans_cnt_max == 0:
-            rule_fans_cnt_max = 100000000
-
-        rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
-        rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
-        if rule_videos_cnt_max == 0:
-            rule_videos_cnt_max = 100000000
-
-        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
-        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
-        if rule_like_cnt_max == 0:
-            rule_like_cnt_max = 100000000
-
-        rule_width_min = rule_dict.get('width', {}).get('min', 0)
-        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
-        if rule_width_max == 0:
-            rule_width_max = 100000000
-
-        rule_height_min = rule_dict.get('height', {}).get('min', 0)
-        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
-        if rule_height_max == 0:
-            rule_height_max = 100000000
-
-        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
-        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
-        if rule_share_cnt_max == 0:
-            rule_share_cnt_max = 100000000
-
-        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
-        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
-        if rule_comment_cnt_max == 0:
-            rule_comment_cnt_max = 100000000
-
-        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
-        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 0)
-        if rule_publish_time_max == 0:
-            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
-
-        Common.logger(log_type, crawler).info(
-            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
-
-        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
-                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
-                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min) \
-                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
-                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
-                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
-                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
-                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min)\
-                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min):
-            return True
-        else:
-            return False
+        "uid": f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}""",
+        "token": "".join(random.sample(words, 32))}
 
     # 获取列表
     @classmethod
-    def get_videoList(cls, log_type, crawler, rule_dict, strategy, oss_endpoint, env):
+    def get_videoList(cls, log_type, crawler, rule_dict, our_uid, env):
         uid_token_dict = cls.uid_token_dict
-        url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
-        headers = {
-            "x-b3-traceid": '1dc0a6d0929a2b',
-            "X-Token-Id": 'ae99a4953804085ebb0ae36fa138031d-1146052582',
-            "uid": uid_token_dict['uid'],
-            "content-type": "application/json",
-            "Accept-Encoding": "gzip,compress,br,deflate",
-            "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
-                          ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
-                          'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
-            "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/620/page-frame.html'
-        }
-        data = {
-            "log_params": {
-                "page": "discover_rec",
-                "common": {
-                    "brand": "iPhone",
-                    "device": "iPhone 11",
-                    "os": "iOS 14.7.1",
-                    "weixinver": "8.0.20",
-                    "srcver": "2.24.2",
-                    "net": "wifi",
-                    "scene": 1089
+        for page in range(1, 101):
+            try:
+                Common.logger(log_type, crawler).info(f"正在抓取第{page}页")
+                url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
+                headers = {
+                    "x-b3-traceid": '1dc0a6d0929a2b',
+                    "X-Token-Id": 'ae99a4953804085ebb0ae36fa138031d-1146052582',
+                    "uid": uid_token_dict['uid'],
+                    "content-type": "application/json",
+                    "Accept-Encoding": "gzip,compress,br,deflate",
+                    "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
+                                  ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
+                                  'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
+                    "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/620/page-frame.html'
                 }
-            },
-            "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!750x500r/crop/750x500/interlace/1/format/jpg",
-            "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!80x80r/crop/80x80/interlace/1/format/jpg",
-            "share_width": 625,
-            "share_height": 500,
-            "ext": {
-                "fmid": 0,
-                "items": {}
-            },
-            "app": "xng",
-            "rec_scene": "discover_rec",
-            "log_common_params": {
-                "e": [{
-                    "data": {
-                        "page": "discoverIndexPage",
-                        "topic": "recommend"
+                data = {
+                    "log_params": {
+                        "page": "discover_rec",
+                        "common": {
+                            "brand": "iPhone",
+                            "device": "iPhone 11",
+                            "os": "iOS 14.7.1",
+                            "weixinver": "8.0.20",
+                            "srcver": "2.24.2",
+                            "net": "wifi",
+                            "scene": 1089
+                        }
                     },
-                    "ab": {}
-                }],
-                "ext": {
-                    "brand": "iPhone",
-                    "device": "iPhone 11",
-                    "os": "iOS 14.7.1",
-                    "weixinver": "8.0.20",
-                    "srcver": "2.24.3",
-                    "net": "wifi",
-                    "scene": "1089"
-                },
-                "pj": "1",
-                "pf": "2",
-                "session_id": "7bcce313-b57d-4305-8d14-6ebd9a1bad29"
-            },
-            "refresh": False,
-            # "token": cls.play_token,
-            "token": uid_token_dict['token'],
-            # "uid": cls.play_uid,
-            "uid": uid_token_dict['uid'],
-            "proj": "ma",
-            "wx_ver": "8.0.20",
-            "code_ver": "3.62.0"
-        }
-        urllib3.disable_warnings()
-        r = requests.post(url=url, headers=headers, json=data, proxies=proxies, verify=False)
-        if "data" not in r.text or r.status_code != 200:
-            Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}")
-            return
-        elif "data" not in r.json():
-            Common.logger(log_type, crawler).info(f"get_videoList:{r.json()}")
-            return
-        elif "list" not in r.json()["data"]:
-            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']}")
-            return
-        elif len(r.json()["data"]["list"]) == 0:
-            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']['list']}")
-            return
-        else:
-            # 视频列表数据
-            feeds = r.json()["data"]["list"]
-            for i in range(len(feeds)):
-                # 标题,表情随机加在片头、片尾,或替代句子中间的标点符号
-                xiaoniangao_title = feeds[i].get("title", "").strip().replace("\n", "") \
-                    .replace("/", "").replace("\r", "").replace("#", "") \
-                    .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
-                    .replace(":", "").replace("*", "").replace("?", "") \
-                    .replace("?", "").replace('"', "").replace("<", "") \
-                    .replace(">", "").replace("|", "").replace(" ", "") \
-                    .replace('"', '').replace("'", '')
-                # 随机取一个表情/符号
-                emoji = random.choice(get_config_from_mysql(log_type, crawler, env, "emoji"))
-                # 生成最终标题,标题list[表情+title, title+表情]随机取一个
-                video_title = random.choice([f"{emoji}{xiaoniangao_title}", f"{xiaoniangao_title}{emoji}"])
-
-                # 视频 ID
-                video_id = feeds[i].get("vid", "")
-                # 播放量
-                play_cnt = feeds[i].get("play_pv", 0)
-                # 点赞量
-                like_cnt = feeds[i].get("favor", {}).get("total", 0)
-                # 评论数
-                comment_cnt = feeds[i].get("comment_count", 0)
-                # 分享量
-                share_cnt = feeds[i].get("share", 0)
-                # 时长
-                duration = int(feeds[i].get("du", 0) / 1000)
-                # 宽和高
-                video_width = int(feeds[i].get("w", 0))
-                video_height = int(feeds[i].get("h", 0))
-                # 发布时间
-                publish_time_stamp = int(int(feeds[i].get("t", 0)) / 1000)
-                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                # 用户名 / 头像
-                user_name = feeds[i].get("user", {}).get("nick", "").strip().replace("\n", "") \
-                    .replace("/", "").replace("快手", "").replace(" ", "") \
-                    .replace(" ", "").replace("&NBSP", "").replace("\r", "")
-                avatar_url = feeds[i].get("user", {}).get("hurl", "")
-                # 用户 ID
-                profile_id = feeds[i]["id"]
-                # 用户 mid
-                profile_mid = feeds[i]["user"]["mid"]
-                # 视频封面
-                cover_url = feeds[i].get("url", "")
-                # 视频播放地址
-                video_url = feeds[i].get("v_url", "")
+                    "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!750x500r/crop/750x500/interlace/1/format/jpg",
+                    "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!80x80r/crop/80x80/interlace/1/format/jpg",
+                    "share_width": 625,
+                    "share_height": 500,
+                    "ext": {
+                        "fmid": 0,
+                        "items": {}
+                    },
+                    "app": "xng",
+                    "rec_scene": "discover_rec",
+                    "log_common_params": {
+                        "e": [{
+                            "data": {
+                                "page": "discoverIndexPage",
+                                "topic": "recommend"
+                            },
+                            "ab": {}
+                        }],
+                        "ext": {
+                            "brand": "iPhone",
+                            "device": "iPhone 11",
+                            "os": "iOS 14.7.1",
+                            "weixinver": "8.0.20",
+                            "srcver": "2.24.3",
+                            "net": "wifi",
+                            "scene": "1089"
+                        },
+                        "pj": "1",
+                        "pf": "2",
+                        "session_id": "7bcce313-b57d-4305-8d14-6ebd9a1bad29"
+                    },
+                    "refresh": False,
+                    # "token": cls.play_token,
+                    "token": uid_token_dict['token'],
+                    # "uid": cls.play_uid,
+                    "uid": uid_token_dict['uid'],
+                    "proj": "ma",
+                    "wx_ver": "8.0.20",
+                    "code_ver": "3.62.0"
+                }
+                urllib3.disable_warnings()
+                r = requests.post(url=url, headers=headers, json=data, proxies=proxies, verify=False)
+                if "data" not in r.text or r.status_code != 200:
+                    Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                    return
+                elif "data" not in r.json():
+                    Common.logger(log_type, crawler).info(f"get_videoList:{r.json()}\n")
+                    return
+                elif "list" not in r.json()["data"]:
+                    Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']}\n")
+                    return
+                elif len(r.json()["data"]["list"]) == 0:
+                    Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']['list']}\n")
+                    return
+                else:
+                    # 视频列表数据
+                    feeds = r.json()["data"]["list"]
+                    for i in range(len(feeds)):
+                        try:
+                            # 标题,表情随机加在片头、片尾,或替代句子中间的标点符号
+                            xiaoniangao_title = feeds[i].get("title", "").strip().replace("\n", "") \
+                                .replace("/", "").replace("\r", "").replace("#", "") \
+                                .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
+                                .replace(":", "").replace("*", "").replace("?", "") \
+                                .replace("?", "").replace('"', "").replace("<", "") \
+                                .replace(">", "").replace("|", "").replace(" ", "") \
+                                .replace('"', '').replace("'", '')
+                            # 随机取一个表情/符号
+                            emoji = random.choice(get_config_from_mysql(log_type, crawler, env, "emoji"))
+                            # 生成最终标题,标题list[表情+title, title+表情]随机取一个
+                            video_title = random.choice([f"{emoji}{xiaoniangao_title}", f"{xiaoniangao_title}{emoji}"])
+                            # 发布时间
+                            publish_time_stamp = int(int(feeds[i].get("t", 0)) / 1000)
+                            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                            # 用户名 / 头像
+                            user_name = feeds[i].get("user", {}).get("nick", "").strip().replace("\n", "") \
+                                .replace("/", "").replace("快手", "").replace(" ", "") \
+                                .replace(" ", "").replace("&NBSP", "").replace("\r", "")
 
-                video_dict = {
-                    "video_title": video_title,
-                    "video_id": video_id,
-                    "duration": duration,
-                    "play_cnt": play_cnt,
-                    "like_cnt": like_cnt,
-                    "comment_cnt": comment_cnt,
-                    "share_cnt": share_cnt,
-                    "user_name": user_name,
-                    "publish_time_stamp": publish_time_stamp,
-                    "publish_time_str": publish_time_str,
-                    "video_width": video_width,
-                    "video_height": video_height,
-                    "avatar_url": avatar_url,
-                    "profile_id": profile_id,
-                    "profile_mid": profile_mid,
-                    "cover_url": cover_url,
-                    "video_url": video_url,
-                    "session": f"xiaoniangao-play-{int(time.time())}"
+                            video_dict = {
+                                "video_title": video_title,
+                                "video_id": feeds[i].get("vid", ""),
+                                "duration": int(feeds[i].get("du", 0) / 1000),
+                                "play_cnt": feeds[i].get("play_pv", 0),
+                                "like_cnt": feeds[i].get("favor", {}).get("total", 0),
+                                "comment_cnt": feeds[i].get("comment_count", 0),
+                                "share_cnt": feeds[i].get("share", 0),
+                                "user_name": user_name,
+                                "publish_time_stamp": publish_time_stamp,
+                                "publish_time_str": publish_time_str,
+                                "video_width": int(feeds[i].get("w", 0)),
+                                "video_height": int(feeds[i].get("h", 0)),
+                                "avatar_url": feeds[i].get("user", {}).get("hurl", ""),
+                                "profile_id": feeds[i]["id"],
+                                "profile_mid": feeds[i]["user"]["mid"],
+                                "cover_url": feeds[i].get("url", ""),
+                                "video_url": feeds[i].get("v_url", ""),
+                                "session": f"xiaoniangao-play-{int(time.time())}"
 
-                }
-                for k, v in video_dict.items():
-                    Common.logger(log_type, crawler).info(f"{k}:{v}")
+                            }
+                            for k, v in video_dict.items():
+                                Common.logger(log_type, crawler).info(f"{k}:{v}")
 
-                # filter out invalid videos
-                if video_title == "" or video_id == "" or video_url == "":
-                    Common.logger(log_type, crawler).warning("无效视频\n")
-                # basic crawl-rule filtering
-                elif cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
-                    Common.logger(log_type, crawler).info("不满足抓取规则\n")
-                elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
-                    Common.logger(log_type, crawler).info('视频已下载\n')
-                # filter sensitive words
-                elif any(str(word) if str(word) in video_title else False for word in get_config_from_mysql(log_type, crawler, env, "filter", action="")) is True:
-                    Common.logger(log_type, crawler).info("视频已中过滤词\n")
-                else:
-                    cls.download_publish(log_type=log_type,
-                                         crawler=crawler,
-                                         video_dict=video_dict,
-                                         rule_dict=rule_dict,
-                                         strategy=strategy,
-                                         oss_endpoint=oss_endpoint,
-                                         env=env)
+                            # filter out invalid videos
+                            if video_title == "" or video_dict["video_id"] == "" or video_dict["video_url"] == "":
+                                Common.logger(log_type, crawler).warning("无效视频\n")
+                            # basic crawl-rule filtering
+                            elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                                Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                            elif any(str(word) if str(word) in video_dict["video_title"] else False
+                                     for word in get_config_from_mysql(log_type=log_type,
+                                                                       source=crawler,
+                                                                       env=env,
+                                                                       text="filter",
+                                                                       action="")) is True:
+                                Common.logger(log_type, crawler).info('已中过滤词\n')
+                            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                                Common.logger(log_type, crawler).info('视频已下载\n')
+                            else:
+                                cls.download_publish(log_type=log_type,
+                                                     crawler=crawler,
+                                                     video_dict=video_dict,
+                                                     rule_dict=rule_dict,
+                                                     our_uid=our_uid,
+                                                     env=env)
+                        except Exception as e:
+                            Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"抓取第{page}页时异常:{e}\n")
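
The filter-word test above, any(str(word) if str(word) in ... else False for word in ...), works but reads awkwardly; for non-empty filter words it is just a membership test. A minimal sketch of an equivalent check (the helper name hits_filter_word is illustrative, not part of the repo):

from typing import Iterable

def hits_filter_word(title: str, filter_words: Iterable) -> bool:
    # Same effect as the generator expression above; empty filter words are
    # skipped so that "" can never match every title.
    return any(word and str(word) in title for word in filter_words)

With this helper the branch would read elif hits_filter_word(video_dict["video_title"], get_config_from_mysql(log_type=log_type, source=crawler, env=env, text="filter", action="")): with the same log message.
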
 
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
-        sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
         repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)
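
repeat_video splices platform and video_id directly into the SQL text, so an id containing a quote would break the statement. A hedged sketch of the same de-duplication check with a parameterized query via pymysql; whether the project's MysqlHelper supports parameter binding is not visible in this commit, so the connection handling here is only illustrative:

import pymysql

def is_duplicate(conn: pymysql.connections.Connection, platform: str, video_id: str) -> bool:
    # Let the driver escape the values instead of building the SQL string by hand.
    sql = "select count(*) from crawler_video where platform=%s and out_video_id=%s"
    with conn.cursor() as cursor:
        cursor.execute(sql, (platform, video_id))
        (count,) = cursor.fetchone()
    return count > 0
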
 
     @classmethod
-    def download_publish(cls, log_type, crawler, video_dict, rule_dict, strategy, oss_endpoint, env):
-        # download the cover image
-        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_dict["video_title"], url=video_dict["cover_url"])
+    def download_publish(cls, log_type, crawler, video_dict, rule_dict, our_uid, env):
         # download the video
         Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # delete the video folder
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+        except FileNotFoundError:
+            # delete the video folder
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
+            return
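
The try/except above discards the per-video folder when video.mp4 is missing or empty, so nothing half-downloaded reaches the upload step. The same guard as a small reusable helper; the name valid_download and the pathlib usage are illustrative only:

import shutil
from pathlib import Path

def valid_download(video_dir: str) -> bool:
    # True only when video.mp4 exists and is non-empty; otherwise the whole
    # per-video folder is removed, matching the behaviour above.
    video_file = Path(video_dir) / "video.mp4"
    if video_file.exists() and video_file.stat().st_size > 0:
        return True
    shutil.rmtree(video_dir, ignore_errors=True)
    return False
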
+        # download the cover image
+        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_dict["video_title"], url=video_dict["cover_url"])
         # save video info to "./videos/{download_video_title}/info.txt"
         Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
 
         # upload the video
         Common.logger(log_type, crawler).info("开始上传视频...")
-        our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                  crawler=crawler,
-                                                  strategy=strategy,
-                                                  our_uid="play",
-                                                  env=env,
-                                                  oss_endpoint=oss_endpoint)
         if env == "dev":
+            oss_endpoint = "out"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy="播放量抓取策略",
+                                                      our_uid=our_uid,
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
             our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
         else:
+            oss_endpoint = "inner"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy="播放量抓取策略",
+                                                      our_uid=our_uid,
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
+
             our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-        Common.logger(log_type, crawler).info("视频上传完成")
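
The dev and prod branches above differ only in the OSS endpoint and the admin domain; the Publish.upload_and_publish call itself is duplicated verbatim. A sketch of the same flow with the duplication factored out, reusing the variable names from the method above (so it is not a standalone snippet):

# Pick the environment-specific pieces once, then publish a single time.
oss_endpoint = "out" if env == "dev" else "inner"
admin_host = "testadmin.piaoquantv.com" if env == "dev" else "admin.piaoquantv.com"
our_video_id = Publish.upload_and_publish(log_type=log_type,
                                          crawler=crawler,
                                          strategy="播放量抓取策略",
                                          our_uid=our_uid,
                                          env=env,
                                          oss_endpoint=oss_endpoint)
our_video_link = f"https://{admin_host}/cms/post-detail/{our_video_id}/info"
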
 
         if our_video_id is None:
-            # delete the video folder
-            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-            return
+            try:
+                # delete the video folder
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
 
         insert_sql = f""" insert into crawler_video(video_id,
                                                     out_user_id,
@@ -365,7 +262,7 @@ class XiaoniangaoPlayScheduling:
                                                     values({our_video_id},
                                                     "{video_dict['profile_id']}",
                                                     "{cls.platform}",
-                                                    "播放量榜爬虫策略",
+                                                    "播放量抓取策略",
                                                     "{video_dict['video_id']}",
                                                     "{video_dict['video_title']}",
                                                     "{video_dict['cover_url']}",
@@ -378,7 +275,7 @@ class XiaoniangaoPlayScheduling:
                                                     {int(video_dict['video_height'])}) """
         Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
         MysqlHelper.update_values(log_type, crawler, insert_sql, env)
-        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
 
         # write the video row to Feishu
         Feishu.insert_columns(log_type, crawler, "c85k1C", "ROWS", 1, 2)
@@ -402,7 +299,7 @@ class XiaoniangaoPlayScheduling:
                    str(video_dict['avatar_url']),
                    str(video_dict['cover_url']),
                    str(video_dict['video_url'])]]
-        time.sleep(1)
+        time.sleep(0.5)
         Feishu.update_values(log_type, crawler, "c85k1C", "F2:Z2", values)
         Common.logger(log_type, crawler).info('视频信息写入飞书成功\n')
 

+ 2 - 2
xigua/xigua_author/xigua_author_scheduling.py

@@ -656,7 +656,7 @@ class XiguaauthorScheduling:
             oss_endpoint = "out"
             our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                       crawler=crawler,
-                                                      strategy="推荐抓取策略",
+                                                      strategy="定向抓取策略",
                                                       our_uid=user_dict["uid"],
                                                       env=env,
                                                       oss_endpoint=oss_endpoint)
@@ -665,7 +665,7 @@ class XiguaauthorScheduling:
             oss_endpoint = "inner"
             our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                       crawler=crawler,
-                                                      strategy="推荐抓取策略",
+                                                      strategy="定向抓取策略",
                                                       our_uid=user_dict["uid"],
                                                       env=env,
                                                       oss_endpoint=oss_endpoint)