@@ -0,0 +1,328 @@
+# -*- coding: utf-8 -*-
+# @Author: lierqiang
+# @Time: 2023/4/12
+import json
+import os
+import random
+import shutil
+import sys
+import time
+import requests
+from hashlib import md5
+
+from douyin.douyin_recommend import get_xb
+
+sys.path.append(os.getcwd())
+from common.common import Common
+# from common.db import MysqlHelper
+from common.scheduling_db import MysqlHelper
+from common.feishu import Feishu
+from common.publish import Publish
+from common.public import random_title, get_user_from_mysql, get_config_from_mysql
+from common.userAgent import get_random_user_agent
+
+
+class DyAuthorScheduling(object):
+    platform = "抖音"
+    tag = "抖音定向爬虫策略"
+
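+    # Download rule: a video qualifies only if its like count and duration
+    # meet the task minimums and at least one of width/height does too.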
+    @classmethod
+    def download_rule(cls, video_info_dict, rule_dict):
+        return (video_info_dict['like_cnt'] >= rule_dict['like_cnt']['min']
+                and video_info_dict['duration'] >= rule_dict['duration']['min']
+                and (video_info_dict['video_width'] >= rule_dict['width']['min']
+                     or video_info_dict['video_height'] >= rule_dict['height']['min']))
+
+    # Filter-word lexicon
+    @classmethod
+    def filter_words(cls, log_type, crawler):
+        try:
+            while True:
+                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, '6BS2RR')
+                if filter_words_sheet is None:
+                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
+                    time.sleep(10)
+                    continue
+                filter_words_list = []
+                for x in filter_words_sheet:
+                    for y in x:
+                        if y is not None:
+                            filter_words_list.append(y)
+                return filter_words_list
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')
+
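+    # Normalize a raw Douyin caption into a usable title: keep the text
+    # before hashtag/@ fragments, strip illegal filename characters, cap it
+    # at 40 chars, and fall back to a random title when nothing is left.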
+    @classmethod
+    def video_title(cls, log_type, env, crawler, title):
+        title_split1 = title.split(" #")
+        if title_split1[0] != "":
+            title1 = title_split1[0]
+        else:
+            title1 = title_split1[-1]
+
+        title_split2 = title1.split(" #")
+        if title_split2[0] != "":
+            title2 = title_split2[0]
+        else:
+            title2 = title_split2[-1]
+
+        title_split3 = title2.split("@")
+        if title_split3[0] != "":
+            title3 = title_split3[0]
+        else:
+            title3 = title_split3[-1]
+
+        video_title = title3.strip().split('#')[0].replace("\n", "") \
+                          .replace("/", "").replace("抖音", "").replace(" ", "") \
+                          .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
+                          .replace(".", "。").replace("\\", "") \
+                          .replace(":", "").replace("*", "").replace("?", "") \
+                          .replace("?", "").replace('"', "").replace("<", "") \
+                          .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
+        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
+            return random_title(log_type, crawler, env, text='title')
+        else:
+            return video_title
+
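+    # Fetch the author's posted-video list from the Douyin web API. Note:
+    # max_cursor is never advanced, so only the first page (count=10) is
+    # fetched per run.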
+    @classmethod
+    def get_videoList(cls, log_type, crawler, strategy, task, our_uid, out_uid, oss_endpoint, env):
+        try:
+            max_cursor = ''
+            url = "https://www.douyin.com/aweme/v1/web/aweme/post/?device_platform=webapp&aid=6383&channel=channel_pc_web&sec_user_id={sec_user_id}&max_cursor={max_cursor}&show_live_replay_strategy=1&count=10&publish_video_strategy_type=2&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1440&screen_height=900&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=112.0.0.0&browser_online=true&engine_name=Blink&engine_version=112.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=50".format(
+                sec_user_id=out_uid, max_cursor=max_cursor)
+            headers = {
+                'authority': 'www.douyin.com',
+                'accept': 'application/json, text/plain, */*',
+                'accept-language': 'zh-CN,zh;q=0.9',
+                # 'cookie': '__ac_nonce=06437a18000f23ad954f0; __ac_signature=_02B4Z6wo00f01Sb71TAAAIDCsi2OPpjonN0m29GAAC2M85; s_v_web_id=verify_lgeqr3uq_3aDaqQXf_juHS_40Yi_BE8b_tI8FCILZQXPK; _tea_utm_cache_2018=undefined; ttwid=1%7Cq_IBs6hbBUOIEcRR1gxtgY6GiTbTE3U1XhJNLL_9BZA%7C1681367431%7Cf77b36ae4721884fec1c3fa9d6a08c29e308236ae13df58d1be3b0d1f82f8668; strategyABtestKey=%221681367433.454%22; passport_csrf_token=bff0289a5846e58b4b9db6e1f64665f4; passport_csrf_token_default=bff0289a5846e58b4b9db6e1f64665f4; bd_ticket_guard_client_data=eyJiZC10aWNrZXQtZ3VhcmQtdmVyc2lvbiI6MiwiYmQtdGlja2V0LWd1YXJkLWl0ZXJhdGlvbi12ZXJzaW9uIjoxLCJiZC10aWNrZXQtZ3VhcmQtY2xpZW50LWNzciI6Ii0tLS0tQkVHSU4gQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbk1JSUJEVENCdFFJQkFEQW5NUXN3Q1FZRFZRUUdFd0pEVGpFWU1CWUdBMVVFQXd3UFltUmZkR2xqYTJWMFgyZDFcclxuWVhKa01Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRVhzcHJ5TElFT3E4Z2tPc2l5MTdSS1dEcVxyXG5xTXZkWEt5Y1V5NStiL3JpSmJ6VkMwMlYrU1dNaWtZTlNOK29IU2g2WVVTTmdUUjJrZEhvRUxISmxGdU9scUFzXHJcbk1Db0dDU3FHU0liM0RRRUpEakVkTUJzd0dRWURWUjBSQkJJd0VJSU9kM2QzTG1SdmRYbHBiaTVqYjIwd0NnWUlcclxuS29aSXpqMEVBd0lEUndBd1JBSWdIeW9SblFNK0h0Z3ZOU2dFMVpHdGpmYWQxT3BuWHJRdVFSNXNSaDkwakRJQ1xyXG5JRG1tVkthRkN5djBLemtpZ0J0RExaTVJSNndURzRBWUVoNUlWUmlZUU9UVVxyXG4tLS0tLUVORCBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS1cclxuIn0=; msToken=ZPkeAqCnLbjDWCkWcWf36ZZIoZTl07X33ca0xcNGk3tZPoMvgx-lo28oNb5JhbCKsXLgLNIoojAbocBrjdAv54Hwf-Tk3_yAjLW7WJxxSa0=; ttcid=54b98e03a03e43e09211ee28db90fdd126; home_can_add_dy_2_desktop=%221%22; msToken=nqMSwn8xJAXLZk2AikdOeJ6P3JvoCsbYjHROoR55KXMDvWs8auYCMpZFGQSClHhitgB0l_vwg8m0-lE-aWQovfN7Ga1QupM3SpdNktiamFRiHMdJExREc9_uxS1ln8E=; tt_scid=DnO5GFg1oLONpPMFuFeL-OveKBn43mRynRVTvHsh1KUQm283ocN6JX6qPKDDrFHbfdf1; download_guide=%222%2F20230413%22; msToken=b9haS5RjLaFgqVDoGp5xSqc8B4kl-miQB5Nku0BSIvHVutKT81Nzk_pPb0wm7xYlAp_nz1gytQng5gYeIRNxcMgZJ_MB7lhejt_093miXlHtvqAaxL0FNg==',
+                'referer': 'https://www.douyin.com/user/{}?showTab=post'.format(out_uid),
+                'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
+                'sec-ch-ua-mobile': '?0',
+                'sec-ch-ua-platform': '"macOS"',
+                'sec-fetch-dest': 'empty',
+                'sec-fetch-mode': 'cors',
+                'sec-fetch-site': 'same-origin',
+                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
+            }
+            # Sign the request with X-Bogus; bail out before appending if signing failed
+            x_bogus = get_xb(url, headers['user-agent'])
+            if not x_bogus:
+                return
+            url = url + '&X-Bogus={}'.format(x_bogus)
+            res = requests.get(url=url, headers=headers, data={}, proxies=Common.tunnel_proxies(), timeout=10).json()
+            aweme_list = res.get('aweme_list', [])
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"获取抖音作者:{out_uid},视频列表失败:{e}")
+            return
+        if not aweme_list:
+            Common.logger(log_type, crawler).warning("抖音作者没有获取到更多数据")
+            return
+        for info in aweme_list:
+            # Skip ads and items without a publish time
+            if info.get('is_ads'):
+                continue
+            publish_time = info.get('create_time')
+            if not publish_time:
+                continue
+            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time))
+            publish_day = int((int(time.time()) - publish_time) / (3600 * 24))
+
+            video_title = cls.video_title(log_type, env, crawler, info['desc'])
+            if not video_title:
+                video_title = random_title(log_type, crawler, env, text='title')
+
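+            # Assemble the normalized video record consumed by download_publish:
+            # stats, dimensions, publish time, author info and media URLs.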
+            video_dict = {'video_title': video_title,
+                          'video_id': info['aweme_id'],
+                          'play_cnt': info['statistics']['play_count'],
+                          'comment_cnt': info['statistics']['comment_count'],
+                          'like_cnt': info['statistics']['digg_count'],
+                          'share_cnt': info['statistics']['share_count'],
+                          'video_width': info['video']['width'],
+                          'video_height': info['video']['height'],
+                          'duration': round(info['video']['duration'] / 1000),
+                          'publish_time': publish_day,
+                          'publish_time_stamp': publish_time * 1000,
+                          'publish_time_str': publish_time_str,
+                          'user_name': info['author']['nickname'],
+                          'user_id': info['author_user_id'],
+                          'user_sec_id': info['author']['sec_uid'],
+                          'avatar_url': info['author']['avatar_thumb']['url_list'][0],
+                          'cover_url': info['video']['origin_cover']['url_list'][0].replace('\u0026', '&'),
+                          'video_url': info['video']['play_addr']['url_list'][0],
+                          'session': f"douyin{int(time.time())}"
+                          }
+
+            for k, v in video_dict.items():
+                Common.logger(log_type, crawler).info(f"{k}:{v}")
+            # log_type, crawler, strategy, task, video_dict, rule_dict, our_uid, oss_endpoint, env
+            cls.download_publish(log_type=log_type,
+                                 crawler=crawler,
+                                 strategy=strategy,
+                                 task=task,
+                                 video_dict=video_dict,
+                                 our_uid=our_uid,
+                                 oss_endpoint=oss_endpoint,
+                                 env=env,
+                                 )
+
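+    # Deduplication: count existing crawler_video rows with the same
+    # platform and out_video_id; a non-zero result means already crawled.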
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
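+    # Per-video pipeline: title filter -> rule check -> dedup -> download,
+    # then upload, Feishu sheet row, and MySQL record.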
+    # Download / upload
+    @classmethod
+    def download_publish(cls, log_type, crawler, strategy, task, video_dict, our_uid, oss_endpoint, env):
+        try:
+            filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
+            for filter_word in filter_words:
+                if filter_word in video_dict['video_title']:
+                    Common.logger(log_type, crawler).info(f"标题已中过滤词:{video_dict['video_title']}\n")
+                    return
+            if cls.download_rule(video_dict, task['rule_dict']) is False:
+                Common.logger(log_type, crawler).info('不满足抓取规则\n')
+            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                Common.logger(log_type, crawler).info('视频已下载\n')
+            else:
+                # Download the video
+                Common.download_method(log_type=log_type, crawler=crawler, text='video',
+                                       title=video_dict['video_title'], url=video_dict['video_url'])
+                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                    # Empty download: delete the video folder
+                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                    return
+                # ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+                # if ffmpeg_dict is None or ffmpeg_dict['size'] == 0:
+                #     Common.logger(log_type, crawler).warning(f"下载的视频无效,已删除\n")
+                #     # Delete the video folder
+                #     shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+                #     return
+                # Download the cover image
+                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
+                                       title=video_dict['video_title'], url=video_dict['cover_url'])
+                # Save the video info to a local txt file
+                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+                # Upload the video
+                Common.logger(log_type, crawler).info("开始上传视频...")
+                our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                          crawler=crawler,
+                                                          strategy=strategy,
+                                                          our_uid=our_uid,
+                                                          env=env,
+                                                          oss_endpoint=oss_endpoint)
+                if env == 'dev':
+                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                else:
+                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                Common.logger(log_type, crawler).info("视频上传完成")
+
+                if our_video_id is None:
+                    # Upload failed: delete the local video folder (the md5-named
+                    # folder created above, not the raw title)
+                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                    return
+
+                # Write the video row to the Feishu sheet
+                upload_time = int(time.time())
+                values = [[
+                    our_video_id,
+                    time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                    strategy,
+                    str(video_dict['video_id']),
+                    video_dict['video_title'],
+                    our_video_link,
+                    # video_dict['gid'],
+                    video_dict['play_cnt'],
+                    video_dict['comment_cnt'],
+                    video_dict['like_cnt'],
+                    video_dict['share_cnt'],
+                    video_dict['duration'],
+                    str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
+                    video_dict['publish_time_str'],
+                    video_dict['user_name'],
+                    video_dict['user_id'],
+                    video_dict['avatar_url'],
+                    video_dict['cover_url'],
+                    video_dict['video_url']
+                ]]
+                # time.sleep(1)
+                Feishu.insert_columns(log_type, 'douyin', "qV9VC0", "ROWS", 1, 2)
+                Feishu.update_values(log_type, 'douyin', "qV9VC0", "A2:Z2", values)
+                Common.logger(log_type, crawler).info("视频已保存至云文档\n")
+
+                # Save the video info to the database
+                insert_sql = f""" insert into crawler_video(video_id,
+                                user_id,
+                                out_user_id,
+                                platform,
+                                strategy,
+                                out_video_id,
+                                video_title,
+                                cover_url,
+                                video_url,
+                                duration,
+                                publish_time,
+                                play_cnt,
+                                comment_cnt,
+                                like_cnt,
+                                share_cnt,
+                                crawler_rule,
+                                width,
+                                height)
+                                values({our_video_id},
+                                {our_uid},
+                                "{video_dict['user_id']}",
+                                "{cls.platform}",
+                                "{strategy}",
+                                "{video_dict['video_id']}",
+                                "{video_dict['video_title']}",
+                                "{video_dict['cover_url']}",
+                                "{video_dict['video_url']}",
+                                {int(video_dict['duration'])},
+                                "{video_dict['publish_time_str']}",
+                                {int(video_dict['play_cnt'])},
+                                {int(video_dict['comment_cnt'])},
+                                {int(video_dict['like_cnt'])},
+                                {int(video_dict['share_cnt'])},
+                                '{json.dumps(task["rule_dict"])}',
+                                {int(video_dict['video_width'])},
+                                {int(video_dict['video_height'])}) """
+                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+                Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'download_publish异常:{e}\n')
+
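+    # Entry point: read the task's user list from MySQL and crawl each
+    # author's homepage videos with the targeted strategy.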
+    @classmethod
+    def get_follow_videos(cls, log_type, crawler, task, oss_endpoint, env):
+        user_list = get_user_from_mysql(log_type, crawler, crawler, env)
+        strategy = '定向抓取策略'
+
+        for user in user_list:
+            spider_link = user["link"]
+            out_uid = spider_link
+            user_name = user["nick_name"]
+            our_uid = user["uid"]
+            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
+            cls.get_videoList(log_type=log_type,
+                              crawler=crawler,
+                              strategy=strategy,
+                              task=task,
+                              our_uid=our_uid,
+                              out_uid=out_uid,
+                              oss_endpoint=oss_endpoint,
+                              env=env)
+
+
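+# Manual smoke test. Note: get_follow_videos expects `task` to be a dict
+# containing 'rule_dict'; the bare strategy string passed below will fail
+# once download_publish dereferences task['rule_dict'].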
+if __name__ == '__main__':
+    DyAuthorScheduling.get_follow_videos('author', 'douyin', '定向抓取策略', 'outer', 'prod')