# -*- coding: utf-8 -*-
# @Author: lierqiang
# @Time: 2023/4/06
import json
import os
import shutil
import sys
import time
from hashlib import md5

import requests

sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.db import MysqlHelper
from common.publish import Publish
from douyin.douyin_recommend import get_xb
from common.public import get_config_from_mysql
from common.public import random_title
from common.userAgent import get_random_user_agent


class DyRecommend(object):
    platform = "抖音"

    @classmethod
    def get_rule(cls, log_type, crawler):
        try:
            while True:
                rule_sheet = Feishu.get_values_batch(log_type, crawler, "a6L9Kb")
                if rule_sheet is None:
                    Common.logger(log_type, crawler).warning("rule_sheet is None! fetching again in 10s")
                    time.sleep(10)
                    continue
                rule_dict = {
                    "video_width": int(rule_sheet[0][2]),
                    "video_height": int(rule_sheet[1][2]),
                    "like_cnt": int(rule_sheet[2][2]),
                    "duration": int(rule_sheet[3][2]),
                    "publish_time": int(rule_sheet[4][2]),
                    "share_cnt": int(rule_sheet[5][2])
                }
                return rule_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")

    # download rules; note: rule_dict['publish_time'] is fetched above but not enforced here
    @classmethod
    def download_rule(cls, video_info_dict, rule_dict):
        return (video_info_dict['like_cnt'] >= rule_dict['like_cnt']
                and video_info_dict['duration'] >= rule_dict['duration']
                and (video_info_dict['video_width'] >= rule_dict['video_width']
                     or video_info_dict['video_height'] >= rule_dict['video_height'])
                and video_info_dict['share_cnt'] >= rule_dict['share_cnt'])

    # filter-word lexicon (Feishu sheet)
    @classmethod
    def filter_words(cls, log_type, crawler):
        try:
            while True:
                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, '6BS2RR')
                if filter_words_sheet is None:
                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet}, retrying in 10s")
                    time.sleep(10)
                    continue
                filter_words_list = []
                for x in filter_words_sheet:
                    for y in x:
                        if y is not None:
                            filter_words_list.append(y)
                return filter_words_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'filter_words exception: {e}\n')

    @classmethod
    def video_title(cls, log_type, crawler, env, title):
        # keep the text before the first " #" (hashtag tail) and "@" (mention tail),
        # falling back to the last segment when the leading one is empty
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]

        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]

        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]

        # scrub characters that break file paths / sheets, then cap at 40 chars
        video_title = title3.strip().replace("\n", "") \
            .replace("/", "").replace("抖音", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace("#", "").replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return random_title(log_type, crawler, env, text='title')
        else:
            return video_title
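
    # Illustrative walk-through of video_title() above (the example input is
    # assumed, not taken from real data): for a desc such as
    #     "今天的风景真美 #风景 #vlog @小助手"
    # the " #" split keeps "今天的风景真美", the "@" split then changes
    # nothing, the character scrub strips nothing further, and the result is
    # truncated to 40 chars. A desc that cleans down to "" / "..." falls back
    # to random_title().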

    @classmethod
    def get_videolist(cls, log_type, crawler, strategy, our_id, oss_endpoint, env, machine):
        rule_dict = cls.get_rule(log_type, crawler)
        for page in range(1, 101):
            try:
                aweme_pc_rec_raw_data = '%7B%22videoPrefer%22:%7B%22fsn%22:%5B%5D,%22like%22:%5B%5D,%22halfMin%22:%5B%227188684310696742200%22,%224380080926896941%22%5D,%22min%22:%5B%5D%7D,%22seo_info%22:%22https:%2F%2Fwww.douyin.com%2F%22,%22is_client%22:false,%22ff_danmaku_status%22:1,%22danmaku_switch_status%22:1%7D'
                f_url = 'https://www.douyin.com/aweme/v1/web/tab/feed/?device_platform=webapp&aid=6383&channel=channel_pc_web&tag_id=&share_aweme_id=&count=10&refresh_index={page}&video_type_select=1&aweme_pc_rec_raw_data={aweme_pc_rec_raw_data}&globalwid=&pull_type=2&min_window=0&ug_source=&creative_id=&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1440&screen_height=900&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=109.0.0.0&browser_online=true&engine_name=Blink&engine_version=109.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=50&webid=7219223873342260736&msToken=Sh6bVLWZUEZ3ruIHq1L3iUXnr1GT5yklyo-XZRO7lNgsFvpYq0C7tcu5Z4Jv9DrMESZ9kGVhwKT4ftFDkBL11ZGPUxth2ToA4M4q-qs2MK9ctR7GhwFkGw=='.format(
                    page=page, aweme_pc_rec_raw_data=aweme_pc_rec_raw_data)
                headers = {
                    'cookie': 'ttwid=1%7CI2Xp275XabSiVJ9GAmfLtqbtqturVSIS2yLbXVkAHnQ%7C1680856567%7Cfd36579475157e2303e36e5fd75cdea4ebad78c20da989be0590305f169242ca; douyin.com; strategyABtestKey=%221680856567.817%22; passport_csrf_token=6a28a7b1e1ad38570cc5ee39deaf587a; passport_csrf_token_default=6a28a7b1e1ad38570cc5ee39deaf587a; s_v_web_id=verify_lg6aloex_WG2LmlFk_kbA6_4tMb_BZlD_iuKxRvsMvrg7; bd_ticket_guard_client_data=eyJiZC10aWNrZXQtZ3VhcmQtdmVyc2lvbiI6MiwiYmQtdGlja2V0LWd1YXJkLWl0ZXJhdGlvbi12ZXJzaW9uIjoxLCJiZC10aWNrZXQtZ3VhcmQtY2xpZW50LWNzciI6Ii0tLS0tQkVHSU4gQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbk1JSUJEakNCdFFJQkFEQW5NUXN3Q1FZRFZRUUdFd0pEVGpFWU1CWUdBMVVFQXd3UFltUmZkR2xqYTJWMFgyZDFcclxuWVhKa01Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRWNFZitKV2tDb3l4OHhLekNnY0hReEVaRFxyXG5hN29maHJhUG9rMkQ5b1RaRGRvbDJuTVhaTis5dGJFclV0cVdUcm81ck4zekFyWTFLaXIzRlRUR2ZQUXRmS0FzXHJcbk1Db0dDU3FHU0liM0RRRUpEakVkTUJzd0dRWURWUjBSQkJJd0VJSU9kM2QzTG1SdmRYbHBiaTVqYjIwd0NnWUlcclxuS29aSXpqMEVBd0lEU0FBd1JRSWhBUFZaSGNFdW5HeGtBZFNmQXJ1MmdWb1RHbFhINkhsa1prRzZNc1pyR2hBL1xyXG5BaUJsV2NpM3h5SDk2UnJlTXpPSy8xVmFJQUNuTWUyU0RodUJIY2ZZaE80OWtRPT1cclxuLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbiJ9; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; ttcid=585ca9110e2345a09b4499dc543ec39959; odin_tt=d5fbc530c34bf8d4ea8cf8030c85a3f70202ee34dad7fb5b9ef7ff299d20e38ed01c852f02f2f3f864f2a45060480717f72083b58cdbc204c988edbf997fda7c; xgplayer_user_id=64984931555; SEARCH_RESULT_LIST_TYPE=%22single%22; pwa2=%222%7C1%22; download_guide=%223%2F20230407%22; __ac_nonce=0642fff310028297fec16; __ac_signature=_02B4Z6wo00f01M3AeJAAAIDDWRYjnPVAtVzN4HwAAFdlurQp2aR1Npvb7RYeaCY4fZs3DkMFlu7-Obn7zsvc34whBQesvTIc2p8nV-1crQtTacaxSYqP8nwNk3WqH.tkNdKQaUMw6sCC2.xZ15; VIDEO_FILTER_MEMO_SELECT=%7B%22expireTime%22%3A1681472712797%2C%22type%22%3A1%7D; home_can_add_dy_2_desktop=%221%22; tt_scid=PNmY2BL-Q9E9lAfdecenRGOzq64XgFOc0CJGFMN.JIE-QJO51S3Zvw56-Z6O12QVf4d9; msToken=uFCSX87jL9sTq6ScVYJucYgv9Hd5gCbTPvKIGBRMgVLuo7Pp9zLRrutBYzq4BmnCr83WnJAwZb8H78lBr3s3eyJLnySxYO5FgClQRXW1i_mAu7fLfBj3gA==; msToken=Sh6bVLWZUEZ3ruIHq1L3iUXnr1GT5yklyo-XZRO7lNgsFvpYq0C7tcu5Z4Jv9DrMESZ9kGVhwKT4ftFDkBL11ZGPUxth2ToA4M4q-qs2MK9ctR7GhwFkGw==',
                    'referer': 'https://www.douyin.com/',
                    'user-agent': get_random_user_agent('pc')
                }
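                # The feed endpoint requires an X-Bogus signature computed
                # over the final query string together with the User-Agent
                # that will actually be sent; get_xb() below is therefore
                # passed headers['user-agent'] rather than a fresh random UA.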
                try:
                    x_bogus = get_xb(f_url, headers['user-agent'])
                    if not x_bogus:
                        continue
                    url = f_url + '&X-Bogus={}'.format(x_bogus)
                    res = requests.get(url=url, headers=headers, proxies=Common.tunnel_proxies()).json()
                    aweme_list = res.get('aweme_list', [])
                except Exception as e:
                    Common.logger(log_type, crawler).error(f"failed to fetch the Douyin recommend feed: {e}")
                    continue
                if not aweme_list:
                    Common.logger(log_type, crawler).warning(f"Douyin recommend feed returned no more data, page: {page}")
                    continue
                for info in aweme_list:
                    try:
                        if info.get('is_ads'):
                            continue
                        publish_time = info.get('create_time')
                        if not publish_time:
                            continue
                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time))
                        publish_day = int((int(time.time()) - publish_time) / (3600 * 24))
                        if not info['desc']:
                            video_title = random_title(log_type, crawler, env, text='title')
                        else:
                            video_title = cls.video_title(log_type, crawler, env, info['desc'])
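                        # Field mapping used below: statistics.digg_count is
                        # Douyin's like counter; video.duration arrives in
                        # milliseconds and is rounded to whole seconds; the
                        # publish time is kept as days-ago (for rule checks),
                        # a millisecond timestamp, and a formatted string.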
                        video_dict = {'video_title': video_title,
                                      'video_id': info['aweme_id'],
                                      'play_cnt': info['statistics']['play_count'],
                                      'comment_cnt': info['statistics']['comment_count'],
                                      'like_cnt': info['statistics']['digg_count'],
                                      'share_cnt': info['statistics']['share_count'],
                                      'video_width': info['video']['width'],
                                      'video_height': info['video']['height'],
                                      'duration': round(info['video']['duration'] / 1000),
                                      'publish_time': publish_day,
                                      'publish_time_stamp': publish_time * 1000,
                                      'publish_time_str': publish_time_str,
                                      'user_name': info['author']['nickname'],
                                      'user_id': info['author_user_id'],
                                      'user_sec_id': info['author']['sec_uid'],
                                      'avatar_url': info['author']['avatar_thumb']['url_list'][0],
                                      'cover_url': info['video']['origin_cover']['url_list'][0].replace('\u0026', '&'),
                                      'video_url': info['video']['play_addr']['url_list'][0],
                                      'session': f"douyin{int(time.time())}"}
                        for k, v in video_dict.items():
                            Common.logger(log_type, crawler).info(f"{k}:{v}")
                        cls.download_publish(log_type=log_type,
                                             crawler=crawler,
                                             video_dict=video_dict,
                                             rule_dict=rule_dict,
                                             strategy=strategy,
                                             our_uid=our_id,
                                             oss_endpoint=oss_endpoint,
                                             env=env,
                                             machine=machine)
                    except Exception as e:
                        Common.logger(log_type, crawler).error(f"exception while scraping a single video: {e}\n")
            except Exception as e:
                Common.logger(log_type, crawler).error(f"exception while scraping page {page}: {e}\n")

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env, machine):
        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
        return len(repeat_video)
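
    # NOTE (editorial, hedged): repeat_video() above and insert_sql in
    # download_publish() below interpolate values straight into SQL, so a
    # title containing a double quote would break the statement. If
    # MysqlHelper wraps a DB-API cursor, a parameterized form such as
    #     cursor.execute(
    #         "select 1 from crawler_video where platform=%s and out_video_id=%s",
    #         (cls.platform, video_id))
    # would be safer; whether MysqlHelper exposes such a signature is an
    # assumption, so this is left as a sketch.
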
    # download / upload
    @classmethod
    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
        # first filter pass: filter words configured in MySQL
        filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
        for filter_word in filter_words:
            if filter_word in video_dict['video_title']:
                Common.logger(log_type, crawler).info(f"title hit a filter word: {video_dict['video_title']}\n")
                return
        if cls.download_rule(video_dict, rule_dict) is False:
            Common.logger(log_type, crawler).info('does not meet the download rules\n')
        # second filter pass: filter words from the Feishu sheet
        elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type, crawler)):
            Common.logger(log_type, crawler).info(f"title hit a filter word: {video_dict['video_title']}\n")
        elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
            Common.logger(log_type, crawler).info('video already downloaded\n')
        else:
            # download the video
            Common.download_method(log_type=log_type, crawler=crawler, text='video',
                                   title=video_dict['video_title'], url=video_dict['video_url'])
            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
            try:
                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                    # remove the video folder
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    Common.logger(log_type, crawler).info("video size is 0, folder removed\n")
                    return
            except FileNotFoundError:
                # remove the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).info("video file missing, folder removed\n")
                return
            # download the cover
            Common.download_method(log_type=log_type, crawler=crawler, text='cover',
                                   title=video_dict['video_title'], url=video_dict['cover_url'])
            # save the video info to txt
            Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)

            # upload the video
            Common.logger(log_type, crawler).info("start uploading the video...")
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy=strategy,
                                                      our_uid=our_uid,
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)
            if env == 'dev':
                our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
            else:
                our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
            Common.logger(log_type, crawler).info("video upload finished")

            if our_video_id is None:
                try:
                    # upload failed: remove the local video folder
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    return
                except FileNotFoundError:
                    return

            # write the video row to Feishu
            Feishu.insert_columns(log_type, 'douyin', "82c8d9", "ROWS", 1, 2)
            upload_time = int(time.time())
            values = [[our_video_id,
                       time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                       "推荐",
                       str(video_dict['video_id']),
                       video_dict['video_title'],
                       our_video_link,
                       video_dict['play_cnt'],
                       video_dict['comment_cnt'],
                       video_dict['like_cnt'],
                       video_dict['share_cnt'],
                       video_dict['duration'],
                       str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
                       video_dict['publish_time_str'],
                       video_dict['user_name'],
                       video_dict['user_id'],
                       video_dict['avatar_url'],
                       video_dict['cover_url'],
                       video_dict['video_url']]]
            time.sleep(0.5)
            Feishu.update_values(log_type, 'douyin', "82c8d9", "A2:Z2", values)
            Common.logger(log_type, crawler).info("the video has been saved to the Feishu doc\n")

            # save the video info to the database
            insert_sql = f""" insert into crawler_video(video_id,
                                                        user_id,
                                                        out_user_id,
                                                        platform,
                                                        strategy,
                                                        out_video_id,
                                                        video_title,
                                                        cover_url,
                                                        video_url,
                                                        duration,
                                                        publish_time,
                                                        play_cnt,
                                                        comment_cnt,
                                                        like_cnt,
                                                        share_cnt,
                                                        crawler_rule,
                                                        width,
                                                        height)
                                                        values({our_video_id},
                                                        {our_uid},
                                                        "{video_dict['user_id']}",
                                                        "{cls.platform}",
                                                        "推荐爬虫策略",
                                                        "{video_dict['video_id']}",
                                                        "{video_dict['video_title']}",
                                                        "{video_dict['cover_url']}",
                                                        "{video_dict['video_url']}",
                                                        {int(video_dict['duration'])},
                                                        "{video_dict['publish_time_str']}",
                                                        {int(video_dict['play_cnt'])},
                                                        {int(video_dict['comment_cnt'])},
                                                        {int(video_dict['like_cnt'])},
                                                        {int(video_dict['share_cnt'])},
                                                        '{json.dumps(rule_dict)}',
                                                        {int(video_dict['video_width'])},
                                                        {int(video_dict['video_height'])}) """
            Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
            MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
            Common.logger(log_type, crawler).info("video info inserted into the database!\n")


if __name__ == '__main__':
    # DyRecommend.get_recommend('recommend', 'douyin', '推荐抓取策略', 'inner', 'prod', 'aliyun')
    DyRecommend.get_videolist('recommend', 'douyin', '推荐抓取策略', 6282709, 'outer', 'dev', 'aliyun')
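    # Positional arguments above, in order: log_type, crawler, strategy label,
    # our_id (piaoquan uid), oss_endpoint, env, machine.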