# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/5/26
import json
import os
import shutil
import sys
import time
import requests
from hashlib import md5

from common.mq import MQ

sys.path.append(os.getcwd())
from common.common import Common
from common.scheduling_db import MysqlHelper
from douyin.douyin_recommend import get_xb
from common.feishu import Feishu
from common.publish import Publish
from common.public import random_title, get_config_from_mysql, download_rule


class DouyinauthorScheduling:
    platform = "抖音"

    @classmethod
    def video_title(cls, log_type, env, crawler, title):
        """Clean a raw video description into a usable title; fall back to a random title when nothing usable remains."""
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]

        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]

        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]

        # Strip hashtags, mentions, punctuation and whitespace; cap the result at 40 characters
        video_title = title3.strip().split('#')[0].replace("\n", "") \
            .replace("/", "").replace("抖音", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "").replace("'", '')[:40]
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return random_title(log_type, crawler, env, text='title')
        else:
            return video_title

    @classmethod
    def get_videoList(cls, log_type, crawler, user_dict, rule_dict, env):
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        max_cursor = ""
        # while True:
        url = "https://www.douyin.com/aweme/v1/web/aweme/post/?device_platform=webapp&aid=6383&channel=channel_pc_web&sec_user_id={sec_user_id}&max_cursor={max_cursor}&show_live_replay_strategy=1&count=10&publish_video_strategy_type=2&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1440&screen_height=900&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=112.0.0.0&browser_online=true&engine_name=Blink&engine_version=112.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=50".format(
            sec_user_id=user_dict["link"].replace("https://www.douyin.com/user/", ""), max_cursor=max_cursor)
        Common.logger(log_type, crawler).info(f"url:{url}")
        Common.logging(log_type, crawler, env, f"url:{url}")
        headers = {
            'authority': 'www.douyin.com',
            'accept': 'application/json, text/plain, */*',
            'accept-language': 'zh-CN,zh;q=0.9',
            'referer': 'https://www.douyin.com/user/{}?showTab=post'.format(
                user_dict["link"].replace("https://www.douyin.com/user/", "")),
            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
        }
        # Sign the request: the web API rejects requests without a valid X-Bogus parameter
        x_bogus = get_xb(url, headers['user-agent'])
        if not x_bogus:
            return
        url = url + '&X-Bogus={}'.format(x_bogus)
        res = requests.get(url=url, headers=headers, data={}, proxies=Common.tunnel_proxies(), timeout=10)
        # Common.logger(log_type, crawler).info(f"res:{res.text}\n")
        aweme_list = res.json().get('aweme_list', [])
        # max_cursor = res.json().get("max_cursor", "")
        if not aweme_list:
            Common.logger(log_type, crawler).info(f"No more data: {res.text}\n")
            Common.logging(log_type, crawler, env, f"No more data: {res.text}\n")
            return
        for info in aweme_list:
            try:
                if info.get('is_ads'):
                    continue
                publish_time = info.get('create_time')
                if not publish_time:
                    continue
                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time))
                publish_day = int((int(time.time()) - publish_time) / (3600 * 24))

                video_title = cls.video_title(log_type, env, crawler, info['desc'])
                if not video_title:
                    video_title = random_title(log_type, crawler, env, text='title')

                video_dict = {'video_title': video_title,
                              'video_id': info['aweme_id'],
                              'play_cnt': info['statistics']['play_count'],
                              'comment_cnt': info['statistics']['comment_count'],
                              'like_cnt': info['statistics']['digg_count'],
                              'share_cnt': info['statistics']['share_count'],
                              'video_width': info['video']['width'],
                              'video_height': info['video']['height'],
                              'duration': round(info['video']['duration'] / 1000),
                              'publish_time': publish_day,
                              'publish_time_stamp': publish_time,
                              'publish_time_str': publish_time_str,
                              'user_name': info['author']['nickname'],
                              'user_id': info['author_user_id'],
                              'user_sec_id': info['author']['sec_uid'],
                              'avatar_url': info['author']['avatar_thumb']['url_list'][0],
                              'cover_url': info['video']['origin_cover']['url_list'][0].replace('\u0026', '&'),
                              'video_url': info['video']['play_addr']['url_list'][0],
'session': f"douyin{int(time.time())}" } for k, v in video_dict.items(): Common.logger(log_type, crawler).info(f"{k}:{v}") Common.logging(log_type, crawler, env, f"{video_dict}") if int((int(time.time()) - int(video_dict["publish_time_stamp"])) / (3600 * 24)) > int(rule_dict.get("period", {}).get("max", 1000)): Common.logger(log_type, crawler).info(f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n') Common.logging(log_type, crawler, env, f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n') return if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False: Common.logger(log_type, crawler).info("不满足抓取规则\n") Common.logging(log_type, crawler, env, "不满足抓取规则\n") elif any(str(word) if str(word) in video_dict["video_title"] else False for word in get_config_from_mysql(log_type=log_type, source=crawler, env=env, text="filter", action="")) is True: Common.logger(log_type, crawler).info('已中过滤词\n') Common.logging(log_type, crawler, env, '已中过滤词\n') elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0: Common.logger(log_type, crawler).info('视频已下载\n') Common.logging(log_type, crawler, env, '视频已下载\n') else: # cls.download_publish(log_type=log_type, # crawler=crawler, # user_dict=user_dict, # video_dict=video_dict, # rule_dict=rule_dict, # env=env) video_dict["out_user_id"] = video_dict["user_id"] video_dict["platform"] = crawler video_dict["strategy"] = log_type video_dict["out_video_id"] = video_dict["video_id"] video_dict["width"] = video_dict["video_width"] video_dict["height"] = video_dict["video_height"] video_dict["crawler_rule"] = json.dumps(rule_dict) video_dict["user_id"] = user_dict["uid"] video_dict["publish_time"] = video_dict["publish_time_str"] video_dict["strategy_type"] = log_type mq.send_msg(video_dict) except Exception as e: Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n") Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n") @classmethod def repeat_video(cls, log_type, crawler, video_id, env): # sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """ sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """ repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env) return len(repeat_video) # 下载 / 上传 @classmethod def download_publish(cls, log_type, crawler, user_dict, video_dict, rule_dict, env): # 下载视频 Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url']) md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest() try: if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0: # 删除视频文件夹 shutil.rmtree(f"./{crawler}/videos/{md_title}") Common.logger(log_type, crawler).info("视频size=0,删除成功\n") Common.logging(log_type, crawler, env, "视频size=0,删除成功\n") return except FileNotFoundError: # 删除视频文件夹 shutil.rmtree(f"./{crawler}/videos/{md_title}") Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n") Common.logging(log_type, crawler, env, "视频文件不存在,删除文件夹成功\n") return # 下载封面 Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url']) # 保存视频信息至txt Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict) # 上传视频 Common.logger(log_type, crawler).info("开始上传视频...") Common.logging(log_type, crawler, env, "开始上传视频...") if env == "dev": oss_endpoint = "out" our_video_id = 
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy="定向抓取策略",
                                                      our_uid=user_dict["uid"],
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            oss_endpoint = "inner"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy="定向抓取策略",
                                                      our_uid=user_dict["uid"],
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"

        if our_video_id is None:
            try:
                # Upload failed: remove the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                return
            except FileNotFoundError:
                return

        # Save video info to the database
        insert_sql = f""" insert into crawler_video(video_id,
                        user_id,
                        out_user_id,
                        platform,
                        strategy,
                        out_video_id,
                        video_title,
                        cover_url,
                        video_url,
                        duration,
                        publish_time,
                        play_cnt,
                        comment_cnt,
                        like_cnt,
                        share_cnt,
                        crawler_rule,
                        width,
                        height)
                        values({our_video_id},
                        {user_dict["uid"]},
                        "{video_dict['user_id']}",
                        "{cls.platform}",
                        "定向抓取策略",
                        "{video_dict['video_id']}",
                        "{video_dict['video_title']}",
                        "{video_dict['cover_url']}",
                        "{video_dict['video_url']}",
                        {int(video_dict['duration'])},
                        "{video_dict['publish_time_str']}",
                        {int(video_dict['play_cnt'])},
                        {int(video_dict['comment_cnt'])},
                        {int(video_dict['like_cnt'])},
                        {int(video_dict['share_cnt'])},
                        '{json.dumps(rule_dict)}',
                        {int(video_dict['video_width'])},
                        {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
        Common.logger(log_type, crawler).info('Video info written to database')
        Common.logging(log_type, crawler, env, 'Video info written to database')

        # Write the video to Feishu
        upload_time = int(time.time())
        values = [[our_video_id,
                   time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "定向抓取策略",
                   str(video_dict['video_id']),
                   video_dict['video_title'],
                   our_video_link,
                   # video_dict['gid'],
                   video_dict['play_cnt'],
                   video_dict['comment_cnt'],
                   video_dict['like_cnt'],
                   video_dict['share_cnt'],
                   video_dict['duration'],
                   str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
                   video_dict['publish_time_str'],
                   video_dict['user_name'],
                   video_dict['user_id'],
                   video_dict['avatar_url'],
                   video_dict['cover_url'],
                   video_dict['video_url']]]
        Feishu.insert_columns(log_type, 'douyin', "qV9VC0", "ROWS", 1, 2)
        time.sleep(0.5)
        Feishu.update_values(log_type, 'douyin', "qV9VC0", "A2:Z2", values)
        Common.logger(log_type, crawler).info("Video saved to Feishu sheet\n")
        Common.logging(log_type, crawler, env, "Video saved to Feishu sheet\n")

    @classmethod
    def get_author_videos(cls, log_type, crawler, rule_dict, user_list, env):
        for user_dict in user_list:
            try:
                Common.logger(log_type, crawler).info(f"Start crawling profile videos of {user_dict['nick_name']}\n")
                Common.logging(log_type, crawler, env, f"Start crawling profile videos of {user_dict['nick_name']}\n")
                cls.get_videoList(log_type=log_type,
                                  crawler=crawler,
                                  rule_dict=rule_dict,
                                  user_dict=user_dict,
                                  env=env)
            except Exception as e:
                Common.logger(log_type, crawler).error(f"Error crawling profile videos of {user_dict['nick_name']}: {e}\n")
                Common.logging(log_type, crawler, env, f"Error crawling profile videos of {user_dict['nick_name']}: {e}\n")


if __name__ == '__main__':
    pass
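
# Usage sketch (hypothetical; not part of the original module). In production the scheduler
# supplies user_list and rule_dict from MySQL; the literals below are illustrative stand-ins
# whose keys ("nick_name", "link", "uid", "period") match what get_author_videos and
# get_videoList actually read. Uncomment inside the __main__ block above to try it out:
#
# example_user_list = [{
#     "nick_name": "example_author",                            # only used in log messages
#     "link": "https://www.douyin.com/user/MS4wLjABAAAA_demo",  # sec_user_id is parsed from this link
#     "uid": 123456,                                            # internal uid attached to published videos
# }]
# example_rule_dict = {"period": {"max": 30}}                   # skip videos published more than 30 days ago
# DouyinauthorScheduling.get_author_videos(log_type="author",
#                                          crawler="douyin",
#                                          rule_dict=example_rule_dict,
#                                          user_list=example_user_list,
#                                          env="dev")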