update: add Douyin author crawling

lierqiang, 2 years ago
Parent commit: 2b0667738f
3 files changed, 418 insertions(+), 1 deletion(-)
  1. common/public.py (+1 −1)
  2. douyin/douyin_follow/follow_dy.py (+372 −0)
  3. douyin/douyin_main/run_douyin_follow.py (+45 −0)

+ 1 - 1
common/public.py

@@ -30,7 +30,7 @@ def filter_word(log_type, crawler, source, env):
 
 
 def get_user_from_mysql(log_type, crawler, source, env, action=''):
-    sql = f"select * from crawler_author_map where source='{source}' and is_del=1"
+    sql = f"select * from crawler_author_map where source='{source}' and task_type='{log_type}'  and is_del=1"
     results = MysqlHelper.get_values(log_type, crawler, sql, env, action=action)
     if results:
         return results

+ 372 - 0
douyin/douyin_follow/follow_dy.py

@@ -0,0 +1,372 @@
+# -*- coding: utf-8 -*-
+# @Author: lierqiang
+# @Time: 2023/4/12
+import json
+import os
+import random
+import shutil
+import sys
+import time
+import requests
+from hashlib import md5
+
+sys.path.append(os.getcwd())
+from common.public import get_user_from_mysql
+from douyin.douyin_recommend import get_xb
+from common.db import MysqlHelper
+from common.feishu import Feishu
+from common.publish import Publish
+from common.userAgent import get_random_user_agent
+from common.common import Common
+
+
+class DyFollow(object):
+    platform = "抖音"
+    tag = "抖音定向爬虫策略"
+
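+    # Fetch download-rule thresholds (video width/height, like_cnt, duration,
+    # publish_time) from the Feishu sheet "fn2hEO"; retry every 10s until readable.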
+    @classmethod
+    def get_rule(cls, log_type, crawler):
+        try:
+            while True:
+                rule_sheet = Feishu.get_values_batch(log_type, crawler, "fn2hEO")
+                if rule_sheet is None:
+                    Common.logger(log_type, crawler).warning("rule_sheet is None! 10秒后重新获取")
+                    time.sleep(10)
+                    continue
+                rule_dict = {
+                    "video_width": int(rule_sheet[0][2]),
+                    "video_height": int(rule_sheet[1][2]),
+                    "like_cnt": int(rule_sheet[2][2]),
+                    "duration": int(rule_sheet[3][2]),
+                    "publish_time": int(rule_sheet[4][2]),
+
+                }
+                return rule_dict
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
+
+    # Download rule
+    @classmethod
+    def download_rule(cls, video_info_dict, rule_dict):
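+        # A video passes only if like_cnt and duration both meet their thresholds
+        # and at least one of width/height meets its threshold.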
+        # if video_info_dict['play_cnt'] >= rule_dict['play_cnt']:
+        #     if video_info_dict['comment_cnt'] >= rule_dict['comment_cnt']:
+        return (video_info_dict['like_cnt'] >= rule_dict['like_cnt']
+                and video_info_dict['duration'] >= rule_dict['duration']
+                and (video_info_dict['video_width'] >= rule_dict['video_width']
+                     or video_info_dict['video_height'] >= rule_dict['video_height']))
+
+
+    # Filter-word list
+    @classmethod
+    def filter_words(cls, log_type, crawler):
+        try:
+            while True:
+                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, '6BS2RR')
+                if filter_words_sheet is None:
+                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
+                    time.sleep(10)
+                    continue
+                filter_words_list = []
+                for x in filter_words_sheet:
+                    for y in x:
+                        if y is not None:
+                            filter_words_list.append(y)
+                return filter_words_list
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')
+
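+    # Clean a raw description into a title: strip hashtag/@ segments and
+    # characters unsafe in file names, truncate to 40 chars, and fall back
+    # to a random title when nothing usable remains.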
+    @classmethod
+    def video_title(cls, log_type, crawler, title):
+        title_split1 = title.split(" #")
+        if title_split1[0] != "":
+            title1 = title_split1[0]
+        else:
+            title1 = title_split1[-1]
+
+        title_split2 = title1.split("#")
+        if title_split2[0] != "":
+            title2 = title_split2[0]
+        else:
+            title2 = title_split2[-1]
+
+        title_split3 = title2.split("@")
+        if title_split3[0] != "":
+            title3 = title_split3[0]
+        else:
+            title3 = title_split3[-1]
+
+        video_title = title3.strip().replace("\n", "") \
+                          .replace("/", "").replace("抖音", "").replace(" ", "") \
+                          .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
+                          .replace("#", "").replace(".", "。").replace("\\", "") \
+                          .replace(":", "").replace("*", "").replace("?", "") \
+                          .replace("?", "").replace('"', "").replace("<", "") \
+                          .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
+        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
+            return cls.random_title(log_type, crawler)
+        else:
+            return video_title
+
+    @classmethod
+    def random_title(cls, log_type, crawler):
+        try:
+            while True:
+                random_title_sheet = Feishu.get_values_batch(log_type, crawler, 'sPK2oY')
+                if random_title_sheet is None:
+                    Common.logger(log_type, crawler).warning(f"random_title_sheet:{random_title_sheet} 10秒钟后重试")
+                    time.sleep(10)
+                    continue
+                random_title_list = []
+                for x in random_title_sheet:
+                    for y in x:
+                        if y is not None:
+                            random_title_list.append(y)
+                return random.choice(random_title_list)
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'random_title:{e}\n')
+
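+    # Fetch one page (count=10) of the author's posted videos from the web API,
+    # build a video_dict per item and hand each one to download_publish.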
+    @classmethod
+    def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine, rule_dict):
+
+        try:
+            max_cursor = ''
+            url = "https://www.douyin.com/aweme/v1/web/aweme/post/?device_platform=webapp&aid=6383&channel=channel_pc_web&sec_user_id={sec_user_id}&max_cursor={max_cursor}&show_live_replay_strategy=1&count=10&publish_video_strategy_type=2&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1440&screen_height=900&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=112.0.0.0&browser_online=true&engine_name=Blink&engine_version=112.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=50".format(sec_user_id=out_uid, max_cursor=max_cursor)
+            headers = {
+                'authority': 'www.douyin.com',
+                'accept': 'application/json, text/plain, */*',
+                'accept-language': 'zh-CN,zh;q=0.9',
+                'referer': 'https://www.douyin.com/user/{}?showTab=post'.format(out_uid),
+                'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
+                'sec-ch-ua-mobile': '?0',
+                'sec-ch-ua-platform': '"macOS"',
+                'sec-fetch-dest': 'empty',
+                'sec-fetch-mode': 'cors',
+                'sec-fetch-site': 'same-origin',
+                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
+            }
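+            # The endpoint rejects unsigned requests: compute the X-Bogus signature
+            # for this URL / user-agent pair and append it as a query parameter.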
+            x_bogus = get_xb(url, headers['user-agent'])
+            if not x_bogus:
+                return
+            url = url + '&X-Bogus={}'.format(x_bogus)
+            res = requests.get(url=url, headers=headers, proxies=Common.tunnel_proxies(), timeout=10).json()
+            aweme_list = res.get('aweme_list', [])
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"获取抖音作者:{out_uid},视频列表失败:{e}")
+            return
+        if not aweme_list:
+            Common.logger(log_type, crawler).warning(f"抖音作者:{out_uid} 没有获取到更多数据")
+            return
+        for info in aweme_list:
+            # if info.get('is_ads'):
+            #     continue
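+            # create_time is a unix timestamp; derive a readable string and the video's age in days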
+            publish_time = info['create_time']
+            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time))
+            publish_day = int((int(time.time()) - publish_time) / (3600 * 24))
+            if not info['desc']:
+                video_title = cls.random_title(log_type, crawler)
+            else:
+                video_title = cls.video_title(log_type, crawler, info['desc'])
+
+            video_dict = {'video_title': video_title,
+                          'video_id': info['aweme_id'],
+                          'play_cnt': info['statistics']['play_count'],
+                          'comment_cnt': info['statistics']['comment_count'],
+                          'like_cnt': info['statistics']['digg_count'],
+                          'share_cnt': info['statistics']['share_count'],
+                          'video_width': info['video']['width'],
+                          'video_height': info['video']['height'],
+                          'duration': round(info['video']['duration'] / 1000),
+                          'publish_time': publish_day,
+                          'publish_time_stamp': publish_time * 1000,
+                          'publish_time_str': publish_time_str,
+                          'user_name': info['author']['nickname'],
+                          'user_id': info['author_user_id'],
+                          'user_sec_id': info['author']['sec_uid'],
+                          'avatar_url': info['author']['avatar_thumb']['url_list'][0],
+                          'cover_url': info['video']['origin_cover']['url_list'][0].replace('\u0026', '&'),
+                          'video_url': info['video']['play_addr']['url_list'][0],
+                          'session': f"douyin{int(time.time())}"
+                          }
+
+            for k, v in video_dict.items():
+                Common.logger(log_type, crawler).info(f"{k}:{v}")
+            cls.download_publish(log_type=log_type,
+                                 crawler=crawler,
+                                 video_dict=video_dict,
+                                 rule_dict=rule_dict,
+                                 strategy=strategy,
+                                 our_uid=our_uid,
+                                 oss_endpoint=oss_endpoint,
+                                 env=env,
+                                 machine=machine)
+
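+    # Deduplication: count existing crawler_video rows with the same platform + out_video_id.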
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env, machine):
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
+        return len(repeat_video)
+
+    # Download / upload
+    @classmethod
+    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
+        try:
+            if cls.download_rule(video_dict, rule_dict) is False:
+                Common.logger(log_type, crawler).info('不满足抓取规则\n')
+            elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type, crawler)):
+                Common.logger(log_type, crawler).info(f"标题已中过滤词:{video_dict['video_title']}\n")
+            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
+                Common.logger(log_type, crawler).info('视频已下载\n')
+            else:
+                # Download the video
+                Common.download_method(log_type=log_type, crawler=crawler, text='video',
+                                       title=video_dict['video_title'], url=video_dict['video_url'])
+                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                    # Delete the video folder
+                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                    return
+                # ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+                # if ffmpeg_dict is None or ffmpeg_dict['size'] == 0:
+                #     Common.logger(log_type, crawler).warning(f"下载的视频无效,已删除\n")
+                #     # 删除视频文件夹
+                #     shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+                #     return
+                # Download the cover image
+                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
+                                       title=video_dict['video_title'], url=video_dict['cover_url'])
+                # Save video info to a txt file
+                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+                # Upload the video
+                Common.logger(log_type, crawler).info("开始上传视频...")
+                our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                          crawler=crawler,
+                                                          strategy=strategy,
+                                                          our_uid=our_uid,
+                                                          env=env,
+                                                          oss_endpoint=oss_endpoint)
+                if our_video_id is None:
+                    # Upload failed: delete the downloaded video folder and bail out
+                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                    return
+                if env == 'dev':
+                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                else:
+                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                Common.logger(log_type, crawler).info("视频上传完成")
+
+                # Write the video to the Feishu sheet
+                upload_time = int(time.time())
+                values = [[
+                    our_video_id,
+                    time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                    strategy,
+                    str(video_dict['video_id']),
+                    video_dict['video_title'],
+                    our_video_link,
+                    # video_dict['gid'],
+                    video_dict['play_cnt'],
+                    video_dict['comment_cnt'],
+                    video_dict['like_cnt'],
+                    video_dict['share_cnt'],
+                    video_dict['duration'],
+                    str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
+                    video_dict['publish_time_str'],
+                    video_dict['user_name'],
+                    video_dict['user_id'],
+                    video_dict['avatar_url'],
+                    video_dict['cover_url'],
+                    video_dict['video_url']
+                ]]
+                # time.sleep(1)
+                Feishu.insert_columns(log_type, 'douyin', "qV9VC0", "ROWS", 1, 2)
+                Feishu.update_values(log_type, 'douyin', "qV9VC0", "A2:Z2", values)
+                Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+
+                # Save video info to the database
+                insert_sql = f""" insert into crawler_video(video_id,
+                                user_id,
+                                out_user_id,
+                                platform,
+                                strategy,
+                                out_video_id,
+                                video_title,
+                                cover_url,
+                                video_url,
+                                duration,
+                                publish_time,
+                                play_cnt,
+                                comment_cnt,
+                                like_cnt,
+                                share_cnt,
+                                crawler_rule,
+                                width,
+                                height)
+                                values({our_video_id},
+                                {our_uid},
+                                "{video_dict['user_id']}",
+                                "{cls.platform}",
+                                "{strategy}",
+                                "{video_dict['video_id']}",
+                                "{video_dict['video_title']}",
+                                "{video_dict['cover_url']}",
+                                "{video_dict['video_url']}",
+                                {int(video_dict['duration'])},
+                                "{video_dict['publish_time_str']}",
+                                {int(video_dict['play_cnt'])},
+                                {int(video_dict['comment_cnt'])},
+                                {int(video_dict['like_cnt'])},
+                                {int(video_dict['share_cnt'])},
+                                '{json.dumps(rule_dict)}',
+                                {int(video_dict['video_width'])},
+                                {int(video_dict['video_height'])}) """
+                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
+                Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'download_publish异常:{e}\n')
+
+    @classmethod
+    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
+        user_list = get_user_from_mysql(log_type, crawler, crawler, env)
+        rule_dict = cls.get_rule(log_type, crawler)
+        for user in user_list:
+        out_uid = user["spider_link"]
+            user_name = user["nick_name"]
+            our_uid = user["media_id"]
+            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
+            try:
+                cls.get_videoList(log_type=log_type,
+                                  crawler=crawler,
+                                  strategy=strategy,
+                                  our_uid=our_uid,
+                                  out_uid=out_uid,
+                                  oss_endpoint=oss_endpoint,
+                                  env=env,
+                                  machine=machine,
+                                  rule_dict=rule_dict,
+                                  )
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"get_follow_videos:{e}\n")
+
+
+if __name__ == '__main__':
+    DyFollow.get_follow_videos('author', 'douyin', '定向抓取策略', 'outer', 'prod', 'aliyun')

+ 45 - 0
douyin/douyin_main/run_douyin_follow.py

@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# @Author: lierqiang
+# @Time: 2023/4/07
+import argparse
+import os
+import sys
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from douyin.douyin_follow.follow_dy import DyFollow
+
+
+def main(log_type, crawler, strategy, oss_endpoint, env, machine):
+    try:
+        Common.logger(log_type, crawler).info('开始抓取 抖音 作者\n')
+        DyFollow.get_follow_videos(
+            log_type=log_type,
+            crawler=crawler,
+            strategy=strategy,
+            oss_endpoint=oss_endpoint,
+            env=env,
+            machine=machine)
+        Common.del_logs(log_type, crawler)
+        Common.logger(log_type, crawler).info('抓取完一轮\n')
+    except Exception as e:
+        Common.logger(log_type, crawler).error(f"抖音作者异常,触发报警:{e}\n")
+        # Feishu.bot(log_type, crawler, f"抖音作者异常,触发报警:{e}")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # build the CLI argument parser
+    parser.add_argument('--log_type', default='author', type=str)
+    parser.add_argument('--crawler', default='douyin')
+    parser.add_argument('--strategy', default='定向抓取策略')
+    parser.add_argument('--oss_endpoint', default='inner')
+    parser.add_argument('--env', default='prod')
+    parser.add_argument('--machine', default='aliyun')
+    args = parser.parse_args()  # the defaults above can be overridden on the command line
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         strategy=args.strategy,
+         oss_endpoint=args.oss_endpoint,
+         env=args.env,
+         machine=args.machine)
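
Usage (a minimal sketch, assuming the repo root as the working directory and the helpers under common/ configured; the flags mirror the argparse defaults above):

    python douyin/douyin_main/run_douyin_follow.py --log_type author --crawler douyin --strategy 定向抓取策略 --oss_endpoint inner --env prod --machine aliyun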