wangkun 1 year ago
parent
commit
183b0ba9a4
2 changed files with 88 additions and 87 deletions
  1. README.MD (1 addition, 0 deletions)
  2. douyin/douyin_author/douyin_author_scheduling.py (87 additions, 87 deletions)

+ 1 - 0
README.MD

@@ -248,4 +248,5 @@ ps aux | grep Appium.app | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep kuaishou | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep xigua_search | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep kanyikan | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep shipinhao_search | grep -v grep | awk '{print $2}' | xargs kill -9
 ```

+ 87 - 87
douyin/douyin_author/douyin_author_scheduling.py

@@ -55,98 +55,98 @@ class DouyinauthorScheduling:
     @classmethod
     def get_videoList(cls, log_type, crawler, user_dict, rule_dict, env):
         max_cursor = ""
-        while True:
-            url = "https://www.douyin.com/aweme/v1/web/aweme/post/?device_platform=webapp&aid=6383&channel=channel_pc_web&sec_user_id={sec_user_id}&max_cursor={max_cursor}&show_live_replay_strategy=1&count=10&publish_video_strategy_type=2&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1440&screen_height=900&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=112.0.0.0&browser_online=true&engine_name=Blink&engine_version=112.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=50".format(
-                sec_user_id=user_dict["link"].replace("https://www.douyin.com/user/", ""), max_cursor=max_cursor)
-            Common.logger(log_type, crawler).info(f"url:{url}")
-            headers = {
-                'authority': 'www.douyin.com',
-                'accept': 'application/json, text/plain, */*',
-                'accept-language': 'zh-CN,zh;q=0.9',
-                # 'cookie': '__ac_nonce=06437a18000f23ad954f0; __ac_signature=_02B4Z6wo00f01Sb71TAAAIDCsi2OPpjonN0m29GAAC2M85; s_v_web_id=verify_lgeqr3uq_3aDaqQXf_juHS_40Yi_BE8b_tI8FCILZQXPK; _tea_utm_cache_2018=undefined; ttwid=1%7Cq_IBs6hbBUOIEcRR1gxtgY6GiTbTE3U1XhJNLL_9BZA%7C1681367431%7Cf77b36ae4721884fec1c3fa9d6a08c29e308236ae13df58d1be3b0d1f82f8668; strategyABtestKey=%221681367433.454%22; passport_csrf_token=bff0289a5846e58b4b9db6e1f64665f4; passport_csrf_token_default=bff0289a5846e58b4b9db6e1f64665f4; bd_ticket_guard_client_data=eyJiZC10aWNrZXQtZ3VhcmQtdmVyc2lvbiI6MiwiYmQtdGlja2V0LWd1YXJkLWl0ZXJhdGlvbi12ZXJzaW9uIjoxLCJiZC10aWNrZXQtZ3VhcmQtY2xpZW50LWNzciI6Ii0tLS0tQkVHSU4gQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbk1JSUJEVENCdFFJQkFEQW5NUXN3Q1FZRFZRUUdFd0pEVGpFWU1CWUdBMVVFQXd3UFltUmZkR2xqYTJWMFgyZDFcclxuWVhKa01Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRVhzcHJ5TElFT3E4Z2tPc2l5MTdSS1dEcVxyXG5xTXZkWEt5Y1V5NStiL3JpSmJ6VkMwMlYrU1dNaWtZTlNOK29IU2g2WVVTTmdUUjJrZEhvRUxISmxGdU9scUFzXHJcbk1Db0dDU3FHU0liM0RRRUpEakVkTUJzd0dRWURWUjBSQkJJd0VJSU9kM2QzTG1SdmRYbHBiaTVqYjIwd0NnWUlcclxuS29aSXpqMEVBd0lEUndBd1JBSWdIeW9SblFNK0h0Z3ZOU2dFMVpHdGpmYWQxT3BuWHJRdVFSNXNSaDkwakRJQ1xyXG5JRG1tVkthRkN5djBLemtpZ0J0RExaTVJSNndURzRBWUVoNUlWUmlZUU9UVVxyXG4tLS0tLUVORCBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS1cclxuIn0=; msToken=ZPkeAqCnLbjDWCkWcWf36ZZIoZTl07X33ca0xcNGk3tZPoMvgx-lo28oNb5JhbCKsXLgLNIoojAbocBrjdAv54Hwf-Tk3_yAjLW7WJxxSa0=; ttcid=54b98e03a03e43e09211ee28db90fdd126; home_can_add_dy_2_desktop=%221%22; msToken=nqMSwn8xJAXLZk2AikdOeJ6P3JvoCsbYjHROoR55KXMDvWs8auYCMpZFGQSClHhitgB0l_vwg8m0-lE-aWQovfN7Ga1QupM3SpdNktiamFRiHMdJExREc9_uxS1ln8E=; tt_scid=DnO5GFg1oLONpPMFuFeL-OveKBn43mRynRVTvHsh1KUQm283ocN6JX6qPKDDrFHbfdf1; download_guide=%222%2F20230413%22; msToken=b9haS5RjLaFgqVDoGp5xSqc8B4kl-miQB5Nku0BSIvHVutKT81Nzk_pPb0wm7xYlAp_nz1gytQng5gYeIRNxcMgZJ_MB7lhejt_093miXlHtvqAaxL0FNg==',
-                'referer': 'https://www.douyin.com/user/{}?showTab=post'.format(user_dict["link"].replace("https://www.douyin.com/user/", "")),
-                'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
-                'sec-ch-ua-mobile': '?0',
-                'sec-ch-ua-platform': '"macOS"',
-                'sec-fetch-dest': 'empty',
-                'sec-fetch-mode': 'cors',
-                'sec-fetch-site': 'same-origin',
-                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
-            }
-            x_bogus = get_xb(url, headers['user-agent'])
-            url = url + '&X-Bogus={}'.format(x_bogus)
-            if not x_bogus:
-                return
-            res = requests.get(url=url, headers=headers, data={}, proxies=Common.tunnel_proxies(), timeout=10)
-            # Common.logger(log_type, crawler).info(f"res:{res.text}\n")
-            aweme_list = res.json().get('aweme_list', [])
-            max_cursor = res.json().get("max_cursor", "")
-            if not aweme_list:
-                Common.logger(log_type, crawler).info(f"没有更多数据啦~:{res.text}\n")
-                return
-            for info in aweme_list:
-                try:
-                    if info.get('is_ads'):
-                        continue
-                    publish_time = info.get('create_time')
-                    if not publish_time:
-                        continue
-                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time))
-                    publish_day = int((int(time.time()) - publish_time) / (3600 * 24))
+        # while True:
+        url = "https://www.douyin.com/aweme/v1/web/aweme/post/?device_platform=webapp&aid=6383&channel=channel_pc_web&sec_user_id={sec_user_id}&max_cursor={max_cursor}&show_live_replay_strategy=1&count=10&publish_video_strategy_type=2&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1440&screen_height=900&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=112.0.0.0&browser_online=true&engine_name=Blink&engine_version=112.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=50".format(
+            sec_user_id=user_dict["link"].replace("https://www.douyin.com/user/", ""), max_cursor=max_cursor)
+        Common.logger(log_type, crawler).info(f"url:{url}")
+        headers = {
+            'authority': 'www.douyin.com',
+            'accept': 'application/json, text/plain, */*',
+            'accept-language': 'zh-CN,zh;q=0.9',
+            # 'cookie': '__ac_nonce=06437a18000f23ad954f0; __ac_signature=_02B4Z6wo00f01Sb71TAAAIDCsi2OPpjonN0m29GAAC2M85; s_v_web_id=verify_lgeqr3uq_3aDaqQXf_juHS_40Yi_BE8b_tI8FCILZQXPK; _tea_utm_cache_2018=undefined; ttwid=1%7Cq_IBs6hbBUOIEcRR1gxtgY6GiTbTE3U1XhJNLL_9BZA%7C1681367431%7Cf77b36ae4721884fec1c3fa9d6a08c29e308236ae13df58d1be3b0d1f82f8668; strategyABtestKey=%221681367433.454%22; passport_csrf_token=bff0289a5846e58b4b9db6e1f64665f4; passport_csrf_token_default=bff0289a5846e58b4b9db6e1f64665f4; bd_ticket_guard_client_data=eyJiZC10aWNrZXQtZ3VhcmQtdmVyc2lvbiI6MiwiYmQtdGlja2V0LWd1YXJkLWl0ZXJhdGlvbi12ZXJzaW9uIjoxLCJiZC10aWNrZXQtZ3VhcmQtY2xpZW50LWNzciI6Ii0tLS0tQkVHSU4gQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbk1JSUJEVENCdFFJQkFEQW5NUXN3Q1FZRFZRUUdFd0pEVGpFWU1CWUdBMVVFQXd3UFltUmZkR2xqYTJWMFgyZDFcclxuWVhKa01Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRVhzcHJ5TElFT3E4Z2tPc2l5MTdSS1dEcVxyXG5xTXZkWEt5Y1V5NStiL3JpSmJ6VkMwMlYrU1dNaWtZTlNOK29IU2g2WVVTTmdUUjJrZEhvRUxISmxGdU9scUFzXHJcbk1Db0dDU3FHU0liM0RRRUpEakVkTUJzd0dRWURWUjBSQkJJd0VJSU9kM2QzTG1SdmRYbHBiaTVqYjIwd0NnWUlcclxuS29aSXpqMEVBd0lEUndBd1JBSWdIeW9SblFNK0h0Z3ZOU2dFMVpHdGpmYWQxT3BuWHJRdVFSNXNSaDkwakRJQ1xyXG5JRG1tVkthRkN5djBLemtpZ0J0RExaTVJSNndURzRBWUVoNUlWUmlZUU9UVVxyXG4tLS0tLUVORCBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS1cclxuIn0=; msToken=ZPkeAqCnLbjDWCkWcWf36ZZIoZTl07X33ca0xcNGk3tZPoMvgx-lo28oNb5JhbCKsXLgLNIoojAbocBrjdAv54Hwf-Tk3_yAjLW7WJxxSa0=; ttcid=54b98e03a03e43e09211ee28db90fdd126; home_can_add_dy_2_desktop=%221%22; msToken=nqMSwn8xJAXLZk2AikdOeJ6P3JvoCsbYjHROoR55KXMDvWs8auYCMpZFGQSClHhitgB0l_vwg8m0-lE-aWQovfN7Ga1QupM3SpdNktiamFRiHMdJExREc9_uxS1ln8E=; tt_scid=DnO5GFg1oLONpPMFuFeL-OveKBn43mRynRVTvHsh1KUQm283ocN6JX6qPKDDrFHbfdf1; download_guide=%222%2F20230413%22; msToken=b9haS5RjLaFgqVDoGp5xSqc8B4kl-miQB5Nku0BSIvHVutKT81Nzk_pPb0wm7xYlAp_nz1gytQng5gYeIRNxcMgZJ_MB7lhejt_093miXlHtvqAaxL0FNg==',
+            'referer': 'https://www.douyin.com/user/{}?showTab=post'.format(user_dict["link"].replace("https://www.douyin.com/user/", "")),
+            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
+        }
+        x_bogus = get_xb(url, headers['user-agent'])
+        url = url + '&X-Bogus={}'.format(x_bogus)
+        if not x_bogus:
+            return
+        res = requests.get(url=url, headers=headers, data={}, proxies=Common.tunnel_proxies(), timeout=10)
+        # Common.logger(log_type, crawler).info(f"res:{res.text}\n")
+        aweme_list = res.json().get('aweme_list', [])
+        # max_cursor = res.json().get("max_cursor", "")
+        if not aweme_list:
+            Common.logger(log_type, crawler).info(f"没有更多数据啦~:{res.text}\n")
+            return
+        for info in aweme_list:
+            try:
+                if info.get('is_ads'):
+                    continue
+                publish_time = info.get('create_time')
+                if not publish_time:
+                    continue
+                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time))
+                publish_day = int((int(time.time()) - publish_time) / (3600 * 24))
 
-                    video_title = cls.video_title(log_type, env, crawler, info['desc'])
-                    if not video_title:
-                        video_title = random_title(log_type, crawler, env, text='title')
+                video_title = cls.video_title(log_type, env, crawler, info['desc'])
+                if not video_title:
+                    video_title = random_title(log_type, crawler, env, text='title')
 
-                    video_dict = {'video_title': video_title,
-                                  'video_id': info['aweme_id'],
-                                  'play_cnt': info['statistics']['play_count'],
-                                  'comment_cnt': info['statistics']['comment_count'],
-                                  'like_cnt': info['statistics']['digg_count'],
-                                  'share_cnt': info['statistics']['share_count'],
-                                  'video_width': info['video']['width'],
-                                  'video_height': info['video']['height'],
-                                  'duration': round(info['video']['duration'] / 1000),
-                                  'publish_time': publish_day,
-                                  'publish_time_stamp': publish_time,
-                                  'publish_time_str': publish_time_str,
-                                  'user_name': info['author']['nickname'],
-                                  'user_id': info['author_user_id'],
-                                  'user_sec_id': info['author']['sec_uid'],
-                                  'avatar_url': info['author']['avatar_thumb']['url_list'][0],
-                                  'cover_url': info['video']['origin_cover']['url_list'][0].replace('\u0026', '&'),
-                                  'video_url': info['video']['play_addr']['url_list'][0],
-                                  'session': f"douyin{int(time.time())}"
-                                  }
+                video_dict = {'video_title': video_title,
+                              'video_id': info['aweme_id'],
+                              'play_cnt': info['statistics']['play_count'],
+                              'comment_cnt': info['statistics']['comment_count'],
+                              'like_cnt': info['statistics']['digg_count'],
+                              'share_cnt': info['statistics']['share_count'],
+                              'video_width': info['video']['width'],
+                              'video_height': info['video']['height'],
+                              'duration': round(info['video']['duration'] / 1000),
+                              'publish_time': publish_day,
+                              'publish_time_stamp': publish_time,
+                              'publish_time_str': publish_time_str,
+                              'user_name': info['author']['nickname'],
+                              'user_id': info['author_user_id'],
+                              'user_sec_id': info['author']['sec_uid'],
+                              'avatar_url': info['author']['avatar_thumb']['url_list'][0],
+                              'cover_url': info['video']['origin_cover']['url_list'][0].replace('\u0026', '&'),
+                              'video_url': info['video']['play_addr']['url_list'][0],
+                              'session': f"douyin{int(time.time())}"
+                              }
 
-                    for k, v in video_dict.items():
-                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+                for k, v in video_dict.items():
+                    Common.logger(log_type, crawler).info(f"{k}:{v}")
 
-                    if int((int(time.time()) - int(video_dict["publish_time_stamp"])) / (3600 * 24)) > int(rule_dict.get("period", {}).get("max", 1000)):
-                        Common.logger(log_type, crawler).info(f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n')
-                        return
+                if int((int(time.time()) - int(video_dict["publish_time_stamp"])) / (3600 * 24)) > int(rule_dict.get("period", {}).get("max", 1000)):
+                    Common.logger(log_type, crawler).info(f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n')
+                    return
 
-                    if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict,
-                                     rule_dict=rule_dict) is False:
-                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
-                    elif any(str(word) if str(word) in video_dict["video_title"] else False
-                             for word in get_config_from_mysql(log_type=log_type,
-                                                               source=crawler,
-                                                               env=env,
-                                                               text="filter",
-                                                               action="")) is True:
-                        Common.logger(log_type, crawler).info('已中过滤词\n')
-                    elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
-                        Common.logger(log_type, crawler).info('视频已下载\n')
-                    else:
-                        cls.download_publish(log_type=log_type,
-                                             crawler=crawler,
-                                             user_dict=user_dict,
-                                             video_dict=video_dict,
-                                             rule_dict=rule_dict,
-                                             env=env)
-                except Exception as e:
-                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+                if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict,
+                                 rule_dict=rule_dict) is False:
+                    Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                elif any(str(word) if str(word) in video_dict["video_title"] else False
+                         for word in get_config_from_mysql(log_type=log_type,
+                                                           source=crawler,
+                                                           env=env,
+                                                           text="filter",
+                                                           action="")) is True:
+                    Common.logger(log_type, crawler).info('已中过滤词\n')
+                elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                    Common.logger(log_type, crawler).info('视频已下载\n')
+                else:
+                    cls.download_publish(log_type=log_type,
+                                         crawler=crawler,
+                                         user_dict=user_dict,
+                                         video_dict=video_dict,
+                                         rule_dict=rule_dict,
+                                         env=env)
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
 
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
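
The filter-word check in the hunk above uses an `any(str(word) if str(word) in video_dict["video_title"] else False for word in ...)` expression, which works but is hard to read at a glance. A minimal, logically equivalent sketch (assuming, as in the diff, that `get_config_from_mysql(..., text="filter", ...)` returns an iterable of non-empty filter words; the helper name `title_hits_filter` is hypothetical) would be:

```python
def title_hits_filter(video_title, filter_words):
    """Return True if any configured filter word appears in the title.

    Plain membership test; equivalent to the generator expression used in
    get_videoList, assuming every filter word is a non-empty string.
    """
    return any(str(word) in video_title for word in filter_words)
```

In `get_videoList` this would let the `elif any(...) is True:` branch become `elif title_hits_filter(video_dict["video_title"], get_config_from_mysql(log_type=log_type, source=crawler, env=env, text="filter", action="")):`, which should behave identically for non-empty filter words.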