wangkun, 2 years ago
Parent commit 2e365d3105

+ 252 - 252
gongzhonghao/gongzhonghao_follow/gongzhonghao_follow.py

@@ -190,121 +190,121 @@ class GongzhonghaoFollow:
         fakeid_dict = cls.get_fakeid(log_type, crawler, user, index)
         token_dict = cls.get_token(log_type, crawler)
         while True:
-            # try:
-            url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
-            headers = {
-                "accept": "*/*",
-                "accept-encoding": "gzip, deflate, br",
-                "accept-language": "zh-CN,zh;q=0.9",
-                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
-                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
-                           "&type=77&createType=5&token=" + str(token_dict['token']) + "&lang=zh_CN",
-                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
-                "sec-ch-ua-mobile": "?0",
-                "sec-ch-ua-platform": '"Windows"',
-                "sec-fetch-dest": "empty",
-                "sec-fetch-mode": "cors",
-                "sec-fetch-site": "same-origin",
-                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
-                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
-                "x-requested-with": "XMLHttpRequest",
-                'cookie': token_dict['cookie'],
-            }
-            params = {
-                "action": "list_ex",
-                "begin": str(cls.begin),
-                "count": "5",
-                "fakeid": fakeid_dict['fakeid'],
-                "type": "9",
-                "query": "",
-                "token": str(token_dict['token']),
-                "lang": "zh_CN",
-                "f": "json",
-                "ajax": "1",
-            }
-            urllib3.disable_warnings()
-            r = requests.get(url=url, headers=headers, params=params, verify=False)
-            while True:
-                if r.status_code != 200 and 21 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler, "token过期啦,请扫码更换\nhttps://mp.weixin.qq.com/")
-                    time.sleep(60 * 10)
-                else:
-                    break
-            if 'app_msg_list' not in r.json():
-                Common.logger(log_type, crawler).warning(f"get_gzh_url:{r.text}\n")
-                break
-            elif len(r.json()['app_msg_list']) == 0:
-                Common.logger(log_type, crawler).info('没有更多视频了\n')
-            else:
-                cls.begin += 5
-                app_msg_list = r.json()['app_msg_list']
-                for article_url in app_msg_list:
-                    # title
-                    if 'title' in article_url:
-                        title = article_url['title'].replace('/', '').replace('\n', '') \
-                            .replace('.', '').replace('“', '').replace('”', '').replace(' ', '')
-                    else:
-                        title = 0
-
-                    # aid
-                    if 'aid' in article_url:
-                        aid = article_url['aid']
-                    else:
-                        aid = 0
-
-                    # create_time
-                    if 'create_time' in article_url:
-                        create_time = article_url['create_time']
-                    else:
-                        create_time = 0
-                    publish_time_stamp = int(create_time)
-                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-
-                    avatar_url = fakeid_dict['head_url']
-
-                    # cover_url
-                    if 'cover' in article_url:
-                        cover_url = article_url['cover']
-                    else:
-                        cover_url = 0
-
-                    # article_url
-                    if 'link' in article_url:
-                        article_url = article_url['link']
+            try:
+                url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                headers = {
+                    "accept": "*/*",
+                    "accept-encoding": "gzip, deflate, br",
+                    "accept-language": "zh-CN,zh;q=0.9",
+                    "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                               "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                               "&type=77&createType=5&token=" + str(token_dict['token']) + "&lang=zh_CN",
+                    'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                    "sec-ch-ua-mobile": "?0",
+                    "sec-ch-ua-platform": '"Windows"',
+                    "sec-fetch-dest": "empty",
+                    "sec-fetch-mode": "cors",
+                    "sec-fetch-site": "same-origin",
+                    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                                  " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                    "x-requested-with": "XMLHttpRequest",
+                    'cookie': token_dict['cookie'],
+                }
+                params = {
+                    "action": "list_ex",
+                    "begin": str(cls.begin),
+                    "count": "5",
+                    "fakeid": fakeid_dict['fakeid'],
+                    "type": "9",
+                    "query": "",
+                    "token": str(token_dict['token']),
+                    "lang": "zh_CN",
+                    "f": "json",
+                    "ajax": "1",
+                }
+                urllib3.disable_warnings()
+                r = requests.get(url=url, headers=headers, params=params, verify=False)
+                while True:
+                    if r.status_code != 200 and 21 >= datetime.datetime.now().hour >= 10:
+                        Feishu.bot(log_type, crawler, "token过期啦,请扫码更换\nhttps://mp.weixin.qq.com/")
+                        time.sleep(60 * 10)
                     else:
-                        article_url = 0
-
-                    video_url = cls.get_video_url(log_type, crawler, article_url, env)
-
-                    video_dict = {
-                        'video_id': aid,
-                        'video_title': title,
-                        'publish_time_stamp': publish_time_stamp,
-                        'publish_time_str': publish_time_str,
-                        'user_name': user,
-                        'play_cnt': 0,
-                        'comment_cnt': 0,
-                        'like_cnt': 0,
-                        'share_cnt': 0,
-                        'user_id': fakeid_dict['fakeid'],
-                        'avatar_url': avatar_url,
-                        'cover_url': cover_url,
-                        'article_url': article_url,
-                        'video_url': video_url,
-                        'session': f'gongzhonghao-follow-{int(time.time())}'
-                    }
-                    for k, v in video_dict.items():
-                        Common.logger(log_type, crawler).info(f"{k}:{v}")
-                    if int(time.time()) - publish_time_stamp >= 3600 * 24 * 3:
-                        Common.logger(log_type, crawler).info(f'发布时间{publish_time_str} > 3 天\n')
-                        cls.begin = 0
-                        return
-                    cls.download_publish(log_type, crawler, video_dict, oss_endpoint, env)
-
-                Common.logger(log_type, crawler).info('休眠 5 秒\n')
-                time.sleep(5)
-            # except Exception as e:
-            #     Common.logger(log_type, crawler).error("get_gzh_url异常:{}\n", e)
+                        break
+                if 'app_msg_list' not in r.json():
+                    Common.logger(log_type, crawler).warning(f"get_gzh_url:{r.text}\n")
+                    break
+                elif len(r.json()['app_msg_list']) == 0:
+                    Common.logger(log_type, crawler).info('没有更多视频了\n')
+                else:
+                    cls.begin += 5
+                    app_msg_list = r.json()['app_msg_list']
+                    for article_url in app_msg_list:
+                        # title
+                        if 'title' in article_url:
+                            title = article_url['title'].replace('/', '').replace('\n', '') \
+                                .replace('.', '').replace('“', '').replace('”', '').replace(' ', '')
+                        else:
+                            title = 0
+
+                        # aid
+                        if 'aid' in article_url:
+                            aid = article_url['aid']
+                        else:
+                            aid = 0
+
+                        # create_time
+                        if 'create_time' in article_url:
+                            create_time = article_url['create_time']
+                        else:
+                            create_time = 0
+                        publish_time_stamp = int(create_time)
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+
+                        avatar_url = fakeid_dict['head_url']
+
+                        # cover_url
+                        if 'cover' in article_url:
+                            cover_url = article_url['cover']
+                        else:
+                            cover_url = 0
+
+                        # article_url
+                        if 'link' in article_url:
+                            article_url = article_url['link']
+                        else:
+                            article_url = 0
+
+                        video_url = cls.get_video_url(log_type, crawler, article_url, env)
+
+                        video_dict = {
+                            'video_id': aid,
+                            'video_title': title,
+                            'publish_time_stamp': publish_time_stamp,
+                            'publish_time_str': publish_time_str,
+                            'user_name': user,
+                            'play_cnt': 0,
+                            'comment_cnt': 0,
+                            'like_cnt': 0,
+                            'share_cnt': 0,
+                            'user_id': fakeid_dict['fakeid'],
+                            'avatar_url': avatar_url,
+                            'cover_url': cover_url,
+                            'article_url': article_url,
+                            'video_url': video_url,
+                            'session': f'gongzhonghao-follow-{int(time.time())}'
+                        }
+                        for k, v in video_dict.items():
+                            Common.logger(log_type, crawler).info(f"{k}:{v}")
+                        if int(time.time()) - publish_time_stamp >= 3600 * 24 * 3:
+                            Common.logger(log_type, crawler).info(f'发布时间{publish_time_str} > 3 天\n')
+                            cls.begin = 0
+                            return
+                        cls.download_publish(log_type, crawler, video_dict, oss_endpoint, env)
+
+                    Common.logger(log_type, crawler).info('休眠 5 秒\n')
+                    time.sleep(5)
+            except Exception as e:
+                Common.logger(log_type, crawler).error("get_gzh_url异常:{}\n", e)
 
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
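A note on the wait loop in the hunk above: after `requests.get`, the inner `while True` keeps re-checking the same `r` object, so a non-200 response between 10:00 and 21:00 alerts and sleeps in ten-minute cycles without ever retrying the request. A minimal re-requesting sketch under the same headers/params setup (the helper name `get_with_token_retry` and the `notify` callback standing in for `Feishu.bot` are hypothetical):

    # Hypothetical variant that re-issues the request after each alert,
    # so a manually refreshed token can actually take effect.
    import datetime
    import time

    import requests

    def get_with_token_retry(url, headers, params, notify):
        while True:
            r = requests.get(url=url, headers=headers, params=params, verify=False)
            if r.status_code == 200:
                return r
            if 10 <= datetime.datetime.now().hour <= 21:
                notify("token过期啦,请扫码更换\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 10)  # wait 10 minutes for a manual token refresh
            else:
                return r  # outside alert hours, hand the response back unchanged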
@@ -315,133 +315,133 @@ class GongzhonghaoFollow:
     # 下载/上传
     @classmethod
     def download_publish(cls, log_type, crawler, video_dict, oss_endpoint, env):
-        # try:
-        if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
-            Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
-        # 标题敏感词过滤
-        elif any(word if word in video_dict['video_title'] else False for word in
-                 filter_word(log_type, crawler, "公众号", env)) is True:
-            Common.logger(log_type, crawler).info("标题已中过滤词\n")
-        # 已下载判断
-        elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
-            Common.logger(log_type, crawler).info("视频已下载\n")
-        # 标题相似度
-        elif cls.title_like(log_type, crawler, video_dict['video_title'], env) is True:
-            Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
-        else:
-            # 下载视频
-            Common.download_method(log_type=log_type, crawler=crawler, text="video",
-                                   title=video_dict["video_title"], url=video_dict["video_url"])
-            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-            # 获取视频时长
-            ffmpeg_dict = Common.ffmpeg(log_type, crawler,
-                                        f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
-            if ffmpeg_dict is None:
-                # 删除视频文件夹
-                shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
-                return
-            video_dict["video_width"] = ffmpeg_dict["width"]
-            video_dict["video_height"] = ffmpeg_dict["height"]
-            video_dict["duration"] = ffmpeg_dict["duration"]
-            video_size = ffmpeg_dict["size"]
-            Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
-            Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
-            Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
-            Common.logger(log_type, crawler).info(f'video_size:{video_size}')
-            # 视频size=0,直接删除
-            if int(video_size) == 0 or cls.download_rule(video_dict) is False:
-                # 删除视频文件夹
-                shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
-                return
-            # 下载封面
-            Common.download_method(log_type=log_type, crawler=crawler, text="cover",
-                                   title=video_dict["video_title"], url=video_dict["cover_url"])
-            # 保存视频信息至 "./videos/{video_title}/info.txt"
-            Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
-
-            # 上传视频
-            Common.logger(log_type, crawler).info("开始上传视频...")
-            strategy = "定向爬虫策略"
-            our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                      crawler=crawler,
-                                                      strategy=strategy,
-                                                      our_uid="follow",
-                                                      oss_endpoint=oss_endpoint,
-                                                      env=env)
-            if env == 'prod':
-                our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+        try:
+            if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
+                Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
+            # 标题敏感词过滤
+            elif any(word if word in video_dict['video_title'] else False for word in
+                     filter_word(log_type, crawler, "公众号", env)) is True:
+                Common.logger(log_type, crawler).info("标题已中过滤词\n")
+            # 已下载判断
+            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                Common.logger(log_type, crawler).info("视频已下载\n")
+            # 标题相似度
+            elif cls.title_like(log_type, crawler, video_dict['video_title'], env) is True:
+                Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
             else:
-                our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
-            Common.logger(log_type, crawler).info("视频上传完成")
-
-            if our_video_id is None:
-                # 删除视频文件夹
-                shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-                return
-
-            # 视频信息保存数据库
-            rule_dict = {
-                "duration": {"min": 20, "max": 45 * 60},
-                "publish_day": {"min": 3}
-            }
-
-            insert_sql = f""" insert into crawler_video(video_id,
-                                                        out_user_id,
-                                                        platform,
-                                                        strategy,
-                                                        out_video_id,
-                                                        video_title,
-                                                        cover_url,
-                                                        video_url,
-                                                        duration,
-                                                        publish_time,
-                                                        play_cnt,
-                                                        crawler_rule,
-                                                        width,
-                                                        height)
-                                                        values({our_video_id},
-                                                        "{video_dict['user_id']}",
-                                                        "{cls.platform}",
-                                                        "定向爬虫策略",
-                                                        "{video_dict['video_id']}",
-                                                        "{video_dict['video_title']}",
-                                                        "{video_dict['cover_url']}",
-                                                        "{video_dict['video_url']}",
-                                                        {int(video_dict['duration'])},
-                                                        "{video_dict['publish_time_str']}",
-                                                        {int(video_dict['play_cnt'])},
-                                                        '{json.dumps(rule_dict)}',
-                                                        {int(video_dict['video_width'])},
-                                                        {int(video_dict['video_height'])}) """
-            Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-            MysqlHelper.update_values(log_type, crawler, insert_sql, env)
-            Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
-
-            # 视频写入飞书
-            Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
-            # 视频ID工作表,首行写入数据
-            upload_time = int(time.time())
-            values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
-                       "用户主页",
-                       video_dict['video_title'],
-                       video_dict['video_id'],
-                       our_video_link,
-                       int(video_dict['duration']),
-                       f"{video_dict['video_width']}*{video_dict['video_height']}",
-                       video_dict['publish_time_str'],
-                       video_dict['user_name'],
-                       video_dict['user_id'],
-                       video_dict['avatar_url'],
-                       video_dict['cover_url'],
-                       video_dict['article_url'],
-                       video_dict['video_url']]]
-            time.sleep(0.5)
-            Feishu.update_values(log_type, crawler, "47e39d", "F2:Z2", values)
-            Common.logger(log_type, crawler).info('视频下载/上传成功\n')
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error(f"download_publish异常:{e}\n")
+                # 下载视频
+                Common.download_method(log_type=log_type, crawler=crawler, text="video",
+                                       title=video_dict["video_title"], url=video_dict["video_url"])
+                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+                # 获取视频时长
+                ffmpeg_dict = Common.ffmpeg(log_type, crawler,
+                                            f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+                if ffmpeg_dict is None:
+                    # 删除视频文件夹
+                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                    return
+                video_dict["video_width"] = ffmpeg_dict["width"]
+                video_dict["video_height"] = ffmpeg_dict["height"]
+                video_dict["duration"] = ffmpeg_dict["duration"]
+                video_size = ffmpeg_dict["size"]
+                Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
+                Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
+                Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
+                Common.logger(log_type, crawler).info(f'video_size:{video_size}')
+                # 视频size=0,直接删除
+                if int(video_size) == 0 or cls.download_rule(video_dict) is False:
+                    # 删除视频文件夹
+                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                    return
+                # 下载封面
+                Common.download_method(log_type=log_type, crawler=crawler, text="cover",
+                                       title=video_dict["video_title"], url=video_dict["cover_url"])
+                # 保存视频信息至 "./videos/{video_title}/info.txt"
+                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+                # 上传视频
+                Common.logger(log_type, crawler).info("开始上传视频...")
+                strategy = "定向爬虫策略"
+                our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                          crawler=crawler,
+                                                          strategy=strategy,
+                                                          our_uid="follow",
+                                                          oss_endpoint=oss_endpoint,
+                                                          env=env)
+                if env == 'prod':
+                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+                else:
+                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+                Common.logger(log_type, crawler).info("视频上传完成")
+
+                if our_video_id is None:
+                    # 删除视频文件夹
+                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+                    return
+
+                # 视频信息保存数据库
+                rule_dict = {
+                    "duration": {"min": 20, "max": 45 * 60},
+                    "publish_day": {"min": 3}
+                }
+
+                insert_sql = f""" insert into crawler_video(video_id,
+                                                            out_user_id,
+                                                            platform,
+                                                            strategy,
+                                                            out_video_id,
+                                                            video_title,
+                                                            cover_url,
+                                                            video_url,
+                                                            duration,
+                                                            publish_time,
+                                                            play_cnt,
+                                                            crawler_rule,
+                                                            width,
+                                                            height)
+                                                            values({our_video_id},
+                                                            "{video_dict['user_id']}",
+                                                            "{cls.platform}",
+                                                            "定向爬虫策略",
+                                                            "{video_dict['video_id']}",
+                                                            "{video_dict['video_title']}",
+                                                            "{video_dict['cover_url']}",
+                                                            "{video_dict['video_url']}",
+                                                            {int(video_dict['duration'])},
+                                                            "{video_dict['publish_time_str']}",
+                                                            {int(video_dict['play_cnt'])},
+                                                            '{json.dumps(rule_dict)}',
+                                                            {int(video_dict['video_width'])},
+                                                            {int(video_dict['video_height'])}) """
+                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+                Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+
+                # 视频写入飞书
+                Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
+                # 视频ID工作表,首行写入数据
+                upload_time = int(time.time())
+                values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                           "用户主页",
+                           video_dict['video_title'],
+                           video_dict['video_id'],
+                           our_video_link,
+                           int(video_dict['duration']),
+                           f"{video_dict['video_width']}*{video_dict['video_height']}",
+                           video_dict['publish_time_str'],
+                           video_dict['user_name'],
+                           video_dict['user_id'],
+                           video_dict['avatar_url'],
+                           video_dict['cover_url'],
+                           video_dict['article_url'],
+                           video_dict['video_url']]]
+                time.sleep(0.5)
+                Feishu.update_values(log_type, crawler, "47e39d", "F2:Z2", values)
+                Common.logger(log_type, crawler).info('视频下载/上传成功\n')
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"download_publish异常:{e}\n")
 
     @classmethod
     def get_users(cls):
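In the download_publish hunk above, insert_sql is assembled with f-string interpolation, so any double quote surviving in a title, URL, or publish-time string breaks the statement (titles are only partially sanitized earlier; URLs not at all). A parameterized sketch, assuming a plain pymysql connection (`insert_crawler_video` and `conn` are hypothetical; whether `MysqlHelper.update_values` accepts bind parameters isn't visible in this diff):

    import json

    import pymysql  # assumed driver; any DB-API module with %s placeholders works

    def insert_crawler_video(conn, our_video_id, video_dict, platform, rule_dict):
        # Placeholders let the driver escape every value instead of f-string quoting.
        sql = """insert into crawler_video(video_id, out_user_id, platform, strategy,
                     out_video_id, video_title, cover_url, video_url, duration,
                     publish_time, play_cnt, crawler_rule, width, height)
                 values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
        args = (our_video_id, video_dict['user_id'], platform, "定向爬虫策略",
                video_dict['video_id'], video_dict['video_title'],
                video_dict['cover_url'], video_dict['video_url'],
                int(video_dict['duration']), video_dict['publish_time_str'],
                int(video_dict['play_cnt']), json.dumps(rule_dict),
                int(video_dict['video_width']), int(video_dict['video_height']))
        with conn.cursor() as cursor:
            cursor.execute(sql, args)
        conn.commit()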
@@ -525,18 +525,18 @@ class GongzhonghaoFollow:
 
     @classmethod
     def get_all_videos(cls, log_type, crawler, oss_endpoint, env):
-        # try:
-        user_list = cls.get_users()
-        for user_dict in user_list:
-            user_name = user_dict['user_name']
-            index = user_dict['index']
-            Common.logger(log_type, crawler).info(f'获取 {user_name} 公众号视频\n')
-            cls.get_videoList(log_type, crawler, user_name, index, oss_endpoint, env)
-            cls.begin = 0
-            Common.logger(log_type, crawler).info('休眠60秒\n')
-            time.sleep(60)
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).info(f'get_all_videos异常:{e}\n')
+        try:
+            user_list = cls.get_users()
+            for user_dict in user_list:
+                user_name = user_dict['user_name']
+                index = user_dict['index']
+                Common.logger(log_type, crawler).info(f'获取 {user_name} 公众号视频\n')
+                cls.get_videoList(log_type, crawler, user_name, index, oss_endpoint, env)
+                cls.begin = 0
+                Common.logger(log_type, crawler).info('休眠60秒\n')
+                time.sleep(60)
+        except Exception as e:
+            Common.logger(log_type, crawler).info(f'get_all_videos异常:{e}\n')
 
 
 if __name__ == "__main__":
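One smaller point from the same download_publish hunk: `any(word if word in ... else False for word in ...) is True` can be written directly, since `word in title` is already a boolean. An equivalent sketch (`title_hits_filter` is a hypothetical name; filter_word is assumed to return an iterable of keyword strings):

    def title_hits_filter(title, filter_words):
        # any() short-circuits on the first matching keyword
        return any(word in title for word in filter_words)

    # usage: title_hits_filter(video_dict['video_title'],
    #                          filter_word(log_type, crawler, "公众号", env))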

+ 16 - 16
gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow.py

@@ -14,22 +14,22 @@ class Main:
     @classmethod
     def main(cls, log_type, crawler, env):
         while True:
-            # try:
-            if env == "dev":
-                oss_endpoint = "out"
-            else:
-                oss_endpoint = "inner"
-            Common.logger(log_type, crawler).info('开始抓取公众号视频\n')
-            GongzhonghaoFollow.get_all_videos(log_type=log_type,
-                                              crawler=crawler,
-                                              oss_endpoint=oss_endpoint,
-                                              env=env)
-            Common.del_logs(log_type, crawler)
-            GongzhonghaoFollow.begin = 0
-            Common.logger(log_type, crawler).info('休眠 8 小时\n')
-            time.sleep(3600*8)
-            # except Exception as e:
-            #     Common.logger(log_type, crawler).info(f"公众号抓取异常:{e}\n")
+            try:
+                if env == "dev":
+                    oss_endpoint = "out"
+                else:
+                    oss_endpoint = "inner"
+                Common.logger(log_type, crawler).info('开始抓取公众号视频\n')
+                GongzhonghaoFollow.get_all_videos(log_type=log_type,
+                                                  crawler=crawler,
+                                                  oss_endpoint=oss_endpoint,
+                                                  env=env)
+                Common.del_logs(log_type, crawler)
+                GongzhonghaoFollow.begin = 0
+                Common.logger(log_type, crawler).info('休眠 8 小时\n')
+                time.sleep(3600*8)
+            except Exception as e:
+                Common.logger(log_type, crawler).info(f"公众号抓取异常:{e}\n")
 
 
 if __name__ == '__main__':