wangkun 2 years ago
parent commit e691d8a96c

+ 16 - 35
gongzhonghao/gongzhonghao_follow/gongzhonghao_follow.py

@@ -5,6 +5,7 @@ import datetime
 import difflib
 import json
 import os
+import random
 import shutil
 import sys
 import time
@@ -117,12 +118,12 @@ class GongzhonghaoFollow:
                 if r.json()["base_resp"]["err_msg"] == "invalid session" and 21 >= datetime.datetime.now().hour >= 10:
                     Common.logger(log_type, crawler).info(f"status_code:{r.status_code}")
                     Common.logger(log_type, crawler).warning(f"get_gzh_url:{r.text}\n")
-                    Feishu.bot(log_type, crawler, "token过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                    Feishu.bot(log_type, crawler, "token_1过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
                     time.sleep(60 * 10)
                 elif r.json()["base_resp"]["err_msg"] == "freq control" and 21 >= datetime.datetime.now().hour >= 10:
                     Common.logger(log_type, crawler).info(f"status_code:{r.status_code}")
                     Common.logger(log_type, crawler).warning(f"get_gzh_url:{r.text}\n")
-                    Feishu.bot(log_type, crawler, "公众号频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                    Feishu.bot(log_type, crawler, "公众号_1频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                     time.sleep(60 * 10)
                 else:
                     break
@@ -234,15 +235,18 @@ class GongzhonghaoFollow:
                 urllib3.disable_warnings()
                 r = requests.get(url=url, headers=headers, params=params, verify=False)
                 while True:
-                    if r.json()["base_resp"]["err_msg"] == "invalid session" and 21 >= datetime.datetime.now().hour >= 10:
+                    if r.json()["base_resp"][
+                        "err_msg"] == "invalid session" and 21 >= datetime.datetime.now().hour >= 10:
                         Common.logger(log_type, crawler).info(f"status_code:{r.status_code}")
                         Common.logger(log_type, crawler).info(f"response:{r.text}")
-                        Feishu.bot(log_type, crawler, "token过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                        Feishu.bot(log_type, crawler, "token_1过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
                         time.sleep(60 * 10)
-                    elif r.json()["base_resp"]["err_msg"] == "freq control" and 21 >= datetime.datetime.now().hour >= 10:
+                    elif r.json()["base_resp"][
+                        "err_msg"] == "freq control" and 21 >= datetime.datetime.now().hour >= 10:
                         Common.logger(log_type, crawler).info(f"status_code:{r.status_code}")
                         Common.logger(log_type, crawler).warning(f"get_gzh_url:{r.text}\n")
-                        Feishu.bot(log_type, crawler, "公众号频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                        Feishu.bot(log_type, crawler,
+                                   "公众号_1频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                         time.sleep(60 * 10)
                     else:
                         break
@@ -318,8 +322,8 @@ class GongzhonghaoFollow:
                             return
                         cls.download_publish(log_type, crawler, video_dict, oss_endpoint, env)
 
-                    Common.logger(log_type, crawler).info('休眠 5 秒\n')
-                    time.sleep(5)
+                    Common.logger(log_type, crawler).info('随机休眠 0-60 秒\n')
+                    time.sleep(random.randint(0, 60))
             except Exception as e:
                 Common.logger(log_type, crawler).error("get_videoList异常:{}\n", e)
 
@@ -464,7 +468,7 @@ class GongzhonghaoFollow:
     def get_users(cls):
         # user_sheet = Feishu.get_values_batch("follow", 'gongzhonghao', 'Bzv72P')
         # user_list = []
-        # for i in range(1, len(user_sheet)):
+        # for i in range(1, 51):
         #     user_name = user_sheet[i][0]
         #     index = user_sheet[i][1]
         #     user_dict = {
@@ -498,30 +502,7 @@ class GongzhonghaoFollow:
                      {'user_name': '万物归息', 'index': 1}, {'user_name': '神州红魂', 'index': 1},
                      {'user_name': '音乐早餐', 'index': 1}, {'user_name': '1条末读消息', 'index': 1},
                      {'user_name': '环球文摘', 'index': 1}, {'user_name': '精彩有余', 'index': 1},
-                     {'user_name': '一起训练吧', 'index': 1}, {'user_name': '1条重要消息', 'index': 1},
-                     {'user_name': '太上养身', 'index': 1}, {'user_name': '懂点养身秘诀', 'index': 1},
-                     {'user_name': '送乐者', 'index': 1}, {'user_name': '蜂业小百科', 'index': 1},
-                     {'user_name': '健康与养身秘诀', 'index': 1}, {'user_name': '有心人r', 'index': 1},
-                     {'user_name': '古诗词世界', 'index': 1}, {'user_name': '晨间悦读', 'index': 1},
-                     {'user_name': '养身有诀窍', 'index': 1}, {'user_name': '退休族信息圈', 'index': 1},
-                     {'user_name': '艾公铁粉团', 'index': 1}, {'user_name': '酸甜苦辣麻咸', 'index': 1},
-                     {'user_name': '日常生活小帮手', 'index': 1}, {'user_name': '小帅的精彩视频', 'index': 1},
-                     {'user_name': '养身常识小窍门', 'index': 1}, {'user_name': '医学养身技巧', 'index': 1},
-                     {'user_name': '退休圈', 'index': 1}, {'user_name': '生活小助手', 'index': 1},
-                     {'user_name': '经典老歌曲好听的音乐', 'index': 1}, {'user_name': '黑马快讯', 'index': 1},
-                     {'user_name': '绝妙经典', 'index': 1}, {'user_name': '深读时策', 'index': 1},
-                     {'user_name': '健康与生活大全', 'index': 1}, {'user_name': '李肃论道', 'index': 1},
-                     {'user_name': '爱国者吹锋号', 'index': 1}, {'user_name': '兵心可鉴', 'index': 1},
-                     {'user_name': '精选动心金曲', 'index': 1}, {'user_name': '爱二胡群', 'index': 1},
-                     {'user_name': '数码科技大爆炸', 'index': 1}, {'user_name': '何静同学', 'index': 1},
-                     {'user_name': '方敏爱美食', 'index': 1}, {'user_name': '针灸推拿特色技术', 'index': 1},
-                     {'user_name': '挺进天山', 'index': 1}, {'user_name': '紫陌捻花', 'index': 1},
-                     {'user_name': '巨响养身', 'index': 1}, {'user_name': '荣观世界', 'index': 1},
-                     {'user_name': 'Music音乐世界', 'index': 1}, {'user_name': '微观调查组', 'index': 1},
-                     {'user_name': '用汉方拥抱世界', 'index': 1}, {'user_name': '医学养身秘诀', 'index': 1},
-                     {'user_name': '医学老人养身', 'index': 1}, {'user_name': '热文微观', 'index': 1},
-                     {'user_name': '医学养身秘笈', 'index': 1}, {'user_name': '你未读消息', 'index': 2},
-                     {'user_name': '6点谈健康', 'index': 1}]
+                     {'user_name': '一起训练吧', 'index': 1}, {'user_name': '1条重要消息', 'index': 1}]
 
         return user_list
 
@@ -535,8 +516,8 @@ class GongzhonghaoFollow:
                 Common.logger(log_type, crawler).info(f'获取 {user_name} 公众号视频\n')
                 cls.get_videoList(log_type, crawler, user_name, index, oss_endpoint, env)
                 cls.begin = 0
-                Common.logger(log_type, crawler).info('休眠60秒\n')
-                time.sleep(60)
+                Common.logger(log_type, crawler).info('随机休眠 0-60 秒\n')
+                time.sleep(random.randint(0, 60))
         except Exception as e:
             Common.logger(log_type, crawler).info(f'get_all_videos异常:{e}\n')
 

+ 532 - 0
gongzhonghao/gongzhonghao_follow/gongzhonghao_follow_2.py

@@ -0,0 +1,532 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/28
+import datetime
+import difflib
+import json
+import os
+import random
+import shutil
+import sys
+import time
+from hashlib import md5
+
+import requests
+import urllib3
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.common.by import By
+from selenium import webdriver
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.public import filter_word
+from common.publish import Publish
+from common.scheduling_db import MysqlHelper
+
+
+class GongzhonghaoFollow2:
+    # 翻页参数
+    begin = 0
+    platform = "公众号"
+
+    # 基础门槛规则
+    @staticmethod
+    def download_rule(video_dict):
+        """
+        下载视频的基本规则
+        :param video_dict: 视频信息,字典格式
+        :return: 满足规则,返回 True;反之,返回 False
+        """
+        # 视频时长 20秒 - 45 分钟
+        if 60 * 45 >= int(float(video_dict['duration'])) >= 20:
+            # 宽或高
+            if int(video_dict['video_width']) >= 0 or int(video_dict['video_height']) >= 0:
+                return True
+            else:
+                return False
+        else:
+            return False
+
+    @classmethod
+    def title_like(cls, log_type, crawler, title, env):
+        select_sql = f""" select * from crawler_video where platform="公众号" """
+        video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
+        if len(video_list) == 0:
+            return None
+        for video_dict in video_list:
+            video_title = video_dict["video_title"]
+            if difflib.SequenceMatcher(None, title, video_title).quick_ratio() >= 0.8:
+                return True
+            else:
+                pass
+
+    # 获取 token
+    @classmethod
+    def get_token(cls, log_type, crawler):
+        while True:
+            try:
+                sheet = Feishu.get_values_batch(log_type, crawler, "I4aeh3")
+                if sheet is None:
+                    time.sleep(3)
+                    continue
+                token = sheet[0][1]
+                cookie = sheet[1][1]
+                token_dict = {'token': token, 'cookie': cookie}
+                return token_dict
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"get_cookie_token异常:{e}\n")
+
+    # 获取用户 fakeid
+    @classmethod
+    def get_fakeid(cls, log_type, crawler, user, index):
+        try:
+            token_dict = cls.get_token(log_type, crawler)
+            url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
+            headers = {
+                "accept": "*/*",
+                "accept-encoding": "gzip, deflate, br",
+                "accept-language": "zh-CN,zh;q=0.9",
+                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                           "&type=77&createType=5&token=1011071554&lang=zh_CN",
+                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "sec-ch-ua-mobile": "?0",
+                "sec-ch-ua-platform": '"Windows"',
+                "sec-fetch-dest": "empty",
+                "sec-fetch-mode": "cors",
+                "sec-fetch-site": "same-origin",
+                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                "x-requested-with": "XMLHttpRequest",
+                'cookie': token_dict['cookie'],
+            }
+            params = {
+                "action": "search_biz",
+                "begin": "0",
+                "count": "5",
+                "query": str(user),
+                "token": token_dict['token'],
+                "lang": "zh_CN",
+                "f": "json",
+                "ajax": "1",
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, verify=False)
+            while True:
+                if r.json()["base_resp"]["err_msg"] == "invalid session" and 21 >= datetime.datetime.now().hour >= 10:
+                    Common.logger(log_type, crawler).info(f"status_code:{r.status_code}")
+                    Common.logger(log_type, crawler).warning(f"get_gzh_url:{r.text}\n")
+                    Feishu.bot(log_type, crawler, "token_2过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                    time.sleep(60 * 10)
+                elif r.json()["base_resp"]["err_msg"] == "freq control" and 21 >= datetime.datetime.now().hour >= 10:
+                    Common.logger(log_type, crawler).info(f"status_code:{r.status_code}")
+                    Common.logger(log_type, crawler).warning(f"get_gzh_url:{r.text}\n")
+                    Feishu.bot(log_type, crawler, "公众号_2频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                    time.sleep(60 * 10)
+                else:
+                    break
+            if "list" not in r.json() or len(r.json()["list"]) == 0:
+                Common.logger(log_type, crawler).info(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text},休眠 1 秒\n")
+                time.sleep(1)
+            else:
+                fakeid = r.json()["list"][int(index) - 1]["fakeid"]
+                head_url = r.json()["list"][int(index) - 1]["round_head_img"]
+                fakeid_dict = {'fakeid': fakeid, 'head_url': head_url}
+                return fakeid_dict
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_fakeid异常:{e}\n")
+
+    # 获取腾讯视频下载链接
+    @classmethod
+    def get_tencent_video_url(cls, log_type, crawler, video_id):
+        try:
+            url = 'https://vv.video.qq.com/getinfo?vids=' + str(video_id) + '&platform=101001&charge=0&otype=json'
+            response = requests.get(url=url).text.replace('QZOutputJson=', '').replace('"};', '"}')
+            response = json.loads(response)
+            url = response['vl']['vi'][0]['ul']['ui'][0]['url']
+            fvkey = response['vl']['vi'][0]['fvkey']
+            video_url = url + str(video_id) + '.mp4?vkey=' + fvkey
+            return video_url
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_tencent_video_url异常:{e}\n")
+
+    @classmethod
+    def get_video_url(cls, log_type, crawler, article_url, env):
+        try:
+            # 打印请求配置
+            ca = DesiredCapabilities.CHROME
+            ca["goog:loggingPrefs"] = {"performance": "ALL"}
+
+            # 不打开浏览器运行
+            chrome_options = webdriver.ChromeOptions()
+            chrome_options.add_argument("headless")
+            chrome_options.add_argument(
+                f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
+            chrome_options.add_argument("--no-sandbox")
+
+            # driver初始化
+            if env == "prod":
+                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
+            else:
+                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
+                    '/Users/wangkun/Downloads/chromedriver/chromedriver_v111/chromedriver'))
+
+            driver.implicitly_wait(10)
+            # Common.logger(log_type, crawler).info('打开文章链接')
+            driver.get(article_url)
+            time.sleep(1)
+
+            if len(driver.find_elements(By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]')) != 0:
+                video_url = driver.find_element(
+                    By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]').get_attribute('src')
+            elif len(driver.find_elements(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]')) != 0:
+                iframe = driver.find_element(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]').get_attribute(
+                    'src')
+                video_id = iframe.split('vid=')[-1].split('&')[0]
+                video_url = cls.get_tencent_video_url(log_type, crawler, video_id)
+            else:
+                video_url = 0
+
+            return video_url
+        except Exception as e:
+            Common.logger(log_type, crawler).info(f'get_video_url异常:{e}\n')
+
+    # 获取文章列表
+    @classmethod
+    def get_videoList(cls, log_type, crawler, user, index, oss_endpoint, env):
+        fakeid_dict = cls.get_fakeid(log_type, crawler, user, index)
+        token_dict = cls.get_token(log_type, crawler)
+        while True:
+            try:
+                url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                headers = {
+                    "accept": "*/*",
+                    "accept-encoding": "gzip, deflate, br",
+                    "accept-language": "zh-CN,zh;q=0.9",
+                    "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                               "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                               "&type=77&createType=5&token=" + str(token_dict['token']) + "&lang=zh_CN",
+                    'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                    "sec-ch-ua-mobile": "?0",
+                    "sec-ch-ua-platform": '"Windows"',
+                    "sec-fetch-dest": "empty",
+                    "sec-fetch-mode": "cors",
+                    "sec-fetch-site": "same-origin",
+                    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                                  " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                    "x-requested-with": "XMLHttpRequest",
+                    'cookie': token_dict['cookie'],
+                }
+                params = {
+                    "action": "list_ex",
+                    "begin": str(cls.begin),
+                    "count": "5",
+                    "fakeid": fakeid_dict['fakeid'],
+                    "type": "9",
+                    "query": "",
+                    "token": str(token_dict['token']),
+                    "lang": "zh_CN",
+                    "f": "json",
+                    "ajax": "1",
+                }
+                urllib3.disable_warnings()
+                r = requests.get(url=url, headers=headers, params=params, verify=False)
+                while True:
+                    if r.json()["base_resp"][
+                        "err_msg"] == "invalid session" and 21 >= datetime.datetime.now().hour >= 10:
+                        Common.logger(log_type, crawler).info(f"status_code:{r.status_code}")
+                        Common.logger(log_type, crawler).info(f"response:{r.text}")
+                        Feishu.bot(log_type, crawler, "token_2过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                        time.sleep(60 * 10)
+                    elif r.json()["base_resp"][
+                        "err_msg"] == "freq control" and 21 >= datetime.datetime.now().hour >= 10:
+                        Common.logger(log_type, crawler).info(f"status_code:{r.status_code}")
+                        Common.logger(log_type, crawler).warning(f"get_gzh_url:{r.text}\n")
+                        Feishu.bot(log_type, crawler, "公众号_2频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                        time.sleep(60 * 10)
+                    else:
+                        break
+                if 'app_msg_list' not in r.json():
+                    Common.logger(log_type, crawler).info(f"status_code:{r.status_code}")
+                    Common.logger(log_type, crawler).warning(f"get_gzh_url:{r.text}\n")
+                    break
+                elif len(r.json()['app_msg_list']) == 0:
+                    Common.logger(log_type, crawler).info('没有更多视频了\n')
+                else:
+                    cls.begin += 5
+                    app_msg_list = r.json()['app_msg_list']
+                    for article_url in app_msg_list:
+                        # title
+                        if 'title' in article_url:
+                            title = article_url['title'].replace('/', '').replace('\n', '') \
+                                .replace('.', '').replace('“', '').replace('”', '').replace(' ', '')
+                        else:
+                            title = 0
+
+                        # aid
+                        if 'aid' in article_url:
+                            aid = article_url['aid']
+                        else:
+                            aid = 0
+
+                        # create_time
+                        if 'create_time' in article_url:
+                            create_time = article_url['create_time']
+                        else:
+                            create_time = 0
+                        publish_time_stamp = int(create_time)
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+
+                        avatar_url = fakeid_dict['head_url']
+
+                        # cover_url
+                        if 'cover' in article_url:
+                            cover_url = article_url['cover']
+                        else:
+                            cover_url = 0
+
+                        # article_url
+                        if 'link' in article_url:
+                            article_url = article_url['link']
+                        else:
+                            article_url = 0
+
+                        video_url = cls.get_video_url(log_type, crawler, article_url, env)
+
+                        video_dict = {
+                            'video_id': aid,
+                            'video_title': title,
+                            'publish_time_stamp': publish_time_stamp,
+                            'publish_time_str': publish_time_str,
+                            'user_name': user,
+                            'play_cnt': 0,
+                            'comment_cnt': 0,
+                            'like_cnt': 0,
+                            'share_cnt': 0,
+                            'user_id': fakeid_dict['fakeid'],
+                            'avatar_url': avatar_url,
+                            'cover_url': cover_url,
+                            'article_url': article_url,
+                            'video_url': video_url,
+                            'session': f'gongzhonghao-follow-{int(time.time())}'
+                        }
+                        for k, v in video_dict.items():
+                            Common.logger(log_type, crawler).info(f"{k}:{v}")
+                        if int(time.time()) - publish_time_stamp >= 3600 * 24 * 3:
+                            Common.logger(log_type, crawler).info(f'发布时间{publish_time_str} > 3 天\n')
+                            cls.begin = 0
+                            return
+                        cls.download_publish(log_type, crawler, video_dict, oss_endpoint, env)
+
+                    Common.logger(log_type, crawler).info('随机休眠 0-60 秒\n')
+                    time.sleep(random.randint(0, 60))
+            except Exception as e:
+                Common.logger(log_type, crawler).error("get_videoList异常:{}\n", e)
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="公众号" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    # 下载/上传
+    @classmethod
+    def download_publish(cls, log_type, crawler, video_dict, oss_endpoint, env):
+        try:
+            if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
+                Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
+            # 标题敏感词过滤
+            elif any(word in video_dict['video_title'] for word in
+                     filter_word(log_type, crawler, "公众号", env)):
+                Common.logger(log_type, crawler).info("标题已中过滤词\n")
+            # 已下载判断
+            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                Common.logger(log_type, crawler).info("视频已下载\n")
+            # 标题相似度
+            elif cls.title_like(log_type, crawler, video_dict['video_title'], env) is True:
+                Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
+            else:
+                # 下载视频
+                Common.download_method(log_type=log_type, crawler=crawler, text="video",
+                                       title=video_dict["video_title"], url=video_dict["video_url"])
+                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+                # 获取视频时长
+                ffmpeg_dict = Common.ffmpeg(log_type, crawler,
+                                            f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+                if ffmpeg_dict is None:
+                    # 删除视频文件夹
+                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                    return
+                video_dict["video_width"] = ffmpeg_dict["width"]
+                video_dict["video_height"] = ffmpeg_dict["height"]
+                video_dict["duration"] = ffmpeg_dict["duration"]
+                video_size = ffmpeg_dict["size"]
+                Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
+                Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
+                Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
+                Common.logger(log_type, crawler).info(f'video_size:{video_size}')
+                # 视频size=0,直接删除
+                if int(video_size) == 0 or cls.download_rule(video_dict) is False:
+                    # 删除视频文件夹
+                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                    return
+                # 下载封面
+                Common.download_method(log_type=log_type, crawler=crawler, text="cover",
+                                       title=video_dict["video_title"], url=video_dict["cover_url"])
+                # 保存视频信息至 "./videos/{video_title}/info.txt"
+                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+                # 上传视频
+                Common.logger(log_type, crawler).info("开始上传视频...")
+                strategy = "定向爬虫策略"
+                our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                          crawler=crawler,
+                                                          strategy=strategy,
+                                                          our_uid="follow",
+                                                          oss_endpoint=oss_endpoint,
+                                                          env=env)
+                if env == 'prod':
+                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+                else:
+                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+                Common.logger(log_type, crawler).info("视频上传完成")
+
+                if our_video_id is None:
+                    # 删除视频文件夹
+                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+                    return
+
+                # 视频信息保存数据库
+                rule_dict = {
+                    "duration": {"min": 20, "max": 45 * 60},
+                    "publish_day": {"min": 3}
+                }
+
+                insert_sql = f""" insert into crawler_video(video_id,
+                                                            out_user_id,
+                                                            platform,
+                                                            strategy,
+                                                            out_video_id,
+                                                            video_title,
+                                                            cover_url,
+                                                            video_url,
+                                                            duration,
+                                                            publish_time,
+                                                            play_cnt,
+                                                            crawler_rule,
+                                                            width,
+                                                            height)
+                                                            values({our_video_id},
+                                                            "{video_dict['user_id']}",
+                                                            "{cls.platform}",
+                                                            "定向爬虫策略",
+                                                            "{video_dict['video_id']}",
+                                                            "{video_dict['video_title']}",
+                                                            "{video_dict['cover_url']}",
+                                                            "{video_dict['video_url']}",
+                                                            {int(video_dict['duration'])},
+                                                            "{video_dict['publish_time_str']}",
+                                                            {int(video_dict['play_cnt'])},
+                                                            '{json.dumps(rule_dict)}',
+                                                            {int(video_dict['video_width'])},
+                                                            {int(video_dict['video_height'])}) """
+                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+                Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+
+                # 视频写入飞书
+                Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
+                # 视频ID工作表,首行写入数据
+                upload_time = int(time.time())
+                values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                           "用户主页",
+                           video_dict['video_title'],
+                           video_dict['video_id'],
+                           our_video_link,
+                           int(video_dict['duration']),
+                           f"{video_dict['video_width']}*{video_dict['video_height']}",
+                           video_dict['publish_time_str'],
+                           video_dict['user_name'],
+                           video_dict['user_id'],
+                           video_dict['avatar_url'],
+                           video_dict['cover_url'],
+                           video_dict['article_url'],
+                           video_dict['video_url']]]
+                time.sleep(0.5)
+                Feishu.update_values(log_type, crawler, "47e39d", "F2:Z2", values)
+                Common.logger(log_type, crawler).info('视频下载/上传成功\n')
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"download_publish异常:{e}\n")
+
+    @classmethod
+    def get_users(cls):
+        # user_sheet = Feishu.get_values_batch("follow", 'gongzhonghao', 'Bzv72P')
+        # user_list = []
+        # for i in range(51, len(user_sheet)):
+        #     user_name = user_sheet[i][0]
+        #     index = user_sheet[i][1]
+        #     user_dict = {
+        #         "user_name": user_name,
+        #         "index": index,
+        #     }
+        #     user_list.append(user_dict)
+        # print(len(user_list))
+        # print(user_list)
+        user_list = [{'user_name': '太上养身', 'index': 1}, {'user_name': '懂点养身秘诀', 'index': 1},
+                     {'user_name': '送乐者', 'index': 1}, {'user_name': '蜂业小百科', 'index': 1},
+                     {'user_name': '健康与养身秘诀', 'index': 1}, {'user_name': '有心人r', 'index': 1},
+                     {'user_name': '古诗词世界', 'index': 1}, {'user_name': '晨间悦读', 'index': 1},
+                     {'user_name': '养身有诀窍', 'index': 1}, {'user_name': '退休族信息圈', 'index': 1},
+                     {'user_name': '艾公铁粉团', 'index': 1}, {'user_name': '酸甜苦辣麻咸', 'index': 1},
+                     {'user_name': '日常生活小帮手', 'index': 1}, {'user_name': '小帅的精彩视频', 'index': 1},
+                     {'user_name': '养身常识小窍门', 'index': 1}, {'user_name': '医学养身技巧', 'index': 1},
+                     {'user_name': '退休圈', 'index': 1}, {'user_name': '生活小助手', 'index': 1},
+                     {'user_name': '经典老歌曲好听的音乐', 'index': 1}, {'user_name': '黑马快讯', 'index': 1},
+                     {'user_name': '绝妙经典', 'index': 1}, {'user_name': '深读时策', 'index': 1},
+                     {'user_name': '健康与生活大全', 'index': 1}, {'user_name': '李肃论道', 'index': 1},
+                     {'user_name': '爱国者吹锋号', 'index': 1}, {'user_name': '兵心可鉴', 'index': 1},
+                     {'user_name': '精选动心金曲', 'index': 1}, {'user_name': '爱二胡群', 'index': 1},
+                     {'user_name': '数码科技大爆炸', 'index': 1}, {'user_name': '何静同学', 'index': 1},
+                     {'user_name': '方敏爱美食', 'index': 1}, {'user_name': '针灸推拿特色技术', 'index': 1},
+                     {'user_name': '挺进天山', 'index': 1}, {'user_name': '紫陌捻花', 'index': 1},
+                     {'user_name': '巨响养身', 'index': 1}, {'user_name': '荣观世界', 'index': 1},
+                     {'user_name': 'Music音乐世界', 'index': 1}, {'user_name': '微观调查组', 'index': 1},
+                     {'user_name': '用汉方拥抱世界', 'index': 1}, {'user_name': '医学养身秘诀', 'index': 1},
+                     {'user_name': '医学老人养身', 'index': 1}, {'user_name': '热文微观', 'index': 1},
+                     {'user_name': '医学养身秘笈', 'index': 1}, {'user_name': '你未读消息', 'index': 2},
+                     {'user_name': '6点谈健康', 'index': 1}, {'user_name': '观念颠覆一切', 'index': 1},
+                     {'user_name': '侯老师说食疗精选', 'index': 1}, {'user_name': '侯老师说食疗', 'index': 1},
+                     {'user_name': '今日看点收集', 'index': 1}, {'user_name': '君拍', 'index': 1}]
+        return user_list
+
+    @classmethod
+    def get_all_videos(cls, log_type, crawler, oss_endpoint, env):
+        try:
+            user_list = cls.get_users()
+            for user_dict in user_list:
+                user_name = user_dict['user_name']
+                index = user_dict['index']
+                Common.logger(log_type, crawler).info(f'获取 {user_name} 公众号视频\n')
+                cls.get_videoList(log_type, crawler, user_name, index, oss_endpoint, env)
+                cls.begin = 0
+                Common.logger(log_type, crawler).info('随机休眠 0-60 秒\n')
+                time.sleep(random.randint(0, 60))
+        except Exception as e:
+            Common.logger(log_type, crawler).info(f'get_all_videos异常:{e}\n')
+
+
+if __name__ == "__main__":
+    print(GongzhonghaoFollow2.get_token(log_type="follow", crawler="gongzhonghao"))
+    # GongzhonghaoFollow.get_users()
+    # GongzhonghaoFollow.get_videoList(log_type="follow",
+    #                                  crawler="gongzhonghao",
+    #                                  user="香音难忘",
+    #                                  index=1,
+    #                                  oss_endpoint="out",
+    #                                  env="dev")
+    pass
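
Review note: in get_videoList, the '没有更多视频了' branch only logs; the outer `while True` then re-requests the same `begin` offset, so an exhausted account spins on the same empty page forever (the loop otherwise exits only via the 3-day `return` or a missing `app_msg_list`). A sketch of the branch with an explicit exit, same names as the surrounding method:

```python
# Inside get_videoList's while True loop — sketch, not the committed code:
if 'app_msg_list' not in r.json():
    Common.logger(log_type, crawler).warning(f"get_gzh_url:{r.text}\n")
    break
elif len(r.json()['app_msg_list']) == 0:
    Common.logger(log_type, crawler).info('没有更多视频了\n')
    cls.begin = 0
    break  # exit instead of re-requesting the same begin offset forever
else:
    cls.begin += 5
    ...
```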
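Review note: insert_sql interpolates video_title, cover_url, and video_url straight into the statement, so a title containing an ASCII double quote breaks the insert (the sanitizer strips '“' and '”' but not '"'). The repo's MysqlHelper.update_values takes a finished SQL string, so purely as an illustration, here is the same crawler_video insert with bind parameters via pymysql (`insert_video` is a hypothetical helper, not the repo's API):

```python
import json

import pymysql


# Hypothetical helper: bind parameters mean quotes in titles or URLs
# can no longer break the statement or inject SQL.
def insert_video(conn, our_video_id, video_dict, platform, rule_dict):
    sql = """insert into crawler_video(video_id, out_user_id, platform, strategy,
                 out_video_id, video_title, cover_url, video_url, duration,
                 publish_time, play_cnt, crawler_rule, width, height)
             values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
    with conn.cursor() as cursor:
        cursor.execute(sql, (
            our_video_id,
            video_dict['user_id'],
            platform,
            "定向爬虫策略",
            video_dict['video_id'],
            video_dict['video_title'],
            video_dict['cover_url'],
            video_dict['video_url'],
            int(video_dict['duration']),
            video_dict['publish_time_str'],
            int(video_dict['play_cnt']),
            json.dumps(rule_dict),
            int(video_dict['video_width']),
            int(video_dict['video_height']),
        ))
    conn.commit()
```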

+ 43 - 0
gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow_2.py

@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/28
+import argparse
+import os
+import sys
+import time
+sys.path.append(os.getcwd())
+from common.common import Common
+from gongzhonghao.gongzhonghao_follow.gongzhonghao_follow_2 import GongzhonghaoFollow2
+
+
+class Main:
+    @classmethod
+    def main(cls, log_type, crawler, env):
+        while True:
+            try:
+                if env == "dev":
+                    oss_endpoint = "out"
+                else:
+                    oss_endpoint = "inner"
+                Common.logger(log_type, crawler).info('开始抓取公众号视频\n')
+                GongzhonghaoFollow2.get_all_videos(log_type=log_type,
+                                                   crawler=crawler,
+                                                   oss_endpoint=oss_endpoint,
+                                                   env=env)
+                Common.del_logs(log_type, crawler)
+                GongzhonghaoFollow2.begin = 0
+                Common.logger(log_type, crawler).info('休眠 8 小时\n')
+                time.sleep(3600*8)
+            except Exception as e:
+                Common.logger(log_type, crawler).info(f"公众号抓取异常:{e}\n")
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
+    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
+    parser.add_argument('--crawler')  ## 添加参数
+    parser.add_argument('--env')  ## 添加参数
+    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
+    Main.main(log_type=args.log_type,
+              crawler=args.crawler,
+              env=args.env)
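
Review note: run_gongzhonghao_follow_2.py duplicates run_gongzhonghao_follow.py except for the imported class. If more account groups are added, a single runner that resolves the class from an argument would avoid the copy-paste. Hypothetical consolidation sketch, not part of this commit (`--group` is an invented flag):

```python
# Hypothetical: one runner that picks the crawler class from --group
# instead of keeping one nearly identical runner file per account group.
import argparse
import importlib
import os
import sys

sys.path.append(os.getcwd())

GROUPS = {
    "1": ("gongzhonghao.gongzhonghao_follow.gongzhonghao_follow", "GongzhonghaoFollow"),
    "2": ("gongzhonghao.gongzhonghao_follow.gongzhonghao_follow_2", "GongzhonghaoFollow2"),
}

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--log_type', type=str)
    parser.add_argument('--crawler')
    parser.add_argument('--env')
    parser.add_argument('--group', default="1", choices=GROUPS)
    args = parser.parse_args()
    module_name, class_name = GROUPS[args.group]
    crawler_class = getattr(importlib.import_module(module_name), class_name)
    oss_endpoint = "out" if args.env == "dev" else "inner"
    crawler_class.get_all_videos(log_type=args.log_type, crawler=args.crawler,
                                 oss_endpoint=oss_endpoint, env=args.env)
```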

+ 21 - 6
main/process.sh

@@ -18,21 +18,36 @@ echo "$(date "+%Y-%m-%d %H:%M:%S") 更新环境变量..." >> ${log_path}
 cd ~ && source /etc/profile
 echo "$(date "+%Y-%m-%d %H:%M:%S") 更新环境变量完成!\n" >> ${log_path}
 
-## 公众号爬虫策略
-#echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 公众号爬虫策略 进程状态" >> ${log_path}
-#ps -ef | grep "run_gongzhonghao_follow.py" | grep -v "grep"
+# 公众号爬虫策略
+echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 公众号爬虫策略1-50个账号 进程状态" >> ${log_path}
+ps -ef | grep "run_gongzhonghao_follow.py" | grep -v "grep"
+if [ "$?" -eq 1 ];then
+  echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
+  if [ ${env} = "dev" ];then
+    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow.py --log_type="follow" --crawler="gongzhonghao" --env="dev" gongzhonghao/nohup-follow.log
+  else
+    cd ${piaoquan_crawler_dir} && /usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow.py --log_type="follow" --crawler="gongzhonghao" --env="prod"  gongzhonghao/nohup-follow.log
+  fi
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!\n" >> ${log_path}
+else
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 公众号爬虫策略1-50个账号 进程状态正常\n" >> ${log_path}
+fi
+
+#echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 公众号爬虫策略51-100个账号 进程状态" >> ${log_path}
+#ps -ef | grep "run_gongzhonghao_follow_2.py" | grep -v "grep"
 #if [ "$?" -eq 1 ];then
 #  echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
 #  if [ ${env} = "dev" ];then
-#    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow.py --log_type="follow" --crawler="gongzhonghao" --env="dev" gongzhonghao/nohup-follow.log
+#    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow_2.py --log_type="follow" --crawler="gongzhonghao" --env="dev" gongzhonghao/nohup-follow.log
 #  else
-#    cd ${piaoquan_crawler_dir} && /usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow.py --log_type="follow" --crawler="gongzhonghao" --env="prod"  gongzhonghao/nohup-follow.log
+#    cd ${piaoquan_crawler_dir} && /usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow_2.py --log_type="follow" --crawler="gongzhonghao" --env="prod"  gongzhonghao/nohup-follow-2.log
 #  fi
 #  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!\n" >> ${log_path}
 #else
-#  echo "$(date "+%Y-%m-%d %H:%M:%S") 公众号爬虫策略 进程状态正常\n" >> ${log_path}
+#  echo "$(date "+%Y-%m-%d %H:%M:%S") 公众号爬虫策略1-50个账号 进程状态正常\n" >> ${log_path}
 #fi
 
+
 # 小年糕定向爬虫策略
 echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 小年糕定向爬虫策略 进程状态" >> ${log_path}
 ps -ef | grep "run_xiaoniangao_follow.py" | grep -v "grep"