wangkun · 2 years ago · commit 543b9b31a8

+ 15 - 3
README.MD

@@ -60,9 +60,9 @@ ps aux | grep run_youtube | grep -v grep | awk '{print $2}' | xargs kill -9
 #### 微信指数 (WeChat Index)
 ```commandline
 Kill WeChat Index (weixinzhishu) processes
-nohup python3 -u weixinzhishu/weixinzhishu_main/weixinzhishu_inner_sort.py >>./weixinzhishu/nohup_inner_sort.log 2>&1 &
-nohup python3 -u weixinzhishu/weixinzhishu_main/weixinzhishu_inner_long.py >>./weixinzhishu/nohup_inner_long.log 2>&1 &
-nohup python3 -u weixinzhishu/weixinzhishu_main/weixinzhishu_out.py >>./weixinzhishu/nohup_out.log 2>&1 &
+nohup python3 -u /data5/piaoquan_crawler/weixinzhishu/weixinzhishu_main/weixinzhishu_inner_sort.py >>/data5/piaoquan_crawler//weixinzhishu/nohup_inner_sort.log 2>&1 &
+nohup python3 -u /data5/piaoquan_crawler/weixinzhishu/weixinzhishu_main/weixinzhishu_inner_long.py >>/data5/piaoquan_crawler//weixinzhishu/nohup_inner_long.log 2>&1 &
+nohup python3 -u /data5/piaoquan_crawler/weixinzhishu/weixinzhishu_main/weixinzhishu_out.py >>/data5/piaoquan_crawler//weixinzhishu/nohup_out.log 2>&1 &
 ps aux | grep run_weixinzhishu
 ps aux | grep weixinzhishu | grep -v grep | awk '{print $2}' | xargs kill -9
 Device used to get wechat_key: Mac Air
@@ -124,4 +124,16 @@ ps aux | grep run_xiaoniangao_play
 ps aux | grep run_xiaoniangao_follow | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_xiaoniangao_hour | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_xiaoniangao_play | grep -v grep | awk '{print $2}' | xargs kill -9
+```
+
+
+#### 公众号 (WeChat Official Accounts)
+```commandline
+Aliyun server 102
+Targeted crawler strategy: ps aux | grep run_gongzhonghao | grep -v grep | awk '{print $2}' | xargs kill -9 && /usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow.py --log_type="follow" --crawler="gongzhonghao" --env="prod"  gongzhonghao/nohup-follow.log
+Local debugging
+Targeted crawler strategy: sh main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow.py --log_type="follow" --crawler="gongzhonghao" --env="dev" gongzhonghao/nohup-follow.log
+Kill-process commands
+ps aux | grep run_gongzhonghao
+ps aux | grep run_gongzhonghao | grep -v grep | awk '{print $2}' | xargs kill -9 
 ```

+ 9 - 0
common/common.py

@@ -236,6 +236,12 @@ class Common:
 
     @classmethod
     def ffmpeg(cls, log_type, crawler, video_path):
+        # Common.logger(log_type, crawler).info(f"video_path:{video_path}")
+        video_title = video_path.replace(f"./{crawler}/videos/", "").replace("/video.mp4", "")
+        # Common.logger(log_type, crawler).info(f"video_title:{video_title}")
+        md_title = md5(video_title.encode('utf8')).hexdigest()
+        video_path = f"./{crawler}/videos/{md_title}/video.mp4"
+        # Common.logger(log_type, crawler).info(f"{video_path}")
         probe = ffmpeg.probe(video_path)
         video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
         if video_stream is None:
@@ -257,6 +263,9 @@ class Common:
     # Merge audio and video
     @classmethod
     def video_compose(cls, log_type, crawler, video_dir):
+        video_title = video_dir.replace(f"./{crawler}/videos/", "")
+        md_title = md5(video_title.encode('utf8')).hexdigest()
+        video_dir = f"./{crawler}/videos/{md_title}"
         try:
             video_path = f'{video_dir}/video1.mp4'
             audio_path = f'{video_dir}/audio1.mp4'
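
Both new blocks above rewrite the title-based path that callers still pass in (gongzhonghao_follow.download_publish, for example, builds `./{crawler}/videos/{video_title}/video.mp4`) into a directory named by the md5 of the title, presumably to match the folder naming used when the file was written and to keep titles with unsafe characters off the filesystem. A minimal sketch of that mapping, assuming `md5` is imported from `hashlib` in common.py (the added lines call it unqualified):

```python
from hashlib import md5


def md5_video_path(crawler: str, video_title: str) -> str:
    """Illustrative helper (not part of the commit): map a title-based path
    to the md5-named folder that ffmpeg()/video_compose() now operate on."""
    md_title = md5(video_title.encode('utf8')).hexdigest()
    return f"./{crawler}/videos/{md_title}/video.mp4"


# md5_video_path("gongzhonghao", "示例标题")
# -> "./gongzhonghao/videos/<32-char hex digest>/video.mp4"
```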

+ 7 - 6
common/feishu.py

@@ -118,14 +118,14 @@ class Feishu:
             return 'shtcnz1ymxHL1u8WHblfqfys7qe'
         elif crawler == 'ggdc':
             return 'shtcnTuJgeZU2bc7VaesAqk3QJx'
-        elif crawler == 'gongzhonghao_xinxin':
-            return 'shtcna98M2mX7TbivTj9Sb7WKBN'
         elif crawler == 'youtube':
             return 'shtcnrLyr1zbYbhhZyqpN7Xrd5f'
         elif crawler == 'weixinzhishu':
             return 'shtcnqhMRUGunIfGnGXMOBYiy4K'
         elif crawler == 'weixinzhishu_search_word':
             return 'shtcnHxCj6dZBYMuK1Q3tIJVlqg'
+        elif crawler == 'gongzhonghao':
+            return 'shtcna98M2mX7TbivTj9Sb7WKBN'
 
     # Get the Feishu API token
     @classmethod
@@ -419,6 +419,8 @@ class Feishu:
                 username = "15712941385"
                 username = "15712941385"
             elif username == "muxinyi":
             elif username == "muxinyi":
                 username = '13699208058'
                 username = '13699208058'
+            elif username == "wangxueke":
+                username = '13513479926'
 
 
             data = {"mobiles": [username]}
             data = {"mobiles": [username]}
             urllib3.disable_warnings()
             urllib3.disable_warnings()
@@ -583,11 +585,10 @@ class Feishu:
                 users = "\n<at id=" + str(cls.get_userid(log_type, crawler, "wangkun")) + "></at> <at id=" + str(
                 users = "\n<at id=" + str(cls.get_userid(log_type, crawler, "wangkun")) + "></at> <at id=" + str(
                     cls.get_userid(log_type, crawler, "huxinxue")) + "></at>\n"
                     cls.get_userid(log_type, crawler, "huxinxue")) + "></at>\n"
 
 
-            elif crawler == "gongzhonghao_xinxin":
+            elif crawler == "gongzhonghao":
                 content = "公众号_信欣_爬虫表"
                 content = "公众号_信欣_爬虫表"
                 sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcna98M2mX7TbivTj9Sb7WKBN?"
                 sheet_url = "https://w42nne6hzg.feishu.cn/sheets/shtcna98M2mX7TbivTj9Sb7WKBN?"
-                users = "\n<at id=" + str(cls.get_userid(log_type, crawler, "wangkun")) + "></at> <at id=" + str(
-                    cls.get_userid(log_type, crawler, "xinxin")) + "></at>\n"
+                users = f"\n<at id={str(cls.get_userid(log_type, crawler, 'huxinxue'))}></at> <at id={str(cls.get_userid(log_type, crawler, 'wangxueke'))}></at> <at id={str(cls.get_userid(log_type, crawler, 'xinxin'))}></at>\n"
 
 
             elif crawler == "weiqun":
             elif crawler == "weiqun":
                 content = "微群爬虫表"
                 content = "微群爬虫表"
@@ -670,4 +671,4 @@
 
 
 if __name__ == "__main__":
-    Feishu.bot('follow', 'weixinzhishu_out', 'test:微信指数_站外指数已抓取完毕')
+    Feishu.bot('bor', 'gongzhonghao', 'token过期啦,请扫码更换\nhttps://mp.weixin.qq.com/')

+ 2 - 5
common/public.py

@@ -17,13 +17,10 @@ def filter_word(log_type, crawler, source, env):
     """
     """
     select_sql = f""" select * from crawler_filter_word where source="{source}" """
     select_sql = f""" select * from crawler_filter_word where source="{source}" """
     words = MysqlHelper.get_values(log_type, crawler, select_sql, env, action='')
     words = MysqlHelper.get_values(log_type, crawler, select_sql, env, action='')
-    if len(words) == 0:
-        return None
     word_list = []
+    if len(words) == 0:
+        return word_list
     for word in words:
-        # ss = word['filter_word']
-        # print(type(ss))
-        # print(ss)
         word_list.append(word['filter_word'])
 
     return word_list
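
Returning the empty `word_list` instead of `None` when no filter words exist for a source keeps callers that iterate over the result working; the new gongzhonghao code consumes it exactly that way. A hedged sketch of that call pattern (the helper name is illustrative and not in the repo):

```python
# Illustrative only: mirrors the sensitive-word check in GongzhonghaoFollow.download_publish.
def title_is_filtered(video_title: str, filter_words: list) -> bool:
    # With an empty list, any(...) is simply False; with None the generator
    # expression would raise "TypeError: 'NoneType' object is not iterable".
    return any(word in video_title for word in filter_words)


# filter_words = filter_word(log_type, crawler, "公众号", env)
# if title_is_filtered(video_dict['video_title'], filter_words): ...
```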

+ 4 - 0
common/publish.py

@@ -190,6 +190,10 @@ class Publish:
             uids_prod_xiaoniangao_play = [50322222, 50322223, 50322224, 50322225]
             return random.choice(uids_prod_xiaoniangao_play)
 
+        elif crawler == 'gongzhonghao' and env == 'prod' and strategy == '定向爬虫策略':
+            uids_prod_gongzhonghao_follow = [26117675, 26117676, 26117677, 26117678, 26117679, 26117680]
+            return random.choice(uids_prod_gongzhonghao_follow)
+
         elif crawler == 'kanyikan':
             uids_prod_kanyikan_moment = [20631208, 20631209, 20631210, 20631211, 20631212,
                                          20631213, 20631214, 20631215, 20631216, 20631217,

BIN
gongzhonghao/.DS_Store


+ 3 - 0
gongzhonghao/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/28

+ 3 - 0
gongzhonghao/gongzhonghao_follow/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/28

+ 539 - 0
gongzhonghao/gongzhonghao_follow/gongzhonghao_follow.py

@@ -0,0 +1,539 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/28
+import datetime
+import difflib
+import json
+import os
+import shutil
+import sys
+import time
+import requests
+import urllib3
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.common.by import By
+from selenium import webdriver
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.public import filter_word
+from common.publish import Publish
+from common.scheduling_db import MysqlHelper
+
+
+class GongzhonghaoFollow:
+    # Pagination parameter
+    begin = 0
+    platform = "公众号"
+
+    # Basic threshold rules
+    @staticmethod
+    def download_rule(video_dict):
+        """
+        Basic rules for downloading a video
+        :param video_dict: video info, as a dict
+        :return: True if the rules are met, otherwise False
+        """
+        # Video duration: 20 seconds - 45 minutes
+        if 60 * 45 >= int(float(video_dict['duration'])) >= 20:
+            # Width or height
+            if int(video_dict['video_width']) >= 0 or int(video_dict['video_height']) >= 0:
+                return True
+            else:
+                return False
+        else:
+            return False
+
+    @classmethod
+    def title_like(cls, log_type, crawler, title, env):
+        select_sql = f""" select * from crawler_video where platform="公众号" """
+        video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
+        if len(video_list) == 0:
+            return None
+        for video_dict in video_list:
+            video_title = video_dict["video_title"]
+            if difflib.SequenceMatcher(None, title, video_title).quick_ratio() >= 0.8:
+                return True
+            else:
+                pass
+
+    # Get token
+    @classmethod
+    def get_token(cls, log_type, crawler):
+        while True:
+            try:
+                sheet = Feishu.get_values_batch(log_type, "gongzhonghao", "OjyJqs")
+                if sheet is None:
+                    time.sleep(3)
+                    continue
+                token = sheet[0][1]
+                cookie = sheet[1][1]
+                token_dict = {'token': token, 'cookie': cookie}
+                return token_dict
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"get_cookie_token异常:{e}\n")
+
+    # Get the user's fakeid
+    @classmethod
+    def get_fakeid(cls, log_type, crawler, user, index):
+        try:
+            token_dict = cls.get_token(log_type, crawler)
+            url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
+            headers = {
+                "accept": "*/*",
+                "accept-encoding": "gzip, deflate, br",
+                "accept-language": "zh-CN,zh;q=0.9",
+                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                           "&type=77&createType=5&token=1011071554&lang=zh_CN",
+                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "sec-ch-ua-mobile": "?0",
+                "sec-ch-ua-platform": '"Windows"',
+                "sec-fetch-dest": "empty",
+                "sec-fetch-mode": "cors",
+                "sec-fetch-site": "same-origin",
+                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                "x-requested-with": "XMLHttpRequest",
+                'cookie': token_dict['cookie'],
+            }
+            params = {
+                "action": "search_biz",
+                "begin": "0",
+                "count": "5",
+                "query": str(user),
+                "token": token_dict['token'],
+                "lang": "zh_CN",
+                "f": "json",
+                "ajax": "1",
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, verify=False)
+            while True:
+                if r.status_code != 200 and 21 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, "token过期啦,请扫码更换\nhttps://mp.weixin.qq.com/")
+                    time.sleep(60 * 10)
+                else:
+                    break
+            if "list" not in r.json() or len(r.json()["list"]) == 0:
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text},休眠 1 秒\n")
+                time.sleep(1)
+            else:
+                fakeid = r.json()["list"][int(index) - 1]["fakeid"]
+                head_url = r.json()["list"][int(index) - 1]["round_head_img"]
+                fakeid_dict = {'fakeid': fakeid, 'head_url': head_url}
+                return fakeid_dict
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_fakeid异常:{e}\n")
+
+    # Get the Tencent Video download URL
+    @classmethod
+    def get_tencent_video_url(cls, log_type, crawler, video_id):
+        try:
+            url = 'https://vv.video.qq.com/getinfo?vids=' + str(video_id) + '&platform=101001&charge=0&otype=json'
+            response = requests.get(url=url).text.replace('QZOutputJson=', '').replace('"};', '"}')
+            response = json.loads(response)
+            url = response['vl']['vi'][0]['ul']['ui'][0]['url']
+            fvkey = response['vl']['vi'][0]['fvkey']
+            video_url = url + str(video_id) + '.mp4?vkey=' + fvkey
+            return video_url
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_tencent_video_url异常:{e}\n")
+
+    @classmethod
+    def get_video_url(cls, log_type, crawler, article_url, env):
+        try:
+            # Print request configuration
+            ca = DesiredCapabilities.CHROME
+            ca["goog:loggingPrefs"] = {"performance": "ALL"}
+
+            # Run without opening a browser window
+            chrome_options = webdriver.ChromeOptions()
+            chrome_options.add_argument("headless")
+            chrome_options.add_argument(
+                f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
+            chrome_options.add_argument("--no-sandbox")
+
+            # Initialize the driver
+            if env == "prod":
+                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
+            else:
+                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service('/Users/wangkun/Downloads/chromedriver/chromedriver_v111/chromedriver'))
+
+            driver.implicitly_wait(10)
+            # Common.logger(log_type, crawler).info('打开文章链接')
+            driver.get(article_url)
+            time.sleep(2)
+
+            if len(driver.find_elements(By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]')) != 0:
+                video_url = driver.find_element(
+                    By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]').get_attribute('src')
+            elif len(driver.find_elements(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]')) != 0:
+                iframe = driver.find_element(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]').get_attribute(
+                    'src')
+                video_id = iframe.split('vid=')[-1].split('&')[0]
+                video_url = cls.get_tencent_video_url(log_type, crawler, video_id)
+            else:
+                video_url = 0
+
+            return video_url
+        except Exception as e:
+            Common.logger(log_type, crawler).info(f'get_video_url异常:{e}\n')
+
+    # Get the article list
+    @classmethod
+    def get_videoList(cls, log_type, crawler, user, index, oss_endpoint, env):
+        fakeid_dict = cls.get_fakeid(log_type, crawler, user, index)
+        token_dict = cls.get_token(log_type, crawler)
+        while True:
+            # try:
+            url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+            headers = {
+                "accept": "*/*",
+                "accept-encoding": "gzip, deflate, br",
+                "accept-language": "zh-CN,zh;q=0.9",
+                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                           "&type=77&createType=5&token=" + str(token_dict['token']) + "&lang=zh_CN",
+                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "sec-ch-ua-mobile": "?0",
+                "sec-ch-ua-platform": '"Windows"',
+                "sec-fetch-dest": "empty",
+                "sec-fetch-mode": "cors",
+                "sec-fetch-site": "same-origin",
+                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                "x-requested-with": "XMLHttpRequest",
+                'cookie': token_dict['cookie'],
+            }
+            params = {
+                "action": "list_ex",
+                "begin": str(cls.begin),
+                "count": "5",
+                "fakeid": fakeid_dict['fakeid'],
+                "type": "9",
+                "query": "",
+                "token": str(token_dict['token']),
+                "lang": "zh_CN",
+                "f": "json",
+                "ajax": "1",
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, verify=False)
+            while True:
+                if r.status_code != 200 and 21 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, "token过期啦,请扫码更换\nhttps://mp.weixin.qq.com/")
+                    time.sleep(60 * 10)
+                else:
+                    break
+            if 'app_msg_list' not in r.json():
+                Common.logger(log_type, crawler).warning(f"get_gzh_url:{r.text}\n")
+                break
+            elif len(r.json()['app_msg_list']) == 0:
+                Common.logger(log_type, crawler).info('没有更多视频了\n')
+            else:
+                cls.begin += 5
+                app_msg_list = r.json()['app_msg_list']
+                for article_url in app_msg_list:
+                    # title
+                    if 'title' in article_url:
+                        title = article_url['title'].replace('/', '').replace('\n', '') \
+                            .replace('.', '').replace('“', '').replace('”', '').replace(' ', '')
+                    else:
+                        title = 0
+
+                    # aid
+                    if 'aid' in article_url:
+                        aid = article_url['aid']
+                    else:
+                        aid = 0
+
+                    # create_time
+                    if 'create_time' in article_url:
+                        create_time = article_url['create_time']
+                    else:
+                        create_time = 0
+                    publish_time_stamp = int(create_time)
+                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+
+                    avatar_url = fakeid_dict['head_url']
+
+                    # cover_url
+                    if 'cover' in article_url:
+                        cover_url = article_url['cover']
+                    else:
+                        cover_url = 0
+
+                    # article_url
+                    if 'link' in article_url:
+                        article_url = article_url['link']
+                    else:
+                        article_url = 0
+
+                    video_url = cls.get_video_url(log_type, crawler, article_url, env)
+
+                    video_dict = {
+                        'video_id': aid,
+                        'video_title': title,
+                        'publish_time_stamp': publish_time_stamp,
+                        'publish_time_str': publish_time_str,
+                        'user_name': user,
+                        'play_cnt': 0,
+                        'comment_cnt': 0,
+                        'like_cnt': 0,
+                        'share_cnt': 0,
+                        'user_id': fakeid_dict['fakeid'],
+                        'avatar_url': avatar_url,
+                        'cover_url': cover_url,
+                        'article_url': article_url,
+                        'video_url': video_url,
+                        'session': f'gongzhonghao-follow-{int(time.time())}'
+                    }
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    if int(time.time()) - publish_time_stamp >= 3600 * 24 * 3:
+                        Common.logger(log_type, crawler).info(f'发布时间{publish_time_str} > 3 天\n')
+                        cls.begin = 0
+                        return
+                    cls.download_publish(log_type, crawler, video_dict, oss_endpoint, env)
+
+                Common.logger(log_type, crawler).info('休眠 5 秒\n')
+                time.sleep(5)
+            # except Exception as e:
+            #     Common.logger(log_type, crawler).error("get_gzh_url异常:{}\n", e)
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="公众号" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    # Download / upload
+    @classmethod
+    def download_publish(cls, log_type, crawler, video_dict, oss_endpoint, env):
+        # try:
+        if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
+            Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
+        # Filter sensitive words in the title
+        elif any(word if word in video_dict['video_title'] else False for word in
+                 filter_word(log_type, crawler, "公众号", env)) is True:
+            Common.logger(log_type, crawler).info("标题已中过滤词\n")
+        # Already-downloaded check
+        elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+            Common.logger(log_type, crawler).info("视频已下载\n")
+        # Title similarity check
+        elif cls.title_like(log_type, crawler, video_dict['video_title'], env) is True:
+            Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
+        else:
+            # Download the video
+            Common.download_method(log_type=log_type, crawler=crawler, text="video",
+                                   title=video_dict["video_title"], url=video_dict["video_url"])
+            # Get the video duration
+            ffmpeg_dict = Common.ffmpeg(log_type, crawler,
+                                        f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+            video_dict["video_width"] = ffmpeg_dict["width"]
+            video_dict["video_height"] = ffmpeg_dict["height"]
+            video_dict["duration"] = ffmpeg_dict["duration"]
+            video_size = ffmpeg_dict["size"]
+            Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
+            Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
+            Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
+            Common.logger(log_type, crawler).info(f'video_size:{video_size}')
+            # If video size = 0, delete it immediately
+            if int(video_size) == 0 or cls.download_rule(video_dict) is False:
+                # Delete the video folder
+                shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+            # Download the cover image
+            Common.download_method(log_type=log_type, crawler=crawler, text="cover",
+                                   title=video_dict["video_title"], url=video_dict["cover_url"])
+            # Save video info to "./videos/{video_title}/info.txt"
+            Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+            # Upload the video
+            Common.logger(log_type, crawler).info("开始上传视频...")
+            strategy = "定向爬虫策略"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy=strategy,
+                                                      our_uid="follow",
+                                                      oss_endpoint=oss_endpoint,
+                                                      env=env)
+            if env == 'prod':
+                our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+            else:
+                our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+            Common.logger(log_type, crawler).info("视频上传完成")
+
+            if our_video_id is None:
+                # Delete the video folder
+                shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+                return
+
+            # Save video info to the database
+            rule_dict = {
+                "duration": {"min": 20, "max": 45 * 60},
+                "publish_day": {"min": 3}
+            }
+
+            insert_sql = f""" insert into crawler_video(video_id,
+                                                        out_user_id,
+                                                        platform,
+                                                        strategy,
+                                                        out_video_id,
+                                                        video_title,
+                                                        cover_url,
+                                                        video_url,
+                                                        duration,
+                                                        publish_time,
+                                                        play_cnt,
+                                                        crawler_rule,
+                                                        width,
+                                                        height)
+                                                        values({our_video_id},
+                                                        "{video_dict['user_id']}",
+                                                        "{cls.platform}",
+                                                        "定向爬虫策略",
+                                                        "{video_dict['video_id']}",
+                                                        "{video_dict['video_title']}",
+                                                        "{video_dict['cover_url']}",
+                                                        "{video_dict['video_url']}",
+                                                        {int(video_dict['duration'])},
+                                                        "{video_dict['publish_time_str']}",
+                                                        {int(video_dict['play_cnt'])},
+                                                        '{json.dumps(rule_dict)}',
+                                                        {int(video_dict['video_width'])},
+                                                        {int(video_dict['video_height'])}) """
+            Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+            MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+            Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+
+            # Write the video to Feishu
+            Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
+            # Video ID worksheet: write data to the first row
+            upload_time = int(time.time())
+            values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                       "用户主页",
+                       video_dict['video_title'],
+                       video_dict['video_id'],
+                       our_video_link,
+                       int(video_dict['duration']),
+                       f"{video_dict['video_width']}*{video_dict['video_height']}",
+                       video_dict['publish_time_str'],
+                       video_dict['user_name'],
+                       video_dict['user_id'],
+                       video_dict['avatar_url'],
+                       video_dict['cover_url'],
+                       video_dict['article_url'],
+                       video_dict['video_url']]]
+            time.sleep(0.5)
+            Feishu.update_values(log_type, crawler, "47e39d", "F2:Z2", values)
+            Common.logger(log_type, crawler).info('视频下载/上传成功\n')
+        # except Exception as e:
+        #     Common.logger(log_type, crawler).error(f"download_publish异常:{e}\n")
+
+    @classmethod
+    def get_users(cls):
+        # user_sheet = Feishu.get_values_batch("follow", 'gongzhonghao', 'Bzv72P')
+        # user_list = []
+        # for i in range(1, len(user_sheet)):
+        #     user_name = user_sheet[i][0]
+        #     index = user_sheet[i][1]
+        #     user_dict = {
+        #         "user_name": user_name,
+        #         "index": index,
+        #     }
+        #     user_list.append(user_dict)
+        # print(user_list)
+        user_list = [{'user_name': '香音难忘', 'index': 1},
+                     {'user_name': '墨儿心灵驿站', 'index': 1},
+                     {'user_name': '荒烟茶生', 'index': 1},
+                     {'user_name': '幸福花朵', 'index': 1},
+                     {'user_name': '我的节日祝福', 'index': 1},
+                     {'user_name': '生活创意妙招', 'index': 1},
+                     {'user_name': '二大妈有话说', 'index': 1},
+                     {'user_name': '医路健康美食', 'index': 1},
+                     {'user_name': '老年相知相伴', 'index': 1},
+                     {'user_name': '一争', 'index': 1},
+                     {'user_name': '老年企退群', 'index': 1},
+                     {'user_name': '消逝的哨声', 'index': 1},
+                     {'user_name': '一颗打破石头的蛋', 'index': 1},
+                     {'user_name': '叩问苍穹荒烟茶生', 'index': 1},
+                     {'user_name': '布衣星火', 'index': 1},
+                     {'user_name': '叩问苍穹', 'index': 1},
+                     {'user_name': '微观调查', 'index': 2},
+                     {'user_name': '传统节日祝福', 'index': 1},
+                     {'user_name': '因和德尚', 'index': 1},
+                     {'user_name': '飨宴心灵', 'index': 1},
+                     {'user_name': '朝闻解局', 'index': 1},
+                     {'user_name': '远见光芒', 'index': 1},
+                     {'user_name': '墨儿微刊', 'index': 1},
+                     {'user_name': '博爱论', 'index': 1},
+                     {'user_name': '张大春讲堂', 'index': 1},
+                     {'user_name': ' 司马南频道', 'index': 1},
+                     {'user_name': '音乐小镇', 'index': 1},
+                     {'user_name': '节日祝福365', 'index': 1},
+                     {'user_name': '动画音乐相册', 'index': 1},
+                     {'user_name': '音乐动漫相册', 'index': 1},
+                     {'user_name': '早点谈健康', 'index': 1},
+                     {'user_name': '早点谈养生', 'index': 1},
+                     {'user_name': '早点谈养身', 'index': 1},
+                     {'user_name': '医道谈养身', 'index': 1},
+                     {'user_name': '中老年谈养身', 'index': 1},
+                     {'user_name': '尼古拉斯瞭望', 'index': 1},
+                     {'user_name': '奇易时光百姓的福音', 'index': 1},
+                     {'user_name': '寰宇时光', 'index': 1},
+                     {'user_name': '红兴文化公苑', 'index': 1},
+                     {'user_name': '早点音乐', 'index': 1},
+                     {'user_name': '小分子生物活性肽', 'index': 1},
+                     {'user_name': '张小妹美食', 'index': 1},
+                     {'user_name': '万物归息', 'index': 1},
+                     {'user_name': '神州红魂', 'index': 1},
+                     {'user_name': '音乐早餐', 'index': 1},
+                     {'user_name': '1条末读消息', 'index': 1},
+                     {'user_name': '环球文摘', 'index': 1},
+                     {'user_name': '精彩有余', 'index': 1},
+                     {'user_name': '一起训练吧', 'index': 1},
+                     {'user_name': '1条重要消息', 'index': 1},
+                     {'user_name': '太上养身', 'index': 1},
+                     {'user_name': '懂点养身秘诀', 'index': 1},
+                     {'user_name': '送乐者', 'index': 1},
+                     {'user_name': '蜂业小百科', 'index': 1},
+                     {'user_name': '健康与养身秘诀', 'index': 1},
+                     {'user_name': '有心人r', 'index': 1},
+                     {'user_name': '古诗词世界', 'index': 1},
+                     {'user_name': '晨间悦读', 'index': 1},
+                     {'user_name': '养身有诀窍', 'index': 1},
+                     {'user_name': '退休族信息圈', 'index': 1},
+                     {'user_name': '艾公铁粉团', 'index': 1},
+                     {'user_name': '酸甜苦辣麻咸', 'index': 1},
+                     {'user_name': '日常生活小帮手', 'index': 1},
+                     {'user_name': '小帅的精彩视频', 'index': 1},
+                     {'user_name': '养身常识小窍门', 'index': 1}]
+        return user_list
+
+    @classmethod
+    def get_all_videos(cls, log_type, crawler, oss_endpoint, env):
+        # try:
+        user_list = cls.get_users()
+        for user_dict in user_list:
+            user_name = user_dict['user_name']
+            index = user_dict['index']
+            Common.logger(log_type, crawler).info(f'获取 {user_name} 公众号视频\n')
+            cls.get_videoList(log_type, crawler, user_name, index, oss_endpoint, env)
+            cls.begin = 0
+            Common.logger(log_type, crawler).info('休眠60秒\n')
+            time.sleep(60)
+        # except Exception as e:
+        #     Common.logger(log_type, crawler).info(f'get_all_videos异常:{e}\n')
+
+
+if __name__ == "__main__":
+    # like_score = GongzhonghaoFollow.title_like("follow", "gongzhonghao", "最减寿的习惯,不是抽烟,不是喝酒,而是这三样", "prod")
+    # print(like_score)
+    # print(GongzhonghaoFollow.get_token("follow", "gongzhonghao"))
+    GongzhonghaoFollow.get_users()
+    pass

+ 96 - 0
gongzhonghao/gongzhonghao_follow/insert_video.py

@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/28
+import json
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.scheduling_db  import MysqlHelper
+from common.feishu import Feishu
+
+
+class Insert:
+    @classmethod
+    def insert_video_from_feishu_to_mysql(cls, log_type, crawler, env, machine):
+        gongzhonghao_sheetid_list = ['47e39d']
+        for sheetid in gongzhonghao_sheetid_list:
+            gongzhonghao_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+            for i in range(1, len(gongzhonghao_sheet)):
+            # for i in range(1, 3):
+                if gongzhonghao_sheet[i][5] is None or gongzhonghao_sheet[i][9] is None:
+                    continue
+                video_id = gongzhonghao_sheet[i][9].replace("https://admin.piaoquantv.com/cms/post-detail/", "").replace("/info", "")
+                if video_id == "None":
+                    continue
+                video_id = int(video_id)
+                out_user_id = str(gongzhonghao_sheet[i][14])
+                platform = "公众号"
+                strategy = "定向爬虫策略"
+                out_video_id = str(gongzhonghao_sheet[i][8])
+                video_title = str(gongzhonghao_sheet[i][7])
+                cover_url = str(gongzhonghao_sheet[i][16])
+                video_url = str(gongzhonghao_sheet[i][18])
+                duration = int(gongzhonghao_sheet[i][10])
+                publish_time = str(gongzhonghao_sheet[i][12]).replace("/", "-")
+                crawler_rule = json.dumps({"play_cnt": {"min": 0}, "duration": {"min": 20}, "publish_day": {"min": 0}})
+                width = int(gongzhonghao_sheet[i][11].split("*")[0])
+                height = int(gongzhonghao_sheet[i][11].split("*")[1])
+
+                # print(f"video_id:{video_id}, type:{type(video_id)}")
+                # print(f"out_user_id:{out_user_id}, type:{type(out_user_id)}")
+                # print(f"platform:{platform}, type:{type(platform)}")
+                # print(f"strategy:{strategy}, type:{type(strategy)}")
+                # print(f"out_video_id:{out_video_id}, type:{type(out_video_id)}")
+                # print(f"video_title:{video_title}, type:{type(video_title)}")
+                # print(f"cover_url:{cover_url}, type:{type(cover_url)}")
+                # print(f"video_url:{video_url}, type:{type(video_url)}")
+                # print(f"duration:{duration}, type:{type(duration)}")
+                # print(f"publish_time:{publish_time}, type:{type(publish_time)}")
+                # print(f"crawler_rule:{crawler_rule}, type:{type(crawler_rule)}")
+                # print(f"width:{width}, type:{type(width)}")
+                # print(f"height:{height}, type:{type(height)}\n")
+
+                select_sql = f""" select * from crawler_video where platform="{platform}" and out_video_id="{out_video_id}" """
+                Common.logger(log_type, crawler).info(f"select_sql:{select_sql}")
+                repeat_video = MysqlHelper.get_values(log_type, crawler, select_sql, env, machine)
+                Common.logger(log_type, crawler).info(f"repeat_video:{repeat_video}")
+
+                if repeat_video is not None and len(repeat_video) != 0:
+                    Common.logger(log_type, crawler).info(f"{video_title} 已存在数据库中\n")
+                else:
+                    # Save video info to the database
+                    insert_sql = f""" insert into crawler_video(video_id,
+                                    out_user_id,
+                                    platform,
+                                    strategy,
+                                    out_video_id,
+                                    video_title,
+                                    cover_url,
+                                    video_url,
+                                    duration,
+                                    publish_time,
+                                    crawler_rule,
+                                    width,
+                                    height)
+                                    values({video_id},
+                                    "{out_user_id}",
+                                    "{platform}",
+                                    "{strategy}",
+                                    "{out_video_id}",
+                                    "{video_title}",
+                                    "{cover_url}",
+                                    "{video_url}",
+                                    {duration},
+                                    "{publish_time}",
+                                    '{crawler_rule}',
+                                    {width},
+                                    {height}) """
+                    Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                    MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
+                    Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+
+
+if __name__ == "__main__":
+    Insert.insert_video_from_feishu_to_mysql("insert-prod", "gongzhonghao", "prod", "local")
+    pass

+ 3 - 0
gongzhonghao/gongzhonghao_main/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/28

+ 43 - 0
gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow.py

@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/28
+import argparse
+import os
+import sys
+import time
+sys.path.append(os.getcwd())
+from common.common import Common
+from gongzhonghao.gongzhonghao_follow.gongzhonghao_follow import GongzhonghaoFollow
+
+
+class Main:
+    @classmethod
+    def main(cls, log_type, crawler, env):
+        while True:
+            # try:
+            if env == "dev":
+                oss_endpoint = "out"
+            else:
+                oss_endpoint = "inner"
+            Common.logger(log_type, crawler).info('开始抓取公众号视频\n')
+            GongzhonghaoFollow.get_all_videos(log_type=log_type,
+                                              crawler=crawler,
+                                              oss_endpoint=oss_endpoint,
+                                              env=env)
+            Common.del_logs(log_type, crawler)
+            GongzhonghaoFollow.begin = 0
+            Common.logger(log_type, crawler).info('休眠 8 小时\n')
+            time.sleep(3600*8)
+            # except Exception as e:
+            #     Common.logger(log_type, crawler).info(f"公众号抓取异常:{e}\n")
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()  ## Create the argument parser
+    parser.add_argument('--log_type', type=str)  ## Add an argument and declare its type
+    parser.add_argument('--crawler')  ## Add an argument
+    parser.add_argument('--env')  ## Add an argument
+    args = parser.parse_args()  ### Parse the arguments (values can also be supplied from the terminal)
+    Main.main(log_type=args.log_type,
+              crawler=args.crawler,
+              env=args.env)
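
For reference, the README section above starts this entry point through the scheduler (`sh main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow.py --log_type="follow" --crawler="gongzhonghao" --env="dev" gongzhonghao/nohup-follow.log` for local debugging); invoking the script directly with `python3` and the same three flags should behave equivalently, since `Main.main` only uses `env` to choose the OSS endpoint.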

BIN
gongzhonghao/logs/.DS_Store


BIN
gongzhonghao/videos/.DS_Store