wangkun · 1 year ago · commit f85b527ebc

+ 3 - 3
README.MD

@@ -105,9 +105,9 @@ ps aux | grep run_kuaishou | grep -v grep | awk '{print $2}' | xargs kill -9
 播放量榜爬虫策略: /usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_play.py --log_type="play" --crawler="xiaoniangao" --env="prod" xiaoniangao/nohup-play.log

 线下调试
-定向爬虫策略: sh main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_follow.py --log_type="follow" --crawler="xiaoniangao" --env="dev" xiaoniangao/nohup-follow.log
-小时榜爬虫策略: sh main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_hour.py --log_type="hour" --crawler="xiaoniangao" --env="dev" xiaoniangao/nohup-hour.log
-播放量榜爬虫策略: sh main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_play.py --log_type="play" --crawler="xiaoniangao" --env="dev" xiaoniangao/nohup-play.log
+定向爬虫策略: sh main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_follow.py --log_type="follow" --crawler="xiaoniangao" --env="dev" xiaoniangao/logs/nohup-follow.log
+小时榜爬虫策略: sh main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_hour.py --log_type="hour" --crawler="xiaoniangao" --env="dev" xiaoniangao/logs/nohup-hour.log
+播放量榜爬虫策略: sh main/scheduling_main.sh ./xiaoniangao/xiaoniangao_main/run_xiaoniangao_play.py --log_type="play" --crawler="xiaoniangao" --env="dev" xiaoniangao/logs/nohup-play.log

 nohup python3 -u xiaoniangao/xiaoniangao_follow/insert_video_1.py >> xiaoniangao/nohup-1.log 2>&1 &
 nohup python3 -u xiaoniangao/xiaoniangao_follow/insert_video_2.py >> xiaoniangao/nohup-1.log 2>&1 &

+ 129 - 119
gongzhonghao/gongzhonghao_author/gongzhonghao1_author.py

@@ -18,7 +18,7 @@ sys.path.append(os.getcwd())
 from common.common import Common
 from common.feishu import Feishu
 from common.publish import Publish
-from common.getuser import getUser
+# from common.getuser import getUser
 from common.scheduling_db import MysqlHelper
 from common.public import get_config_from_mysql, download_rule, title_like

@@ -48,53 +48,70 @@ class GongzhonghaoAuthor1:
         return token_dict

     @classmethod
-    def create_user(cls, log_type, crawler, user_sheet, i, env):
-        user_name = user_sheet[i][0]
-        wechat_name = user_sheet[i][2]
-        if wechat_name is None or wechat_name.strip() == "" or wechat_name.replace(" ", "") == "":
-            wechat_name = user_name
-        Common.logger(log_type, crawler).info(f"before_wechat_name:{type(wechat_name)}, {wechat_name}")
-        our_uid = user_sheet[i][5]
-        our_user_link = user_sheet[i][6]
-        user_info_dict = cls.get_user_info(log_type=log_type, crawler=crawler, wechat_name=wechat_name, env=env)
-        out_uid = user_info_dict["user_id"]
-        avatar_url = user_info_dict["avatar_url"]
-        tag1 = user_sheet[i][7]
-        tag2 = user_sheet[i][8]
-        tag3 = user_sheet[i][9]
-        tag4 = user_sheet[i][10]
-        tag5 = user_sheet[i][11]
-        tag6 = user_sheet[i][12]
-        Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息")
-        if out_uid is None or our_uid is None:
-            # 用来创建our_id的信息
-            user_dict = {
-                'recommendStatus': -6,
-                'appRecommendStatus': -6,
-                'nickName': user_info_dict["user_name"],
-                'avatarUrl': user_info_dict['avatar_url'],
-                'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5},{tag6}',
-            }
-            our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
-            Common.logger(log_type, crawler).info(f'新创建的站内UID:{our_uid}')
-            if env == 'prod':
-                our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
-            else:
-                our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
-            Feishu.update_values(log_type, crawler, "Bzv72P", f'D{i + 1}:G{i + 1}', [
-                [user_info_dict["user_id"], user_info_dict["avatar_url"], our_uid, our_user_link]])
-            Common.logger(log_type, crawler).info(f'用户信息创建成功!\n')
-        else:
-            Common.logger(log_type, crawler).info("用户信息已存在\n")
-        our_user_dict = {
-            'user_name': user_name,
-            'user_id': out_uid,
-            'wechat_name': wechat_name,
-            'our_uid': our_uid,
-            'our_user_link': our_user_link,
-            'avatar_url': avatar_url,
-        }
-        return our_user_dict
+    def get_users(cls, log_type, crawler, sheetid, env):
+        while True:
+            user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+            if user_sheet is None:
+                Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 2秒后重试")
+                time.sleep(2)
+                continue
+            user_list = []
+            len_sheet = len(user_sheet)
+            if len_sheet >= 101:
+                len_sheet = 101
+            for i in range(1, len_sheet):
+            # for i in range(1, 3):
+                user_name = user_sheet[i][0]
+                wechat_name = user_sheet[i][2]
+                if wechat_name is None or wechat_name.strip() == "" or wechat_name.replace(" ", "") == "":
+                    wechat_name = user_name
+                # Common.logger(log_type, crawler).info(f"before_wechat_name:{type(wechat_name)}, {wechat_name}")
+                # our_uid = user_sheet[i][5]
+                # our_user_link = user_sheet[i][6]
+                out_uid = user_sheet[i][3]
+                avatar_url = user_sheet[i][4]
+                if out_uid is None or out_uid.strip() == "" or out_uid.replace(" ", "") == "":
+                    user_info_dict = cls.get_user_info(log_type=log_type, crawler=crawler, wechat_name=wechat_name, env=env)
+                    out_uid = user_info_dict["user_id"]
+                    avatar_url = user_info_dict["avatar_url"]
+                # tag1 = user_sheet[i][7]
+                # tag2 = user_sheet[i][8]
+                # tag3 = user_sheet[i][9]
+                # tag4 = user_sheet[i][10]
+                # tag5 = user_sheet[i][11]
+                # tag6 = user_sheet[i][12]
+                # Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息")
+                # if out_uid is None or our_uid is None:
+                #     # 用来创建our_id的信息
+                #     user_dict = {
+                #         'recommendStatus': -6,
+                #         'appRecommendStatus': -6,
+                #         'nickName': user_info_dict["user_name"],
+                #         'avatarUrl': user_info_dict['avatar_url'],
+                #         'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5},{tag6}',
+                #     }
+                #     our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
+                #     Common.logger(log_type, crawler).info(f'新创建的站内UID:{our_uid}')
+                #     if env == 'prod':
+                #         our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
+                #     else:
+                #         our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
+                #     Feishu.update_values(log_type, crawler, "Bzv72P", f'D{i + 1}:G{i + 1}', [
+                #         [user_info_dict["user_id"], user_info_dict["avatar_url"], our_uid, our_user_link]])
+                #     Common.logger(log_type, crawler).info(f'用户信息创建成功!\n')
+                # else:
+                #     Common.logger(log_type, crawler).info("用户信息已存在\n")
+                our_user_dict = {
+                    'user_name': user_name,
+                    'user_id': out_uid,
+                    'wechat_name': wechat_name,
+                    # 'our_uid': our_uid,
+                    # 'our_user_link': our_user_link,
+                    'avatar_url': avatar_url,
+                }
+                user_list.append(our_user_dict)
+            return user_list

     # 获取用户 fakeid
     @classmethod
@@ -276,60 +293,60 @@ class GongzhonghaoAuthor1:
                 begin += 5
                 app_msg_list = r.json()['app_msg_list']
                 for article in app_msg_list:
-                    # try:
-                    create_time = article.get('create_time', 0)
-                    publish_time_stamp = int(create_time)
-                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                    article_url = article.get('link', '')
-                    video_dict = {
-                        'video_id': article.get('aid', ''),
-                        'video_title': article.get("title", "").replace(' ', '').replace('"', '').replace("'", ""),
-                        'publish_time_stamp': publish_time_stamp,
-                        'publish_time_str': publish_time_str,
-                        'user_name': user_dict["user_name"],
-                        'play_cnt': 0,
-                        'comment_cnt': 0,
-                        'like_cnt': 0,
-                        'share_cnt': 0,
-                        'user_id': user_dict['user_id'],
-                        'avatar_url': user_dict['avatar_url'],
-                        'cover_url': article.get('cover', ''),
-                        'article_url': article.get('link', ''),
-                        'video_url': cls.get_video_url(article_url, env),
-                        'session': f'gongzhonghao-author1-{int(time.time())}'
-                    }
-                    for k, v in video_dict.items():
-                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    try:
+                        create_time = article.get('create_time', 0)
+                        publish_time_stamp = int(create_time)
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                        article_url = article.get('link', '')
+                        video_dict = {
+                            'video_id': article.get('aid', ''),
+                            'video_title': article.get("title", "").replace(' ', '').replace('"', '').replace("'", ""),
+                            'publish_time_stamp': publish_time_stamp,
+                            'publish_time_str': publish_time_str,
+                            'user_name': user_dict["user_name"],
+                            'play_cnt': 0,
+                            'comment_cnt': 0,
+                            'like_cnt': 0,
+                            'share_cnt': 0,
+                            'user_id': user_dict['user_id'],
+                            'avatar_url': user_dict['avatar_url'],
+                            'cover_url': article.get('cover', ''),
+                            'article_url': article.get('link', ''),
+                            'video_url': cls.get_video_url(article_url, env),
+                            'session': f'gongzhonghao-author1-{int(time.time())}'
+                        }
+                        for k, v in video_dict.items():
+                            Common.logger(log_type, crawler).info(f"{k}:{v}")

-                    if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('max', 1000)):
-                        Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
-                        return
+                        if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('max', 1000)):
+                            Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
+                            return

-                    if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
-                        Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
-                    # 标题敏感词过滤
-                    elif any(str(word) if str(word) in video_dict['video_title'] else False
-                             for word in get_config_from_mysql(log_type=log_type,
-                                                               source=crawler,
-                                                               env=env,
-                                                               text="filter",
-                                                               action="")) is True:
-                        Common.logger(log_type, crawler).info("标题已中过滤词\n")
-                    # 已下载判断
-                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
-                        Common.logger(log_type, crawler).info("视频已下载\n")
-                    # 标题相似度
-                    elif title_like(log_type, crawler, video_dict['video_title'], cls.platform, env) is True:
-                        Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
-                    else:
-                        cls.download_publish(log_type=log_type,
-                                             crawler=crawler,
-                                             video_dict=video_dict,
-                                             rule_dict=rule_dict,
-                                             user_dict=user_dict,
-                                             env=env)
-                    # except Exception as e:
-                    #     Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+                        if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
+                            Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
+                        # 标题敏感词过滤
+                        elif any(str(word) if str(word) in video_dict['video_title'] else False
+                                 for word in get_config_from_mysql(log_type=log_type,
+                                                                   source=crawler,
+                                                                   env=env,
+                                                                   text="filter",
+                                                                   action="")) is True:
+                            Common.logger(log_type, crawler).info("标题已中过滤词\n")
+                        # 已下载判断
+                        elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                            Common.logger(log_type, crawler).info("视频已下载\n")
+                        # 标题相似度
+                        elif title_like(log_type, crawler, video_dict['video_title'], cls.platform, env) is True:
+                            Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
+                        else:
+                            cls.download_publish(log_type=log_type,
+                                                 crawler=crawler,
+                                                 video_dict=video_dict,
+                                                 rule_dict=rule_dict,
+                                                 # user_dict=user_dict,
+                                                 env=env)
+                    except Exception as e:
+                        Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                 Common.logger(log_type, crawler).info('休眠 60 秒\n')
                 time.sleep(60)

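Editor's note: this hunk re-enables the per-article try/except that had been commented out, so one malformed article no longer aborts the whole page of results. A minimal sketch of the isolation pattern, assuming a hypothetical process_article in place of the video_dict build, filter checks, and download_publish above:

import logging

def process_article(article):
    # hypothetical stand-in for the per-article work in the hunk above
    if not article.get("link"):
        raise ValueError("article has no link")

def crawl_page(app_msg_list, logger):
    for article in app_msg_list:
        try:
            process_article(article)
        except Exception as e:
            # log and continue with the next article instead of killing the loop
            logger.error(f"抓取单条视频异常:{e}")

logging.basicConfig()
crawl_page([{"link": "https://mp.weixin.qq.com/s/xxx"}, {}], logging.getLogger("author"))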
@@ -341,7 +358,7 @@ class GongzhonghaoAuthor1:

     # 下载/上传
     @classmethod
-    def download_publish(cls, log_type, crawler, video_dict, rule_dict, user_dict, env):
+    def download_publish(cls, log_type, crawler, video_dict, rule_dict, env):
         # 下载视频
         Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
         md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
@@ -376,13 +393,13 @@ class GongzhonghaoAuthor1:

         # 上传视频
         Common.logger(log_type, crawler).info("开始上传视频...")
-        strategy = "定向爬虫策略"
+        strategy = "定向爬虫策略"
         if env == 'prod':
             oss_endpoint = "inner"
             our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                       crawler=crawler,
                                                       strategy=strategy,
-                                                      our_uid=user_dict["our_uid"],
+                                                      our_uid="follow",
                                                       oss_endpoint=oss_endpoint,
                                                       env=env)
             our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
@@ -391,7 +408,7 @@ class GongzhonghaoAuthor1:
             our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                       crawler=crawler,
                                                       strategy=strategy,
-                                                      our_uid=user_dict["our_uid"],
+                                                      our_uid="follow",
                                                       oss_endpoint=oss_endpoint,
                                                       env=env)
             our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
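Editor's note: in both upload branches only oss_endpoint ("inner" vs "out") and the admin host change, and our_uid is now the literal "follow" rather than user_dict["our_uid"]. A hedged sketch of the link construction, with hosts taken from the diff and a hypothetical helper name:

def admin_video_link(our_video_id, env):
    # hosts as they appear in the diff; the helper itself is illustrative only
    host = "admin.piaoquantv.com" if env == "prod" else "testadmin.piaoquantv.com"
    return f"https://{host}/cms/post-detail/{our_video_id}/info"

print(admin_video_link(12345, "prod"))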
@@ -460,19 +477,12 @@ class GongzhonghaoAuthor1:

     @classmethod
     def get_all_videos(cls, log_type, crawler, rule_dict, env):
-        while True:
-            user_sheet = Feishu.get_values_batch(log_type, crawler, "Bzv72P")
-            if user_sheet is None:
-                Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 2秒后重试")
-                time.sleep(2)
-                continue
-            len_sheet = len(user_sheet)
-            if len_sheet >= 101:
-                len_sheet = 101
-            Common.logger(log_type, crawler).info(f"len_sheet:{len_sheet}")
-            for i in range(6, len_sheet):
-                user_dict = cls.create_user(log_type=log_type, crawler=crawler, user_sheet=user_sheet, i=i, env=env)
-                # try:
+        user_list = cls.get_users(log_type, crawler, "Bzv72P", env)
+        if user_list is None or len(user_list) == 0:
+            Common.logger(log_type, crawler).warning(f"抓取用户列表为空\n")
+            return
+        for user_dict in user_list:
+            try:
                 Common.logger(log_type, crawler).info(f'获取 {user_dict["user_name"]} 公众号视频\n')
                 cls.get_videoList(log_type=log_type,
                                   crawler=crawler,
@@ -481,8 +491,8 @@ class GongzhonghaoAuthor1:
                                   env=env)
                 Common.logger(log_type, crawler).info('休眠 60 秒\n')
                 time.sleep(60)
-                # except Exception as e:
-                #     Common.logger(log_type, crawler).info(f'抓取{user_dict["user_name"]}公众号时异常:{e}\n')
+            except Exception as e:
+                Common.logger(log_type, crawler).info(f'抓取{user_dict["user_name"]}公众号时异常:{e}\n')


 if __name__ == "__main__":
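Editor's note: the net effect of this file's changes is that get_users now only reads the Feishu sheet (column 0: user_name, 2: wechat_name, 3: out_uid, 4: avatar_url) and falls back to a get_user_info lookup when out_uid is blank; account creation moves to the new script below. A minimal sketch of that row mapping under those column assumptions, with in-memory rows standing in for Feishu.get_values_batch:

def lookup_user_info(wechat_name):
    # hypothetical stand-in for cls.get_user_info (the searchbiz lookup)
    return {"user_id": f"fakeid-{wechat_name}", "avatar_url": "https://example.com/a.png"}

rows = [
    ["user_name", "", "wechat_name", "out_uid", "avatar_url"],   # header row
    ["账号A", "", "gzh_a", "fakeid-a", "https://example.com/1.png"],
    ["账号B", "", "", "", ""],                                    # blank cells trigger fallbacks
]
user_list = []
for row in rows[1:]:
    user_name, wechat_name = row[0], row[2]
    if wechat_name is None or wechat_name.strip() == "":
        wechat_name = user_name                 # blank wechat_name -> user_name
    out_uid, avatar_url = row[3], row[4]
    if out_uid is None or out_uid.strip() == "":
        info = lookup_user_info(wechat_name)    # blank out_uid -> live lookup
        out_uid, avatar_url = info["user_id"], info["avatar_url"]
    user_list.append({"user_name": user_name, "user_id": out_uid,
                      "wechat_name": wechat_name, "avatar_url": avatar_url})
print(user_list)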

+ 500 - 0
gongzhonghao/gongzhonghao_author/gongzhonghao1_author_create_user.py

@@ -0,0 +1,500 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/28
+import datetime
+import json
+import os
+import shutil
+import sys
+import time
+from hashlib import md5
+import requests
+import urllib3
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.common.by import By
+from selenium import webdriver
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.publish import Publish
+from common.getuser import getUser
+from common.scheduling_db import MysqlHelper
+from common.public import get_config_from_mysql, download_rule, title_like
+
+
+class GongzhonghaoAuthor1:
+    platform = "公众号"
+
+    # 获取 token
+    @classmethod
+    def get_token(cls, log_type, crawler, env):
+        select_sql = f""" select * from crawler_config where source="{crawler}" and title LIKE "%公众号_1%";"""
+        configs = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
+        if len(configs) == 0:
+            Feishu.bot(log_type, crawler, "公众号_1:未配置token")
+            time.sleep(60)
+            return None
+        token_dict = {
+            "token_id": configs[0]["id"],
+            "title": configs[0]["title"].strip(),
+            "token": dict(eval(configs[0]["config"]))["token"].strip(),
+            "cookie": dict(eval(configs[0]["config"]))["cookie"].strip(),
+            "update_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(configs[0]["update_time"]/1000))),
+            "operator": configs[0]["operator"].strip()
+        }
+        # for k, v in token_dict.items():
+        #     print(f"{k}:{type(v)}, {v}")
+        return token_dict
+
+    @classmethod
+    def get_users(cls, log_type, crawler, sheetid, env):
+        while True:
+            user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+            if user_sheet is None:
+                Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 2秒后重试")
+                time.sleep(2)
+                continue
+            user_list = []
+            len_sheet = len(user_sheet)
+            if len_sheet >= 101:
+                len_sheet = 101
+            for i in range(1, len_sheet):
+            # for i in range(1, 3):
+                user_name = user_sheet[i][0]
+                wechat_name = user_sheet[i][2]
+                if wechat_name is None or wechat_name.strip() == "" or wechat_name.replace(" ", "") == "":
+                    wechat_name = user_name
+                Common.logger(log_type, crawler).info(f"before_wechat_name:{type(wechat_name)}, {wechat_name}")
+                our_uid = user_sheet[i][5]
+                our_user_link = user_sheet[i][6]
+                user_info_dict = cls.get_user_info(log_type=log_type, crawler=crawler, wechat_name=wechat_name, env=env)
+                out_uid = user_info_dict["user_id"]
+                avatar_url = user_info_dict["avatar_url"]
+                tag1 = user_sheet[i][7]
+                tag2 = user_sheet[i][8]
+                tag3 = user_sheet[i][9]
+                tag4 = user_sheet[i][10]
+                tag5 = user_sheet[i][11]
+                tag6 = user_sheet[i][12]
+                Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息")
+                if out_uid is None or our_uid is None:
+                    # 用来创建our_id的信息
+                    user_dict = {
+                        'recommendStatus': -6,
+                        'appRecommendStatus': -6,
+                        'nickName': user_info_dict["user_name"],
+                        'avatarUrl': user_info_dict['avatar_url'],
+                        'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5},{tag6}',
+                    }
+                    our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
+                    Common.logger(log_type, crawler).info(f'新创建的站内UID:{our_uid}')
+                    if env == 'prod':
+                        our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
+                    else:
+                        our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
+                    Feishu.update_values(log_type, crawler, "Bzv72P", f'D{i + 1}:G{i + 1}', [
+                        [user_info_dict["user_id"], user_info_dict["avatar_url"], our_uid, our_user_link]])
+                    Common.logger(log_type, crawler).info(f'用户信息创建成功!\n')
+                else:
+                    Common.logger(log_type, crawler).info("用户信息已存在\n")
+                our_user_dict = {
+                    'user_name': user_name,
+                    'user_id': out_uid,
+                    'wechat_name': wechat_name,
+                    'our_uid': our_uid,
+                    'our_user_link': our_user_link,
+                    'avatar_url': avatar_url,
+                }
+                user_list.append(our_user_dict)
+            return user_list
+
+    # 获取用户 fakeid
+    @classmethod
+    def get_user_info(cls, log_type, crawler, wechat_name, env):
+        Common.logger(log_type, crawler).info(f"wechat_name:{wechat_name}")
+        while True:
+            token_dict = cls.get_token(log_type, crawler, env)
+            url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
+            headers = {
+                "accept": "*/*",
+                "accept-encoding": "gzip, deflate, br",
+                "accept-language": "zh-CN,zh;q=0.9",
+                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                           "&type=77&createType=5&token=1011071554&lang=zh_CN",
+                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "sec-ch-ua-mobile": "?0",
+                "sec-ch-ua-platform": '"Windows"',
+                "sec-fetch-dest": "empty",
+                "sec-fetch-mode": "cors",
+                "sec-fetch-site": "same-origin",
+                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                "x-requested-with": "XMLHttpRequest",
+                'cookie': token_dict['cookie'],
+            }
+            params = {
+                "action": "search_biz",
+                "begin": "0",
+                "count": "5",
+                "query": str(wechat_name),
+                "token": token_dict['token'],
+                "lang": "zh_CN",
+                "f": "json",
+                "ajax": "1",
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, verify=False)
+            r.close()
+            if r.json()["base_resp"]["err_msg"] == "invalid session":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if r.json()["base_resp"]["err_msg"] == "freq control":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if "list" not in r.json() or len(r.json()["list"]) == 0:
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            user_info_dict = {'user_name': r.json()["list"][0]["nickname"],
+                              'user_id': r.json()["list"][0]["fakeid"],
+                              'avatar_url': r.json()["list"][0]["round_head_img"]}
+            return user_info_dict
+
+    # 获取腾讯视频下载链接
+    @classmethod
+    def get_tencent_video_url(cls, video_id):
+        url = 'https://vv.video.qq.com/getinfo?vids=' + str(video_id) + '&platform=101001&charge=0&otype=json'
+        response = requests.get(url=url).text.replace('QZOutputJson=', '').replace('"};', '"}')
+        response = json.loads(response)
+        url = response['vl']['vi'][0]['ul']['ui'][0]['url']
+        fvkey = response['vl']['vi'][0]['fvkey']
+        video_url = url + str(video_id) + '.mp4?vkey=' + fvkey
+        return video_url
+
+    @classmethod
+    def get_video_url(cls, article_url, env):
+        # 打印请求配置
+        ca = DesiredCapabilities.CHROME
+        ca["goog:loggingPrefs"] = {"performance": "ALL"}
+
+        # 不打开浏览器运行
+        chrome_options = webdriver.ChromeOptions()
+        chrome_options.add_argument("headless")
+        chrome_options.add_argument(
+            f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
+        chrome_options.add_argument("--no-sandbox")
+
+        # driver初始化
+        if env == "prod":
+            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
+        else:
+            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
+                '/Users/wangkun/Downloads/chromedriver/chromedriver_v113/chromedriver'))
+
+        driver.implicitly_wait(10)
+        driver.get(article_url)
+        time.sleep(1)
+
+        if len(driver.find_elements(By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]')) != 0:
+            video_url = driver.find_element(
+                By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]').get_attribute('src')
+        elif len(driver.find_elements(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]')) != 0:
+            iframe = driver.find_element(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]').get_attribute(
+                'src')
+            video_id = iframe.split('vid=')[-1].split('&')[0]
+            video_url = cls.get_tencent_video_url(video_id)
+        else:
+            video_url = 0
+        driver.quit()
+        return video_url
+
+    # 获取文章列表
+    @classmethod
+    def get_videoList(cls, log_type, crawler, rule_dict, user_dict, env):
+        begin = 0
+        while True:
+            token_dict = cls.get_token(log_type, crawler, env)
+            url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+            headers = {
+                "accept": "*/*",
+                "accept-encoding": "gzip, deflate, br",
+                "accept-language": "zh-CN,zh;q=0.9",
+                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                           "&type=77&createType=5&token=" + str(token_dict['token']) + "&lang=zh_CN",
+                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+                "sec-ch-ua-mobile": "?0",
+                "sec-ch-ua-platform": '"Windows"',
+                "sec-fetch-dest": "empty",
+                "sec-fetch-mode": "cors",
+                "sec-fetch-site": "same-origin",
+                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+                "x-requested-with": "XMLHttpRequest",
+                'cookie': token_dict['cookie'],
+            }
+            params = {
+                "action": "list_ex",
+                "begin": str(begin),
+                "count": "5",
+                "fakeid": user_dict['user_id'],
+                "type": "9",
+                "query": "",
+                "token": str(token_dict['token']),
+                "lang": "zh_CN",
+                "f": "json",
+                "ajax": "1",
+            }
+            urllib3.disable_warnings()
+            r = requests.get(url=url, headers=headers, params=params, verify=False)
+            r.close()
+            if r.json()["base_resp"]["err_msg"] == "invalid session":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if r.json()["base_resp"]["err_msg"] == "freq control":
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler,f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if 'app_msg_list' not in r.json():
+                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+                if 20 >= datetime.datetime.now().hour >= 10:
+                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 10)
+                continue
+            if len(r.json()['app_msg_list']) == 0:
+                Common.logger(log_type, crawler).info('没有更多视频了\n')
+                return
+            else:
+                begin += 5
+                app_msg_list = r.json()['app_msg_list']
+                for article in app_msg_list:
+                    try:
+                        create_time = article.get('create_time', 0)
+                        publish_time_stamp = int(create_time)
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                        article_url = article.get('link', '')
+                        video_dict = {
+                            'video_id': article.get('aid', ''),
+                            'video_title': article.get("title", "").replace(' ', '').replace('"', '').replace("'", ""),
+                            'publish_time_stamp': publish_time_stamp,
+                            'publish_time_str': publish_time_str,
+                            'user_name': user_dict["user_name"],
+                            'play_cnt': 0,
+                            'comment_cnt': 0,
+                            'like_cnt': 0,
+                            'share_cnt': 0,
+                            'user_id': user_dict['user_id'],
+                            'avatar_url': user_dict['avatar_url'],
+                            'cover_url': article.get('cover', ''),
+                            'article_url': article.get('link', ''),
+                            'video_url': cls.get_video_url(article_url, env),
+                            'session': f'gongzhonghao-author1-{int(time.time())}'
+                        }
+                        for k, v in video_dict.items():
+                            Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                        if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('max', 1000)):
+                            Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
+                            return
+
+                        if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
+                            Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
+                        # 标题敏感词过滤
+                        elif any(str(word) if str(word) in video_dict['video_title'] else False
+                                 for word in get_config_from_mysql(log_type=log_type,
+                                                                   source=crawler,
+                                                                   env=env,
+                                                                   text="filter",
+                                                                   action="")) is True:
+                            Common.logger(log_type, crawler).info("标题已中过滤词\n")
+                        # 已下载判断
+                        elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                            Common.logger(log_type, crawler).info("视频已下载\n")
+                        # 标题相似度
+                        elif title_like(log_type, crawler, video_dict['video_title'], cls.platform, env) is True:
+                            Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
+                        else:
+                            cls.download_publish(log_type=log_type,
+                                                 crawler=crawler,
+                                                 video_dict=video_dict,
+                                                 rule_dict=rule_dict,
+                                                 user_dict=user_dict,
+                                                 env=env)
+                    except Exception as e:
+                        Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+                Common.logger(log_type, crawler).info('休眠 60 秒\n')
+                time.sleep(60)
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="公众号" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    # 下载/上传
+    @classmethod
+    def download_publish(cls, log_type, crawler, video_dict, rule_dict, user_dict, env):
+        # 下载视频
+        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+        except FileNotFoundError:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
+            return
+        # 获取视频时长
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+        video_dict["video_width"] = ffmpeg_dict["width"]
+        video_dict["video_height"] = ffmpeg_dict["height"]
+        video_dict["duration"] = ffmpeg_dict["duration"]
+        Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
+        Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
+        Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
+        if download_rule(log_type, crawler, video_dict, rule_dict) is False:
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
+            return
+        # 下载封面
+        Common.download_method(log_type=log_type, crawler=crawler, text="cover",
+                               title=video_dict["video_title"], url=video_dict["cover_url"])
+        # 保存视频信息至 "./videos/{video_title}/info.txt"
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # 上传视频
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        strategy = "定向榜爬虫策略"
+        if env == 'prod':
+            oss_endpoint = "inner"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy=strategy,
+                                                      our_uid=user_dict["our_uid"],
+                                                      oss_endpoint=oss_endpoint,
+                                                      env=env)
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+        else:
+            oss_endpoint = "out"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy=strategy,
+                                                      our_uid=user_dict["our_uid"],
+                                                      oss_endpoint=oss_endpoint,
+                                                      env=env)
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
+
+        if our_video_id is None:
+            try:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
+
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                    out_user_id,
+                                                    platform,
+                                                    strategy,
+                                                    out_video_id,
+                                                    video_title,
+                                                    cover_url,
+                                                    video_url,
+                                                    duration,
+                                                    publish_time,
+                                                    play_cnt,
+                                                    crawler_rule,
+                                                    width,
+                                                    height)
+                                                    values({our_video_id},
+                                                    "{video_dict['user_id']}",
+                                                    "{cls.platform}",
+                                                    "定向爬虫策略",
+                                                    "{video_dict['video_id']}",
+                                                    "{video_dict['video_title']}",
+                                                    "{video_dict['cover_url']}",
+                                                    "{video_dict['video_url']}",
+                                                    {int(video_dict['duration'])},
+                                                    "{video_dict['publish_time_str']}",
+                                                    {int(video_dict['play_cnt'])},
+                                                    '{json.dumps(rule_dict)}',
+                                                    {int(video_dict['video_width'])},
+                                                    {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
+
+        # 视频写入飞书
+        Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
+        # 视频ID工作表,首行写入数据
+        upload_time = int(time.time())
+        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                   "用户主页",
+                   video_dict['video_title'],
+                   video_dict['video_id'],
+                   our_video_link,
+                   int(video_dict['duration']),
+                   f"{video_dict['video_width']}*{video_dict['video_height']}",
+                   video_dict['publish_time_str'],
+                   video_dict['user_name'],
+                   video_dict['user_id'],
+                   video_dict['avatar_url'],
+                   video_dict['cover_url'],
+                   video_dict['article_url'],
+                   video_dict['video_url']]]
+        time.sleep(0.5)
+        Feishu.update_values(log_type, crawler, "47e39d", "F2:Z2", values)
+        Common.logger(log_type, crawler).info('视频下载/上传成功\n')
+
+    @classmethod
+    def get_all_videos(cls, log_type, crawler, rule_dict, env):
+        user_list = cls.get_users(log_type, crawler, "Bzv72P", env)
+        if user_list is None or len(user_list) == 0:
+            Common.logger(log_type, crawler).warning(f"抓取用户列表为空\n")
+            return
+        for user_dict in user_list:
+            try:
+                Common.logger(log_type, crawler).info(f'获取 {user_dict["user_name"]} 公众号视频\n')
+                cls.get_videoList(log_type=log_type,
+                                  crawler=crawler,
+                                  rule_dict=rule_dict,
+                                  user_dict=user_dict,
+                                  env=env)
+                Common.logger(log_type, crawler).info('休眠 60 秒\n')
+                time.sleep(60)
+            except Exception as e:
+                Common.logger(log_type, crawler).info(f'抓取{user_dict["user_name"]}公众号时异常:{e}\n')
+
+
+if __name__ == "__main__":
+    # GongzhonghaoAuthor1.get_token("author", "gongzhonghao", "prod")
+    # print(GongzhonghaoAuthor1.get_users("author", "gongzhonghao", "Bzv72P", "dev"))
+    # print(get_config_from_mysql("author", "gongzhonghao", "dev", "filter", action=""))
+    # print(title_like("author", "gongzhonghao", "公众号", "123", "dev"))
+    print(GongzhonghaoAuthor1.get_user_info("author", "gongzhonghao", "幸福花朵", "dev"))
+    pass
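Editor's note: get_token parses the crawler_config blob with dict(eval(...)). Assuming the config column holds a Python dict literal, ast.literal_eval would be a stricter drop-in that refuses arbitrary expressions — a hedged sketch, not the repo's code:

import ast

config_blob = '{"token": " 1011071554 ", "cookie": "ua_id=xxx"}'  # hypothetical config cell
config = ast.literal_eval(config_blob)  # raises ValueError on anything but a literal
token = config["token"].strip()
cookie = config["cookie"].strip()
print(token, cookie)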

+ 44 - 41
gongzhonghao/gongzhonghao_author/gongzhonghao2_author.py

@@ -15,7 +15,7 @@ from selenium.webdriver.chrome.service import Service
 from selenium.webdriver.common.by import By
 from selenium import webdriver
 sys.path.append(os.getcwd())
-from common.getuser import getUser
+# from common.getuser import getUser
 from common.common import Common
 from common.feishu import Feishu
 from common.publish import Publish
@@ -66,46 +66,49 @@ class GongzhonghaoAuthor2:
             # for i in range(1, 3):
                 user_name = user_sheet[i][0]
                 wechat_name = user_sheet[i][2]
-                if wechat_name is None:
+                if wechat_name is None or wechat_name.strip() == "" or wechat_name.replace(" ", "") == "":
                     wechat_name = user_name
-                our_uid = user_sheet[i][5]
-                our_user_link = user_sheet[i][6]
-                user_info_dict = cls.get_user_info(log_type=log_type, crawler=crawler, wechat_name=wechat_name, env=env)
-                out_uid = user_info_dict["user_id"]
-                avatar_url = user_info_dict["avatar_url"]
-                tag1 = user_sheet[i][7]
-                tag2 = user_sheet[i][8]
-                tag3 = user_sheet[i][9]
-                tag4 = user_sheet[i][10]
-                tag5 = user_sheet[i][11]
-                tag6 = user_sheet[i][12]
-                Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息")
-                if out_uid is None or our_uid is None:
-                    # 用来创建our_id的信息
-                    user_dict = {
-                        'recommendStatus': -6,
-                        'appRecommendStatus': -6,
-                        'nickName': user_info_dict["user_name"],
-                        'avatarUrl': user_info_dict['avatar_url'],
-                        'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5},{tag6}',
-                    }
-                    our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
-                    Common.logger(log_type, crawler).info(f'新创建的站内UID:{our_uid}')
-                    if env == 'prod':
-                        our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
-                    else:
-                        our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
-                    Feishu.update_values(log_type, crawler, sheetid, f'D{i + 1}:G{i + 1}', [
-                        [user_info_dict["user_id"], user_info_dict["avatar_url"], our_uid, our_user_link]])
-                    Common.logger(log_type, crawler).info(f'用户信息创建成功!\n')
-                else:
-                    Common.logger(log_type, crawler).info("用户信息已存在\n")
+                # our_uid = user_sheet[i][5]
+                # our_user_link = user_sheet[i][6]
+                out_uid = user_sheet[i][3]
+                avatar_url = user_sheet[i][4]
+                if out_uid is None or out_uid.strip() == "" or out_uid.replace(" ", "") == "":
+                    user_info_dict = cls.get_user_info(log_type=log_type, crawler=crawler, wechat_name=wechat_name, env=env)
+                    out_uid = user_info_dict["user_id"]
+                    avatar_url = user_info_dict["avatar_url"]
+                # tag1 = user_sheet[i][7]
+                # tag2 = user_sheet[i][8]
+                # tag3 = user_sheet[i][9]
+                # tag4 = user_sheet[i][10]
+                # tag5 = user_sheet[i][11]
+                # tag6 = user_sheet[i][12]
+                # Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息")
+                # if out_uid is None or our_uid is None:
+                #     # 用来创建our_id的信息
+                #     user_dict = {
+                #         'recommendStatus': -6,
+                #         'appRecommendStatus': -6,
+                #         'nickName': user_info_dict["user_name"],
+                #         'avatarUrl': user_info_dict['avatar_url'],
+                #         'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5},{tag6}',
+                #     }
+                #     our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
+                #     Common.logger(log_type, crawler).info(f'新创建的站内UID:{our_uid}')
+                #     if env == 'prod':
+                #         our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
+                #     else:
+                #         our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
+                #     Feishu.update_values(log_type, crawler, sheetid, f'D{i + 1}:G{i + 1}', [
+                #         [user_info_dict["user_id"], user_info_dict["avatar_url"], our_uid, our_user_link]])
+                #     Common.logger(log_type, crawler).info(f'用户信息创建成功!\n')
+                # else:
+                #     Common.logger(log_type, crawler).info("用户信息已存在\n")
                 our_user_dict = {
                     'user_name': user_name,
                     'user_id': out_uid,
                     'wechat_name': wechat_name,
-                    'our_uid': our_uid,
-                    'our_user_link': our_user_link,
+                    # 'our_uid': our_uid,
+                    # 'our_user_link': our_user_link,
                     'avatar_url': avatar_url,
                 }
                 user_list.append(our_user_dict)
@@ -342,7 +345,7 @@ class GongzhonghaoAuthor2:
                                                  crawler=crawler,
                                                  video_dict=video_dict,
                                                  rule_dict=rule_dict,
-                                                 user_dict=user_dict,
+                                                 # user_dict=user_dict,
                                                  env=env)
                     except Exception as e:
                         Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
@@ -358,7 +361,7 @@ class GongzhonghaoAuthor2:

     # 下载/上传
     @classmethod
-    def download_publish(cls, log_type, crawler, video_dict, rule_dict, user_dict, env):
+    def download_publish(cls, log_type, crawler, video_dict, rule_dict, env):
         # 下载视频
         Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
         md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
@@ -393,13 +396,13 @@ class GongzhonghaoAuthor2:
 
         # Upload the video
         Common.logger(log_type, crawler).info("Starting video upload...")
-        strategy = "定向爬虫策略"
+        strategy = "定向爬虫策略"
         if env == 'prod':
             oss_endpoint = "inner"
             our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                       crawler=crawler,
                                                       strategy=strategy,
-                                                      our_uid=user_dict["our_uid"],
+                                                      our_uid="follow",
                                                       oss_endpoint=oss_endpoint,
                                                       env=env)
             our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
@@ -408,7 +411,7 @@ class GongzhonghaoAuthor2:
             our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                       crawler=crawler,
                                                       strategy=strategy,
-                                                      our_uid=user_dict["our_uid"],
+                                                      our_uid="follow",
                                                       oss_endpoint=oss_endpoint,
                                                       env=env)
             our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
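
The prod and test branches of download_publish differ only in the OSS endpoint and the admin host. A hedged sketch of factoring that out (the helper name is an assumption, and the non-prod endpoint value is a guess, since the dev branch's oss_endpoint is not shown in this diff):

def publish_target(env, video_id):
    # prod publishes via the internal OSS endpoint and links to the
    # production admin; everything else goes to the test admin.
    if env == "prod":
        oss_endpoint, host = "inner", "https://admin.piaoquantv.com"
    else:
        oss_endpoint, host = "out", "https://testadmin.piaoquantv.com"  # "out" is assumed
    return oss_endpoint, f"{host}/cms/post-detail/{video_id}/info"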

+ 44 - 41
gongzhonghao/gongzhonghao_author/gongzhonghao3_author.py

@@ -16,7 +16,7 @@ from selenium.webdriver.common.by import By
 from selenium import webdriver
 sys.path.append(os.getcwd())
 from common.common import Common
-from common.getuser import getUser
+# from common.getuser import getUser
 from common.feishu import Feishu
 from common.publish import Publish
 from common.scheduling_db import MysqlHelper
@@ -66,46 +66,49 @@ class GongzhonghaoAuthor3:
             # for i in range(1, 3):
                 user_name = user_sheet[i][0]
                 wechat_name = user_sheet[i][2]
-                if wechat_name is None:
+                if wechat_name is None or wechat_name.strip() == "" or wechat_name.replace(" ", "") == "":
                     wechat_name = user_name
-                our_uid = user_sheet[i][5]
-                our_user_link = user_sheet[i][6]
-                user_info_dict = cls.get_user_info(log_type=log_type, crawler=crawler, wechat_name=wechat_name, env=env)
-                out_uid = user_info_dict["user_id"]
-                avatar_url = user_info_dict["avatar_url"]
-                tag1 = user_sheet[i][7]
-                tag2 = user_sheet[i][8]
-                tag3 = user_sheet[i][9]
-                tag4 = user_sheet[i][10]
-                tag5 = user_sheet[i][11]
-                tag6 = user_sheet[i][12]
-                Common.logger(log_type, crawler).info(f"Updating user info for {user_name}")
-                if out_uid is None or our_uid is None:
-                    # info used to create our_id
-                    user_dict = {
-                        'recommendStatus': -6,
-                        'appRecommendStatus': -6,
-                        'nickName': user_info_dict["user_name"],
-                        'avatarUrl': user_info_dict['avatar_url'],
-                        'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5},{tag6}',
-                    }
-                    our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
-                    Common.logger(log_type, crawler).info(f'Newly created internal UID: {our_uid}')
-                    if env == 'prod':
-                        our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
-                    else:
-                        our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
-                    Feishu.update_values(log_type, crawler, sheetid, f'D{i + 1}:G{i + 1}', [
-                        [user_info_dict["user_id"], user_info_dict["avatar_url"], our_uid, our_user_link]])
-                    Common.logger(log_type, crawler).info(f'User info created successfully!\n')
-                else:
-                    Common.logger(log_type, crawler).info("User info already exists\n")
+                # our_uid = user_sheet[i][5]
+                # our_user_link = user_sheet[i][6]
+                out_uid = user_sheet[i][3]
+                avatar_url = user_sheet[i][4]
+                if out_uid is None or out_uid.strip() == "" or out_uid.replace(" ", "") == "":
+                    user_info_dict = cls.get_user_info(log_type=log_type, crawler=crawler, wechat_name=wechat_name, env=env)
+                    out_uid = user_info_dict["user_id"]
+                    avatar_url = user_info_dict["avatar_url"]
+                # tag1 = user_sheet[i][7]
+                # tag2 = user_sheet[i][8]
+                # tag3 = user_sheet[i][9]
+                # tag4 = user_sheet[i][10]
+                # tag5 = user_sheet[i][11]
+                # tag6 = user_sheet[i][12]
+                # Common.logger(log_type, crawler).info(f"Updating user info for {user_name}")
+                # if out_uid is None or our_uid is None:
+                #     # info used to create our_id
+                #     user_dict = {
+                #         'recommendStatus': -6,
+                #         'appRecommendStatus': -6,
+                #         'nickName': user_info_dict["user_name"],
+                #         'avatarUrl': user_info_dict['avatar_url'],
+                #         'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5},{tag6}',
+                #     }
+                #     our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
+                #     Common.logger(log_type, crawler).info(f'Newly created internal UID: {our_uid}')
+                #     if env == 'prod':
+                #         our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
+                #     else:
+                #         our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
+                #     Feishu.update_values(log_type, crawler, sheetid, f'D{i + 1}:G{i + 1}', [
+                #         [user_info_dict["user_id"], user_info_dict["avatar_url"], our_uid, our_user_link]])
+                #     Common.logger(log_type, crawler).info(f'User info created successfully!\n')
+                # else:
+                #     Common.logger(log_type, crawler).info("User info already exists\n")
                 our_user_dict = {
                     'user_name': user_name,
                     'user_id': out_uid,
                     'wechat_name': wechat_name,
-                    'our_uid': our_uid,
-                    'our_user_link': our_user_link,
+                    # 'our_uid': our_uid,
+                    # 'our_user_link': our_user_link,
                     'avatar_url': avatar_url,
                 }
                 user_list.append(our_user_dict)
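
Taken together, this hunk now reads out_uid and avatar_url from sheet columns D/E first and falls back to get_user_info only when the out_uid cell is blank. A condensed, self-contained sketch under that reading (function and parameter names are illustrative, not repo code):

def build_user_list(user_sheet, get_user_info):
    """get_user_info(wechat_name) -> {"user_id": ..., "avatar_url": ...}"""
    user_list = []
    for row in user_sheet[1:]:               # row 0 is the header
        user_name, wechat_name = row[0], row[2]
        if wechat_name is None or not wechat_name.strip():
            wechat_name = user_name          # fall back to the display name
        out_uid, avatar_url = row[3], row[4]
        if out_uid is None or not out_uid.strip():
            info = get_user_info(wechat_name)  # only hit the API when the cell is blank
            out_uid, avatar_url = info["user_id"], info["avatar_url"]
        user_list.append({"user_name": user_name, "user_id": out_uid,
                          "wechat_name": wechat_name, "avatar_url": avatar_url})
    return user_list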
@@ -348,7 +351,7 @@ class GongzhonghaoAuthor3:
                                                  crawler=crawler,
                                                  video_dict=video_dict,
                                                  rule_dict=rule_dict,
-                                                 user_dict=user_dict,
+                                                 # user_dict=user_dict,
                                                  env=env)
                     except Exception as e:
                         Common.logger(log_type, crawler).error(f"Exception while crawling a single video: {e}\n")
@@ -364,7 +367,7 @@ class GongzhonghaoAuthor3:
 
     # Download / upload
     @classmethod
-    def download_publish(cls, log_type, crawler, video_dict, rule_dict, user_dict, env):
+    def download_publish(cls, log_type, crawler, video_dict, rule_dict, env):
         # Download the video
         Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
         md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
@@ -399,13 +402,13 @@ class GongzhonghaoAuthor3:
 
         # Upload the video
         Common.logger(log_type, crawler).info("Starting video upload...")
-        strategy = "定向爬虫策略"
+        strategy = "定向爬虫策略"
         if env == 'prod':
             oss_endpoint = "inner"
             our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                       crawler=crawler,
                                                       strategy=strategy,
-                                                      our_uid=user_dict["our_uid"],
+                                                      our_uid="follow",
                                                       oss_endpoint=oss_endpoint,
                                                       env=env)
             our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
@@ -414,7 +417,7 @@ class GongzhonghaoAuthor3:
             our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                       crawler=crawler,
                                                       strategy=strategy,
-                                                      our_uid=user_dict["our_uid"],
+                                                      our_uid="follow",
                                                       oss_endpoint=oss_endpoint,
                                                       env=env)
             our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
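
For reference, md_title above is just the hex MD5 of the UTF-8 encoded title, presumably used as a stable key for the downloaded files. A standalone illustration with only the standard library:

from hashlib import md5

video_title = "示例视频标题"            # any crawled title
md_title = md5(video_title.encode("utf8")).hexdigest()
print(md_title)                         # 32-char hex digest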

+ 28 - 28
main/process.sh

@@ -24,34 +24,34 @@ echo "$(date "+%Y-%m-%d %H:%M:%S") Updating environment variables..." >> ${log_path}
 cd ~ && source /etc/profile
 echo "$(date "+%Y-%m-%d %H:%M:%S") Environment variables updated!" >> ${log_path}
 
-# Gongzhonghao (WeChat official account) crawler strategy
-echo "$(date "+%Y-%m-%d %H:%M:%S") Monitoring process status: Gongzhonghao crawler strategy, accounts 1-100" >> ${log_path}
-ps -ef | grep "run_gongzhonghao_follow.py" | grep -v "grep"
-if [ "$?" -eq 1 ];then
-  echo "$(date "+%Y-%m-%d_%H:%M:%S") Stopped unexpectedly, restarting!" >> ${log_path}
-  if [ ${env} = "dev" ];then
-    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow.py --log_type="author" --crawler="gongzhonghao" --env="dev" gongzhonghao/logs/nohup-follow.log
-  else
-    cd ${piaoquan_crawler_dir} && /usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow.py --log_type="author" --crawler="gongzhonghao" --env="prod"  gongzhonghao/logs/nohup-follow.log
-  fi
-  echo "$(date "+%Y-%m-%d %H:%M:%S") Restart complete!" >> ${log_path}
-else
-  echo "$(date "+%Y-%m-%d %H:%M:%S") Gongzhonghao crawler strategy, accounts 1-100: process status normal" >> ${log_path}
-fi
-
-echo "$(date "+%Y-%m-%d %H:%M:%S") Monitoring process status: Gongzhonghao crawler strategy, accounts 101-145" >> ${log_path}
-ps -ef | grep "run_gongzhonghao_follow_2.py" | grep -v "grep"
-if [ "$?" -eq 1 ];then
-  echo "$(date "+%Y-%m-%d_%H:%M:%S") Stopped unexpectedly, restarting!" >> ${log_path}
-  if [ ${env} = "dev" ];then
-    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow_2.py --log_type="follow-2" --crawler="gongzhonghao" --env="dev" gongzhonghao/logs/nohup-follow-2.log
-  else
-    cd ${piaoquan_crawler_dir} && /usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow_2.py --log_type="follow-2" --crawler="gongzhonghao" --env="prod"  gongzhonghao/logs/nohup-follow-2.log
-  fi
-  echo "$(date "+%Y-%m-%d %H:%M:%S") Restart complete!" >> ${log_path}
-else
-  echo "$(date "+%Y-%m-%d %H:%M:%S") Gongzhonghao crawler strategy, accounts 101-145: process status normal" >> ${log_path}
-fi
+## Gongzhonghao (WeChat official account) crawler strategy
+#echo "$(date "+%Y-%m-%d %H:%M:%S") Monitoring process status: Gongzhonghao crawler strategy, accounts 1-100" >> ${log_path}
+#ps -ef | grep "run_gongzhonghao_follow.py" | grep -v "grep"
+#if [ "$?" -eq 1 ];then
+#  echo "$(date "+%Y-%m-%d_%H:%M:%S") Stopped unexpectedly, restarting!" >> ${log_path}
+#  if [ ${env} = "dev" ];then
+#    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow.py --log_type="author" --crawler="gongzhonghao" --env="dev" gongzhonghao/logs/nohup-follow.log
+#  else
+#    cd ${piaoquan_crawler_dir} && /usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow.py --log_type="author" --crawler="gongzhonghao" --env="prod"  gongzhonghao/logs/nohup-follow.log
+#  fi
+#  echo "$(date "+%Y-%m-%d %H:%M:%S") Restart complete!" >> ${log_path}
+#else
+#  echo "$(date "+%Y-%m-%d %H:%M:%S") Gongzhonghao crawler strategy, accounts 1-100: process status normal" >> ${log_path}
+#fi
+#
+#echo "$(date "+%Y-%m-%d %H:%M:%S") Monitoring process status: Gongzhonghao crawler strategy, accounts 101-145" >> ${log_path}
+#ps -ef | grep "run_gongzhonghao_follow_2.py" | grep -v "grep"
+#if [ "$?" -eq 1 ];then
+#  echo "$(date "+%Y-%m-%d_%H:%M:%S") Stopped unexpectedly, restarting!" >> ${log_path}
+#  if [ ${env} = "dev" ];then
+#    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow_2.py --log_type="follow-2" --crawler="gongzhonghao" --env="dev" gongzhonghao/logs/nohup-follow-2.log
+#  else
+#    cd ${piaoquan_crawler_dir} && /usr/bin/sh /data5/piaoquan_crawler/main/scheduling_main.sh ./gongzhonghao/gongzhonghao_main/run_gongzhonghao_follow_2.py --log_type="follow-2" --crawler="gongzhonghao" --env="prod"  gongzhonghao/logs/nohup-follow-2.log
+#  fi
+#  echo "$(date "+%Y-%m-%d %H:%M:%S") Restart complete!" >> ${log_path}
+#else
+#  echo "$(date "+%Y-%m-%d %H:%M:%S") Gongzhonghao crawler strategy, accounts 101-145: process status normal" >> ${log_path}
+#fi
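
The disabled block above follows a common check-and-restart pattern: probe for the process, relaunch when absent, log either way. A hypothetical Python rendering of the same loop (not part of this repo; assumes a Unix host with pgrep available):

import os
import subprocess
import time

def is_running(pattern: str) -> bool:
    # Rough equivalent of `ps -ef | grep pattern | grep -v grep`.
    out = subprocess.run(["pgrep", "-f", pattern], capture_output=True, text=True)
    # Exclude our own PID: the pattern may appear in this script's argv too.
    pids = {int(p) for p in out.stdout.split()} - {os.getpid()}
    return bool(pids)

def watchdog(pattern: str, restart_cmd: list, interval: int = 60):
    # Relaunch `restart_cmd` whenever no process matches `pattern`.
    while True:
        if not is_running(pattern):
            print(f"{pattern} stopped unexpectedly, restarting!")
            subprocess.Popen(restart_cmd)
        time.sleep(interval)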
 
 
 # Xiaoniangao targeted crawler strategy
 echo "$(date "+%Y-%m-%d %H:%M:%S") Monitoring process status: Xiaoniangao targeted crawler strategy" >> ${log_path}