wangkun, 1 year ago
commit eebfceeb08

+ 5 - 0
main/process_mq.sh

@@ -20,6 +20,11 @@ elif [ ${crawler} = "kykjk" ];then
  profile_path=/.base_profile
  python=python3
  log_path=${piaoquan_crawler_dir}main/main_logs/process-mq-$(date +%Y-%m-%d).log
+elif [ ${crawler} = "sph" ] && [ ${log_type} = "search" ];then
+  piaoquan_crawler_dir=/Users/piaoquan/Desktop/piaoquan_crawler/
+  profile_path=/etc/profile
+  python=python3
+  log_path=${piaoquan_crawler_dir}main/main_logs/process-mq-$(date +%Y-%m-%d).log
 elif [ ${crawler} = "kyk" ] || [ ${crawler} = "sph" ];then
 elif [ ${crawler} = "kyk" ] || [ ${crawler} = "sph" ];then
   piaoquan_crawler_dir=/Users/lieyunye/Desktop/crawler/piaoquan_crawler/
   piaoquan_crawler_dir=/Users/lieyunye/Desktop/crawler/piaoquan_crawler/
   profile_path=./base_profile
   profile_path=./base_profile

+ 0 - 45
shipinhao/shipinhao_main/run_shipinhao_search_scheduling.py

@@ -1,45 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/5/5
-import argparse
-import os
-import sys
-sys.path.append(os.getcwd())
-from common.public import task_fun
-from common.common import Common
-from common.scheduling_db import MysqlHelper
-from shipinhao.shipinhao_search.shipinhao_search_scheduling import ShipinhaoSearchScheduling
-
-
-def main(log_type, crawler, task, oss_endpoint, env):
-    task_dict = task_fun(task)['task_dict']
-    rule_dict = task_fun(task)['rule_dict']
-    task_id = task_dict['task_id']
-    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
-    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
-    Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
-    Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
-    Common.logger(log_type, crawler).info(f"用户列表:\n{user_list}")
-    Common.logger(log_type, crawler).info('开始抓取 视频号 搜索爬虫策略\n')
-    ShipinhaoSearchScheduling.get_search_videos(log_type=log_type,
-                                                crawler=crawler,
-                                                rule_dict=rule_dict,
-                                                oss_endpoint=oss_endpoint,
-                                                env=env)
-    Common.del_logs(log_type, crawler)
-    Common.logger(log_type, crawler).info('抓取完一轮\n')
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()  # create the argument parser
-    parser.add_argument('--log_type', type=str)  # add an argument, with its type
-    parser.add_argument('--crawler')  # add an argument
-    parser.add_argument('--task')  # add an argument
-    parser.add_argument('--oss_endpoint')  # add an argument
-    parser.add_argument('--env')  # add an argument
-    args = parser.parse_args()  # assign the arguments (can also be passed from the terminal)
-    main(log_type=args.log_type,
-         crawler=args.crawler,
-         task=args.task,
-         oss_endpoint=args.oss_endpoint,
-         env=args.env)

+ 109 - 0
shipinhao/shipinhao_main/run_sph_search.py

@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/5/5
+import argparse
+from mq_http_sdk.mq_client import *
+from mq_http_sdk.mq_consumer import *
+from mq_http_sdk.mq_exception import MQExceptionBase
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.public import get_consumer, ack_message, task_fun_mq
+from common.scheduling_db import MysqlHelper
+from shipinhao.shipinhao_search.shipinhao_search_scheduling import ShipinhaoSearchScheduling
+
+
+class ShipinhaoSearchMain:
+    @classmethod
+    def shipinhao_search_main(cls, log_type, crawler, topic_name, group_id, env):
+        consumer = get_consumer(topic_name, group_id)
+        # Long polling: if the Topic has no messages, the request is held on the
+        # server and returns as soon as a message becomes consumable.
+        # Long-poll wait time: 30 seconds (the maximum allowed).
+        wait_seconds = 30
+        # Consume at most 1 message per call (up to 16 allowed).
+        batch = 1
+        Common.logger(log_type, crawler).info(f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                              f'WaitSeconds:{wait_seconds}\n'
+                                              f'TopicName:{topic_name}\n'
+                                              f'MQConsumer:{group_id}')
+        Common.logging(log_type, crawler, env, f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                               f'WaitSeconds:{wait_seconds}\n'
+                                               f'TopicName:{topic_name}\n'
+                                               f'MQConsumer:{group_id}')
+        while True:
+            try:
+                # Consume messages via long polling.
+                recv_msgs = consumer.consume_message(batch, wait_seconds)
+                for msg in recv_msgs:
+                    Common.logger(log_type, crawler).info(f"Receive\n"
+                                                          f"MessageId:{msg.message_id}\n"
+                                                          f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                          f"MessageTag:{msg.message_tag}\n"
+                                                          f"ConsumedTimes:{msg.consumed_times}\n"
+                                                          f"PublishTime:{msg.publish_time}\n"
+                                                          f"Body:{msg.message_body}\n"
+                                                          f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                          f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                          f"Properties:{msg.properties}")
+                    Common.logging(log_type, crawler, env, f"Receive\n"
+                                                           f"MessageId:{msg.message_id}\n"
+                                                           f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                           f"MessageTag:{msg.message_tag}\n"
+                                                           f"ConsumedTimes:{msg.consumed_times}\n"
+                                                           f"PublishTime:{msg.publish_time}\n"
+                                                           f"Body:{msg.message_body}\n"
+                                                           f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                           f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                           f"Properties:{msg.properties}")
+                    # ack_mq_message
+                    ack_message(log_type=log_type, crawler=crawler, recv_msgs=recv_msgs, consumer=consumer)
+
+                    # Handle the crawl task
+                    task_dict = task_fun_mq(msg.message_body)['task_dict']
+                    rule_dict = task_fun_mq(msg.message_body)['rule_dict']
+                    task_id = task_dict['id']
+                    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+                    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+                    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+                    Common.logging(log_type, crawler, env, f"调度任务:{task_dict}")
+                    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
+                    Common.logging(log_type, crawler, env, f"抓取规则:{rule_dict}")
+                    # Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
+                    # Common.logging(log_type, crawler, env, f"用户列表:{user_list}\n")
+                    Common.logger(log_type, crawler).info(f'开始抓取:{task_dict["taskName"]}\n')
+                    Common.logging(log_type, crawler, env, f'开始抓取:{task_dict["taskName"]}\n')
+
+                    # Fetch the list of videos that match the rules
+                    ShipinhaoSearchScheduling.get_search_videos(log_type=log_type,
+                                                                crawler=crawler,
+                                                                rule_dict=rule_dict,
+                                                                user_list=user_list,
+                                                                env=env)
+                    Common.logger(log_type, crawler).info('抓取一轮结束\n')
+                    Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+
+            except MQExceptionBase as err:
+                # No message in the Topic to consume.
+                if err.type == "MessageNotExist":
+                    Common.logger(log_type, crawler).info(f"No new message! RequestId:{err.req_id}\n")
+                    Common.logging(log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n")
+                    continue
+
+                Common.logger(log_type, crawler).info(f"Consume Message Fail! Exception:{err}\n")
+                Common.logging(log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n")
+                time.sleep(2)
+                continue
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # create the argument parser
+    parser.add_argument('--log_type', type=str)  # add an argument, with its type
+    parser.add_argument('--crawler')  # add an argument
+    parser.add_argument('--topic_name')  # add an argument
+    parser.add_argument('--group_id')  # add an argument
+    parser.add_argument('--env')  # add an argument
+    args = parser.parse_args()  # assign the arguments (can also be passed from the terminal)
+    ShipinhaoSearchMain.shipinhao_search_main(log_type=args.log_type,
+                                              crawler=args.crawler,
+                                              topic_name=args.topic_name,
+                                              group_id=args.group_id,
+                                              env=args.env)
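For context, here is a minimal standalone sketch of the consume-and-ack loop this entrypoint is built on, written directly against the Aliyun MQ HTTP SDK (mq_http_sdk). The endpoint, credentials, instance id, topic, and group id are placeholders, and handle_task is a hypothetical stand-in for the task_fun_mq parsing plus get_search_videos dispatch; in the repo these SDK calls are wrapped by the get_consumer and ack_message helpers in common.public.

# -*- coding: utf-8 -*-
import time

from mq_http_sdk.mq_client import MQClient
from mq_http_sdk.mq_exception import MQExceptionBase

client = MQClient("${HTTP_ENDPOINT}", "${ACCESS_KEY_ID}", "${ACCESS_KEY_SECRET}")
consumer = client.get_consumer("${INSTANCE_ID}", "${TOPIC_NAME}", "${GROUP_ID}")

def handle_task(message_body):
    print(message_body)  # stand-in for parsing the task and launching the crawl

while True:
    try:
        # Long poll: returns up to 1 message, or raises MessageNotExist
        # after 30 seconds if the topic is empty.
        recv_msgs = consumer.consume_message(1, 30)
    except MQExceptionBase as err:
        if getattr(err, "type", "") == "MessageNotExist":
            continue  # no new message; poll again
        time.sleep(2)  # transient failure; back off briefly
        continue
    for msg in recv_msgs:
        handle_task(msg.message_body)
    # Ack within the invisibility window so the batch is not redelivered.
    consumer.ack_message([msg.receipt_handle for msg in recv_msgs])

Note that this sketch acks after processing, while the committed loop acks each batch before running the crawl, trading possible task loss on a crash for never letting a long crawl outlive the message's invisibility window.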

+ 12 - 12
shipinhao/shipinhao_search/shipinhao_search.py

@@ -395,48 +395,48 @@ class ShipinhaoSearch:
         # Likes
         like_id = driver.find_element(By.ID, 'com.tencent.mm:id/k04')  # WeChat version 8.0.30
         like_cnt = like_id.get_attribute('name')
-        if like_cnt == "" or like_cnt == "喜欢" or like_cnt == "火" or cls.is_contain_chinese(like_cnt) is True:
-            like_cnt = 0
-        elif '万' in like_cnt:
+        if '万' in like_cnt:
             like_cnt = int(float(like_cnt.split('万')[0]) * 10000)
         elif '万+' in like_cnt:
             like_cnt = int(float(like_cnt.split('万+')[0]) * 10000)
+        elif like_cnt == "" or like_cnt == "喜欢" or like_cnt == "火" or cls.is_contain_chinese(like_cnt) is True:
+            like_cnt = 0
         else:
             like_cnt = int(float(like_cnt))

         # Shares
         share_id = driver.find_element(By.ID, 'com.tencent.mm:id/jhv')
         share_cnt = share_id.get_attribute('name')
-        if share_cnt == "" or share_cnt == "转发" or cls.is_contain_chinese(share_cnt) is True:
-            share_cnt = 0
-        elif '万' in share_cnt:
+        if '万' in share_cnt:
             share_cnt = int(float(share_cnt.split('万')[0]) * 10000)
         elif '万+' in share_cnt:
             share_cnt = int(float(share_cnt.split('万+')[0]) * 10000)
+        elif share_cnt == "" or share_cnt == "转发" or cls.is_contain_chinese(share_cnt) is True:
+            share_cnt = 0
         else:
             share_cnt = int(float(share_cnt))

         # Favorites
         favorite_id = driver.find_element(By.ID, 'com.tencent.mm:id/fnp')
         favorite_cnt = favorite_id.get_attribute('name')
-        if favorite_cnt == "" or favorite_cnt == "收藏" or favorite_cnt == "推荐" or favorite_cnt == "火" or cls.is_contain_chinese(favorite_cnt) is True:
-            favorite_cnt = 0
-        elif '万' in favorite_cnt:
+        if '万' in favorite_cnt:
             favorite_cnt = int(float(favorite_cnt.split('万')[0]) * 10000)
         elif '万+' in favorite_cnt:
             favorite_cnt = int(float(favorite_cnt.split('万+')[0]) * 10000)
+        elif favorite_cnt == "" or favorite_cnt == "收藏" or favorite_cnt == "推荐" or favorite_cnt == "火" or cls.is_contain_chinese(favorite_cnt) is True:
+            favorite_cnt = 0
         else:
             favorite_cnt = int(float(favorite_cnt))

         # Comments
         comment_id = driver.find_element(By.ID, 'com.tencent.mm:id/bje')
         comment_cnt = comment_id.get_attribute('name')
-        if comment_cnt == "" or comment_cnt == "评论" or cls.is_contain_chinese(comment_cnt) is True:
-            comment_cnt = 0
-        elif '万' in comment_cnt:
+        if '万' in comment_cnt:
             comment_cnt = int(float(comment_cnt.split('万')[0]) * 10000)
         elif '万+' in comment_cnt:
             comment_cnt = int(float(comment_cnt.split('万+')[0]) * 10000)
+        elif comment_cnt == "" or comment_cnt == "评论" or cls.is_contain_chinese(comment_cnt) is True:
+            comment_cnt = 0
         else:
             comment_cnt = int(float(comment_cnt))


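The reordering above is the substance of this change: '万' is itself a Chinese character, so when the is_contain_chinese() test ran first, a counter such as '3.2万' was zeroed instead of parsed; the suffix checks must come before the placeholder check. The four near-identical branch chains could also collapse into one helper. A hypothetical consolidation follows — the repo's is_contain_chinese body is not shown in this diff, so a common CJK-range implementation is assumed:

def is_contain_chinese(s: str) -> bool:
    # True if any character falls in the CJK Unified Ideographs block.
    return any('\u4e00' <= ch <= '\u9fff' for ch in s)

def parse_cnt(raw: str) -> int:
    """Parse a WeChat counter label ('3.2万', '1.1万+', '456', '喜欢', ...) into an int."""
    if raw is None or raw == "":
        return 0
    if '万+' in raw:  # test the longer suffix first so this branch stays reachable
        return int(float(raw.split('万+')[0]) * 10000)
    if '万' in raw:
        return int(float(raw.split('万')[0]) * 10000)
    if is_contain_chinese(raw):  # placeholder labels: 喜欢 / 转发 / 收藏 / 推荐 / 评论 / 火
        return 0
    return int(float(raw))

assert parse_cnt('3.2万') == 32000 and parse_cnt('喜欢') == 0 and parse_cnt('128') == 128

As committed, the '万' test precedes '万+', so the '万+' branch can never fire; that is harmless only because split('万') happens to handle those strings too, which is why the sketch checks the longer suffix first.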
+ 120 - 347
shipinhao/shipinhao_search/shipinhao_search_scheduling.py

@@ -4,7 +4,6 @@
 import datetime
 import json
 import os
-import shutil
 import sys
 import time
 from datetime import date, timedelta
@@ -15,10 +14,9 @@ from appium.webdriver.webdriver import WebDriver
 from selenium.common import NoSuchElementException
 from selenium.webdriver.common.by import By
 sys.path.append(os.getcwd())
-from common.feishu import Feishu
-from common.publish import Publish
+from common.public import download_rule
+from common.mq import MQ
 from common.common import Common
-from common.getuser import getUser
 from common.scheduling_db import MysqlHelper


@@ -27,116 +25,14 @@ class ShipinhaoSearchScheduling:
     i = 0
     download_cnt = 0

-    # Baseline threshold rules
-    @staticmethod
-    def download_rule(log_type, crawler, video_dict, rule_dict):
-        """
-        Basic rules that gate whether a video is downloaded
-        :param log_type: log name
-        :param crawler: which crawler
-        :param video_dict: video info, as a dict
-        :param rule_dict: rule info, as a dict
-        :return: True if the rules are satisfied, otherwise False
-        """
-        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
-        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
-        if rule_play_cnt_max == 0:
-            rule_play_cnt_max = 100000000
-
-        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
-        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
-        if rule_duration_max == 0:
-            rule_duration_max = 100000000
-
-        # rule_period_min = rule_dict.get('period', {}).get('min', 0)
-        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
-        # if rule_period_max == 0:
-        #     rule_period_max = 100000000
-
-        # rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
-        # rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
-        # if rule_fans_cnt_max == 0:
-        #     rule_fans_cnt_max = 100000000
-
-        # rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
-        # rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
-        # if rule_videos_cnt_max == 0:
-        #     rule_videos_cnt_max = 100000000
-
-        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
-        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
-        if rule_like_cnt_max == 0:
-            rule_like_cnt_max = 100000000
-
-        rule_width_min = rule_dict.get('width', {}).get('min', 0)
-        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
-        if rule_width_max == 0:
-            rule_width_max = 100000000
-
-        rule_height_min = rule_dict.get('height', {}).get('min', 0)
-        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
-        if rule_height_max == 0:
-            rule_height_max = 100000000
-
-        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
-        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
-        if rule_share_cnt_max == 0:
-            rule_share_cnt_max = 100000000
-
-        rule_favorite_cnt_min = rule_dict.get('favorite_cnt', {}).get('min', 0)
-        rule_favorite_cnt_max = rule_dict.get('favorite_cnt', {}).get('max', 100000000)
-        if rule_favorite_cnt_max == 0:
-            rule_favorite_cnt_max = 100000000
-
-        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
-        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
-        if rule_comment_cnt_max == 0:
-            rule_comment_cnt_max = 100000000
-
-        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
-        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 0)
-        if rule_publish_time_max == 0:
-            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
-
-        Common.logger(log_type, crawler).info(
-            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_favorite_cnt_max:{int(rule_favorite_cnt_max)} >= favorite_cnt:{int(video_dict["favorite_cnt"])} >= rule_favorite_cnt_min:{int(rule_favorite_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])*1000} >= rule_publish_time_min:{int(rule_publish_time_min)}')
-
-        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
-                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
-                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
-                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
-                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
-                and int(rule_favorite_cnt_max) >= int(video_dict['favorite_cnt']) >= int(rule_favorite_cnt_min) \
-                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
-                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
-                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp'])*1000 >= int(rule_publish_time_min):
-            return True
-        else:
-            return False
-
     @classmethod
-    def start_wechat(cls, log_type, crawler, word, rule_dict, our_uid, oss_endpoint, env):
+    def start_wechat(cls, log_type, crawler, rule_dict, user_dict, env):
         Common.logger(log_type, crawler).info('启动微信')
+        Common.logging(log_type, crawler, env, '启动微信')
         if env == "dev":
             chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver"
         else:
-            chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
+            chromedriverExecutable = "/Users/piaoquan/Downloads/chromedriver/chromedriver_v111/chromedriver"
         caps = {
             "platformName": "Android",  # phone OS: Android / iOS
             "deviceName": "Android",  # name of the connected device (emulator or real device); any value works on Android
@@ -156,31 +52,35 @@ class ShipinhaoSearchScheduling:
             "showChromedriverLog": True,
             "showChromedriverLog": True,
             # "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
             # "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
             "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
             "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
+            # "chromeOptions": {"androidProcess": "com.tencent.mm:toolsmp"},
+            # "chromeOptions": {"androidProcess": "com.tencent.mm"},
             'enableWebviewDetailsCollection': True,
             'enableWebviewDetailsCollection': True,
             'setWebContentsDebuggingEnabled': True,
             'setWebContentsDebuggingEnabled': True,
             'chromedriverExecutable': chromedriverExecutable,
             'chromedriverExecutable': chromedriverExecutable,
         }
         }
         driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
         driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
         driver.implicitly_wait(10)
         driver.implicitly_wait(10)
-        if len(driver.find_elements(By.ID, 'android:id/text1')) != 0:
-            driver.find_elements(By.ID, 'android:id/text1')[0].click()
+        # Common.logger(log_type, crawler).info("点击微信")
+        # if len(driver.find_elements(By.ID, 'android:id/text1')) != 0:
+        #     driver.find_elements(By.ID, 'android:id/text1')[0].click()
+        # Common.logger(log_type, crawler).info("等待 5s")
         time.sleep(5)
         time.sleep(5)
         cls.search_video(log_type=log_type,
         cls.search_video(log_type=log_type,
                          crawler=crawler,
                          crawler=crawler,
-                         word=word,
                          rule_dict=rule_dict,
                          rule_dict=rule_dict,
-                         our_uid=our_uid,
-                         oss_endpoint=oss_endpoint,
+                         user_dict=user_dict,
                          driver=driver,
                          driver=driver,
                          env=env)
                          env=env)
         cls.close_wechat(log_type=log_type,
         cls.close_wechat(log_type=log_type,
                          crawler=crawler,
                          crawler=crawler,
+                         env=env,
                          driver=driver)
                          driver=driver)
 
 
     @classmethod
     @classmethod
-    def close_wechat(cls, log_type, crawler, driver: WebDriver):
+    def close_wechat(cls, log_type, crawler, env, driver: WebDriver):
         driver.quit()
         driver.quit()
         Common.logger(log_type, crawler).info(f"微信退出成功\n")
         Common.logger(log_type, crawler).info(f"微信退出成功\n")
+        Common.logging(log_type, crawler, env, f"微信退出成功\n")
 
 
     @classmethod
     @classmethod
     def is_contain_chinese(cls, strword):
     def is_contain_chinese(cls, strword):
@@ -206,108 +106,132 @@ class ShipinhaoSearchScheduling:

     @classmethod
     def check_to_webview(cls, log_type, crawler, driver: WebDriver):
-        # Common.logger(log_type, crawler).info('切换到webview')
         webviews = driver.contexts
+        Common.logger(log_type, crawler).info(f"webviews:{webviews}")
         driver.switch_to.context(webviews[1])
+        Common.logger(log_type, crawler).info(driver.current_context)
         time.sleep(1)
         windowHandles = driver.window_handles
         for handle in windowHandles:
-            driver.switch_to.window(handle)
             try:
-                shipinhao_webview = driver.find_element(By.XPATH, '//div[@class="unit"]')
-                if shipinhao_webview:
-                    Common.logger(log_type, crawler).info('切换到视频号 webview 成功')
-                    return "成功"
-            except Exception as e:
-                Common.logger(log_type, crawler).info(f"{e}\n")
+                driver.switch_to.window(handle)
+                time.sleep(1)
+                driver.find_element(By.XPATH, '//div[@class="unit"]')
+                Common.logger(log_type, crawler).info('切换 webview 成功')
+                return "成功"
+            except Exception:
+                Common.logger(log_type, crawler).info("切换 webview 失败")

     @classmethod
     def repeat_out_video_id(cls, log_type, crawler, out_video_id, env):
-        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{out_video_id}"; """
+        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{out_video_id}"; """
         repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)

     @classmethod
     def repeat_video_url(cls, log_type, crawler, video_url, env):
-        sql = f""" select * from crawler_video where platform="{cls.platform}" and video_url="{video_url}"; """
+        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and video_url="{video_url}"; """
         repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)

     @classmethod
-    def search_video(cls, log_type, crawler, word, rule_dict, driver: WebDriver, our_uid, oss_endpoint, env):
+    def search_video(cls, log_type, crawler, rule_dict, driver: WebDriver, user_dict, env):
+        mq = MQ(topic_name="topic_crawler_etl_" + env)
         # Click the WeChat search box and type in the search term
         driver.implicitly_wait(10)
-        driver.find_element(By.ID, 'com.tencent.mm:id/j5t').click()
+        Common.logger(log_type, crawler).info("点击搜索框")
+        Common.logging(log_type, crawler, env, "点击搜索框")
+        driver.find_element(By.ID, 'com.tencent.mm:id/j5t').click()  # WeChat 8.0.30
+        # driver.find_element(By.ID, 'com.tencent.mm:id/he6').click()  # WeChat 8.0.16
         time.sleep(0.5)
-        Common.logger(log_type, crawler).info(f'输入搜索词:{word}')
-        driver.find_element(By.ID, 'com.tencent.mm:id/cd7').clear().send_keys(word)
+        driver.find_element(By.ID, 'com.tencent.mm:id/cd7').clear().send_keys(user_dict["link"])  # WeChat 8.0.30
+        # driver.find_element(By.ID, 'com.tencent.mm:id/bxz').clear().send_keys(word)  # WeChat 8.0.16
         driver.press_keycode(AndroidKey.ENTER)
-        # driver.find_elements(By.ID, 'com.tencent.mm:id/oi4')[0].click()
-        driver.find_element(By.ID, 'com.tencent.mm:id/m94').click()
+        Common.logger(log_type, crawler).info("进入搜索词页面")
+        Common.logging(log_type, crawler, env, "进入搜索词页面")
+        driver.find_elements(By.ID, 'com.tencent.mm:id/br8')[0].click()  # WeChat 8.0.30
+        # driver.find_elements(By.ID, 'com.tencent.mm:id/jkg')[0].click()  # WeChat 8.0.16
         time.sleep(5)

         # Switch to the webview of the WeChat search-results page
         check_to_webview = cls.check_to_webview(log_type, crawler, driver)
         if check_to_webview is None:
             Common.logger(log_type, crawler).info("切换到视频号 webview 失败\n")
+            Common.logging(log_type, crawler, env, "切换到视频号 webview 失败\n")
             return
         time.sleep(1)

         # Switch to the "视频号" (Channels) tab
         shipinhao_tags = cls.search_elements(driver, '//div[@class="unit"]/*[2]')
         Common.logger(log_type, crawler).info('点击"视频号"分类')
+        Common.logging(log_type, crawler, env, '点击"视频号"分类')
         shipinhao_tags[0].click()
         time.sleep(5)

-        videos_cnt = rule_dict.get('videos_cnt', {}).get('min', 0)
+        videos_cnt = rule_dict.get('videos_cnt', {}).get('min', 30)
         index = 0
         while True:
-
-            if cls.search_elements(driver, '//*[@class="double-rich double-rich_vertical"]') is None:
+            if cls.search_elements(driver, '//*[@class="mixed-box__bd"]') is None:
                 Common.logger(log_type, crawler).info('窗口已销毁\n')
+                Common.logging(log_type, crawler, env, '窗口已销毁\n')
                 return

             Common.logger(log_type, crawler).info('获取视频列表\n')
-            video_elements = cls.search_elements(driver, '//div[@class="vc active__mask"]')
+            Common.logging(log_type, crawler, env, '获取视频列表\n')
+            video_elements = cls.search_elements(driver, '//div[@class="rich-media active__absolute"]')
             if video_elements is None:
                 Common.logger(log_type, crawler).warning(f'video_elements:{video_elements}')
+                Common.logging(log_type, crawler, env, f'video_elements:{video_elements}')
                 return

             video_element_temp = video_elements[index:]
             if len(video_element_temp) == 0:
                 Common.logger(log_type, crawler).info('到底啦~~~~~~~~~~~~~\n')
+                Common.logging(log_type, crawler, env, '到底啦~~~~~~~~~~~~~\n')
                 return

             for i, video_element in enumerate(video_element_temp):
                 try:
                     Common.logger(log_type, crawler).info(f"download_cnt:{cls.download_cnt}")
+                    Common.logging(log_type, crawler, env, f"download_cnt:{cls.download_cnt}")
                     if cls.download_cnt >= int(videos_cnt):
-                        Common.logger(log_type, crawler).info(f'搜索词:"{word}",已抓取视频数:{cls.download_cnt}')
+                        Common.logger(log_type, crawler).info(f'搜索词:{user_dict["link"]},已抓取视频数:{cls.download_cnt}')
+                        Common.logging(log_type, crawler, env, f'搜索词:{user_dict["link"]},已抓取视频数:{cls.download_cnt}')
                         cls.download_cnt = 0
                         return

                     if video_element is None:
                         Common.logger(log_type, crawler).info('到底啦~\n')
+                        Common.logging(log_type, crawler, env, '到底啦~\n')
                         return

                     cls.i += 1
-                    cls.search_elements(driver, '//div[@class="vc active__mask"]')
+                    cls.search_elements(driver, '//*[@class="rich-media active__absolute"]')

                     Common.logger(log_type, crawler).info(f'拖动"视频"列表第{cls.i}个至屏幕中间')
+                    Common.logging(log_type, crawler, env, f'拖动"视频"列表第{cls.i}个至屏幕中间')
                     time.sleep(3)
-                    driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})",
-                                          video_element)
+                    driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})", video_element)
                     if len(video_element.find_elements(By.XPATH, "//*[@text='没有更多的搜索结果']")) != 0:
                         Common.logger(log_type, crawler).info("没有更多的搜索结果\n")
+                        Common.logging(log_type, crawler, env, "没有更多的搜索结果\n")
                         return
-                    video_title = video_element.find_elements(By.XPATH, '//div[@class="title ellipsis_2"]/*[2]')[index + i].text
-                    video_url = video_element.find_elements(By.XPATH, '//div[@class="video-player"]')[index+i].get_attribute('src')
-                    cover_url = video_element.find_elements(By.XPATH, '//div[@class="video-player__bd"]')[index+i].get_attribute('style')
+                    video_title = \
+                    video_element.find_elements(By.XPATH, '//div[@class="rich-media__title ellipsis_2"]/span')[
+                        index + i].text[:40]
+                    video_url = video_element.find_elements(By.XPATH, '//div[@class="video-player"]')[
+                        index + i].get_attribute('src')
+                    cover_url = video_element.find_elements(By.XPATH, '//div[@class="video-player__bd"]')[
+                        index + i].get_attribute('style')
                     cover_url = cover_url.split('url("')[-1].split('")')[0]
-                    duration = video_element.find_elements(By.XPATH, '//div[@class="play-mask__text"]/*[2]')[index+i].text
+                    duration = video_element.find_elements(By.XPATH, '//div[@class="video-player-mask__text"]')[
+                        index + i].text
                     duration = int(duration.split(':')[0]) * 60 + int(duration.split(':')[-1])
-                    user_name = video_element.find_elements(By.XPATH, '//p[@class="vc-source__text"]')[index+i].text
-                    avatar_url = video_element.find_elements(By.XPATH, '//div[@class="ui-image-image ui-image vc-source__thumb"]')[index+i].get_attribute('style')
+                    user_name = video_element.find_elements(By.XPATH, '//div[@class="rich-media__source__title"]')[
+                        index + i].text
+                    avatar_url = video_element.find_elements(By.XPATH,
+                                                             '//div[@class="ui-image-image ui-image rich-media__source__thumb"]')[
+                        index + i].get_attribute('style')
                     avatar_url = avatar_url.split('url("')[-1].split('")')[0]
                     out_video_id = md5(video_title.encode('utf8')).hexdigest()
                     out_user_id = md5(user_name.encode('utf8')).hexdigest()
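Two details of the extraction above are easy to read past: duration arrives as an 'mm:ss' overlay label, and both out_video_id and out_user_id are synthesized by hashing display text, so a retitled video or renamed account gets a fresh identity. Restated in isolation (the title value is a placeholder):

from hashlib import md5

def parse_duration(label: str) -> int:
    # '03:25' -> 205 seconds, mirroring the split(':') arithmetic above
    return int(label.split(':')[0]) * 60 + int(label.split(':')[-1])

video_title = "示例标题"  # placeholder display text
out_video_id = md5(video_title.encode('utf8')).hexdigest()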
@@ -317,6 +241,7 @@ class ShipinhaoSearchScheduling:
                         "video_id": out_video_id,
                         "video_id": out_video_id,
                         "play_cnt": 0,
                         "play_cnt": 0,
                         "duration": duration,
                         "duration": duration,
+                        # "duration": 60,
                         "user_name": user_name,
                         "user_name": user_name,
                         "user_id": out_user_id,
                         "user_id": out_user_id,
                         "avatar_url": avatar_url,
                         "avatar_url": avatar_url,
@@ -326,12 +251,16 @@ class ShipinhaoSearchScheduling:
                     }
                     for k, v in video_dict.items():
                         Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    Common.logging(log_type, crawler, env, f"{video_dict}")
                     if video_title is None or video_url is None:
                         Common.logger(log_type, crawler).info("无效视频\n")
+                        Common.logging(log_type, crawler, env, "无效视频\n")
                     elif cls.repeat_out_video_id(log_type, crawler, out_video_id, env) != 0:
                         Common.logger(log_type, crawler).info('视频已下载\n')
+                        Common.logging(log_type, crawler, env, '视频已下载\n')
                     elif cls.repeat_video_url(log_type, crawler, video_url, env) != 0:
                         Common.logger(log_type, crawler).info('视频已下载\n')
+                        Common.logging(log_type, crawler, env, '视频已下载\n')
                     else:
                         video_element.click()
                         time.sleep(3)
@@ -340,183 +269,85 @@ class ShipinhaoSearchScheduling:
                         video_dict["share_cnt"] = video_info_dict["share_cnt"]
                         video_dict["share_cnt"] = video_info_dict["share_cnt"]
                         video_dict["favorite_cnt"] = video_info_dict["favorite_cnt"]
                         video_dict["favorite_cnt"] = video_info_dict["favorite_cnt"]
                         video_dict["comment_cnt"] = video_info_dict["comment_cnt"]
                         video_dict["comment_cnt"] = video_info_dict["comment_cnt"]
-                        video_dict["publish_time_str"] = video_info_dict["publish_time_str"]
+                        video_dict["publish_time_str"] = video_info_dict["publish_time_str"] + " 00:00:00"
                         video_dict["publish_time_stamp"] = video_info_dict["publish_time_stamp"]
                         video_dict["publish_time_stamp"] = video_info_dict["publish_time_stamp"]
-
-                        cls.download_publish(log_type=log_type,
-                                             crawler=crawler,
-                                             word=word,
-                                             rule_dict=rule_dict,
-                                             video_dict=video_dict,
-                                             our_uid=our_uid,
-                                             oss_endpoint=oss_endpoint,
-                                             env=env)
+                        Common.logger(log_type, crawler).info(f'publish_time:{video_dict["publish_time_str"]}')
+                        Common.logging(log_type, crawler, env, f'publish_time:{video_dict["publish_time_str"]}')
+                        if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                            Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                            Common.logging(log_type, crawler, env, "不满足抓取规则\n")
+                        else:
+                            video_dict["out_user_id"] = video_dict["user_id"]
+                            video_dict["platform"] = crawler
+                            video_dict["strategy"] = log_type
+                            video_dict["out_video_id"] = video_dict["video_id"]
+                            video_dict["width"] = 0
+                            video_dict["height"] = 0
+                            video_dict["crawler_rule"] = json.dumps(rule_dict)
+                            video_dict["user_id"] = user_dict["uid"]
+                            video_dict["publish_time"] = video_dict["publish_time_str"]
+                            mq.send_msg(video_dict)
+                            cls.download_cnt += 1
                 except Exception as e:
                 except Exception as e:
-                    Common.logger(log_type, crawler).error(f"抓取单条视频时异常:{e}\n")
+                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+                    Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
 
 
             Common.logger(log_type, crawler).info('已抓取完一组视频,休眠1秒\n')
             Common.logger(log_type, crawler).info('已抓取完一组视频,休眠1秒\n')
+            Common.logging(log_type, crawler, env, '已抓取完一组视频,休眠1秒\n')
             time.sleep(1)
             time.sleep(1)
             index = index + len(video_element_temp)
             index = index + len(video_element_temp)
 
 
-    @classmethod
-    def download_publish(cls, log_type, crawler, word, rule_dict, video_dict, our_uid, oss_endpoint, env):
-        # Download the video
-        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
-
-        # Use ffmpeg to get the video width and height
-        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
-        if ffmpeg_dict is None:
-            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
-            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
-            return
-        video_dict["video_width"] = ffmpeg_dict["width"]
-        video_dict["video_height"] = ffmpeg_dict["height"]
-
-        # Rule check
-        if cls.download_rule(log_type=log_type,
-                             crawler=crawler,
-                             video_dict=video_dict,
-                             rule_dict=rule_dict) is False:
-            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
-            Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
-            return
-
-        # Download the cover image
-        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_dict["video_title"], url=video_dict["cover_url"])
-        # Save video info to "./videos/{download_video_title}/info.txt"
-        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
-
-        # Upload the video
-        Common.logger(log_type, crawler).info("开始上传视频...")
-        our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                  crawler=crawler,
-                                                  strategy="搜索爬虫策略",
-                                                  our_uid=our_uid,
-                                                  env=env,
-                                                  oss_endpoint=oss_endpoint)
-        if env == "dev":
-            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-        else:
-            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-        Common.logger(log_type, crawler).info("视频上传完成")
-
-        if our_video_id is None:
-            try:
-                # Delete the video folder
-                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-                shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id}, 删除成功\n")
-                return
-            except FileNotFoundError:
-                return
-
-        insert_sql = f""" insert into crawler_video(video_id,
-                                                out_user_id,
-                                                platform,
-                                                strategy,
-                                                out_video_id,
-                                                video_title,
-                                                cover_url,
-                                                video_url,
-                                                duration,
-                                                publish_time,
-                                                play_cnt,
-                                                crawler_rule,
-                                                width,
-                                                height)
-                                                values({our_video_id},
-                                                "{video_dict['user_id']}",
-                                                "{cls.platform}",
-                                                "搜索爬虫策略",
-                                                "{video_dict['video_id']}",
-                                                "{video_dict['video_title']}",
-                                                "{video_dict['cover_url']}",
-                                                "{video_dict['video_url']}",
-                                                {int(video_dict['duration'])},
-                                                "{video_dict['publish_time_str']}",
-                                                {int(video_dict['play_cnt'])},
-                                                '{json.dumps(rule_dict)}',
-                                                {int(video_dict['video_width'])},
-                                                {int(video_dict['video_height'])}) """
-        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
-        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
-
-        # Write to the Feishu sheet
-        Feishu.insert_columns(log_type, crawler, "xYWCzf", "ROWS", 1, 2)
-        time.sleep(0.5)
-        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
-                   "搜索爬虫策略",
-                   word,
-                   video_dict["video_title"],
-                   our_video_link,
-                   video_dict["duration"],
-                   video_dict["like_cnt"],
-                   video_dict["share_cnt"],
-                   video_dict["favorite_cnt"],
-                   video_dict["comment_cnt"],
-                   f'{video_dict["video_width"]}*{video_dict["video_height"]}',
-                   video_dict["publish_time_str"],
-                   video_dict["user_name"],
-                   video_dict["avatar_url"],
-                   video_dict["cover_url"],
-                   video_dict["video_url"]]]
-        Feishu.update_values(log_type, crawler, "xYWCzf", "F2:Z2", values)
-        Common.logger(log_type, crawler).info("写入飞书成功\n")
-        cls.download_cnt += 1
-
     @classmethod
     def get_video_info(cls, driver: WebDriver):
         # Common.logger(log_type, crawler).info('切回NATIVE_APP')
         driver.switch_to.context('NATIVE_APP')

         # Likes
-        like_id = driver.find_element(By.ID, 'com.tencent.mm:id/k04')
+        like_id = driver.find_element(By.ID, 'com.tencent.mm:id/k04')  # WeChat version 8.0.30
         like_cnt = like_id.get_attribute('name')
-        if like_cnt == "" or like_cnt == "喜欢" or like_cnt == "火" or cls.is_contain_chinese(like_cnt) is True:
-            like_cnt = 0
-        elif '万' in like_cnt:
+        if '万' in like_cnt:
             like_cnt = int(float(like_cnt.split('万')[0]) * 10000)
         elif '万+' in like_cnt:
             like_cnt = int(float(like_cnt.split('万+')[0]) * 10000)
+        elif like_cnt == "" or like_cnt == "喜欢" or like_cnt == "火" or cls.is_contain_chinese(like_cnt) is True:
+            like_cnt = 0
         else:
             like_cnt = int(float(like_cnt))

         # Shares
         share_id = driver.find_element(By.ID, 'com.tencent.mm:id/jhv')
         share_cnt = share_id.get_attribute('name')
-        if share_cnt == "" or share_cnt == "转发" or cls.is_contain_chinese(share_cnt) is True:
-            share_cnt = 0
-        elif '万' in share_cnt:
+        if '万' in share_cnt:
             share_cnt = int(float(share_cnt.split('万')[0]) * 10000)
         elif '万+' in share_cnt:
             share_cnt = int(float(share_cnt.split('万+')[0]) * 10000)
+        elif share_cnt == "" or share_cnt == "转发" or cls.is_contain_chinese(share_cnt) is True:
+            share_cnt = 0
         else:
             share_cnt = int(float(share_cnt))

         # Favorites
         favorite_id = driver.find_element(By.ID, 'com.tencent.mm:id/fnp')
         favorite_cnt = favorite_id.get_attribute('name')
-        if favorite_cnt == "" or favorite_cnt == "收藏" or favorite_cnt == "推荐" or favorite_cnt == "火" or cls.is_contain_chinese(favorite_cnt) is True:
-            favorite_cnt = 0
-        elif '万' in favorite_cnt:
+        if '万' in favorite_cnt:
             favorite_cnt = int(float(favorite_cnt.split('万')[0]) * 10000)
         elif '万+' in favorite_cnt:
             favorite_cnt = int(float(favorite_cnt.split('万+')[0]) * 10000)
+        elif favorite_cnt == "" or favorite_cnt == "收藏" or favorite_cnt == "推荐" or favorite_cnt == "火" or cls.is_contain_chinese(
+                favorite_cnt) is True:
+            favorite_cnt = 0
         else:
             favorite_cnt = int(float(favorite_cnt))

         # Comments
         comment_id = driver.find_element(By.ID, 'com.tencent.mm:id/bje')
         comment_cnt = comment_id.get_attribute('name')
-        if comment_cnt == "" or comment_cnt == "评论" or cls.is_contain_chinese(comment_cnt) is True:
-            comment_cnt = 0
-        elif '万' in comment_cnt:
+        if '万' in comment_cnt:
             comment_cnt = int(float(comment_cnt.split('万')[0]) * 10000)
         elif '万+' in comment_cnt:
             comment_cnt = int(float(comment_cnt.split('万+')[0]) * 10000)
+        elif comment_cnt == "" or comment_cnt == "评论" or cls.is_contain_chinese(comment_cnt) is True:
+            comment_cnt = 0
         else:
             comment_cnt = int(float(comment_cnt))

@@ -575,84 +406,26 @@ class ShipinhaoSearchScheduling:
         return video_dict

     @classmethod
-    def get_users(cls, log_type, crawler, sheetid, env):
-        while True:
-            user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
-            if user_sheet is None:
-                Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 3秒钟后重试")
-                time.sleep(3)
-                continue
-            our_user_list = []
-            # for i in range(1, len(user_sheet)):
-            for i in range(1, 3):
-                search_word = user_sheet[i][4]
-                our_uid = user_sheet[i][6]
-                tag1 = user_sheet[i][8]
-                tag2 = user_sheet[i][9]
-                tag3 = user_sheet[i][10]
-                tag4 = user_sheet[i][11]
-                tag5 = user_sheet[i][12]
-                tag6 = user_sheet[i][13]
-                tag7 = user_sheet[i][14]
-                Common.logger(log_type, crawler).info(f"正在更新 {search_word} 搜索词信息")
-                if our_uid is None:
-                    default_user = getUser.get_default_user()
-                    # info used to create the our_uid account
-                    user_dict = {
-                        'recommendStatus': -6,
-                        'appRecommendStatus': -6,
-                        'nickName': default_user['nickName'],
-                        'avatarUrl': default_user['avatarUrl'],
-                        'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5},{tag6},{tag7}',
-                    }
-                    our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
-                    if env == 'prod':
-                        our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
-                    else:
-                        our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
-                    Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
-                                         [[our_uid, our_user_link]])
-                    Common.logger(log_type, crawler).info(f'站内用户主页创建成功:{our_user_link}\n')
-                our_user_dict = {
-                    'out_uid': '',
-                    'search_word': search_word,
-                    'our_uid': our_uid,
-                    'our_user_link': f'https://admin.piaoquantv.com/ums/user/{our_uid}/post',
-                }
-                our_user_list.append(our_user_dict)
-
-            return our_user_list
-
-
-    @classmethod
-    def get_search_videos(cls, log_type, crawler, rule_dict, oss_endpoint, env):
-        user_list = cls.get_users(log_type, crawler, "wNgi6Z", env)
-        for user in user_list:
-            cls.i = 0
-            cls.download_cnt = 0
-            search_word = user["search_word"]
-            our_uid = user["our_uid"]
-            Common.logger(log_type, crawler).info(f"开始抓取搜索词:{search_word}")
+    def get_search_videos(cls, log_type, crawler, rule_dict, user_list, env):
+        Common.logger(log_type, crawler).info(f"搜索词总数:{len(user_list)}\n")
+        Common.logging(log_type, crawler, env, f"搜索词总数:{len(user_list)}\n")
+        for user_dict in user_list:
             try:
+                cls.i = 0
+                cls.download_cnt = 0
+                Common.logger(log_type, crawler).info(f"开始抓取 {user_dict['link']}\n")
+                Common.logging(log_type, crawler, env, f"开始抓取 {user_dict['link']}\n")
                 cls.start_wechat(log_type=log_type,
                                  crawler=crawler,
-                                 word=search_word,
                                  rule_dict=rule_dict,
-                                 our_uid=our_uid,
-                                 oss_endpoint=oss_endpoint,
+                                 user_dict=user_dict,
                                  env=env)
             except Exception as e:
-                Common.logger(log_type, crawler).error(f"抓取{user['search_word']}时异常:{e}\n")
+                Common.logger(log_type, crawler).error(f"抓取 {user_dict['link']} 时异常:{e}\n")
+                Common.logging(log_type, crawler, env, f"抓取 {user_dict['link']} 时异常:{e}\n")


 if __name__ == '__main__':
-    # ShipinhaoSearchScheduling.get_search_videos(log_type="search",
-    #                                             crawler="shipinhao",
-    #                                             rule_dict='[{"videos_cnt":{"min":10,"max":0}},{"duration":{"min":30,"max":600}},{"share_cnt":{"min":3000,"max":0}},{"favorite_cnt":{"min":1000,"max":0}},{"publish_time":{"min":1672502400000,"max":0}}]',
-    #                                             oss_endpoint="out",
-    #                                             env="dev")
-    # print(ShipinhaoSearchScheduling.get_users("search", "shipinhao", "wNgi6Z", "dev"))
-    # print((date.today() + timedelta(days=0)).strftime("%Y-%m-%d"))
     print(ShipinhaoSearchScheduling.repeat_out_video_id(log_type="search",
                                                         crawler="shipinhao",
                                                         out_video_id="123",