wangkun 1 year ago
parent
commit
26c0c462ba

BIN
.DS_Store


+ 3 - 2
common/getuser.py

@@ -164,5 +164,6 @@ class getUser:
 
 
 if __name__ == "__main__":
-    uid = getUser.create_uid('log', 'kanyikan', 'youtube爬虫,定向爬虫策略', 'dev')
-    print(uid)
+    # uid = getUser.create_uid('log', 'kanyikan', 'youtube爬虫,定向爬虫策略', 'dev')
+    # print(uid)
+    pass

+ 2 - 2
kuaishou/kuaishou_follow/kuaishou_follow.py

@@ -113,7 +113,7 @@ class KuaiShouFollow:
                 'Accept': '*/*',
                 'Content-Type': 'application/json',
                 'Origin': 'https://www.kuaishou.com',
-                'Cookie': 'kpf=PC_WEB; clientid=3; did=web_34861b8acf21e3e2bdd3fe5b68973e53; kpn=KUAISHOU_VISION',
+                'Cookie': 'kpf=PC_WEB; clientid=3; did=web_9d5ea328df00c51abab6fdcaeb594311; kpn=KUAISHOU_VISION',
                 'Content-Length': '552',
                 'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
                 'Host': 'www.kuaishou.com',
@@ -298,7 +298,7 @@ class KuaiShouFollow:
             'Accept': '*/*',
             'Content-Type': 'application/json',
             'Origin': 'https://www.kuaishou.com',
-            'Cookie': 'kpf=PC_WEB; clientid=3; did=web_34861b8acf21e3e2bdd3fe5b68973e53; kpn=KUAISHOU_VISION',
+            'Cookie': 'kpf=PC_WEB; clientid=3; did=web_9d5ea328df00c51abab6fdcaeb594311; kpn=KUAISHOU_VISION',
             'Content-Length': '1260',
             'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
             'Host': 'www.kuaishou.com',
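
Note: the only change to this file rotates the hardcoded did value inside the Cookie header, in two places. A minimal sketch of reading it from the environment instead, so a rotation does not require a source edit; KUAISHOU_DID is an assumed variable name, not something this repo defines:

import os

def build_kuaishou_cookie() -> str:
    # fall back to the value from this commit when the env var is unset
    did = os.getenv("KUAISHOU_DID", "web_9d5ea328df00c51abab6fdcaeb594311")
    return f"kpf=PC_WEB; clientid=3; did={did}; kpn=KUAISHOU_VISION"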

BIN
scheduling/.DS_Store


BIN
shipinhao/.DS_Store


+ 0 - 21
shipinhao/shipinhao_main/run_shipinhao_search.py

@@ -1,21 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/4/27
-import os
-import sys
-sys.path.append(os.getcwd())
-from common.common import Common
-from shipinhao.shipinhao_search.shipinhao_search import ShipinhaoSearch
-
-
-class SearchMain:
-    @classmethod
-    def search_main(cls, log_type, crawler, env):
-        Common.logger(log_type, crawler).info("开始抓取视频号搜索\n")
-        ShipinhaoSearch.search_all_videos(log_type, crawler, env)
-        Common.del_logs(log_type, crawler)
-        Common.logger(log_type, crawler).info("视频号搜索抓取结束\n")
-
-
-if __name__ == "__main__":
-    SearchMain.search_main("search", "shipinhao", "dev")

+ 45 - 0
shipinhao/shipinhao_main/run_shipinhao_search_scheduling.py

@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/5/5
+import argparse
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.public import task_fun
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from shipinhao.shipinhao_search.shipinhao_search_scheduling import ShipinhaoSearchScheduling
+
+
+def main(log_type, crawler, task, oss_endpoint, env):
+    task_dict = task_fun(task)['task_dict']
+    rule_dict = task_fun(task)['rule_dict']
+    task_id = task_dict['task_id']
+    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+    Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
+    Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
+    Common.logger(log_type, crawler).info(f"用户列表:\n{user_list}")
+    Common.logger(log_type, crawler).info('开始抓取 视频号 搜索爬虫策略\n')
+    ShipinhaoSearchScheduling.get_search_videos(log_type=log_type,
+                                                crawler=crawler,
+                                                rule_dict=rule_dict,
+                                                oss_endpoint=oss_endpoint,
+                                                env=env)
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info('抓取完一轮\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # create the argument parser
+    parser.add_argument('--log_type', type=str)  # add an argument, with its type declared
+    parser.add_argument('--crawler')  # add an argument
+    parser.add_argument('--task')  # add an argument
+    parser.add_argument('--oss_endpoint')  # add an argument
+    parser.add_argument('--env')  # add an argument
+    args = parser.parse_args()  # assign the arguments; they can also be passed from the terminal
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         task=args.task,
+         oss_endpoint=args.oss_endpoint,
+         env=args.env)
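
Note: main() parses the task twice, calling task_fun(task) once for task_dict and again for rule_dict. A minimal sketch of parsing once and destructuring; the stub below only stands in for the real common.public.task_fun, whose return shape is assumed from the code above:

def task_fun(task: str) -> dict:
    # stand-in with the shape the runner implies: one dict holding both parts
    return {"task_dict": {"task_id": 1}, "rule_dict": {"duration": {"min": 30, "max": 600}}}

parsed = task_fun('{"task_id": 1}')  # parse once
task_dict = parsed["task_dict"]
rule_dict = parsed["rule_dict"]
print(task_dict["task_id"], rule_dict)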

+ 0 - 366
shipinhao/shipinhao_search/shipinhao_search.py

@@ -1,366 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/4/25
-import datetime
-import difflib
-import os
-import sys
-import time
-from datetime import date, timedelta
-from hashlib import md5
-from appium import webdriver
-from appium.webdriver.extensions.android.nativekey import AndroidKey
-from appium.webdriver.webdriver import WebDriver
-from selenium.common import NoSuchElementException
-from selenium.webdriver.common.by import By
-sys.path.append(os.getcwd())
-from common.feishu import Feishu
-from common.publish import Publish
-from common.common import Common
-from common.public import get_config_from_mysql
-
-
-class ShipinhaoSearch:
-    i = 0
-
-    @classmethod
-    def start_wechat(cls, log_type, crawler, word, env):
-        Common.logger(log_type, crawler).info('启动微信')
-        if env == "dev":
-            chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver"
-        else:
-            chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
-        caps = {
-            "platformName": "Android",  # 手机操作系统 Android / iOS
-            "deviceName": "Android",  # 连接的设备名(模拟器或真机),安卓可以随便写
-            "platforVersion": "13",  # 手机对应的系统版本(Android 13)
-            "appPackage": "com.tencent.mm",  # 被测APP的包名,乐活圈 Android
-            "appActivity": ".ui.LauncherUI",  # 启动的Activity名
-            "autoGrantPermissions": True,  # 让 appium 自动授权 base 权限,
-            # 如果 noReset 为 True,则该条不生效(该参数为 Android 独有),对应的值为 True 或 False
-            "unicodekeyboard": True,  # 使用自带输入法,输入中文时填True
-            "resetkeyboard": True,  # 执行完程序恢复原来输入法
-            "noReset": True,  # 不重置APP
-            "recreateChromeDriverSessions": True,  # 切换到非 chrome-Driver 会 kill 掉 session,就不需要手动 kill 了
-            "printPageSourceOnFailure": True,  # 找不到元素时,appium log 会完整记录当前页面的 pagesource
-            "newCommandTimeout": 6000,  # 初始等待时间
-            "automationName": "UiAutomator2",  # 使用引擎,默认为 Appium,
-            # 其中 Appium、UiAutomator2、Selendroid、Espresso 用于 Android,XCUITest 用于 iOS
-            "showChromedriverLog": True,
-            # "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
-            "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
-            'enableWebviewDetailsCollection': True,
-            'setWebContentsDebuggingEnabled': True,
-            'chromedriverExecutable': chromedriverExecutable,
-        }
-        driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
-        driver.implicitly_wait(10)
-        # # swipe the page down to reveal the mini-program picker panel
-        # for i in range(120):
-        #     try:
-        #         # finding the WeChat messages TAB means WeChat launched successfully
-        #         if driver.find_elements(By.ID, 'com.tencent.mm:id/f2s'):
-        #             break
-        #         # detect and close the system menu bar
-        #         elif driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view'):
-        #             Common.logger(log_type, crawler).info('发现并关闭系统下拉菜单栏')
-        #             driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view').click()
-        #         else:
-        #             pass
-        #     except NoSuchElementException:
-        #         time.sleep(1)
-        if len(driver.find_elements(By.ID, 'android:id/text1')) != 0:
-            driver.find_elements(By.ID, 'android:id/text1')[0].click()
-        time.sleep(5)
-        cls.search_video(log_type=log_type,
-                         crawler=crawler,
-                         word=word,
-                         driver=driver,
-                         env=env)
-        cls.close_wechat(log_type=log_type,
-                         crawler=crawler,
-                         driver=driver)
-
-    @classmethod
-    def close_wechat(cls, log_type, crawler, driver: WebDriver):
-        driver.quit()
-        Common.logger(log_type, crawler).info(f"微信退出成功\n")
-
-    @classmethod
-    def is_contain_chinese(cls, strword):
-        for ch in strword:
-            if u'\u4e00' <= ch <= u'\u9fff':
-                return True
-        return False
-
-    # find elements
-    @classmethod
-    def search_elements(cls, driver: WebDriver, xpath):
-        time.sleep(1)
-        windowHandles = driver.window_handles
-        for handle in windowHandles:
-            driver.switch_to.window(handle)
-            time.sleep(1)
-            try:
-                elements = driver.find_elements(By.XPATH, xpath)
-                if elements:
-                    return elements
-            except NoSuchElementException:
-                pass
-
-    @classmethod
-    def check_to_webview(cls, log_type, crawler, driver: WebDriver):
-        # Common.logger(log_type, crawler).info('切换到webview')
-        webviews = driver.contexts
-        driver.switch_to.context(webviews[1])
-        time.sleep(1)
-        windowHandles = driver.window_handles
-        for handle in windowHandles:
-            driver.switch_to.window(handle)
-            try:
-                shipinhao_webview = driver.find_element(By.XPATH, '//div[@class="unit"]')
-                if shipinhao_webview:
-                    Common.logger(log_type, crawler).info('切换到视频号 webview 成功')
-                    return "成功"
-            except Exception as e:
-                Common.logger(log_type, crawler).info(f"{e}\n")
-
-    @classmethod
-    def search_video(cls, log_type, crawler, word, driver: WebDriver, env):
-        # tap the WeChat search box and type the search term
-        driver.implicitly_wait(10)
-        driver.find_element(By.ID, 'com.tencent.mm:id/j5t').click()
-        time.sleep(0.5)
-        Common.logger(log_type, crawler).info(f'输入搜索词:{word}')
-        driver.find_element(By.ID, 'com.tencent.mm:id/cd7').clear().send_keys(word)
-        driver.press_keycode(AndroidKey.ENTER)
-        # driver.find_elements(By.ID, 'com.tencent.mm:id/oi4')[0].click()
-        driver.find_element(By.ID, 'com.tencent.mm:id/m94').click()
-        time.sleep(5)
-
-        # switch to the WeChat search-results webview
-        check_to_webview = cls.check_to_webview(log_type, crawler, driver)
-        if check_to_webview is None:
-            Common.logger(log_type, crawler).info("切换到视频号 webview 失败\n")
-            return
-        time.sleep(1)
-
-        # 切换到"视频号"分类
-        shipinhao_tags = cls.search_elements(driver, '//div[@class="unit"]/*[2]')
-        Common.logger(log_type, crawler).info('点击"视频号"分类')
-        shipinhao_tags[0].click()
-        time.sleep(5)
-
-        index = 0
-        while True:
-            if cls.i >= 100:
-                Common.logger(log_type, crawler).info(f'搜索词:"{word}",已抓取视频数:{index}')
-                cls.i = 0
-                return
-            # try:
-            if cls.search_elements(driver, '//*[@class="double-rich double-rich_vertical"]') is None:
-                Common.logger(log_type, crawler).info('窗口已销毁\n')
-                return
-
-            Common.logger(log_type, crawler).info('获取视频列表\n')
-            video_elements = cls.search_elements(driver, '//div[@class="vc active__mask"]')
-            if video_elements is None:
-                Common.logger(log_type, crawler).warning(f'video_elements:{video_elements}')
-                return
-
-            video_element_temp = video_elements[index:]
-            if len(video_element_temp) == 0:
-                Common.logger(log_type, crawler).info('到底啦~~~~~~~~~~~~~\n')
-                return
-
-            for i, video_element in enumerate(video_element_temp):
-                if video_element is None:
-                    Common.logger(log_type, crawler).info('到底啦~\n')
-                    return
-                cls.i += 1
-                cls.search_elements(driver, '//div[@class="vc active__mask"]')
-
-                Common.logger(log_type, crawler).info(f'拖动"视频"列表第{cls.i}个至屏幕中间')
-                time.sleep(3)
-                driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})",
-                                      video_element)
-                if len(video_element.find_elements(By.XPATH, "//*[@text='没有更多的搜索结果']")) != 0:
-                    Common.logger(log_type, crawler).info("没有更多的搜索结果\n")
-                    return
-                video_title = video_element.find_elements(By.XPATH, '//div[@class="title ellipsis_2"]/*[2]')[index + i].text
-                video_url = video_element.find_elements(By.XPATH, '//div[@class="video-player"]')[index+i].get_attribute('src')
-                cover_url = video_element.find_elements(By.XPATH, '//div[@class="video-player__bd"]')[index+i].get_attribute('style')
-                duration = video_element.find_elements(By.XPATH, '//div[@class="play-mask__text"]/*[2]')[index+i].text
-                duration = int(duration.split(':')[0]) * 60 + int(duration.split(':')[-1])
-                user_name = video_element.find_elements(By.XPATH, '//p[@class="vc-source__text"]')[index+i].text
-                avatar_url = video_element.find_elements(By.XPATH, '//div[@class="ui-image-image ui-image vc-source__thumb"]')[index+i].get_attribute('style')
-
-                # Common.logger(log_type, crawler).info(f"video_title:{video_title}")
-                # Common.logger(log_type, crawler).info(f"duration:{duration}")
-                video_element.click()
-                time.sleep(3)
-                video_dict = cls.get_video_info(log_type=log_type,
-                                   crawler=crawler,
-                                   driver=driver)
-                video_dict["video_title"] = video_title
-                video_dict["duration"] = duration
-                video_dict["video_url"] = video_url
-                for k, v in video_dict.items():
-                    Common.logger(log_type, crawler).info(f"{k}:{v}")
-                if video_title in [x for y in Feishu.get_values_batch(log_type, crawler, "xYWCzf") for x in y]:
-                    Common.logger(log_type, crawler).info("视频已存在\n")
-                else:
-                    cls.download_publish(log_type, crawler, word, video_dict)
-
-            Common.logger(log_type, crawler).info('已抓取完一组视频,休眠1秒\n')
-            time.sleep(1)
-            index = index + len(video_element_temp)
-            # except Exception as e:
-            #     Common.logger(log_type, crawler).info(f"get_videoList:{e}\n")
-            #     cls.i = 0
-
-    @classmethod
-    def download_publish(cls, log_type, crawler, word, video_dict):
-        Feishu.insert_columns(log_type, crawler, "xYWCzf", "ROWS", 1, 2)
-        time.sleep(0.5)
-        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
-                   "视频号搜索",
-                   word,
-                   video_dict["video_title"],
-                   video_dict["duration"],
-                   video_dict["like_cnt"],
-                   video_dict["share_cnt"],
-                   video_dict["favorite_cnt"],
-                   video_dict["comment_cnt"],
-                   video_dict["publish_time_str"],
-                   "待获取",
-                   "待获取",
-                   "待获取",
-                   video_dict["video_url"]]]
-        Feishu.update_values(log_type, crawler, "xYWCzf", "F2:Z2", values)
-        Common.logger(log_type, crawler).info("写入飞书成功\n")
-
-    @classmethod
-    def get_video_info(cls, log_type, crawler, driver: WebDriver):
-        # Common.logger(log_type, crawler).info('切回NATIVE_APP')
-        driver.switch_to.context('NATIVE_APP')
-
-        # likes
-        like_id = driver.find_element(By.ID, 'com.tencent.mm:id/k04')
-        like_cnt = like_id.get_attribute('name')
-        if like_cnt == "" or like_cnt == "喜欢" or like_cnt == "火" or cls.is_contain_chinese(like_cnt) is True:
-            like_cnt = 0
-        elif '万' in like_cnt:
-            like_cnt = int(float(like_cnt.split('万')[0]) * 10000)
-        elif '万+' in like_cnt:
-            like_cnt = int(float(like_cnt.split('万+')[0]) * 10000)
-        else:
-            like_cnt = int(float(like_cnt))
-
-        # shares
-        share_id = driver.find_element(By.ID, 'com.tencent.mm:id/jhv')
-        share_cnt = share_id.get_attribute('name')
-        if share_cnt == "" or share_cnt == "转发" or cls.is_contain_chinese(share_cnt) is True:
-            share_cnt = 0
-        elif '万' in share_cnt:
-            share_cnt = int(float(share_cnt.split('万')[0]) * 10000)
-        elif '万+' in share_cnt:
-            share_cnt = int(float(share_cnt.split('万+')[0]) * 10000)
-        else:
-            share_cnt = int(float(share_cnt))
-
-        # favorites
-        favorite_id = driver.find_element(By.ID, 'com.tencent.mm:id/fnp')
-        favorite_cnt = favorite_id.get_attribute('name')
-        if favorite_cnt == "" or favorite_cnt == "收藏" or favorite_cnt == "推荐" or favorite_cnt == "火" or cls.is_contain_chinese(favorite_cnt) is True:
-            favorite_cnt = 0
-        elif '万' in favorite_cnt:
-            favorite_cnt = int(float(favorite_cnt.split('万')[0]) * 10000)
-        elif '万+' in favorite_cnt:
-            favorite_cnt = int(float(favorite_cnt.split('万+')[0]) * 10000)
-        else:
-            favorite_cnt = int(float(favorite_cnt))
-
-        # comments
-        comment_id = driver.find_element(By.ID, 'com.tencent.mm:id/bje')
-        comment_cnt = comment_id.get_attribute('name')
-        if comment_cnt == "" or comment_cnt == "评论" or cls.is_contain_chinese(comment_cnt) is True:
-            comment_cnt = 0
-        elif '万' in comment_cnt:
-            comment_cnt = int(float(comment_cnt.split('万')[0]) * 10000)
-        elif '万+' in comment_cnt:
-            comment_cnt = int(float(comment_cnt.split('万+')[0]) * 10000)
-        else:
-            comment_cnt = int(float(comment_cnt))
-
-        # publish time
-        comment_id.click()
-        time.sleep(1)
-        publish_time = driver.find_element(By.ID, "com.tencent.mm:id/bre").get_attribute("name")
-        if "天前" in publish_time:
-            days = int(publish_time.replace("天前", ""))
-            publish_time_str = (date.today() + timedelta(days=-days)).strftime("%Y-%m-%d")
-        elif "年" in publish_time:
-            # publish_time_str = publish_time.replace("年", "-").replace("月", "-").replace("日", "")
-            year_str = publish_time.split("年")[0]
-            month_str = publish_time.split("年")[-1].split("月")[0]
-            day_str = publish_time.split("月")[-1].split("日")[0]
-            if int(month_str) < 10:
-                month_str = f"0{month_str}"
-            if int(day_str) < 10:
-                day_str = f"0{day_str}"
-            publish_time_str = f"{year_str}-{month_str}-{day_str}"
-        else:
-            year_str = str(datetime.datetime.now().year)
-            month_str = publish_time.split("月")[0]
-            day_str = publish_time.split("月")[-1].split("日")[0]
-            if int(month_str) < 10:
-                month_str = f"0{month_str}"
-            if int(day_str) < 10:
-                day_str = f"0{day_str}"
-            publish_time_str = f"{year_str}-{month_str}-{day_str}"
-            # publish_time_str = f'2023-{publish_time.replace("月", "-").replace("日", "")}'
-        publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y-%m-%d")))
-
-        # collapse the comment panel
-        # Common.logger(log_type, crawler).info("收起评论")
-        driver.find_element(By.ID, "com.tencent.mm:id/be_").click()
-        time.sleep(0.5)
-        # back to the webview
-        # Common.logger(log_type, crawler).info(f"操作手机返回按键")
-        driver.find_element(By.ID, "com.tencent.mm:id/a2z").click()
-        time.sleep(0.5)
-        # driver.press_keycode(AndroidKey.BACK)
-        # cls.check_to_webview(log_type=log_type, crawler=crawler, driver=driver)
-        webviews = driver.contexts
-        driver.switch_to.context(webviews[1])
-
-        video_dict = {
-            "like_cnt": like_cnt,
-            "share_cnt": share_cnt,
-            "favorite_cnt": favorite_cnt,
-            "comment_cnt": comment_cnt,
-            "publish_time_str": publish_time_str,
-            "publish_time_stamp": publish_time_stamp,
-        }
-        return video_dict
-
-    @classmethod
-    def search_all_videos(cls, log_type, crawler, env):
-        word_list = get_config_from_mysql(log_type, crawler, env, "search_word", action="")
-        for word in word_list:
-            cls.i = 0
-            Common.logger(log_type, crawler).info(f"开始抓取搜索词:{word}")
-            # try:
-            cls.start_wechat(log_type=log_type,
-                             crawler=crawler,
-                             word=word,
-                             env=env)
-            # except Exception as e:
-            #     Common.logger(log_type, crawler).error(f"search_video:{e}\n")
-
-
-if __name__ == '__main__':
-    ShipinhaoSearch.search_all_videos(log_type="search", crawler="shipinhao", env="dev")
-    # print(datetime.datetime.now().year)
-    pass
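
Note: both this deleted module and its scheduling replacement below hardcode user-specific chromedriver paths per environment. A sketch of resolving the path from an environment variable with the committed paths as fallbacks; CHROMEDRIVER_PATH is an assumed name, not something this repo defines:

import os

def get_chromedriver_path(env: str) -> str:
    # an explicit override wins; otherwise use the per-env defaults from this commit
    default = ("/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver"
               if env == "dev"
               else "/Users/piaoquan/Downloads/chromedriver")
    return os.getenv("CHROMEDRIVER_PATH", default)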

+ 645 - 0
shipinhao/shipinhao_search/shipinhao_search_scheduling.py

@@ -0,0 +1,645 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/25
+import datetime
+import json
+import os
+import shutil
+import sys
+import time
+from datetime import date, timedelta
+from hashlib import md5
+from appium import webdriver
+from appium.webdriver.extensions.android.nativekey import AndroidKey
+from appium.webdriver.webdriver import WebDriver
+from selenium.common import NoSuchElementException
+from selenium.webdriver.common.by import By
+sys.path.append(os.getcwd())
+from common.feishu import Feishu
+from common.publish import Publish
+from common.common import Common
+from common.getuser import getUser
+from common.scheduling_db import MysqlHelper
+
+
+class ShipinhaoSearchScheduling:
+    platform = "视频号"
+    i = 0
+    download_cnt = 0
+
+    # baseline threshold rules
+    @staticmethod
+    def download_rule(log_type, crawler, video_dict, rule_dict):
+        """
+        Basic gating rules for downloading a video
+        :param log_type: log type
+        :param crawler: which crawler
+        :param video_dict: video info, as a dict
+        :param rule_dict: rule info, as a dict
+        :return: True if the rules are satisfied, otherwise False
+        """
+        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
+        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
+        if rule_play_cnt_max == 0:
+            rule_play_cnt_max = 100000000
+
+        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
+        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
+        if rule_duration_max == 0:
+            rule_duration_max = 100000000
+
+        rule_period_min = rule_dict.get('period', {}).get('min', 0)
+        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
+        # if rule_period_max == 0:
+        #     rule_period_max = 100000000
+
+        # rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
+        # rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
+        # if rule_fans_cnt_max == 0:
+        #     rule_fans_cnt_max = 100000000
+
+        # rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
+        # rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
+        # if rule_videos_cnt_max == 0:
+        #     rule_videos_cnt_max = 100000000
+
+        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
+        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
+        if rule_like_cnt_max == 0:
+            rule_like_cnt_max = 100000000
+
+        rule_width_min = rule_dict.get('width', {}).get('min', 0)
+        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
+        if rule_width_max == 0:
+            rule_width_max = 100000000
+
+        rule_height_min = rule_dict.get('height', {}).get('min', 0)
+        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
+        if rule_height_max == 0:
+            rule_height_max = 100000000
+
+        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
+        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
+        if rule_share_cnt_max == 0:
+            rule_share_cnt_max = 100000000
+
+        rule_favorite_cnt_min = rule_dict.get('favorite_cnt', {}).get('min', 0)
+        rule_favorite_cnt_max = rule_dict.get('favorite_cnt', {}).get('max', 100000000)
+        if rule_favorite_cnt_max == 0:
+            rule_favorite_cnt_max = 100000000
+
+        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
+        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
+        if rule_comment_cnt_max == 0:
+            rule_comment_cnt_max = 100000000
+
+        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
+        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 0)
+        if rule_publish_time_max == 0:
+            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
+
+        Common.logger(log_type, crawler).info(
+            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_favorite_cnt_max:{int(rule_favorite_cnt_max)} >= favorite_cnt:{int(video_dict["favorite_cnt"])} >= rule_favorite_cnt_min:{int(rule_favorite_cnt_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
+        Common.logger(log_type, crawler).info(
+            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
+
+        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
+                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
+                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
+                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
+                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
+                and int(rule_favorite_cnt_max) >= int(video_dict['favorite_cnt']) >= int(rule_favorite_cnt_min) \
+                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
+                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
+                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min):
+            return True
+        else:
+            return False
+
+    @classmethod
+    def start_wechat(cls, log_type, crawler, word, rule_dict, our_uid, oss_endpoint, env):
+        Common.logger(log_type, crawler).info('启动微信')
+        if env == "dev":
+            chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver"
+        else:
+            chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
+        caps = {
+            "platformName": "Android",  # 手机操作系统 Android / iOS
+            "deviceName": "Android",  # 连接的设备名(模拟器或真机),安卓可以随便写
+            "platforVersion": "13",  # 手机对应的系统版本(Android 13)
+            "appPackage": "com.tencent.mm",  # 被测APP的包名,乐活圈 Android
+            "appActivity": ".ui.LauncherUI",  # 启动的Activity名
+            "autoGrantPermissions": True,  # 让 appium 自动授权 base 权限,
+            # 如果 noReset 为 True,则该条不生效(该参数为 Android 独有),对应的值为 True 或 False
+            "unicodekeyboard": True,  # 使用自带输入法,输入中文时填True
+            "resetkeyboard": True,  # 执行完程序恢复原来输入法
+            "noReset": True,  # 不重置APP
+            "recreateChromeDriverSessions": True,  # 切换到非 chrome-Driver 会 kill 掉 session,就不需要手动 kill 了
+            "printPageSourceOnFailure": True,  # 找不到元素时,appium log 会完整记录当前页面的 pagesource
+            "newCommandTimeout": 6000,  # 初始等待时间
+            "automationName": "UiAutomator2",  # 使用引擎,默认为 Appium,
+            # 其中 Appium、UiAutomator2、Selendroid、Espresso 用于 Android,XCUITest 用于 iOS
+            "showChromedriverLog": True,
+            # "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
+            "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
+            'enableWebviewDetailsCollection': True,
+            'setWebContentsDebuggingEnabled': True,
+            'chromedriverExecutable': chromedriverExecutable,
+        }
+        driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
+        driver.implicitly_wait(10)
+        if len(driver.find_elements(By.ID, 'android:id/text1')) != 0:
+            driver.find_elements(By.ID, 'android:id/text1')[0].click()
+        time.sleep(5)
+        cls.search_video(log_type=log_type,
+                         crawler=crawler,
+                         word=word,
+                         rule_dict=rule_dict,
+                         our_uid=our_uid,
+                         oss_endpoint=oss_endpoint,
+                         driver=driver,
+                         env=env)
+        cls.close_wechat(log_type=log_type,
+                         crawler=crawler,
+                         driver=driver)
+
+    @classmethod
+    def close_wechat(cls, log_type, crawler, driver: WebDriver):
+        driver.quit()
+        Common.logger(log_type, crawler).info(f"微信退出成功\n")
+
+    @classmethod
+    def is_contain_chinese(cls, strword):
+        for ch in strword:
+            if u'\u4e00' <= ch <= u'\u9fff':
+                return True
+        return False
+
+    # find elements
+    @classmethod
+    def search_elements(cls, driver: WebDriver, xpath):
+        time.sleep(1)
+        windowHandles = driver.window_handles
+        for handle in windowHandles:
+            driver.switch_to.window(handle)
+            time.sleep(1)
+            try:
+                elements = driver.find_elements(By.XPATH, xpath)
+                if elements:
+                    return elements
+            except NoSuchElementException:
+                pass
+
+    @classmethod
+    def check_to_webview(cls, log_type, crawler, driver: WebDriver):
+        # Common.logger(log_type, crawler).info('切换到webview')
+        webviews = driver.contexts
+        driver.switch_to.context(webviews[1])
+        time.sleep(1)
+        windowHandles = driver.window_handles
+        for handle in windowHandles:
+            driver.switch_to.window(handle)
+            try:
+                shipinhao_webview = driver.find_element(By.XPATH, '//div[@class="unit"]')
+                if shipinhao_webview:
+                    Common.logger(log_type, crawler).info('切换到视频号 webview 成功')
+                    return "成功"
+            except Exception as e:
+                Common.logger(log_type, crawler).info(f"{e}\n")
+
+    @classmethod
+    def repeat_out_video_id(cls, log_type, crawler, out_video_id, env):
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{out_video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def repeat_video_url(cls, log_type, crawler, video_url, env):
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and video_url="{video_url}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def search_video(cls, log_type, crawler, word, rule_dict, driver: WebDriver, our_uid, oss_endpoint, env):
+        # tap the WeChat search box and type the search term
+        driver.implicitly_wait(10)
+        driver.find_element(By.ID, 'com.tencent.mm:id/j5t').click()
+        time.sleep(0.5)
+        Common.logger(log_type, crawler).info(f'输入搜索词:{word}')
+        driver.find_element(By.ID, 'com.tencent.mm:id/cd7').clear().send_keys(word)
+        driver.press_keycode(AndroidKey.ENTER)
+        # driver.find_elements(By.ID, 'com.tencent.mm:id/oi4')[0].click()
+        driver.find_element(By.ID, 'com.tencent.mm:id/m94').click()
+        time.sleep(5)
+
+        # switch to the WeChat search-results webview
+        check_to_webview = cls.check_to_webview(log_type, crawler, driver)
+        if check_to_webview is None:
+            Common.logger(log_type, crawler).info("切换到视频号 webview 失败\n")
+            return
+        time.sleep(1)
+
+        # 切换到"视频号"分类
+        shipinhao_tags = cls.search_elements(driver, '//div[@class="unit"]/*[2]')
+        Common.logger(log_type, crawler).info('点击"视频号"分类')
+        shipinhao_tags[0].click()
+        time.sleep(5)
+
+        videos_cnt = rule_dict.get('videos_cnt', {}).get('min', 0)
+        index = 0
+        while True:
+            if cls.download_cnt >= int(videos_cnt):
+                Common.logger(log_type, crawler).info(f'搜索词:"{word}",已抓取视频数:{index}')
+                cls.download_cnt = 0
+                return
+            # try:
+            if cls.search_elements(driver, '//*[@class="double-rich double-rich_vertical"]') is None:
+                Common.logger(log_type, crawler).info('窗口已销毁\n')
+                return
+
+            Common.logger(log_type, crawler).info('获取视频列表\n')
+            video_elements = cls.search_elements(driver, '//div[@class="vc active__mask"]')
+            if video_elements is None:
+                Common.logger(log_type, crawler).warning(f'video_elements:{video_elements}')
+                return
+
+            video_element_temp = video_elements[index:]
+            if len(video_element_temp) == 0:
+                Common.logger(log_type, crawler).info('到底啦~~~~~~~~~~~~~\n')
+                return
+
+            for i, video_element in enumerate(video_element_temp):
+                if video_element is None:
+                    Common.logger(log_type, crawler).info('到底啦~\n')
+                    return
+                cls.i += 1
+                cls.search_elements(driver, '//div[@class="vc active__mask"]')
+
+                Common.logger(log_type, crawler).info(f'拖动"视频"列表第{cls.i}个至屏幕中间')
+                time.sleep(3)
+                driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})",
+                                      video_element)
+                if len(video_element.find_elements(By.XPATH, "//*[@text='没有更多的搜索结果']")) != 0:
+                    Common.logger(log_type, crawler).info("没有更多的搜索结果\n")
+                    return
+                video_title = video_element.find_elements(By.XPATH, '//div[@class="title ellipsis_2"]/*[2]')[index + i].text
+                video_url = video_element.find_elements(By.XPATH, '//div[@class="video-player"]')[index+i].get_attribute('src')
+                cover_url = video_element.find_elements(By.XPATH, '//div[@class="video-player__bd"]')[index+i].get_attribute('style')
+                cover_url = cover_url.split('url("')[-1].split('")')[0]
+                duration = video_element.find_elements(By.XPATH, '//div[@class="play-mask__text"]/*[2]')[index+i].text
+                duration = int(duration.split(':')[0]) * 60 + int(duration.split(':')[-1])
+                user_name = video_element.find_elements(By.XPATH, '//p[@class="vc-source__text"]')[index+i].text
+                avatar_url = video_element.find_elements(By.XPATH, '//div[@class="ui-image-image ui-image vc-source__thumb"]')[index+i].get_attribute('style')
+                avatar_url = avatar_url.split('url("')[-1].split('")')[0]
+                out_video_id = md5(video_title.encode('utf8')).hexdigest()
+                out_user_id = md5(user_name.encode('utf8')).hexdigest()
+
+                video_dict = {
+                    "video_title": video_title,
+                    "video_id": out_video_id,
+                    "play_cnt": 0,
+                    "duration": duration,
+                    "user_name": user_name,
+                    "user_id": out_user_id,
+                    "avatar_url": avatar_url,
+                    "cover_url": cover_url,
+                    "video_url": video_url,
+                    "session": f"shipinhao-search-{int(time.time())}"
+                }
+                for k, v in video_dict.items():
+                    Common.logger(log_type, crawler).info(f"{k}:{v}")
+                if video_title is None or video_url is None:
+                    Common.logger(log_type, crawler).info("无效视频\n")
+                elif cls.repeat_out_video_id(log_type, crawler, out_video_id, env) != 0:
+                    Common.logger(log_type, crawler).info('视频已下载\n')
+                elif cls.repeat_video_url(log_type, crawler, video_url, env) != 0:
+                    Common.logger(log_type, crawler).info('视频已下载\n')
+                else:
+                    video_element.click()
+                    time.sleep(3)
+                    video_info_dict = cls.get_video_info(driver)
+                    video_dict["like_cnt"] = video_info_dict["like_cnt"]
+                    video_dict["share_cnt"] = video_info_dict["share_cnt"]
+                    video_dict["favorite_cnt"] = video_info_dict["favorite_cnt"]
+                    video_dict["comment_cnt"] = video_info_dict["comment_cnt"]
+                    video_dict["publish_time_str"] = video_info_dict["publish_time_str"]
+                    video_dict["publish_time_stamp"] = video_info_dict["publish_time_stamp"]
+
+                    cls.download_publish(log_type=log_type,
+                                         crawler=crawler,
+                                         word=word,
+                                         rule_dict=rule_dict,
+                                         video_dict=video_dict,
+                                         our_uid=our_uid,
+                                         oss_endpoint=oss_endpoint,
+                                         env=env)
+
+            Common.logger(log_type, crawler).info('已抓取完一组视频,休眠1秒\n')
+            time.sleep(1)
+            index = index + len(video_element_temp)
+            # except Exception as e:
+            #     Common.logger(log_type, crawler).info(f"get_videoList:{e}\n")
+            #     cls.i = 0
+
+    @classmethod
+    def download_publish(cls, log_type, crawler, word, rule_dict, video_dict, our_uid, oss_endpoint, env):
+        # download the video
+        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
+
+        # get the video width/height with ffmpeg
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+        if ffmpeg_dict is None:
+            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
+            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            return
+        video_dict["video_width"] = ffmpeg_dict["width"]
+        video_dict["video_height"] = ffmpeg_dict["height"]
+
+        # rule check
+        if cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
+            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
+            Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
+            return
+
+        # download the cover image
+        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_dict["video_title"], url=video_dict["cover_url"])
+        # save the video info to "./videos/{download_video_title}/info.txt"
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # upload the video
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                  crawler=crawler,
+                                                  strategy="搜索爬虫策略",
+                                                  our_uid=our_uid,
+                                                  env=env,
+                                                  oss_endpoint=oss_endpoint)
+        if env == "dev":
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        else:
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        Common.logger(log_type, crawler).info("视频上传完成")
+
+        if our_video_id is None:
+            # delete the video folder
+            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+            return
+
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                out_user_id,
+                                                platform,
+                                                strategy,
+                                                out_video_id,
+                                                video_title,
+                                                cover_url,
+                                                video_url,
+                                                duration,
+                                                publish_time,
+                                                play_cnt,
+                                                crawler_rule,
+                                                width,
+                                                height)
+                                                values({our_video_id},
+                                                "{video_dict['user_id']}",
+                                                "{cls.platform}",
+                                                "搜索爬虫策略",
+                                                "{video_dict['video_id']}",
+                                                "{video_dict['video_title']}",
+                                                "{video_dict['cover_url']}",
+                                                "{video_dict['video_url']}",
+                                                {int(video_dict['duration'])},
+                                                "{video_dict['publish_time_str']}",
+                                                {int(video_dict['play_cnt'])},
+                                                '{json.dumps(rule_dict)}',
+                                                {int(video_dict['video_width'])},
+                                                {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+
+        # write to the Feishu sheet
+        Feishu.insert_columns(log_type, crawler, "xYWCzf", "ROWS", 1, 2)
+        time.sleep(0.5)
+        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
+                   "搜索爬虫策略",
+                   word,
+                   video_dict["video_title"],
+                   our_video_link,
+                   video_dict["duration"],
+                   video_dict["like_cnt"],
+                   video_dict["share_cnt"],
+                   video_dict["favorite_cnt"],
+                   video_dict["comment_cnt"],
+                   f'{video_dict["video_width"]}*{video_dict["video_height"]}',
+                   video_dict["publish_time_str"],
+                   video_dict["user_name"],
+                   video_dict["avatar_url"],
+                   video_dict["cover_url"],
+                   video_dict["video_url"]]]
+        Feishu.update_values(log_type, crawler, "xYWCzf", "F2:Z2", values)
+        Common.logger(log_type, crawler).info("写入飞书成功\n")
+        cls.download_cnt += 1
+
+    @classmethod
+    def get_video_info(cls, driver: WebDriver):
+        # Common.logger(log_type, crawler).info('切回NATIVE_APP')
+        driver.switch_to.context('NATIVE_APP')
+
+        # likes
+        like_id = driver.find_element(By.ID, 'com.tencent.mm:id/k04')
+        like_cnt = like_id.get_attribute('name')
+        if like_cnt == "" or like_cnt == "喜欢" or like_cnt == "火" or cls.is_contain_chinese(like_cnt) is True:
+            like_cnt = 0
+        elif '万' in like_cnt:
+            like_cnt = int(float(like_cnt.split('万')[0]) * 10000)
+        elif '万+' in like_cnt:
+            like_cnt = int(float(like_cnt.split('万+')[0]) * 10000)
+        else:
+            like_cnt = int(float(like_cnt))
+
+        # shares
+        share_id = driver.find_element(By.ID, 'com.tencent.mm:id/jhv')
+        share_cnt = share_id.get_attribute('name')
+        if share_cnt == "" or share_cnt == "转发" or cls.is_contain_chinese(share_cnt) is True:
+            share_cnt = 0
+        elif '万' in share_cnt:
+            share_cnt = int(float(share_cnt.split('万')[0]) * 10000)
+        elif '万+' in share_cnt:
+            share_cnt = int(float(share_cnt.split('万+')[0]) * 10000)
+        else:
+            share_cnt = int(float(share_cnt))
+
+        # favorites
+        favorite_id = driver.find_element(By.ID, 'com.tencent.mm:id/fnp')
+        favorite_cnt = favorite_id.get_attribute('name')
+        if favorite_cnt == "" or favorite_cnt == "收藏" or favorite_cnt == "推荐" or favorite_cnt == "火" or cls.is_contain_chinese(favorite_cnt) is True:
+            favorite_cnt = 0
+        elif '万' in favorite_cnt:
+            favorite_cnt = int(float(favorite_cnt.split('万')[0]) * 10000)
+        elif '万+' in favorite_cnt:
+            favorite_cnt = int(float(favorite_cnt.split('万+')[0]) * 10000)
+        else:
+            favorite_cnt = int(float(favorite_cnt))
+
+        # comments
+        comment_id = driver.find_element(By.ID, 'com.tencent.mm:id/bje')
+        comment_cnt = comment_id.get_attribute('name')
+        if comment_cnt == "" or comment_cnt == "评论" or cls.is_contain_chinese(comment_cnt) is True:
+            comment_cnt = 0
+        elif '万' in comment_cnt:
+            comment_cnt = int(float(comment_cnt.split('万')[0]) * 10000)
+        elif '万+' in comment_cnt:
+            comment_cnt = int(float(comment_cnt.split('万+')[0]) * 10000)
+        else:
+            comment_cnt = int(float(comment_cnt))
+
+        # publish time
+        comment_id.click()
+        time.sleep(1)
+        publish_time = driver.find_element(By.ID, "com.tencent.mm:id/bre").get_attribute("name")
+        if "秒" in publish_time or "分钟" in publish_time or "小时" in publish_time:
+            publish_time_str = (date.today() + timedelta(days=0)).strftime("%Y-%m-%d")
+        elif "天前" in publish_time:
+            days = int(publish_time.replace("天前", ""))
+            publish_time_str = (date.today() + timedelta(days=-days)).strftime("%Y-%m-%d")
+        elif "年" in publish_time:
+            # publish_time_str = publish_time.replace("年", "-").replace("月", "-").replace("日", "")
+            year_str = publish_time.split("年")[0]
+            month_str = publish_time.split("年")[-1].split("月")[0]
+            day_str = publish_time.split("月")[-1].split("日")[0]
+            if int(month_str) < 10:
+                month_str = f"0{month_str}"
+            if int(day_str) < 10:
+                day_str = f"0{day_str}"
+            publish_time_str = f"{year_str}-{month_str}-{day_str}"
+        else:
+            year_str = str(datetime.datetime.now().year)
+            month_str = publish_time.split("月")[0]
+            day_str = publish_time.split("月")[-1].split("日")[0]
+            if int(month_str) < 10:
+                month_str = f"0{month_str}"
+            if int(day_str) < 10:
+                day_str = f"0{day_str}"
+            publish_time_str = f"{year_str}-{month_str}-{day_str}"
+            # publish_time_str = f'2023-{publish_time.replace("月", "-").replace("日", "")}'
+        publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y-%m-%d")))
+
+        # collapse the comment panel
+        # Common.logger(log_type, crawler).info("收起评论")
+        driver.find_element(By.ID, "com.tencent.mm:id/be_").click()
+        time.sleep(0.5)
+        # back to the webview
+        # Common.logger(log_type, crawler).info(f"操作手机返回按键")
+        driver.find_element(By.ID, "com.tencent.mm:id/a2z").click()
+        time.sleep(0.5)
+        # driver.press_keycode(AndroidKey.BACK)
+        # cls.check_to_webview(log_type=log_type, crawler=crawler, driver=driver)
+        webviews = driver.contexts
+        driver.switch_to.context(webviews[1])
+
+        video_dict = {
+            "like_cnt": like_cnt,
+            "share_cnt": share_cnt,
+            "favorite_cnt": favorite_cnt,
+            "comment_cnt": comment_cnt,
+            "publish_time_str": publish_time_str,
+            "publish_time_stamp": publish_time_stamp,
+        }
+        return video_dict
+
+    @classmethod
+    def get_users(cls, log_type, crawler, sheetid, env):
+        while True:
+            user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+            if user_sheet is None:
+                Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 3秒钟后重试")
+                time.sleep(3)
+                continue
+            our_user_list = []
+            # for i in range(1, len(user_sheet)):
+            for i in range(1, 4):
+                search_word = user_sheet[i][4]
+                our_uid = user_sheet[i][6]
+                tag1 = user_sheet[i][8]
+                tag2 = user_sheet[i][9]
+                tag3 = user_sheet[i][10]
+                tag4 = user_sheet[i][11]
+                tag5 = user_sheet[i][12]
+                tag6 = user_sheet[i][13]
+                tag7 = user_sheet[i][14]
+                Common.logger(log_type, crawler).info(f"正在更新 {search_word} 搜索词信息")
+                if our_uid is None:
+                    default_user = getUser.get_default_user()
+                    # info used to create our_uid
+                    user_dict = {
+                        'recommendStatus': -6,
+                        'appRecommendStatus': -6,
+                        'nickName': default_user['nickName'],
+                        'avatarUrl': default_user['avatarUrl'],
+                        'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5},{tag6},{tag7}',
+                    }
+                    our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
+                    if env == 'prod':
+                        our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
+                    else:
+                        our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
+                    Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
+                                         [[our_uid, our_user_link]])
+                    Common.logger(log_type, crawler).info(f'站内用户主页创建成功:{our_user_link}\n')
+                our_user_dict = {
+                    'out_uid': '',
+                    'search_word': search_word,
+                    'our_uid': our_uid,
+                    'our_user_link': f'https://admin.piaoquantv.com/ums/user/{our_uid}/post',
+                }
+                our_user_list.append(our_user_dict)
+
+            return our_user_list
+
+
+    @classmethod
+    def get_search_videos(cls, log_type, crawler, rule_dict, oss_endpoint, env):
+        user_list = cls.get_users(log_type, crawler, "wNgi6Z", env)
+        for user in user_list:
+            cls.i = 0
+            cls.download_cnt = 0
+            search_word = user["search_word"]
+            our_uid = user["our_uid"]
+            Common.logger(log_type, crawler).info(f"开始抓取搜索词:{search_word}")
+            # try:
+            cls.start_wechat(log_type=log_type,
+                             crawler=crawler,
+                             word=search_word,
+                             rule_dict=rule_dict,
+                             our_uid=our_uid,
+                             oss_endpoint=oss_endpoint,
+                             env=env)
+            # except Exception as e:
+            #     Common.logger(log_type, crawler).error(f"search_video:{e}\n")
+
+
+if __name__ == '__main__':
+    # ShipinhaoSearchScheduling.get_search_videos(log_type="search",
+    #                                             crawler="shipinhao",
+    #                                             rule_dict='[{"videos_cnt":{"min":10,"max":0}},{"duration":{"min":30,"max":600}},{"share_cnt":{"min":3000,"max":0}},{"favorite_cnt":{"min":1000,"max":0}},{"publish_time":{"min":1672502400000,"max":0}}]',
+    #                                             oss_endpoint="out",
+    #                                             env="dev")
+    # print(ShipinhaoSearchScheduling.get_users("search", "shipinhao", "wNgi6Z", "dev"))
+    print((date.today() + timedelta(days=0)).strftime("%Y-%m-%d"))
+    pass
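
Note: in get_video_info (both the deleted and the added version), is_contain_chinese() runs before the '万' branches. Since '万' itself falls in the \u4e00-\u9fff range, any count rendered like "3.2万" is zeroed and the '万'/'万+' branches never execute ('万+' is additionally shadowed by '万'). A minimal sketch of the parsing factored into one helper that checks '万' first; this is a suggested fix, not the committed behavior:

def parse_count(raw: str) -> int:
    # handle '3.2万' style counts before the generic Chinese-text check;
    # split('万') also drops a trailing '+', so '万+' needs no branch of its own
    if raw and "万" in raw:
        return int(float(raw.split("万")[0]) * 10000)
    # empty strings and text placeholders such as '喜欢' or '转发' count as 0
    if raw == "" or any("\u4e00" <= ch <= "\u9fff" for ch in raw):
        return 0
    return int(float(raw))

print(parse_count("3.2万"))  # 32000
print(parse_count("喜欢"))   # 0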

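Note: download_rule repeats the same min/max clamping for each of nine metrics. A compact sketch expressing the check once, assuming the rule_dict shape shown above (max == 0 meaning "no cap"):

def in_range(rule_dict: dict, key: str, value, default_max: int = 100000000) -> bool:
    rule = rule_dict.get(key, {})
    low = rule.get("min", 0)
    high = rule.get("max", default_max) or default_max  # 0 means uncapped
    return int(low) <= int(float(value)) <= int(high)

rules = {"duration": {"min": 30, "max": 600}}
print(in_range(rules, "duration", 45.0))   # True
print(in_range(rules, "play_cnt", 999))    # True: unspecified metrics pass
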
BIN
shipinhao/videos/.DS_Store