wangkun 1 year ago
Parent
Commit 06d79125df

+ 12 - 1
README.MD

@@ -224,5 +224,16 @@ ps aux | grep search_key_mac | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep gongzhonghao | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep xiaoniangao | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep Appium.app | grep -v grep | awk '{print $2}' | xargs kill -9
-ps aux | grep shipinhao | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep shipinhao_search | grep -v grep | awk '{print $2}' | xargs kill -9
+```
+
+```commandline
+# Shipinhao (WeChat Channels) search
+# Production (crontab entry)
+00 00 * * * /bin/sh /Users/piaoquan/Desktop/piaoquan_crawler/shipinhao/shipinhao_main/run_shipinhao.sh shipinhao/shipinhao_main/run_shipinhao_search.py --log_type="search" --crawler="shipinhao" --env="prod"
+# Local debugging
+sh shipinhao/shipinhao_main/run_shipinhao.sh shipinhao/shipinhao_main/run_shipinhao_search.py --log_type="search" --crawler="shipinhao" --env="dev"
+# Check the process
+ps aux | grep shipinhao_search
+ps aux | grep shipinhao_search | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
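The `ps aux | grep shipinhao_search | ... | xargs kill -9` pipeline above is the manual way to stop a stuck search run. For contexts where a shell one-liner is awkward (for example a Python supervisor), a rough standard-library equivalent might look like the sketch below; the `shipinhao_search` pattern is the one used in the README, everything else is illustrative and not part of this commit.

```python
# Sketch: find and force-kill processes whose command line matches a pattern,
# mirroring `ps aux | grep shipinhao_search | grep -v grep | awk '{print $2}' | xargs kill -9`.
import os
import signal
import subprocess

def kill_by_pattern(pattern: str = "shipinhao_search") -> None:
    # `pgrep -f` matches against the full command line and excludes itself,
    # so no `grep -v grep` step is needed
    result = subprocess.run(["pgrep", "-f", pattern], capture_output=True, text=True)
    for pid in result.stdout.split():
        try:
            os.kill(int(pid), signal.SIGKILL)  # same effect as `kill -9`
        except ProcessLookupError:
            pass  # the process already exited

if __name__ == "__main__":
    kill_by_pattern()
```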

BIN
scheduling/.DS_Store


BIN
scheduling/scheduling_main/.DS_Store


BIN
scheduling/scheduling_v3/.DS_Store


BIN
shipinhao/logs/.DS_Store


+ 3 - 0
shipinhao/logs/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/5/9

+ 51 - 0
shipinhao/shipinhao_main/run_shipinhao.sh

@@ -0,0 +1,51 @@
+#!/bin/bash
+crawler_dir=$1  # crawler entry path, e.g. ./youtube/youtube_main/run_youtube_follow.py
+log_type=$2     # log name prefix, e.g. follow -> youtube/logs/2023-02-08-follow.log
+crawler=$3      # which crawler, e.g. youtube / kanyikan / weixinzhishu
+env=$4          # runtime environment, production: prod / testing: dev
+#nohup_dir=$5    # nohup log path, e.g. ./youtube/nohup.log
+
+if [ "${env}" = "--env=prod" ]; then
+  piaoquan_crawler_dir=/Users/piaoquan/Desktop/piaoquan_crawler/
+  profile_path=/etc/profile
+  python=python3
+  node_path=/usr/local/bin/node
+  nohup_log_path=${piaoquan_crawler_dir}shipinhao/logs/$(date +%Y-%m-%d)-run-shell.log
+elif [ "${env}" = "--env=dev" ]; then
+  piaoquan_crawler_dir=/Users/wangkun/Desktop/crawler/piaoquan_crawler/
+  profile_path=/etc/profile
+  node_path=/opt/homebrew/bin/node
+  python=python3
+  nohup_log_path=${piaoquan_crawler_dir}shipinhao/logs/$(date +%Y-%m-%d)-run-shell.log
+fi
+
+#rm -f ${piaoquan_crawler_dir}shipinhao/logs/appium.log
+#ps aux | grep shipinhao_search | grep -v grep | awk '{print $2}' | xargs kill -9
+
+echo "开始..." >> ${nohup_log_path}
+echo "$(date "+%Y-%m-%d %H:%M:%S") 更新环境变量..." >> ${nohup_log_path}
+cd ~ && source ${profile_path}
+# shellcheck disable=SC2129
+echo "$(date "+%Y-%m-%d %H:%M:%S") 更新环境变量完成!" >> ${nohup_log_path}
+
+echo "$(date "+%Y-%m-%d %H:%M:%S") 正在更新代码..." >> ${nohup_log_path}
+#cd ${piaoquan_crawler_dir} && git pull origin master --force
+echo "$(date "+%Y-%m-%d %H:%M:%S") 代码更新完成!" >> ${nohup_log_path}
+
+echo "$(date "+%Y-%m-%d %H:%M:%S") 正在检测 Appium 运行状态" >> ${nohup_log_path}
+  ps -ef | grep "/Applications/Appium.app/Contents/Resources/app/node_modules/appium/build/lib/main.js" | grep -v "grep"
+  if [ "$?" -eq 1 ];then
+    echo "$(date "+%Y-%m-%d %H:%M:%S") Appium 异常停止,正在重启!" >> ${nohup_log_path}
+    nohup ${node_path} /Applications/Appium.app/Contents/Resources/app/node_modules/appium/build/lib/main.js >> ${piaoquan_crawler_dir}shipinhao/logs/appium.log 2>&1 &
+    echo "$(date "+%Y-%m-%d %H:%M:%S") 重启Appium完毕!" >> ${nohup_log_path}
+  else
+#    rm -f ${piaoquan_crawler_dir}shipinhao/logs/appium.log
+    echo "$(date "+%Y-%m-%d %H:%M:%S") Appium 运行状态正常" >> ${nohup_log_path}
+  fi
+
+echo "$(date "+%Y-%m-%d %H:%M:%S") 正在重启爬虫服务..." >> ${nohup_log_path}
+cd ${piaoquan_crawler_dir}
+nohup ${python} -u ${crawler_dir} ${log_type} ${crawler} ${env} >> ${nohup_log_path} 2>&1 &
+echo "$(date "+%Y-%m-%d %H:%M:%S") 爬虫服务重启完毕!" >> ${nohup_log_path}
+
+exit 0
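run_shipinhao.sh checks whether Appium's `main.js` process is still alive and, if not, restarts it under `nohup` before relaunching the crawler. A minimal Python sketch of that same check-and-restart step is shown below, using the node and Appium paths hard-coded in the script; it is illustrative only, not part of this commit.

```python
# Sketch of the Appium liveness check performed by run_shipinhao.sh.
# APPIUM_MAIN_JS and NODE_PATH are the paths hard-coded in the script and may differ on other machines.
import subprocess

APPIUM_MAIN_JS = "/Applications/Appium.app/Contents/Resources/app/node_modules/appium/build/lib/main.js"
NODE_PATH = "/usr/local/bin/node"

def ensure_appium_running(appium_log_path: str) -> None:
    # `pgrep -f` exits non-zero when no process matches, like the grep check in the script
    alive = subprocess.run(["pgrep", "-f", APPIUM_MAIN_JS], capture_output=True).returncode == 0
    if alive:
        return
    with open(appium_log_path, "ab") as log_file:
        # start Appium detached and append its output, like `nohup node main.js >> appium.log 2>&1 &`
        subprocess.Popen([NODE_PATH, APPIUM_MAIN_JS],
                         stdout=log_file, stderr=subprocess.STDOUT,
                         start_new_session=True)
```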

+ 25 - 0
shipinhao/shipinhao_main/run_shipinhao_search.py

@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/5/8
+import argparse
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.common import Common
+from shipinhao.shipinhao_search.shipinhao_search import ShipinhaoSearch
+
+
+def main(log_type, crawler, env):
+    Common.logger(log_type, crawler).info('开始抓取 视频号 搜索策略\n')
+    ShipinhaoSearch.get_search_videos(log_type, crawler, env)
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info('抓取完一轮\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # create the argument parser
+    parser.add_argument('--log_type', type=str)  # declare an argument and its type
+    parser.add_argument('--crawler')  # declare an argument
+    parser.add_argument('--env')  # declare an argument
+    args = parser.parse_args()  # read the values passed on the command line
+    main(log_type=args.log_type, crawler=args.crawler, env=args.env)

+ 685 - 0
shipinhao/shipinhao_search/shipinhao_search.py

@@ -0,0 +1,685 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/25
+import datetime
+import json
+import os
+import shutil
+import sys
+import time
+from datetime import date, timedelta
+from hashlib import md5
+from appium import webdriver
+from appium.webdriver.extensions.android.nativekey import AndroidKey
+from appium.webdriver.webdriver import WebDriver
+from selenium.common import NoSuchElementException
+from selenium.webdriver.common.by import By
+sys.path.append(os.getcwd())
+from common.feishu import Feishu
+from common.publish import Publish
+from common.common import Common
+from common.getuser import getUser
+from common.scheduling_db import MysqlHelper
+
+class ShipinhaoSearch:
+    platform = "视频号"
+    i = 0
+    download_cnt = 0
+
+    @staticmethod
+    def rule_dict(log_type, crawler):
+        while True:
+            shipinhao_rule_sheet = Feishu.get_values_batch(log_type, crawler, "YhfkNY")
+            if shipinhao_rule_sheet is None:
+                Common.logger(log_type, crawler).warning(f"shipinhao_rule_sheet:{shipinhao_rule_sheet}\n")
+                time.sleep(3)
+                continue
+
+            rule_duration_min = int(shipinhao_rule_sheet[1][0])
+            rule_duration_max = int(shipinhao_rule_sheet[1][2])
+            rule_share_cnt_min = int(shipinhao_rule_sheet[2][0])
+            rule_share_cnt_max = int(shipinhao_rule_sheet[2][2])
+            rule_favorite_cnt_min = int(shipinhao_rule_sheet[3][0])
+            rule_favorite_cnt_max = int(shipinhao_rule_sheet[3][2])
+            rule_publish_time_min = shipinhao_rule_sheet[4][0]
+            rule_publish_time_min_str = f"{str(rule_publish_time_min)[:4]}-{str(rule_publish_time_min)[4:6]}-{str(rule_publish_time_min)[6:]}"
+            rule_publish_time_min = int(time.mktime(time.strptime(rule_publish_time_min_str, "%Y-%m-%d")))
+            rule_publish_time_max = shipinhao_rule_sheet[4][2]
+            rule_publish_time_max_str = f"{str(rule_publish_time_max)[:4]}-{str(rule_publish_time_max)[4:6]}-{str(rule_publish_time_max)[6:]}"
+            rule_publish_time_max = int(time.mktime(time.strptime(rule_publish_time_max_str, "%Y-%m-%d")))
+            videos_cnt = shipinhao_rule_sheet[5][2]
+            rule_like_cnt_min = int(shipinhao_rule_sheet[6][0])
+            rule_like_cnt_max = int(shipinhao_rule_sheet[6][2])
+            rule_comment_cnt_min = int(shipinhao_rule_sheet[7][0])
+            rule_comment_cnt_max = int(shipinhao_rule_sheet[7][2])
+            rule_width_min = int(shipinhao_rule_sheet[8][0])
+            rule_width_max = int(shipinhao_rule_sheet[8][2])
+            rule_height_min = int(shipinhao_rule_sheet[9][0])
+            rule_height_max = int(shipinhao_rule_sheet[9][2])
+            rule_dict = {
+                "duration": {"min": rule_duration_min, "max": rule_duration_max},
+                "share_cnt": {"min": rule_share_cnt_min, "max": rule_share_cnt_max},
+                "favorite_cnt": {"min": rule_favorite_cnt_min, "max": rule_favorite_cnt_max},
+                "publish_time": {"min": rule_publish_time_min, "max": rule_publish_time_max},
+                "videos_cnt": {"min": videos_cnt},
+                "like_cnt": {"min": rule_like_cnt_min, "max": rule_like_cnt_max},
+                "comment_cnt": {"min": rule_comment_cnt_min, "max": rule_comment_cnt_max},
+                "width": {"min": rule_width_min, "max": rule_width_max},
+                "height": {"min": rule_height_min, "max": rule_height_max},
+            }
+            return rule_dict
+
+    # Baseline download rules
+    @staticmethod
+    def download_rule(log_type, crawler, video_dict):
+        """
+        Basic rules a video must satisfy before it is downloaded
+        :param log_type: log type
+        :param crawler: which crawler
+        :param video_dict: video info, as a dict
+        :return: True if the rules are satisfied, otherwise False
+        """
+        while True:
+            shipinhao_rule_sheet = Feishu.get_values_batch(log_type, crawler, "YhfkNY")
+            if shipinhao_rule_sheet is None:
+                Common.logger(log_type, crawler).warning(f"shipinhao_rule_sheet:{shipinhao_rule_sheet}\n")
+                time.sleep(3)
+                continue
+
+            rule_duration_min = int(shipinhao_rule_sheet[1][0])
+            rule_duration_max = int(shipinhao_rule_sheet[1][2])
+            rule_share_cnt_min = int(shipinhao_rule_sheet[2][0])
+            rule_share_cnt_max = int(shipinhao_rule_sheet[2][2])
+            rule_favorite_cnt_min = int(shipinhao_rule_sheet[3][0])
+            rule_favorite_cnt_max = int(shipinhao_rule_sheet[3][2])
+            rule_publish_time_min = shipinhao_rule_sheet[4][0]
+            rule_publish_time_min_str = f"{str(rule_publish_time_min)[:4]}-{str(rule_publish_time_min)[4:6]}-{str(rule_publish_time_min)[6:]}"
+            rule_publish_time_min = int(time.mktime(time.strptime(rule_publish_time_min_str, "%Y-%m-%d")))
+            rule_publish_time_max = shipinhao_rule_sheet[4][2]
+            rule_publish_time_max_str = f"{str(rule_publish_time_max)[:4]}-{str(rule_publish_time_max)[4:6]}-{str(rule_publish_time_max)[6:]}"
+            rule_publish_time_max = int(time.mktime(time.strptime(rule_publish_time_max_str, "%Y-%m-%d")))
+            # videos_cnt = Feishu.get_values_batch(log_type, crawler, "YhfkNY")[5][2]
+            rule_like_cnt_min = int(shipinhao_rule_sheet[6][0])
+            rule_like_cnt_max = int(shipinhao_rule_sheet[6][2])
+            rule_comment_cnt_min = int(shipinhao_rule_sheet[7][0])
+            rule_comment_cnt_max = int(shipinhao_rule_sheet[7][2])
+
+            Common.logger(log_type, crawler).info(
+                f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
+            Common.logger(log_type, crawler).info(
+                f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
+            Common.logger(log_type, crawler).info(
+                f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
+            Common.logger(log_type, crawler).info(
+                f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
+            Common.logger(log_type, crawler).info(
+                f'rule_favorite_cnt_max:{int(rule_favorite_cnt_max)} >= favorite_cnt:{int(video_dict["favorite_cnt"])} >= rule_favorite_cnt_min:{int(rule_favorite_cnt_min)}')
+            Common.logger(log_type, crawler).info(
+                f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
+
+            if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
+                    and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
+                    and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
+                    and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
+                    and int(rule_favorite_cnt_max) >= int(video_dict['favorite_cnt']) >= int(rule_favorite_cnt_min) \
+                    and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min):
+                return True
+            else:
+                return False
+
+    @staticmethod
+    def width_height_rule(log_type, crawler, width, height):
+        while True:
+            shipinhao_rule_sheet = Feishu.get_values_batch(log_type, crawler, "YhfkNY")
+            if shipinhao_rule_sheet is None:
+                Common.logger(log_type, crawler).warning(f"shipinhao_rule_sheet:{shipinhao_rule_sheet}\n")
+                time.sleep(3)
+                continue
+            rule_width_min = int(shipinhao_rule_sheet[8][0])
+            rule_width_max = int(shipinhao_rule_sheet[8][2])
+            rule_height_min = int(shipinhao_rule_sheet[9][0])
+            rule_height_max = int(shipinhao_rule_sheet[9][2])
+
+            Common.logger(log_type, crawler).info(
+                f'rule_width_max:{int(rule_width_max)} >= width:{int(width)} >= rule_width_min:{int(rule_width_min)}')
+            Common.logger(log_type, crawler).info(
+                f'rule_height_max:{int(rule_height_max)} >= height:{int(height)} >= rule_height_min:{int(rule_height_min)}')
+
+            if rule_width_max >= int(width) >= rule_width_min and rule_height_max >= int(height) >= rule_height_min:
+                return True
+            else:
+                return False
+
+    @staticmethod
+    def videos_cnt(log_type, crawler):
+        while True:
+            shipinhao_rule_sheet = Feishu.get_values_batch(log_type, crawler, "YhfkNY")
+            if shipinhao_rule_sheet is None:
+                Common.logger(log_type, crawler).warning(f"shipinhao_rule_sheet:{shipinhao_rule_sheet}\n")
+                time.sleep(3)
+                continue
+            videos_cnt = shipinhao_rule_sheet[5][2]
+            return int(videos_cnt)
+
+    @classmethod
+    def start_wechat(cls, log_type, crawler, word, our_uid, env):
+        Common.logger(log_type, crawler).info('启动微信')
+        if env == "dev":
+            chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver"
+        else:
+            chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
+        caps = {
+            "platformName": "Android",  # 手机操作系统 Android / iOS
+            "deviceName": "Android",  # 连接的设备名(模拟器或真机),安卓可以随便写
+            "platforVersion": "13",  # 手机对应的系统版本(Android 13)
+            "appPackage": "com.tencent.mm",  # 被测APP的包名,乐活圈 Android
+            "appActivity": ".ui.LauncherUI",  # 启动的Activity名
+            "autoGrantPermissions": True,  # 让 appium 自动授权 base 权限,
+            # 如果 noReset 为 True,则该条不生效(该参数为 Android 独有),对应的值为 True 或 False
+            "unicodekeyboard": True,  # 使用自带输入法,输入中文时填True
+            "resetkeyboard": True,  # 执行完程序恢复原来输入法
+            "noReset": True,  # 不重置APP
+            "recreateChromeDriverSessions": True,  # 切换到非 chrome-Driver 会 kill 掉 session,就不需要手动 kill 了
+            "printPageSourceOnFailure": True,  # 找不到元素时,appium log 会完整记录当前页面的 pagesource
+            "newCommandTimeout": 6000,  # 初始等待时间
+            "automationName": "UiAutomator2",  # 使用引擎,默认为 Appium,
+            # 其中 Appium、UiAutomator2、Selendroid、Espresso 用于 Android,XCUITest 用于 iOS
+            "showChromedriverLog": True,
+            # "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
+            "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
+            'enableWebviewDetailsCollection': True,
+            'setWebContentsDebuggingEnabled': True,
+            'chromedriverExecutable': chromedriverExecutable,
+        }
+        driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
+        driver.implicitly_wait(10)
+        if len(driver.find_elements(By.ID, 'android:id/text1')) != 0:
+            driver.find_elements(By.ID, 'android:id/text1')[0].click()
+        time.sleep(5)
+        cls.search_video(log_type=log_type,
+                         crawler=crawler,
+                         word=word,
+                         our_uid=our_uid,
+                         driver=driver,
+                         env=env)
+        cls.close_wechat(log_type=log_type,
+                         crawler=crawler,
+                         driver=driver)
+
+    @classmethod
+    def close_wechat(cls, log_type, crawler, driver: WebDriver):
+        driver.quit()
+        Common.logger(log_type, crawler).info(f"微信退出成功\n")
+
+    @classmethod
+    def is_contain_chinese(cls, strword):
+        for ch in strword:
+            if u'\u4e00' <= ch <= u'\u9fff':
+                return True
+        return False
+
+    # Find elements, checking every window handle
+    @classmethod
+    def search_elements(cls, driver: WebDriver, xpath):
+        time.sleep(1)
+        windowHandles = driver.window_handles
+        for handle in windowHandles:
+            driver.switch_to.window(handle)
+            time.sleep(1)
+            try:
+                elements = driver.find_elements(By.XPATH, xpath)
+                if elements:
+                    return elements
+            except NoSuchElementException:
+                pass
+
+    @classmethod
+    def check_to_webview(cls, log_type, crawler, driver: WebDriver):
+        # Common.logger(log_type, crawler).info('切换到webview')
+        webviews = driver.contexts
+        driver.switch_to.context(webviews[1])
+        time.sleep(1)
+        windowHandles = driver.window_handles
+        for handle in windowHandles:
+            driver.switch_to.window(handle)
+            try:
+                shipinhao_webview = driver.find_element(By.XPATH, '//div[@class="unit"]')
+                if shipinhao_webview:
+                    Common.logger(log_type, crawler).info('切换到视频号 webview 成功')
+                    return "成功"
+            except Exception as e:
+                Common.logger(log_type, crawler).info(f"{e}\n")
+
+    @classmethod
+    def repeat_out_video_id(cls, log_type, crawler, out_video_id, env):
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{out_video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def repeat_video_url(cls, log_type, crawler, video_url, env):
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and video_url="{video_url}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def search_video(cls, log_type, crawler, word, driver: WebDriver, our_uid, env):
+        # Tap the WeChat search box and type the search word
+        driver.implicitly_wait(10)
+        driver.find_element(By.ID, 'com.tencent.mm:id/j5t').click()
+        time.sleep(0.5)
+        Common.logger(log_type, crawler).info(f'输入搜索词:{word}')
+        search_box = driver.find_element(By.ID, 'com.tencent.mm:id/cd7')
+        search_box.clear()  # clear() returns None, so it cannot be chained with send_keys()
+        search_box.send_keys(word)
+        driver.press_keycode(AndroidKey.ENTER)
+        # driver.find_elements(By.ID, 'com.tencent.mm:id/oi4')[0].click()
+        driver.find_element(By.ID, 'com.tencent.mm:id/m94').click()
+        time.sleep(5)
+
+        # Switch to the webview of the WeChat search-results page
+        check_to_webview = cls.check_to_webview(log_type, crawler, driver)
+        if check_to_webview is None:
+            Common.logger(log_type, crawler).info("切换到视频号 webview 失败\n")
+            return
+        time.sleep(1)
+
+        # Switch to the "视频号" (Channels) tab
+        shipinhao_tags = cls.search_elements(driver, '//div[@class="unit"]/*[2]')
+        Common.logger(log_type, crawler).info('点击"视频号"分类')
+        shipinhao_tags[0].click()
+        time.sleep(5)
+
+        index = 0
+        while True:
+            # try:
+            if cls.search_elements(driver, '//*[@class="double-rich double-rich_vertical"]') is None:
+                Common.logger(log_type, crawler).info('窗口已销毁\n')
+                return
+
+            Common.logger(log_type, crawler).info('获取视频列表\n')
+            video_elements = cls.search_elements(driver, '//div[@class="vc active__mask"]')
+            if video_elements is None:
+                Common.logger(log_type, crawler).warning(f'video_elements:{video_elements}')
+                return
+
+            video_element_temp = video_elements[index:]
+            if len(video_element_temp) == 0:
+                Common.logger(log_type, crawler).info('到底啦~~~~~~~~~~~~~\n')
+                return
+
+            for i, video_element in enumerate(video_element_temp):
+                Common.logger(log_type, crawler).info(f"download_cnt:{cls.download_cnt}")
+                if cls.download_cnt >= cls.videos_cnt(log_type, crawler):
+                    Common.logger(log_type, crawler).info(f'搜索词:"{word}",已抓取视频数:{cls.download_cnt}')
+                    cls.download_cnt = 0
+                    return
+
+                if video_element is None:
+                    Common.logger(log_type, crawler).info('到底啦~\n')
+                    return
+
+                cls.i += 1
+                cls.search_elements(driver, '//div[@class="vc active__mask"]')
+
+                Common.logger(log_type, crawler).info(f'拖动"视频"列表第{cls.i}个至屏幕中间')
+                time.sleep(3)
+                driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})",
+                                      video_element)
+                if len(video_element.find_elements(By.XPATH, "//*[@text='没有更多的搜索结果']")) != 0:
+                    Common.logger(log_type, crawler).info("没有更多的搜索结果\n")
+                    return
+                video_title = video_element.find_elements(By.XPATH, '//div[@class="title ellipsis_2"]/*[2]')[index + i].text[:40]
+                video_url = video_element.find_elements(By.XPATH, '//div[@class="video-player"]')[index+i].get_attribute('src')
+                cover_url = video_element.find_elements(By.XPATH, '//div[@class="video-player__bd"]')[index+i].get_attribute('style')
+                cover_url = cover_url.split('url("')[-1].split('")')[0]
+                duration = video_element.find_elements(By.XPATH, '//div[@class="play-mask__text"]/*[2]')[index+i].text
+                duration = int(duration.split(':')[0]) * 60 + int(duration.split(':')[-1])
+                user_name = video_element.find_elements(By.XPATH, '//p[@class="vc-source__text"]')[index+i].text
+                avatar_url = video_element.find_elements(By.XPATH, '//div[@class="ui-image-image ui-image vc-source__thumb"]')[index+i].get_attribute('style')
+                avatar_url = avatar_url.split('url("')[-1].split('")')[0]
+                out_video_id = md5(video_title.encode('utf8')).hexdigest()
+                out_user_id = md5(user_name.encode('utf8')).hexdigest()
+
+                video_dict = {
+                    "video_title": video_title,
+                    "video_id": out_video_id,
+                    "play_cnt": 0,
+                    "duration": duration,
+                    "user_name": user_name,
+                    "user_id": out_user_id,
+                    "avatar_url": avatar_url,
+                    "cover_url": cover_url,
+                    "video_url": video_url,
+                    "session": f"shipinhao-search-{int(time.time())}"
+                }
+                for k, v in video_dict.items():
+                    Common.logger(log_type, crawler).info(f"{k}:{v}")
+                if video_title is None or video_url is None:
+                    Common.logger(log_type, crawler).info("无效视频\n")
+                elif cls.repeat_out_video_id(log_type, crawler, out_video_id, env) != 0:
+                    Common.logger(log_type, crawler).info('视频已下载\n')
+                elif cls.repeat_video_url(log_type, crawler, video_url, env) != 0:
+                    Common.logger(log_type, crawler).info('视频已下载\n')
+                else:
+                    video_element.click()
+                    time.sleep(3)
+                    video_info_dict = cls.get_video_info(driver)
+                    video_dict["like_cnt"] = video_info_dict["like_cnt"]
+                    video_dict["share_cnt"] = video_info_dict["share_cnt"]
+                    video_dict["favorite_cnt"] = video_info_dict["favorite_cnt"]
+                    video_dict["comment_cnt"] = video_info_dict["comment_cnt"]
+                    video_dict["publish_time_str"] = video_info_dict["publish_time_str"]
+                    video_dict["publish_time_stamp"] = video_info_dict["publish_time_stamp"]
+                    Common.logger(log_type, crawler).info(f'publish_time:{video_dict["publish_time_str"]}')
+                    if cls.download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict) is False:
+                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                    else:
+                        cls.download_publish(log_type=log_type,
+                                             crawler=crawler,
+                                             word=word,
+                                             video_dict=video_dict,
+                                             our_uid=our_uid,
+                                             env=env)
+
+            Common.logger(log_type, crawler).info('已抓取完一组视频,休眠1秒\n')
+            time.sleep(1)
+            index = index + len(video_element_temp)
+            # except Exception as e:
+            #     Common.logger(log_type, crawler).info(f"get_videoList:{e}\n")
+            #     cls.i = 0
+
+    @classmethod
+    def download_publish(cls, log_type, crawler, word, video_dict, our_uid, env):
+        # Download the video
+        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
+
+        # Read the video width/height with ffmpeg
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
+        if ffmpeg_dict is None:
+            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
+            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+            return
+        video_dict["video_width"] = ffmpeg_dict["width"]
+        video_dict["video_height"] = ffmpeg_dict["height"]
+
+        # Width/height rule check
+        if cls.width_height_rule(log_type, crawler, video_dict["video_width"], video_dict["video_height"]) is False:
+            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
+            Common.logger(log_type, crawler).info("宽高不满足抓取规则,删除成功\n")
+            return
+
+        # Download the cover image
+        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_dict["video_title"], url=video_dict["cover_url"])
+        # Save the video info to "./videos/{download_video_title}/info.txt"
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # Upload the video
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                  crawler=crawler,
+                                                  strategy="搜索爬虫策略",
+                                                  our_uid=our_uid,
+                                                  env=env,
+                                                  oss_endpoint="out")
+        if env == "dev":
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        else:
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        Common.logger(log_type, crawler).info("视频上传完成")
+
+        if our_video_id is None:
+            try:
+                # Delete the video folder
+                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id}, 删除成功\n")
+                return
+            except FileNotFoundError:
+                return
+
+        rule_dict = cls.rule_dict(log_type, crawler)
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                out_user_id,
+                                                platform,
+                                                strategy,
+                                                out_video_id,
+                                                video_title,
+                                                cover_url,
+                                                video_url,
+                                                duration,
+                                                publish_time,
+                                                play_cnt,
+                                                crawler_rule,
+                                                width,
+                                                height)
+                                                values({our_video_id},
+                                                "{video_dict['user_id']}",
+                                                "{cls.platform}",
+                                                "搜索爬虫策略",
+                                                "{video_dict['video_id']}",
+                                                "{video_dict['video_title']}",
+                                                "{video_dict['cover_url']}",
+                                                "{video_dict['video_url']}",
+                                                {int(video_dict['duration'])},
+                                                "{video_dict['publish_time_str']}",
+                                                {int(video_dict['play_cnt'])},
+                                                '{json.dumps(rule_dict)}',
+                                                {int(video_dict['video_width'])},
+                                                {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+
+        # Write a row to the Feishu sheet
+        Feishu.insert_columns(log_type, crawler, "xYWCzf", "ROWS", 1, 2)
+        time.sleep(0.5)
+        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
+                   "搜索爬虫策略",
+                   word,
+                   video_dict["video_title"],
+                   our_video_link,
+                   video_dict["duration"],
+                   video_dict["like_cnt"],
+                   video_dict["share_cnt"],
+                   video_dict["favorite_cnt"],
+                   video_dict["comment_cnt"],
+                   f'{video_dict["video_width"]}*{video_dict["video_height"]}',
+                   video_dict["publish_time_str"],
+                   video_dict["user_name"],
+                   video_dict["avatar_url"],
+                   video_dict["cover_url"],
+                   video_dict["video_url"]]]
+        Feishu.update_values(log_type, crawler, "xYWCzf", "F2:Z2", values)
+        Common.logger(log_type, crawler).info("写入飞书成功\n")
+        cls.download_cnt += 1
+
+    @classmethod
+    def get_video_info(cls, driver: WebDriver):
+        # Common.logger(log_type, crawler).info('切回NATIVE_APP')
+        driver.switch_to.context('NATIVE_APP')
+
+        # Like count
+        like_id = driver.find_element(By.ID, 'com.tencent.mm:id/k04')
+        like_cnt = like_id.get_attribute('name')
+        if '万' in like_cnt:
+            # check "万" before the Chinese-label check, otherwise "3.2万" / "3.2万+" would be treated as a label and zeroed
+            like_cnt = int(float(like_cnt.split('万')[0]) * 10000)
+        elif like_cnt == "" or cls.is_contain_chinese(like_cnt) is True:
+            # a hidden counter shows a label such as "喜欢" / "火"
+            like_cnt = 0
+        else:
+            like_cnt = int(float(like_cnt))
+
+        # Share count
+        share_id = driver.find_element(By.ID, 'com.tencent.mm:id/jhv')
+        share_cnt = share_id.get_attribute('name')
+        if '万' in share_cnt:
+            share_cnt = int(float(share_cnt.split('万')[0]) * 10000)
+        elif share_cnt == "" or cls.is_contain_chinese(share_cnt) is True:
+            # a hidden counter shows the label "转发"
+            share_cnt = 0
+        else:
+            share_cnt = int(float(share_cnt))
+
+        # Favorite count
+        favorite_id = driver.find_element(By.ID, 'com.tencent.mm:id/fnp')
+        favorite_cnt = favorite_id.get_attribute('name')
+        if '万' in favorite_cnt:
+            favorite_cnt = int(float(favorite_cnt.split('万')[0]) * 10000)
+        elif favorite_cnt == "" or cls.is_contain_chinese(favorite_cnt) is True:
+            # a hidden counter shows a label such as "收藏" / "推荐" / "火"
+            favorite_cnt = 0
+        else:
+            favorite_cnt = int(float(favorite_cnt))
+
+        # Comment count
+        comment_id = driver.find_element(By.ID, 'com.tencent.mm:id/bje')
+        comment_cnt = comment_id.get_attribute('name')
+        if '万' in comment_cnt:
+            comment_cnt = int(float(comment_cnt.split('万')[0]) * 10000)
+        elif comment_cnt == "" or cls.is_contain_chinese(comment_cnt) is True:
+            # a hidden counter shows the label "评论"
+            comment_cnt = 0
+        else:
+            comment_cnt = int(float(comment_cnt))
+
+        # Publish time
+        comment_id.click()
+        time.sleep(1)
+        publish_time = driver.find_element(By.ID, "com.tencent.mm:id/bre").get_attribute("name")
+        if "秒" in publish_time or "分钟" in publish_time or "小时" in publish_time:
+            publish_time_str = (date.today() + timedelta(days=0)).strftime("%Y-%m-%d")
+        elif "天前" in publish_time:
+            days = int(publish_time.replace("天前", ""))
+            publish_time_str = (date.today() + timedelta(days=-days)).strftime("%Y-%m-%d")
+        elif "年" in publish_time:
+            # publish_time_str = publish_time.replace("年", "-").replace("月", "-").replace("日", "")
+            year_str = publish_time.split("年")[0]
+            month_str = publish_time.split("年")[-1].split("月")[0]
+            day_str = publish_time.split("月")[-1].split("日")[0]
+            if int(month_str) < 10:
+                month_str = f"0{month_str}"
+            if int(day_str) < 10:
+                day_str = f"0{day_str}"
+            publish_time_str = f"{year_str}-{month_str}-{day_str}"
+        else:
+            year_str = str(datetime.datetime.now().year)
+            month_str = publish_time.split("月")[0]
+            day_str = publish_time.split("月")[-1].split("日")[0]
+            if int(month_str) < 10:
+                month_str = f"0{month_str}"
+            if int(day_str) < 10:
+                day_str = f"0{day_str}"
+            publish_time_str = f"{year_str}-{month_str}-{day_str}"
+            # publish_time_str = f'2023-{publish_time.replace("月", "-").replace("日", "")}'
+        publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y-%m-%d")))
+
+        # Collapse the comment panel
+        # Common.logger(log_type, crawler).info("收起评论")
+        driver.find_element(By.ID, "com.tencent.mm:id/be_").click()
+        time.sleep(0.5)
+        # Back to the webview
+        # Common.logger(log_type, crawler).info(f"操作手机返回按键")
+        driver.find_element(By.ID, "com.tencent.mm:id/a2z").click()
+        time.sleep(0.5)
+        # driver.press_keycode(AndroidKey.BACK)
+        # cls.check_to_webview(log_type=log_type, crawler=crawler, driver=driver)
+        webviews = driver.contexts
+        driver.switch_to.context(webviews[1])
+
+        video_dict = {
+            "like_cnt": like_cnt,
+            "share_cnt": share_cnt,
+            "favorite_cnt": favorite_cnt,
+            "comment_cnt": comment_cnt,
+            "publish_time_str": publish_time_str,
+            "publish_time_stamp": publish_time_stamp,
+        }
+        return video_dict
+
+    @classmethod
+    def get_users(cls, log_type, crawler, sheetid, env):
+        while True:
+            user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+            if user_sheet is None:
+                Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 3秒钟后重试")
+                time.sleep(3)
+                continue
+            our_user_list = []
+            # for i in range(1, len(user_sheet)):
+            for i in range(1, 3):
+                search_word = user_sheet[i][4]
+                our_uid = user_sheet[i][6]
+                tag1 = user_sheet[i][8]
+                tag2 = user_sheet[i][9]
+                tag3 = user_sheet[i][10]
+                tag4 = user_sheet[i][11]
+                tag5 = user_sheet[i][12]
+                Common.logger(log_type, crawler).info(f"正在更新 {search_word} 搜索词信息")
+                if our_uid is None:
+                    default_user = getUser.get_default_user()
+                    # Info used to create the our_uid account
+                    user_dict = {
+                        'recommendStatus': -6,
+                        'appRecommendStatus': -6,
+                        'nickName': default_user['nickName'],
+                        'avatarUrl': default_user['avatarUrl'],
+                        'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5}',
+                    }
+                    our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
+                    if env == 'prod':
+                        our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
+                    else:
+                        our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
+                    Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
+                                         [[our_uid, our_user_link]])
+                    Common.logger(log_type, crawler).info(f'站内用户主页创建成功:{our_user_link}\n')
+                our_user_dict = {
+                    'out_uid': '',
+                    'search_word': search_word,
+                    'our_uid': our_uid,
+                    'our_user_link': f'https://admin.piaoquantv.com/ums/user/{our_uid}/post',
+                }
+                our_user_list.append(our_user_dict)
+
+            return our_user_list
+
+
+    @classmethod
+    def get_search_videos(cls, log_type, crawler, env):
+        user_list = cls.get_users(log_type, crawler, "wNgi6Z", env)
+        for user in user_list:
+            cls.i = 0
+            cls.download_cnt = 0
+            search_word = user["search_word"]
+            our_uid = user["our_uid"]
+            Common.logger(log_type, crawler).info(f"开始抓取搜索词:{search_word}")
+            # try:
+            cls.start_wechat(log_type=log_type,
+                             crawler=crawler,
+                             word=search_word,
+                             our_uid=our_uid,
+                             env=env)
+            # except Exception as e:
+            #     Common.logger(log_type, crawler).error(f"search_video:{e}\n")
+
+
+if __name__ == '__main__':
+    # ShipinhaoSearchScheduling.get_search_videos(log_type="search",
+    #                                             crawler="shipinhao",
+    #                                             rule_dict='[{"videos_cnt":{"min":10,"max":0}},{"duration":{"min":30,"max":600}},{"share_cnt":{"min":3000,"max":0}},{"favorite_cnt":{"min":1000,"max":0}},{"publish_time":{"min":1672502400000,"max":0}}]',
+    #                                             oss_endpoint="out",
+    #                                             env="dev")
+    # print(ShipinhaoSearchScheduling.get_users("search", "shipinhao", "wNgi6Z", "dev"))
+    # print((date.today() + timedelta(days=0)).strftime("%Y-%m-%d"))
+    # print(ShipinhaoSearchScheduling.repeat_out_video_id(log_type="search",
+    #                                                     crawler="shipinhao",
+    #                                                     out_video_id="123",
+    #                                                     env="dev"))
+    # ShipinhaoSearch.download_rule(log_type="search", crawler="shipinhao", video_dict={})
+    print(ShipinhaoSearch.rule_dict(log_type="search", crawler="shipinhao"))
+    pass
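
`get_video_info` reads the like/share/favorite/comment counters as strings (a hidden counter shows a Chinese label, large ones use the `万` suffix) and converts the relative publish time ("3天前", "5月1日", "2022年12月1日") into `YYYY-MM-DD`. Pulled out as standalone helpers, that parsing is easy to unit-test; the sketch below mirrors the logic above, with illustrative function names that are not part of this commit.

```python
# Standalone versions of the counter and publish-time parsing used in get_video_info.
# Function names are illustrative; the behaviour mirrors the code above.
from datetime import date, timedelta

def parse_count(text: str) -> int:
    """'3.2万' -> 32000, '678' -> 678, '' or a label such as '喜欢' -> 0."""
    if '万' in text:  # covers both "3.2万" and "3.2万+"
        return int(float(text.split('万')[0]) * 10000)
    if not text or any('\u4e00' <= ch <= '\u9fff' for ch in text):
        return 0
    return int(float(text))

def parse_publish_date(text: str, today: date = None) -> str:
    """Relative / partial Chinese dates -> 'YYYY-MM-DD'."""
    today = today or date.today()
    if '秒' in text or '分钟' in text or '小时' in text:
        return today.strftime("%Y-%m-%d")
    if '天前' in text:
        days = int(text.replace('天前', ''))
        return (today - timedelta(days=days)).strftime("%Y-%m-%d")
    if '年' in text:  # e.g. 2022年12月1日
        year = int(text.split('年')[0])
        month = int(text.split('年')[-1].split('月')[0])
        day = int(text.split('月')[-1].split('日')[0])
    else:  # e.g. 5月1日, assumed to be in the current year
        year = today.year
        month = int(text.split('月')[0])
        day = int(text.split('月')[-1].split('日')[0])
    return f"{year}-{month:02d}-{day:02d}"

# Examples:
# parse_count("1.5万")               -> 15000
# parse_publish_date("3天前")         -> three days before today
# parse_publish_date("2022年12月1日") -> "2022-12-01"
```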