
Merge remote-tracking branch 'origin/master'

罗俊辉 1 year ago
parent
commit
27c0329186

+ 0 - 1
common/common.py

@@ -159,7 +159,6 @@ class Common:
         while True:
             # charles 抓包文件保存目录
             charles_file_dir = f"./{crawler}/chlsfiles/"
-
             if int(len(os.listdir(charles_file_dir))) == 1:
                 Common.logger(log_type, crawler).info("未找到chlsfile文件,等待60s")
                 cls.logging(log_type, crawler, env, "未找到chlsfile文件,等待60s")

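Note: the loop this hunk touches polls the Charles capture directory and treats a single directory entry as "no capture yet" (the folder presumably keeps one placeholder file). A minimal standalone sketch of that polling pattern, with a bounded wait added as an assumption (wait_for_charles_file is a hypothetical helper, not code from this repo):

    import os
    import time

    def wait_for_charles_file(charles_file_dir, poll_seconds=60, max_polls=10):
        """Return True once Charles drops a capture into the directory.

        Assumes the directory always holds one placeholder entry, so a
        count of 1 means "no chlsfile yet" (matching the check above).
        """
        for _ in range(max_polls):
            if len(os.listdir(charles_file_dir)) > 1:
                return True
            time.sleep(poll_seconds)
        return False  # gave up after max_polls * poll_seconds seconds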
+ 23 - 16
jixiangxingfu/jixiangxingfu_recommend/jixiangxingfu_recommend.py

@@ -52,9 +52,13 @@ class JixiangxingfuRecommend:
         try:
             if env == "dev":
                 chromedriverExecutable = '/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver'
+                # chromedriverExecutable = '/Users/tzld/Downloads/chromedriver_v111/chromedriver'  # 本地
+
                 # chromedriverExecutable = 'C:\\chromedriver\\chromedriver.exe'  # 阿里云 Windows
             else:
                 chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'  # Mac 爬虫机器
+                # chromedriverExecutable = '/Users/tzld/Downloads/chromedriver_v111/chromedriver'  # 本地
+
                 # chromedriverExecutable = 'C:\\chromedriver\\chromedriver.exe'  # 阿里云 Windows
 
             Common.logger(log_type, crawler).info('启动微信')
@@ -109,7 +113,7 @@ class JixiangxingfuRecommend:
             driver.find_elements(By.XPATH, '//*[@text="祝福每天好运来相伴"]')[-1].click()
 
             # 获取视频信息
-            time.sleep(20)
+            time.sleep(10)
             # time.sleep(60)
             cls.get_videoList(log_type, crawler, driver, env)
 
@@ -135,12 +139,14 @@ class JixiangxingfuRecommend:
                 driver.switch_to.window(handle)
                 time.sleep(1)
                 try:
-                    video_list = driver.find_element(By.XPATH, '//wx-view[text()="视频"]')
-                    video_list.click()
-                    time.sleep(2)
-                    if len(cls.search_elements(driver, '//*[@class="customNavigationBar--titleBack2"]')) != 0:
-                        Common.logger(log_type, crawler).info("直接进入了视频详情,返回视频列表")
-                        cls.search_elements(driver, '//*[@class="customNavigationBar--titleBack2"]')[0].click()
+                    # video_list = driver.find_element(By.XPATH, '//wx-view[text()="视频"]')
+                    driver.find_element(By.XPATH, '//wx-view[text()="为您精选"]')
+
+                    # video_list.click()
+                    # time.sleep(2)
+                    # # if len(cls.search_elements(driver, '//*[@class="customNavigationBar--titleBack2"]')) != 0:
+                    # #     Common.logger(log_type, crawler).info("直接进入了视频详情,返回视频列表")
+                    # #     cls.search_elements(driver, '//*[@class="customNavigationBar--titleBack2"]')[0].click()
                     Common.logger(log_type, crawler).info('切换到小程序视频列表成功\n')
                     return
                 except NoSuchElementException:
@@ -210,7 +216,7 @@ class JixiangxingfuRecommend:
             driver.switch_to.window(handle)
             time.sleep(1)
             try:
-                video_url_element = driver.find_element(By.XPATH, '//wx-video[@class="video-section"]')
+                video_url_element = driver.find_element(By.XPATH, '//wx-video[@class="main-video-container"]')
                 video_url = video_url_element.get_attribute("src")
                 cls.find_ad(log_type, crawler, driver)
                 return video_url
@@ -224,19 +230,20 @@ class JixiangxingfuRecommend:
         # Common.logger(log_type, crawler).info('关闭广告')
         # size = driver.get_window_size()
         # TouchAction(driver).tap(x=int(size['width'] * 0.5), y=int(size['height'] * 0.1)).perform()
+        driver.press_keycode(AndroidKey.BACK)
         # 切换到小程序
         cls.check_to_applet(log_type, crawler, driver)
-
-        time.sleep(10)
+        time.sleep(5)
         index = 0
+
         while True:
             try:
-                if cls.search_elements(driver, '//wx-view[@class="video-list-container"]') is None:
+                if cls.search_elements(driver, '//wx-view[@class="list-container"]') is None:
                     Common.logger(log_type, crawler).info('窗口已销毁\n')
                     return
 
                 Common.logger(log_type, crawler).info('获取视频列表\n')
-                video_elements = cls.search_elements(driver, '//wx-view[@class="video-item"]')
+                video_elements = cls.search_elements(driver, '//wx-view[@class="listCardVideo--video-title"]')
                 if video_elements is None:
                     Common.logger(log_type, crawler).warning(f'video_elements:{video_elements}')
                     return
@@ -251,7 +258,7 @@ class JixiangxingfuRecommend:
                         Common.logger(log_type, crawler).info('到底啦~\n')
                         return
                     cls.i += 1
-                    cls.search_elements(driver, '//wx-view[@class="video-item"]')
+                    cls.search_elements(driver, '//wx-view[@class="listCardVideo--video-title"]')
 
                     Common.logger(log_type, crawler).info(f'拖动"视频"列表第{cls.i}个至屏幕中间')
                     time.sleep(3)
@@ -261,9 +268,9 @@ class JixiangxingfuRecommend:
                     # cover_url = video_element.find_elements(By.XPATH, '//wx-image[@class="video-cover-img"]')[cls.i - 1].get_attribute('src')
                     # play_cnt = video_element.find_elements(By.XPATH, '//wx-view[@class="video-play-num"]')[cls.i - 1].text
 
-                    video_title = video_element.find_elements(By.XPATH, '//wx-view[@class="video-title"]')[index+i].text
-                    cover_url = video_element.find_elements(By.XPATH, '//wx-image[@class="video-cover-img"]')[index+i].get_attribute('src')
-                    play_cnt = video_element.find_elements(By.XPATH, '//wx-view[@class="video-play-num"]')[index+i].text
+                    video_title = video_element.find_elements(By.XPATH, '//wx-view[@class="listCardVideo--video-title"]')[index+i].text
+                    cover_url = video_element.find_elements(By.XPATH, '//wx-image[@class="listCardVideo--cover-img"]')[index+i].get_attribute('src')
+                    play_cnt = video_element.find_elements(By.XPATH, '//wx-view[@class="listCardVideo--play-num"]')[index+i].text
 
                     if "万" in play_cnt:
                         play_cnt = int(play_cnt.split("万")[0])*10000

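Note: this hunk renames the same wx-view/wx-image class names at several call sites (video-item becomes listCardVideo--video-title, and so on). A hypothetical module-level table would turn the next mini-program restyle into a one-place edit; this is a sketch, not the repo's current layout:

    # XPath selectors for the 吉祥幸福 mini program, kept in one place
    SELECTORS = {
        "list_container": '//wx-view[@class="list-container"]',
        "video_title": '//wx-view[@class="listCardVideo--video-title"]',
        "cover_img": '//wx-image[@class="listCardVideo--cover-img"]',
        "play_num": '//wx-view[@class="listCardVideo--play-num"]',
        "video_player": '//wx-video[@class="main-video-container"]',
    }

    # usage inside get_videoList, for example:
    # video_elements = cls.search_elements(driver, SELECTORS["video_title"])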
+ 110 - 0
kanyikan/kanyikan_main/run_kykhcm_recommend.py

@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/6/21
+import argparse
+import os
+import random
+import sys
+import time
+from mq_http_sdk.mq_client import *
+from mq_http_sdk.mq_consumer import *
+from mq_http_sdk.mq_exception import MQExceptionBase
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.public import get_consumer, ack_message, task_fun_mq
+from common.scheduling_db import MysqlHelper
+from kanyikan.kanyikan_recommend.kanyikan_recommend_hcm import KanyikanRecommend
+
+
+def main(log_type, crawler, topic_name, group_id, env):
+    consumer = get_consumer(topic_name, group_id)
+    # 长轮询表示如果Topic没有消息,则客户端请求会在服务端挂起 wait_seconds 秒,期间有消息可消费则立即返回响应。
+    # 长轮询时间30秒(最多可设置为30秒)。
+    wait_seconds = 30
+    # 一次最多消费1条(最多可设置为16条)。
+    batch = 1
+    Common.logger(log_type, crawler).info(f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                          f'WaitSeconds:{wait_seconds}\n'
+                                          f'TopicName:{topic_name}\n'
+                                          f'MQConsumer:{group_id}')
+    Common.logging(log_type, crawler, env, f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                           f'WaitSeconds:{wait_seconds}\n'
+                                           f'TopicName:{topic_name}\n'
+                                           f'MQConsumer:{group_id}')
+    while True:
+        try:
+            # 长轮询消费消息。
+            recv_msgs = consumer.consume_message(batch, wait_seconds)
+            for msg in recv_msgs:
+                Common.logger(log_type, crawler).info(f"Receive\n"
+                                                      f"MessageId:{msg.message_id}\n"
+                                                      f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                      f"MessageTag:{msg.message_tag}\n"
+                                                      f"ConsumedTimes:{msg.consumed_times}\n"
+                                                      f"PublishTime:{msg.publish_time}\n"
+                                                      f"Body:{msg.message_body}\n"
+                                                      f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                      f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                      f"Properties:{msg.properties}")
+                Common.logging(log_type, crawler, env, f"Receive\n"
+                                                       f"MessageId:{msg.message_id}\n"
+                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                       f"MessageTag:{msg.message_tag}\n"
+                                                       f"ConsumedTimes:{msg.consumed_times}\n"
+                                                       f"PublishTime:{msg.publish_time}\n"
+                                                       f"Body:{msg.message_body}\n"
+                                                       f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                       f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                       f"Properties:{msg.properties}")
+                # ack_mq_message
+                ack_message(log_type=log_type, crawler=crawler, recv_msgs=recv_msgs, consumer=consumer)
+
+                # 处理爬虫业务
+                task_dict = task_fun_mq(msg.message_body)['task_dict']
+                rule_dict = task_fun_mq(msg.message_body)['rule_dict']
+                task_id = task_dict['id']
+                select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+                user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+                our_uid_list = []
+                for user in user_list:
+                    our_uid_list.append(user["uid"])
+                our_uid = random.choice(our_uid_list)
+                Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+                Common.logging(log_type, crawler, env, f"调度任务:{task_dict}")
+                Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
+                Common.logging(log_type, crawler, env, f"抓取规则:{rule_dict}")
+                Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
+                Common.logger(log_type, crawler).info(f'开始抓取:{task_dict["taskName"]}\n')
+                Common.logging(log_type, crawler, env, f'开始抓取:{task_dict["taskName"]}\n')
+                KanyikanRecommend.get_videoList(log_type=log_type,
+                                                crawler=crawler,
+                                                rule_dict=rule_dict,
+                                                our_uid=our_uid,
+                                                env=env)
+                Common.del_charles_files(log_type, crawler)
+                Common.logger(log_type, crawler).info('抓取一轮结束\n')
+                Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+
+        except MQExceptionBase as err:
+            # Topic中没有消息可消费。
+            if err.type == "MessageNotExist":
+                Common.logger(log_type, crawler).info(f"No new message! RequestId:{err.req_id}\n")
+                Common.logging(log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n")
+                continue
+
+            Common.logger(log_type, crawler).info(f"Consume Message Fail! Exception:{err}\n")
+            Common.logging(log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n")
+            time.sleep(2)
+            continue
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # 新建参数解析器对象
+    parser.add_argument('--log_type', type=str)  # 添加参数,注明参数类型
+    parser.add_argument('--crawler')  # 添加参数
+    parser.add_argument('--topic_name')  # 添加参数
+    parser.add_argument('--group_id')  # 添加参数
+    parser.add_argument('--env')  # 添加参数
+    args = parser.parse_args()  # 解析命令行参数,也可通过终端赋值
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         topic_name=args.topic_name,
+         group_id=args.group_id,
+         env=args.env)

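Note: the runner acks each message before the crawl runs, so a crash mid-crawl silently drops that task. If at-least-once handling were wanted instead, the ack could move after the business logic; a sketch under that assumption (handle_task is a hypothetical wrapper around the crawl, and retried messages would then need the crawl to be idempotent):

    def consume_with_late_ack(consumer, handle_task, batch=1, wait_seconds=30):
        """Ack only after handle_task returns without raising."""
        recv_msgs = consumer.consume_message(batch, wait_seconds)
        for msg in recv_msgs:
            handle_task(msg.message_body)  # may raise; msg then stays unacked
            consumer.ack_message([msg.receipt_handle])  # safe to ack now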
+ 111 - 0
kanyikan/kanyikan_main/run_kykzzcl_recommend.py

@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/6/21
+import argparse
+import os
+import random
+import sys
+import time
+from mq_http_sdk.mq_client import *
+from mq_http_sdk.mq_consumer import *
+from mq_http_sdk.mq_exception import MQExceptionBase
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.public import get_consumer, ack_message, task_fun_mq
+from common.scheduling_db import MysqlHelper
+from kanyikan.kanyikan_recommend.kanyikan_recommend_zzcl import KanyikanRecommend
+
+
+def main(log_type, crawler, topic_name, group_id, env):
+    consumer = get_consumer(topic_name, group_id)
+    # 长轮询表示如果Topic没有消息,则客户端请求会在服务端挂起 wait_seconds 秒,期间有消息可消费则立即返回响应。
+    # 长轮询时间30秒(最多可设置为30秒)。
+    wait_seconds = 30
+    # 一次最多消费1条(最多可设置为16条)。
+    batch = 1
+    Common.logger(log_type, crawler).info(f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                          f'WaitSeconds:{wait_seconds}\n'
+                                          f'TopicName:{topic_name}\n'
+                                          f'MQConsumer:{group_id}')
+    Common.logging(log_type, crawler, env, f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                           f'WaitSeconds:{wait_seconds}\n'
+                                           f'TopicName:{topic_name}\n'
+                                           f'MQConsumer:{group_id}')
+    while True:
+        try:
+            # 长轮询消费消息。
+            recv_msgs = consumer.consume_message(batch, wait_seconds)
+            for msg in recv_msgs:
+                Common.logger(log_type, crawler).info(f"Receive\n"
+                                                      f"MessageId:{msg.message_id}\n"
+                                                      f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                      f"MessageTag:{msg.message_tag}\n"
+                                                      f"ConsumedTimes:{msg.consumed_times}\n"
+                                                      f"PublishTime:{msg.publish_time}\n"
+                                                      f"Body:{msg.message_body}\n"
+                                                      f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                      f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                      f"Properties:{msg.properties}")
+                Common.logging(log_type, crawler, env, f"Receive\n"
+                                                       f"MessageId:{msg.message_id}\n"
+                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                       f"MessageTag:{msg.message_tag}\n"
+                                                       f"ConsumedTimes:{msg.consumed_times}\n"
+                                                       f"PublishTime:{msg.publish_time}\n"
+                                                       f"Body:{msg.message_body}\n"
+                                                       f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                       f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                       f"Properties:{msg.properties}")
+                # ack_mq_message
+                ack_message(log_type=log_type, crawler=crawler, recv_msgs=recv_msgs, consumer=consumer)
+
+                # 处理爬虫业务
+                task_dict = task_fun_mq(msg.message_body)['task_dict']
+                rule_dict = task_fun_mq(msg.message_body)['rule_dict']
+                task_id = task_dict['id']
+                select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+                user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+                our_uid_list = []
+                for user in user_list:
+                    our_uid_list.append(user["uid"])
+                our_uid = random.choice(our_uid_list)
+                Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+                Common.logging(log_type, crawler, env, f"调度任务:{task_dict}")
+                Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
+                Common.logging(log_type, crawler, env, f"抓取规则:{rule_dict}")
+                Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
+                Common.logger(log_type, crawler).info(f'开始抓取:{task_dict["taskName"]}\n')
+                Common.logging(log_type, crawler, env, f'开始抓取:{task_dict["taskName"]}\n')
+
+                KanyikanRecommend.get_videoList(log_type=log_type,
+                                                crawler=crawler,
+                                                rule_dict=rule_dict,
+                                                our_uid=our_uid,
+                                                env=env)
+                Common.del_charles_files(log_type, crawler)
+                Common.logger(log_type, crawler).info('抓取一轮结束\n')
+                Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+
+        except MQExceptionBase as err:
+            # Topic中没有消息可消费。
+            if err.type == "MessageNotExist":
+                Common.logger(log_type, crawler).info(f"No new message! RequestId:{err.req_id}\n")
+                Common.logging(log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n")
+                continue
+
+            Common.logger(log_type, crawler).info(f"Consume Message Fail! Exception:{err}\n")
+            Common.logging(log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n")
+            time.sleep(2)
+            continue
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # 新建参数解析器对象
+    parser.add_argument('--log_type', type=str)  # 添加参数,注明参数类型
+    parser.add_argument('--crawler')  # 添加参数
+    parser.add_argument('--topic_name')  # 添加参数
+    parser.add_argument('--group_id')  # 添加参数
+    parser.add_argument('--env')  # 添加参数
+    args = parser.parse_args()  # 解析命令行参数,也可通过终端赋值
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         topic_name=args.topic_name,
+         group_id=args.group_id,
+         env=args.env)

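Note: this runner is near-identical to run_kykhcm_recommend.py; only the imported KanyikanRecommend module differs. The duplication could be folded into one entry point that loads the strategy module by name; a hypothetical sketch (load_recommend and the --strategy argument are assumptions):

    import importlib

    def load_recommend(strategy):
        """strategy: "hcm" or "zzcl" -> the matching KanyikanRecommend class."""
        module = importlib.import_module(
            f"kanyikan.kanyikan_recommend.kanyikan_recommend_{strategy}")
        return module.KanyikanRecommend

    # a shared main() would then call:
    # load_recommend(args.strategy).get_videoList(log_type=..., crawler=..., ...)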
+ 172 - 0
kanyikan/kanyikan_recommend/kanyikan_recommend_hcm.py

@@ -0,0 +1,172 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/6/21
+import json
+import os
+import random
+import sys
+import time
+import requests
+import urllib3
+sys.path.append(os.getcwd())
+from common.mq import MQ
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from common.public import get_config_from_mysql, download_rule
+proxies = {"http": None, "https": None}
+
+
+class KanyikanRecommend:
+    platform = "看一看"
+    strategy = "推荐抓取策略"
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and create_time>='2023-10-09' and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
+        mq = MQ(topic_name="topic_crawler_etl_" + env)
+        try:
+            Common.logger(log_type, crawler).info(f"正在抓取列表页")
+            Common.logging(log_type, crawler, env, f"正在抓取列表页")
+            session = Common.get_session(log_type, crawler, env)
+            if session is None:
+                time.sleep(1)
+                return cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
+            url = 'https://search.weixin.qq.com/cgi-bin/recwxa/recwxavideolist?'
+            header = {
+                "Connection": "keep-alive",
+                "content-type": "application/json",
+                "Accept-Encoding": "gzip,compress,br,deflate",
+                "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) "
+                              "AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.18(0x18001236) "
+                              "NetType/WIFI Language/zh_CN",
+                "Referer": "https://servicewechat.com/wxbb9a805eb4f9533c/234/page-frame.html",
+            }
+            params = {
+                'session': session,
+                "offset": 0,
+                "wxaVersion": "3.9.2",
+                "count": "10",
+                "channelid": "208",
+                "scene": '310',
+                "subscene": '1089',
+                "clientVersion": '8.0.18',
+                "sharesearchid": '0',
+                "nettype": 'wifi',
+                "switchprofile": "0",
+                "switchnewuser": "0",
+            }
+            urllib3.disable_warnings()
+            response = requests.get(url=url, headers=header, params=params, proxies=proxies, verify=False)
+            if "data" not in response.text:
+                Common.logger(log_type, crawler).info("获取视频list时,session过期,随机睡眠 31-50 秒")
+                Common.logging(log_type, crawler, env, "获取视频list时,session过期,随机睡眠 31-50 秒")
+                # 如果返回空信息,则随机睡眠 31-40 秒
+                time.sleep(random.randint(31, 40))
+                return cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
+            elif "items" not in response.json()["data"]:
+                Common.logger(log_type, crawler).info(f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
+                Common.logging(log_type, crawler, env, f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
+                # 如果返回空信息,则随机睡眠 1-3 分钟
+                time.sleep(random.randint(60, 180))
+                return cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
+            feeds = response.json().get("data", {}).get("items", "")
+            if feeds == "":
+                Common.logger(log_type, crawler).info(f"feeds:{feeds}")
+                Common.logging(log_type, crawler, env, f"feeds:{feeds}")
+                return
+            for i in range(len(feeds)):
+                try:
+                    video_title = feeds[i].get("title", "").strip().replace("\n", "") \
+                        .replace("/", "").replace("\\", "").replace("\r", "") \
+                        .replace(":", "").replace("*", "").replace("?", "") \
+                        .replace("?", "").replace('"', "").replace("<", "") \
+                        .replace(">", "").replace("|", "").replace(" ", "") \
+                        .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
+                        .replace("'", "").replace("#", "").replace("Merge", "")
+                    publish_time_stamp = feeds[i].get("date", 0)
+                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                    # 获取播放地址
+                    if "videoInfo" not in feeds[i]:
+                        video_url = ""
+                    elif "mpInfo" in feeds[i]["videoInfo"]["videoCdnInfo"]:
+                        if len(feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"]) > 2:
+                            video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][2]["url"]
+                        else:
+                            video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][0]["url"]
+                    elif "ctnInfo" in feeds[i]["videoInfo"]["videoCdnInfo"]:
+                        video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["ctnInfo"]["urlInfo"][0]["url"]
+                    else:
+                        video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["urlInfo"][0]["url"]
+                    video_dict = {
+                        "video_title": video_title,
+                        "video_id": feeds[i].get("videoId", ""),
+                        "play_cnt": feeds[i].get("playCount", 0),
+                        "like_cnt": feeds[i].get("liked_cnt", 0),
+                        "comment_cnt": feeds[i].get("comment_cnt", 0),
+                        "share_cnt": feeds[i].get("shared_cnt", 0),
+                        "duration": feeds[i].get("mediaDuration", 0),
+                        "video_width": feeds[i].get("short_video_info", {}).get("width", 0),
+                        "video_height": feeds[i].get("short_video_info", {}).get("height", 0),
+                        "publish_time_stamp": publish_time_stamp,
+                        "publish_time_str": publish_time_str,
+                        "user_name": feeds[i].get("source", "").strip().replace("\n", ""),
+                        "user_id": feeds[i].get("openid", ""),
+                        "avatar_url": feeds[i].get("bizIcon", ""),
+                        "cover_url": feeds[i].get("thumbUrl", ""),
+                        "video_url": video_url,
+                        "session": session,
+                    }
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
+
+                    if video_dict["video_id"] == "" or video_dict["video_title"] == "" or video_dict["video_url"] == "":
+                        Common.logger(log_type, crawler).info("无效视频\n")
+                        Common.logging(log_type, crawler, env, "无效视频\n")
+                    elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                        Common.logging(log_type, crawler, env, "不满足抓取规则\n")
+                    elif any(str(word) in video_dict["video_title"]
+                             for word in get_config_from_mysql(log_type=log_type,
+                                                               source=crawler,
+                                                               env=env,
+                                                               text="filter",
+                                                               action="")):
+                        Common.logger(log_type, crawler).info('已中过滤词\n')
+                        Common.logging(log_type, crawler, env, '已中过滤词\n')
+                    elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                        Common.logger(log_type, crawler).info('视频已下载\n')
+                        Common.logging(log_type, crawler, env, '视频已下载\n')
+                    else:
+                        video_dict["out_user_id"] = video_dict["user_id"]
+                        video_dict["platform"] = crawler
+                        video_dict["strategy"] = log_type
+                        video_dict["strategy_type"] = "hcm"
+                        video_dict["out_video_id"] = video_dict["video_id"]
+                        video_dict["width"] = video_dict["video_width"]
+                        video_dict["height"] = video_dict["video_height"]
+                        video_dict["crawler_rule"] = json.dumps(rule_dict)
+                        video_dict["user_id"] = our_uid
+                        video_dict["publish_time"] = video_dict["publish_time_str"]
+
+                        mq.send_msg(video_dict)
+                except Exception as e:
+                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+                    Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"抓取列表页时异常:{e}\n")
+            Common.logging(log_type, crawler, env, f"抓取列表页时异常:{e}\n")
+
+
+if __name__ == "__main__":
+    print(get_config_from_mysql(log_type="recommend",
+                                source="kanyikan",
+                                env="dev",
+                                text="filter",
+                                action=""))
+    pass

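Note: the playback-URL selection above walks several optional branches of feeds[i]["videoInfo"]["videoCdnInfo"]. Extracted into a helper it reads as follows; this is a sketch with the same branch order (pick_video_url is a hypothetical name, and preferring urlInfo[2] when more than two renditions exist mirrors the code above):

    def pick_video_url(feed):
        """Pick a playback URL from one feed item; "" if none is present."""
        video_info = feed.get("videoInfo")
        if video_info is None:
            return ""
        cdn = video_info["videoCdnInfo"]
        if "mpInfo" in cdn:
            urls = cdn["mpInfo"]["urlInfo"]
            return urls[2]["url"] if len(urls) > 2 else urls[0]["url"]
        if "ctnInfo" in cdn:
            return cdn["ctnInfo"]["urlInfo"][0]["url"]
        return cdn["urlInfo"][0]["url"]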
+ 181 - 0
kanyikan/kanyikan_recommend/kanyikan_recommend_zzcl.py

@@ -0,0 +1,181 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/6/21
+import json
+import os
+import random
+import sys
+import time
+import requests
+import urllib3
+sys.path.append(os.getcwd())
+from common.mq import MQ
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from common.public import get_config_from_mysql, download_rule
+proxies = {"http": None, "https": None}
+
+
+class KanyikanRecommend:
+    platform = "看一看"
+    strategy = "推荐抓取策略"
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and create_time>='2023-10-09' and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
+        mq = MQ(topic_name="topic_crawler_etl_" + env)
+        try:
+            Common.logger(log_type, crawler).info("正在抓取列表页")
+            Common.logging(log_type, crawler, env, "正在抓取列表页")
+            session = Common.get_session(log_type, crawler, env)
+            if session is None:
+                time.sleep(1)
+                return cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
+            url = 'https://search.weixin.qq.com/cgi-bin/recwxa/recwxavideolist?'
+            header = {
+                "Connection": "keep-alive",
+                "content-type": "application/json",
+                "Accept-Encoding": "gzip,compress,br,deflate",
+                "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) "
+                              "AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.18(0x18001236) "
+                              "NetType/WIFI Language/zh_CN",
+                "Referer": "https://servicewechat.com/wxbb9a805eb4f9533c/234/page-frame.html",
+            }
+            params = {
+                'session': session,
+                "offset": 0,
+                "wxaVersion": "3.9.2",
+                "count": "10",
+                "channelid": "208",
+                "scene": '310',
+                "subscene": '1089',
+                "clientVersion": '8.0.18',
+                "sharesearchid": '0',
+                "nettype": 'wifi',
+                "switchprofile": "0",
+                "switchnewuser": "0",
+            }
+            urllib3.disable_warnings()
+            response = requests.get(url=url, headers=header, params=params, proxies=proxies, verify=False)
+            # print(response)
+            if "data" not in response.text:
+                Common.logger(log_type, crawler).info("获取视频list时,session过期,随机睡眠 31-50 秒")
+                Common.logging(log_type, crawler, env, "获取视频list时,session过期,随机睡眠 31-50 秒")
+                # 如果返回空信息,则随机睡眠 31-40 秒
+                time.sleep(random.randint(31, 40))
+                return cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
+            elif "items" not in response.json()["data"]:
+                Common.logger(log_type, crawler).info(f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
+                Common.logging(log_type, crawler, env, f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
+                # 如果返回空信息,则随机睡眠 1-3 分钟
+                time.sleep(random.randint(60, 180))
+                return cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
+            feeds = response.json().get("data", {}).get("items", "")
+            if feeds == "":
+                Common.logger(log_type, crawler).info(f"feeds:{feeds}")
+                Common.logging(log_type, crawler, env, f"feeds:{feeds}")
+                return
+            for i in range(len(feeds)):
+                try:
+                    video_title = feeds[i].get("title", "").strip().replace("\n", "") \
+                        .replace("/", "").replace("\\", "").replace("\r", "") \
+                        .replace(":", "").replace("*", "").replace("?", "") \
+                        .replace("?", "").replace('"', "").replace("<", "") \
+                        .replace(">", "").replace("|", "").replace(" ", "") \
+                        .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
+                        .replace("'", "").replace("#", "").replace("Merge", "")
+                    publish_time_stamp = feeds[i].get("date", 0)
+                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                    # 获取播放地址
+                    if "videoInfo" not in feeds[i]:
+                        video_url = ""
+                    elif "mpInfo" in feeds[i]["videoInfo"]["videoCdnInfo"]:
+                        if len(feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"]) > 2:
+                            video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][2]["url"]
+                        else:
+                            video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][0]["url"]
+                    elif "ctnInfo" in feeds[i]["videoInfo"]["videoCdnInfo"]:
+                        video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["ctnInfo"]["urlInfo"][0]["url"]
+                    else:
+                        video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["urlInfo"][0]["url"]
+                    video_dict = {
+                        "video_title": video_title,
+                        "video_id": feeds[i].get("videoId", ""),
+                        "play_cnt": feeds[i].get("playCount", 0),
+                        "like_cnt": feeds[i].get("liked_cnt", 0),
+                        "comment_cnt": feeds[i].get("comment_cnt", 0),
+                        "share_cnt": feeds[i].get("shared_cnt", 0),
+                        "duration": feeds[i].get("mediaDuration", 0),
+                        "video_width": feeds[i].get("short_video_info", {}).get("width", 0),
+                        "video_height": feeds[i].get("short_video_info", {}).get("height", 0),
+                        "publish_time_stamp": publish_time_stamp,
+                        "publish_time_str": publish_time_str,
+                        "user_name": feeds[i].get("source", "").strip().replace("\n", ""),
+                        "user_id": feeds[i].get("openid", ""),
+                        "avatar_url": feeds[i].get("bizIcon", ""),
+                        "cover_url": feeds[i].get("thumbUrl", ""),
+                        "video_url": video_url,
+                        "session": session,
+                    }
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
+
+                    if video_dict["video_id"] == "" or video_dict["video_title"] == "" or video_dict["video_url"] == "":
+                        Common.logger(log_type, crawler).info("无效视频\n")
+                        Common.logging(log_type, crawler, env, "无效视频\n")
+                    elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                        Common.logging(log_type, crawler, env, "不满足抓取规则\n")
+                    elif any(str(word) in video_dict["video_title"]
+                             for word in get_config_from_mysql(log_type=log_type,
+                                                               source=crawler,
+                                                               env=env,
+                                                               text="filter",
+                                                               action="")):
+                        Common.logger(log_type, crawler).info('已中过滤词\n')
+                        Common.logging(log_type, crawler, env, '已中过滤词\n')
+                    elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                        Common.logger(log_type, crawler).info('视频已下载\n')
+                        Common.logging(log_type, crawler, env, '视频已下载\n')
+                    else:
+                        video_dict["out_user_id"] = video_dict["user_id"]
+                        video_dict["platform"] = crawler
+                        video_dict["strategy"] = log_type
+                        video_dict["strategy_type"] = "zzcl"
+                        video_dict["out_video_id"] = video_dict["video_id"]
+                        video_dict["width"] = video_dict["video_width"]
+                        video_dict["height"] = video_dict["video_height"]
+                        video_dict["crawler_rule"] = json.dumps(rule_dict)
+                        video_dict["user_id"] = our_uid
+                        video_dict["publish_time"] = video_dict["publish_time_str"]
+
+                        mq.send_msg(video_dict)
+                except Exception as e:
+                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+                    Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"抓取列表页时异常:{e}\n")
+            Common.logging(log_type, crawler, env, f"抓取列表页时异常:{e}\n")
+
+
+if __name__ == "__main__":
+    KanyikanRecommend.get_videoList(
+        log_type="recommend",
+        crawler="kanyikan",
+        env="prod",
+        rule_dict={'share_cnt': {'min': 300, 'max': 0}},
+        our_uid=64080779
+    )
+    # print(get_config_from_mysql(log_type="recommend",
+    #                             source="kanyikan",
+    #                             env="dev",
+    #                             text="filter",
+    #                             action=""))
+    # pass

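Note: repeat_video interpolates video_id straight into the SQL string. If MysqlHelper exposes parameter binding (not shown in this diff), or a raw pymysql cursor is available, the same duplicate check can bind values instead; a sketch assuming a pymysql-style cursor (is_duplicate is a hypothetical helper):

    def is_duplicate(cursor, platform, crawler, video_id):
        """True if the video was already crawled since 2023-10-09."""
        sql = ("select 1 from crawler_video "
               "where platform in (%s, %s) "
               "and create_time >= '2023-10-09' "
               "and out_video_id = %s limit 1")
        cursor.execute(sql, (crawler, platform, video_id))
        return cursor.fetchone() is not None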
+ 7 - 2
main/process_mq.sh

@@ -6,12 +6,12 @@ log_type=$3 # 爬虫策略
 env=$4      # 环境
 
 if [ ${env} = "dev" ];then
-  piaoquan_crawler_dir=/Users/tzld/Desktop/piaoquan_crawler/
+  piaoquan_crawler_dir=/Users/crawler/Desktop/piaoquan_crawler/
   profile_path=/etc/profile
   python=python3
   log_path=${piaoquan_crawler_dir}main/main_logs/process-mq-$(date +%Y-%m-%d).log
 elif [ ${crawler} = "zfqz" ];then
-  piaoquan_crawler_dir=/Users/piaoquan/Desktop/piaoquan_crawler/
+  piaoquan_crawler_dir=/Users/crawler/Desktop/piaoquan_crawler/
   profile_path=./base_profile
   python=python3
   log_path=${piaoquan_crawler_dir}main/main_logs/process-mq-$(date +%Y-%m-%d).log
@@ -25,6 +25,11 @@ elif [ ${crawler} = "kykjk" ];then
   profile_path=/.base_profile
   python=python3
   log_path=${piaoquan_crawler_dir}main/main_logs/process-mq-$(date +%Y-%m-%d).log
+elif [ ${crawler} = "kykzzcl" ];then
+  piaoquan_crawler_dir=/Users/crawler/Desktop/piaoquan_crawler/
+  profile_path=/.base_profile
+  python=python3
+  log_path=${piaoquan_crawler_dir}main/main_logs/process-mq-$(date +%Y-%m-%d).log
 elif [ ${crawler} = "sph" ] && [ ${log_type} = "search" ];then
   piaoquan_crawler_dir=/Users/piaoquan/Desktop/piaoquan_crawler/
   profile_path=/etc/profile

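Note: process_mq.sh grows one elif branch per crawler, yet each branch only picks a crawler directory and a profile path. The mapping could be data rather than branches; a hypothetical Python launcher expressing the same lookup (MACHINE_CONFIG and resolve are assumptions; paths are taken from the script):

    MACHINE_CONFIG = {
        # key: (piaoquan_crawler_dir, profile_path)
        "dev": ("/Users/crawler/Desktop/piaoquan_crawler/", "/etc/profile"),
        "zfqz": ("/Users/crawler/Desktop/piaoquan_crawler/", "./base_profile"),
        "kykzzcl": ("/Users/crawler/Desktop/piaoquan_crawler/", "/.base_profile"),
    }

    def resolve(crawler, env):
        key = "dev" if env == "dev" else crawler
        return MACHINE_CONFIG.get(key, MACHINE_CONFIG["dev"])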
+ 8 - 54
main/process_offline.sh

@@ -29,38 +29,16 @@ else
   echo "$(date "+%Y-%m-%d %H:%M:%S") Appium 进程状态正常" >> ${log_path}
 fi
 
-# 知青天天看
-if [[ "$time" > "00:00:00" ]] && [[ "$time" < "00:59:59" ]]; then
-  echo "$(date "+%Y-%m-%d %H:%M:%S") 开始启动 知青天天看 爬虫脚本任务" >> ${log_path}
-  ps aux | grep run_zhongmiaoyinxin | grep -v grep | awk '{print $2}' | xargs kill -9
-  ps aux | grep run_ganggangdouchuan | grep -v grep | awk '{print $2}' | xargs kill -9
-  ps aux | grep run_jixiangxingfu | grep -v grep | awk '{print $2}' | xargs kill -9
-  ps aux | grep run_zfqz | grep -v grep | awk '{print $2}' | xargs kill -9
-  ps aux | grep run_htzf | grep -v grep | awk '{print $2}' | xargs kill -9
-  ps -ef | grep "run_zhiqingtiantiankan_recommend.py" | grep -v "grep"
-  if [ "$?" -eq 1 ];then
-    echo "$(date "+%Y-%m-%d %H:%M:%S") 知青天天看小程序爬虫, 异常停止, 正在重启!" >> ${log_path}
-    cd ${piaoquan_crawler_dir}
-    nohup python3 -u zhiqingtiantiankan/zhiqingtiantiankan_main/run_zhiqingtiantiankan_recommend.py --log_type="recommend" --crawler="zhiqingtiantiankan" --env=${env} >>zhiqingtiantiankan/logs/nohup-recommend.log 2>&1 &
-    echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
-  else
-    echo "$(date "+%Y-%m-%d %H:%M:%S") 知青天天看小程序爬虫, 进程状态正常" >> ${log_path}
-  fi
-else
-  echo "$(date "+%Y-%m-%d %H:%M:%S") 知青天天看 爬虫脚本任务结束" >> ${log_path}
-fi
-
 # 海豚祝福
-if [[ "$time" > "01:00:00" ]] && [[ "$time" < "03:59:59" ]]; then
+if [[ "$time" > "00:00:00"  &&  "$time" < "00:59:59" || "$time" > "12:00:00"  &&  "$time" < "12:59:59" ]];then
   echo "$(date "+%Y-%m-%d %H:%M:%S") 开始启动 海豚祝福 爬虫脚本任务" >> ${log_path}
-  ps aux | grep run_zhiqingtiantiankan | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_zhongmiaoyinxin | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_ganggangdouchuan | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_jixiangxingfu | grep -v grep | awk '{print $2}' | xargs kill -9
-  ps aux | grep run_zfqz | grep -v grep | awk '{print $2}' | xargs kill -9
   ps -ef | grep "run_htzf_recommend.py" | grep -v "grep"
   if [ "$?" -eq 1 ];then
     echo "$(date "+%Y-%m-%d %H:%M:%S") 海豚祝福小程序爬虫, 异常停止, 正在重启!" >> ${log_path}
+    adb forward --remove-all
     cd ${piaoquan_crawler_dir}
     nohup python3 -u haitunzhufu/haitunzhufu_main/run_htzf_recommend.py --log_type="recommend" --crawler="haitunzhufu" --env=${env} >> haitunzhufu/logs/nohup-recommend.log 2>&1 &
     echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
@@ -72,16 +50,15 @@ else
 fi
 
 # 刚刚都传
-if [[ "$time" > "04:00:00" ]] && [[ "$time" < "06:59:59" ]]; then
+if [[ "$time" > "01:00:00"  &&  "$time" < "01:59:59" || "$time" > "13:00:00"  &&  "$time" < "13:59:59" ]];then
   echo "$(date "+%Y-%m-%d %H:%M:%S") 开始启动 刚刚都传 爬虫脚本任务" >> ${log_path}
   ps aux | grep run_htzf | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_zhongmiaoyinxin | grep -v grep | awk '{print $2}' | xargs kill -9
-  ps aux | grep run_zhiqingtiantiankan | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_jixiangxingfu | grep -v grep | awk '{print $2}' | xargs kill -9
-  ps aux | grep run_zfqz | grep -v grep | awk '{print $2}' | xargs kill -9
   ps -ef | grep "run_ganggangdouchuan_recommend.py" | grep -v "grep"
   if [ "$?" -eq 1 ];then
     echo "$(date "+%Y-%m-%d %H:%M:%S") 刚刚都传小程序爬虫, 异常停止, 正在重启!" >> ${log_path}
+    adb forward --remove-all
     cd ${piaoquan_crawler_dir}
     nohup python3 -u ganggangdouchuan/ganggangdouchuan_main/run_ganggangdouchuan_recommend.py --log_type="recommend" --crawler="ganggangdouchuan" --env=${env} >>ganggangdouchuan/logs/nohup-recommend.log 2>&1 &
     echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
@@ -93,16 +70,15 @@ else
 fi
 
 # 吉祥幸福
-if [[ "$time" > "07:00:00" ]] && [[ "$time" < "09:59:59" ]]; then
+if [[ "$time" > "02:00:00"  &&  "$time" < "02:59:59" || "$time" > "14:00:00"  &&  "$time" < "14:59:59" || "$time" > "19:00:00"  &&  "$time" < "20:59:59" ]];then
   echo "$(date "+%Y-%m-%d %H:%M:%S") 开始启动 吉祥幸福 爬虫脚本任务" >> ${log_path}
   ps aux | grep run_htzf | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_zhongmiaoyinxin | grep -v grep | awk '{print $2}' | xargs kill -9
-  ps aux | grep run_zhiqingtiantiankan | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_ganggangdouchuan | grep -v grep | awk '{print $2}' | xargs kill -9
-  ps aux | grep run_zfqz | grep -v grep | awk '{print $2}' | xargs kill -9
   ps -ef | grep "run_jixiangxingfu_recommend.py" | grep -v "grep"
   if [ "$?" -eq 1 ];then
     echo "$(date "+%Y-%m-%d %H:%M:%S") 吉祥幸福爬虫, 异常停止, 正在重启!" >> ${log_path}
+    adb forward --remove-all
     cd ${piaoquan_crawler_dir}
     nohup python3 -u jixiangxingfu/jixiangxingfu_main/run_jixiangxingfu_recommend.py --log_type="recommend" --crawler="jixiangxingfu" --env=${env} >>jixiangxingfu/logs/nohup-recommend.log 2>&1 &
     echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
@@ -114,16 +90,15 @@ else
 fi
 
 # 众妙音信
-if [[ "$time" > "10:00:00" ]] && [[ "$time" < "12:59:59" ]]; then
+if [[ "$time" > "03:00:00"  &&  "$time" < "03:59:59" || "$time" > "15:00:00"  &&  "$time" < "15:59:59" ]];then
   echo "$(date "+%Y-%m-%d %H:%M:%S") 开始启动 众妙音信 爬虫脚本任务" >> ${log_path}
   ps aux | grep run_htzf | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_ganggangdouchuan | grep -v grep | awk '{print $2}' | xargs kill -9
-  ps aux | grep run_zhiqingtiantiankan | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_jixiangxingfu | grep -v grep | awk '{print $2}' | xargs kill -9
-  ps aux | grep run_zfqz | grep -v grep | awk '{print $2}' | xargs kill -9
   ps -ef | grep "run_zhongmiaoyinxin_recommend.py" | grep -v "grep"
   if [ "$?" -eq 1 ];then
     echo "$(date "+%Y-%m-%d %H:%M:%S") 众妙音信小程序爬虫, 异常停止, 正在重启!" >> ${log_path}
+    adb forward --remove-all
     cd ${piaoquan_crawler_dir}
     nohup python3 -u zhongmiaoyinxin/zhongmiaoyinxin_main/run_zhongmiaoyinxin_recommend.py --log_type="recommend" --crawler="zhongmiaoyinxin" --env=${env} >>zhongmiaoyinxin/logs/nohup-recommend.log 2>&1 &
     echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
@@ -135,27 +110,6 @@ else
   echo "$(date "+%Y-%m-%d %H:%M:%S") 众妙音信 爬虫脚本任务结束" >> ${log_path}
 fi
 
-# 祝福圈子
-if [[ "$time" > "13:00:00" ]] && [[ "$time" < "23:59:59" ]]; then
-  echo "$(date "+%Y-%m-%d %H:%M:%S") 开始启动 祝福圈子 爬虫脚本任务" >> ${log_path}
-  ps aux | grep run_htzf | grep -v grep | awk '{print $2}' | xargs kill -9
-  ps aux | grep run_zhongmiaoyinxin | grep -v grep | awk '{print $2}' | xargs kill -9
-  ps aux | grep run_ganggangdouchuan | grep -v grep | awk '{print $2}' | xargs kill -9
-  ps aux | grep run_zhiqingtiantiankan | grep -v grep | awk '{print $2}' | xargs kill -9
-  ps aux | grep run_jixiangxingfu | grep -v grep | awk '{print $2}' | xargs kill -9
-  ps -ef | grep "run_zfqz_recommend.py" | grep -v "grep"
-  if [ "$?" -eq 1 ];then
-    echo "$(date "+%Y-%m-%d %H:%M:%S") 祝福圈子小程序爬虫, 异常停止, 正在重启!" >> ${log_path}
-    cd ${piaoquan_crawler_dir}
-    /bin/sh ${piaoquan_crawler_dir}main/process_mq.sh "zfqz" "zhufuquanzi" "recommend" ${env}
-    echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
-  else
-    echo "$(date "+%Y-%m-%d %H:%M:%S") 祝福圈子小程序爬虫, 进程状态正常" >> ${log_path}
-  fi
-
-else
-  echo "$(date "+%Y-%m-%d %H:%M:%S") 祝福圈子 爬虫脚本任务结束" >> ${log_path}
-fi
 
 # 删除日志
 echo "$(date "+%Y-%m-%d %H:%M:%S") 开始清理 10 天前的日志文件" >> ${log_path}

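Note: the rewritten schedule gives each crawler one or more short daily windows instead of a single long block, using strict string comparisons on $time. A small helper that evaluates such windows (a sketch; in_window is a hypothetical name and uses inclusive bounds, whereas the shell tests exclude the exact endpoints):

    from datetime import datetime

    def in_window(windows, now=None):
        """windows: list of ("HH:MM:SS", "HH:MM:SS") pairs."""
        t = (now or datetime.now()).strftime("%H:%M:%S")
        return any(start <= t <= end for start, end in windows)

    # e.g. the 海豚祝福 slots above:
    # in_window([("00:00:00", "00:59:59"), ("12:00:00", "12:59:59")])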
+ 5 - 2
zhongmiaoyinxin/zhongmiaoyinxin_recommend/zhongmiaoyinxin_recommend.py

@@ -6,6 +6,7 @@ import os
 import shutil
 import sys
 import time
+import random
 from hashlib import md5
 from appium import webdriver
 from appium.webdriver.common.touch_action import TouchAction
@@ -301,8 +302,10 @@ class ZhongmiaoyinxinRecommend:
                             Common.logger(log_type, crawler).info(f"video_url:{video_url}\n")
                             # driver.press_keycode(AndroidKey.BACK)
                             cls.download_publish(log_type, crawler, video_dict, env, driver)
-                Common.logger(log_type, crawler).info('已抓取完一组视频,休眠10秒\n')
-                time.sleep(10)
+
+                interval = random.randrange(50, 70)
+                Common.logger(log_type, crawler).info(f'已抓取完一组视频,休眠{interval}秒\n')
+                time.sleep(interval)
                 index = index + len(video_element_temp)
             except Exception as e:
                 Common.logger(log_type, crawler).info(f"get_videoList:{e}\n")

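Note: random.randrange(50, 70) draws from 50..69, since the upper bound is exclusive. If a full 50-70 second jitter were intended, randint includes both endpoints:

    import random

    interval = random.randint(50, 70)  # uniform over 50..70 inclusive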
+ 37 - 6
zhufuquanzi/zhufuquanzi_main/run_zfqz_recommend.py

@@ -3,6 +3,8 @@
 # @Time: 2023/9/7
 import argparse
 import random
+import multiprocessing
+import time
 
 from mq_http_sdk.mq_client import *
 from mq_http_sdk.mq_consumer import *
@@ -15,6 +17,14 @@ from common.scheduling_db import MysqlHelper
 from zhufuquanzi.zhufuquanzi_recommend.zhufuquanzi_recommend2 import ZFQZRecommend
 
 
+def run(args1, args2, args3, args4, args5):
+    ZFQZRecommend.start_wechat(log_type=args1,
+                               crawler=args2,
+                               rule_dict=args3,
+                               our_uid=args4,
+                               env=args5)
+
+
 class ZFQZMain:
     @classmethod
     def zhufuquanzi_main(cls, log_type, crawler, topic_name, group_id, env):
@@ -79,12 +89,33 @@ class ZFQZMain:
                     Common.logger(log_type, crawler).info(f'开始抓取:{task_dict["taskName"]}\n')
                     Common.logging(log_type, crawler, env, f'开始抓取:{task_dict["taskName"]}\n')
 
-                    # 抓取符合规则的视频列表
-                    ZFQZRecommend.start_wechat(log_type=log_type,
-                                               crawler=crawler,
-                                               rule_dict=rule_dict,
-                                               our_uid=our_uid,
-                                               env=env)
+                    process = multiprocessing.Process(
+                        target=run,
+                        args=(log_type, crawler, rule_dict, our_uid, env)
+                    )
+                    process.start()
+                    print("进程开始")
+
+                    while True:
+                        if not process.is_alive():
+
+                            print("进程异常,准备重启")
+                            process.terminate()
+                            os.system("adb forward --remove-all")
+                            time.sleep(60)
+                            process = multiprocessing.Process(
+                                target=run,
+                                args=(log_type, crawler, rule_dict, our_uid, env)
+                            )
+                            process.start()
+                        time.sleep(60)
+
+                    # # 抓取符合规则的视频列表
+                    # ZFQZRecommend.start_wechat(log_type=log_type,
+                    #                            crawler=crawler,
+                    #                            rule_dict=rule_dict,
+                    #                            our_uid=our_uid,
+                    #                            env=env)
                     Common.logger(log_type, crawler).info('抓取一轮结束\n')
                     Common.logging(log_type, crawler, env, '抓取一轮结束\n')
 

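Note: the new watchdog loop never exits, so the "抓取一轮结束" logging after it is unreachable and the consumer never returns to poll MQ for the next task. A bounded variant would restart a fixed number of times and then fall through; a sketch (supervise is hypothetical and the restart cap is an assumption, not the commit's behavior):

    import multiprocessing
    import os
    import time

    def supervise(target, args, max_restarts=3, check_seconds=60):
        process = multiprocessing.Process(target=target, args=args)
        process.start()
        restarts = 0
        while restarts < max_restarts:
            time.sleep(check_seconds)
            if process.is_alive():
                continue
            os.system("adb forward --remove-all")  # same cleanup as the commit
            process = multiprocessing.Process(target=target, args=args)
            process.start()
            restarts += 1
        process.join()  # let the final run finish, then return to MQ polling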
+ 2 - 2
zhufuquanzi/zhufuquanzi_recommend/zhufuquanzi_recommend2.py

@@ -32,14 +32,14 @@ class ZFQZRecommend:
         if env == "dev":
             chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v111/chromedriver"
         else:
-            chromedriverExecutable = "/Users/piaoquan/Downloads/chromedriver"
+            chromedriverExecutable = "/Users/crawler/Downloads/chromedriver_v111/chromedriver"
 
         Common.logger(log_type, crawler).info("启动微信")
         Common.logging(log_type, crawler, env, '启动微信')
         caps = {
             "platformName": "Android",
             "devicesName": "Android",
-            "platformVersion": "11",
+            # "platformVersion": "11",
             # "udid": "emulator-5554",
             "appPackage": "com.tencent.mm",
             "appActivity": ".ui.LauncherUI",