wangkun 1 year ago
parent
commit
3df67442e1
58 changed files with 2808 additions and 2311 deletions
  1. README.MD  +39 -6
  2. benshanzhufu/benshanzhufu_main/run_bszf_recommend.py  +36 -10
  3. benshanzhufu/benshanzhufu_recommend/benshanzhufu_recommend_scheduling.py  +10 -10
  4. common/common.py  +6 -1
  5. common/public.py  +14 -0
  6. dev/dev_script/get_cpu_mem.py  +84 -0
  7. dev/dev_script/get_intervals.py  +71 -0
  8. dev/dev_script/mask_watermark.py  +158 -0
  9. dev/dev_script/mitmproxy_test.py  +89 -0
  10. dev/dev_script/shipinhao.py  +33 -0
  11. dev/dev_script/title_like.py  +30 -0
  12. dev/dev_script/xg_recommend.py  +0 -312
  13. douyin/douyin_main/run_dy_author.py  +6 -1
  14. douyin/douyin_main/run_dy_recommend.py  +7 -2
  15. ganggangdouchuan/ganggangdouchuan_main/run_ganggangdouchuan_recommend.py  +9 -1
  16. ganggangdouchuan/ganggangdouchuan_recommend/ganggangdouchuan_recommend.py  +38 -11
  17. gongzhonghao/gongzhonghao_author/gongzhonghao_author_lock.py  +30 -20
  18. gongzhonghao/gongzhonghao_follow/gongzhonghao_follow_2.py  +0 -519
  19. gongzhonghao/gongzhonghao_follow/gongzhonghao_follow_3.py  +0 -547
  20. gongzhonghao/gongzhonghao_main/run_gzh_author_old.py  +1 -1
  21. jixiangxingfu/jixiangxingfu_recommend/jixiangxingfu_recommend.py  +63 -61
  22. kanyikan/kanyikan_main/run_kykjk_recommend.py  +0 -0
  23. kanyikan/kanyikan_recommend/kanyikan_recommend.py  +1 -2
  24. kanyikan/kanyikan_recommend/kanyikan_recommend0627.py  +128 -128
  25. kanyikan/kanyikan_recommend/kanyikan_recommend0705.py  +2 -128
  26. kuaishou/kuaishou_main/run_ks_author.py  +6 -1
  27. kuaishou/kuaishou_main/run_ks_recommend.py  +6 -1
  28. main/process_mq.sh  +14 -4
  29. main/process_offline.sh  +11 -0
  30. main/start_appium.sh  +53 -0
  31. monitor/__init__.py  +1 -1
  32. monitor/cpu_memory/__init__.py  +1 -1
  33. monitor/cpu_memory/cpu_memory.py  +111 -0
  34. monitor/monitor_main/__init__.py  +1 -1
  35. monitor/monitor_main/run_cpu_memory.py  +28 -0
  36. monitor/monitor_main/run_monitor.sh  +39 -0
  37. requirements.txt  +8 -1
  38. shipinhao/shipinhao_main/run_shipinhao_search_scheduling.py  +0 -45
  39. shipinhao/shipinhao_main/run_sph_recommend.py  +124 -0
  40. shipinhao/shipinhao_main/run_sph_recommend_dev.py  +41 -0
  41. shipinhao/shipinhao_main/run_sph_search.py  +109 -0
  42. shipinhao/shipinhao_recommend/__init__.py  +3 -0
  43. shipinhao/shipinhao_recommend/recommend_h5.py  +270 -0
  44. shipinhao/shipinhao_recommend/shipinhao_recommend.py  +287 -0
  45. shipinhao/shipinhao_search/shipinhao_search.py  +13 -13
  46. shipinhao/shipinhao_search/shipinhao_search_scheduling.py  +122 -347
  47. suisuiniannianyingfuqi/suisuiniannianyingfuqi_main/run_ssnnyfq_recommend.py  +28 -1
  48. suisuiniannianyingfuqi/suisuiniannianyingfuqi_recommend/suisuiniannianyingfuqi_recommend_scheduling.py  +9 -7
  49. xiaoniangao/xiaoniangao_main/run_xng_author.py  +6 -1
  50. xiaoniangao/xiaoniangao_main/run_xng_hour.py  +8 -1
  51. xiaoniangao/xiaoniangao_main/run_xng_play.py  +6 -2
  52. xigua/xigua_main/run_xg_author.py  +5 -0
  53. xigua/xigua_main/run_xg_recommend.py  +5 -1
  54. xigua/xigua_main/run_xg_search.py  +6 -1
  55. xigua/xigua_main/run_xgms_recommend.py  +183 -0
  56. xigua/xigua_recommend/xgms_recommend.py  +323 -0
  57. zhiqingtiantiankan/zhiqingtiantiankan_recommend/zhiqingtiantiankan_recommend.py  +65 -63
  58. zhongmiaoyinxin/zhongmiaoyinxin_recommend/zhongmiaoyinxin_recommend.py  +61 -59

+ 39 - 6
README.MD

@@ -112,6 +112,16 @@ ps aux | grep shipinhao_search
 ps aux | grep shipinhao_search | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
 
+#### 207 服务器,CPU/MEMORY 监控
+```commandline
+正式环境
+* * * * * /usr/bin/sh /root/piaoquan_crawler/monitor/monitor_main/run_monitor.sh monitor/monitor_main/run_cpu_memory.py "cpumemory" "monitor" "prod"
+线下调试
+sh monitor/monitor_main/run_monitor.sh monitor/monitor_main/run_cpu_memory.py "cpumemory" "monitor" "dev"
+杀进程
+ps aux | grep run_monitor | grep -v grep | awk '{print $2}' | xargs kill -9
+```
+
 
 #### 调用MQ的爬虫进程守护: main/process_mq.sh
 ```commandline
@@ -130,27 +140,48 @@ ps aux | grep shipinhao_search | grep -v grep | awk '{print $2}' | xargs kill -9
 /bin/sh /Users/wangkun/Desktop/crawler/piaoquan_crawler/main/process_mq.sh "xng" "xiaoniangao" "play" "dev"
 /bin/sh /Users/wangkun/Desktop/crawler/piaoquan_crawler/main/process_mq.sh "xng" "xiaoniangao" "hour" "dev"
 /bin/sh /Users/wangkun/Desktop/crawler/piaoquan_crawler/main/process_mq.sh "xng" "xiaoniangao" "author" "dev"
-/bin/sh /Users/wangkun/Desktop/crawler/piaoquan_crawler/main/process_mq.sh "kyk" "kanyikan" "recommend" "dev"
+/bin/sh /Users/wangkun/Desktop/crawler/piaoquan_crawler/main/process_mq.sh "kykjk" "kanyikan" "recommend" "dev"
 
-207 服务器
-# 调用 MQ 爬虫守护进程
+
+207 服务器, 调用 MQ 爬虫守护进程
+# 岁岁年年迎福气
 * * * * * /usr/bin/sh /root/piaoquan_crawler/main/process_mq.sh "ssnnyfq" "suisuiniannianyingfuqi" "recommend" "prod"
+# 公众号(根据抓取目标用户数,自动计算需要启动 X 个进程同时抓取。每 100 个目标抓取用户,占用一个进程)
 * * * * * /usr/bin/sh /root/piaoquan_crawler/main/process_mq.sh "gzh" "gongzhonghao" "author" "prod"
+# 西瓜账号
 * * * * * /usr/bin/sh /root/piaoquan_crawler/main/process_mq.sh "xg" "xigua" "author" "prod"
+# 西瓜搜索
 * * * * * /usr/bin/sh /root/piaoquan_crawler/main/process_mq.sh "xg" "xigua" "search" "prod"
+# 本山祝福
 * * * * * /usr/bin/sh /root/piaoquan_crawler/main/process_mq.sh "bszf" "benshanzhufu" "recommend" "prod"
+# 快手推荐
 * * * * * /usr/bin/sh /root/piaoquan_crawler/main/process_mq.sh "ks" "kuaishou" "recommend" "prod"
+# 快手账号
 * * * * * /usr/bin/sh /root/piaoquan_crawler/main/process_mq.sh "ks" "kuaishou" "author" "prod"
+# 抖音推荐
 * * * * * /usr/bin/sh /root/piaoquan_crawler/main/process_mq.sh "dy" "douyin" "recommend" "prod"
+# 抖音账号
 * * * * * /usr/bin/sh /root/piaoquan_crawler/main/process_mq.sh "dy" "douyin" "author" "prod"
+# 小年糕播放榜
 * * * * * /usr/bin/sh /root/piaoquan_crawler/main/process_mq.sh "xng" "xiaoniangao" "play" "prod"
+# 小年糕上升榜
 * * * * * /usr/bin/sh /root/piaoquan_crawler/main/process_mq.sh "xng" "xiaoniangao" "hour" "prod"
+# 小年糕账号
 * * * * * /usr/bin/sh /root/piaoquan_crawler/main/process_mq.sh "xng" "xiaoniangao" "author" "prod"
+# 看一看推荐 1
 * * * * * /bin/sh /Users/lieyunye/Desktop/crawler/piaoquan_crawler/main/process_mq.sh "kyk" "kanyikan" "recommend" "prod"
-* * * * * /bin/sh /Users/kanyikan/Desktop/crawler/piaoquan_crawler/main/process_mq.sh "kykmv" "kanyikan" "recommend" "prod"
+# 看一看推荐健康类
+* * * * * /bin/sh /Users/kanyikan/Desktop/crawler/piaoquan_crawler/main/process_mq.sh "kykjk" "kanyikan" "recommend" "prod"
+# 西瓜推荐 1
 * * * * * /bin/sh /Users/kanyikan/Desktop/crawler/piaoquan_crawler/main/process_mq.sh "xg" "xigua" "recommend" "prod"
-
-线下服务器
+# 西瓜推荐民生类
+* * * * * /bin/sh /Users/piaoquan/Desktop/piaoquan_crawler/main/process_mq.sh "xgms" "xigua" "recommend" "prod"
+# 启动 Appium 
+* * * * * /bin/sh /Users/lieyunye/Desktop/crawler/piaoquan_crawler/main/start_appium.sh "recommend" "shipinhao" "prod"
+# 视频号推荐
+* * * * * /bin/sh /Users/lieyunye/Desktop/crawler/piaoquan_crawler/main/process_mq.sh "sph" "shipinhao" "recommend" "prod"
+# 视频号搜索
+* * * * * /bin/sh /Users/piaoquan/Desktop/piaoquan_crawler/main/process_mq.sh "sph" "shipinhao" "search" "prod"
 
 杀进程
 ps aux | grep suisuiniannianyingfuqi | grep -v grep | awk '{print $2}' | xargs kill -9
@@ -207,4 +238,6 @@ workalendar==17.0.0
 aliyun_python_sdk==2.2.0
 # pip3 install opencv-python / pip3 install opencv-contrib-python
 opencv-python~=4.8.0.74
+# pip3 install scikit-learn
+scikit-learn~=1.3.0
 ```

+ 36 - 10
benshanzhufu/benshanzhufu_main/run_bszf_recommend.py

@@ -13,7 +13,6 @@ from common.scheduling_db import MysqlHelper
 from benshanzhufu.benshanzhufu_recommend.benshanzhufu_recommend_scheduling import BenshanzhufuRecommend
 
 
-
 def main(log_type, crawler, topic_name, group_id, env):
     consumer = get_consumer(topic_name, group_id)
     # 长轮询表示如果Topic没有消息,则客户端请求会在服务端挂起3秒,3秒内如果有消息可以消费则立即返回响应。
@@ -25,11 +24,16 @@ def main(log_type, crawler, topic_name, group_id, env):
                                           f'WaitSeconds:{wait_seconds}\n'
                                           f'TopicName:{topic_name}\n'
                                           f'MQConsumer:{group_id}')
+    Common.logging(log_type, crawler, env, f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                            f'WaitSeconds:{wait_seconds}\n'
+                                            f'TopicName:{topic_name}\n'
+                                            f'MQConsumer:{group_id}')
     while True:
         try:
             # 长轮询消费消息。
             recv_msgs = consumer.consume_message(batch, wait_seconds)
             for msg in recv_msgs:
+                benshanzhufu_start_time = int(time.time())
                 Common.logger(log_type, crawler).info(f"Receive\n"
                                                       f"MessageId:{msg.message_id}\n"
                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
@@ -40,6 +44,16 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                       f"NextConsumeTime:{msg.next_consume_time}\n"
                                                       f"ReceiptHandle:{msg.receipt_handle}\n"
                                                       f"Properties:{msg.properties}")
+                Common.logging(log_type, crawler, env, f"Receive\n"
+                                                      f"MessageId:{msg.message_id}\n"
+                                                      f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                      f"MessageTag:{msg.message_tag}\n"
+                                                      f"ConsumedTimes:{msg.consumed_times}\n"
+                                                      f"PublishTime:{msg.publish_time}\n"
+                                                      f"Body:{msg.message_body}\n"
+                                                      f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                      f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                      f"Properties:{msg.properties}")
                 # ack_mq_message
                 ack_message(log_type=log_type, crawler=crawler, recv_msgs=recv_msgs, consumer=consumer)
 
@@ -54,37 +68,49 @@ def main(log_type, crawler, topic_name, group_id, env):
                     our_uid_list.append(user["uid"])
                 our_uid = random.choice(our_uid_list)
                 Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+                Common.logging(log_type, crawler, env, f"调度任务:{task_dict}")
                 Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
+                Common.logging(log_type, crawler, env, f"抓取规则:{rule_dict}")
                 Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
+                Common.logging(log_type, crawler, env, f"用户列表:{user_list}\n")
                 Common.logger(log_type, crawler).info(f'开始抓取:{task_dict["taskName"]}\n')
+                Common.logging(log_type, crawler, env, f'开始抓取:{task_dict["taskName"]}\n')
                 BenshanzhufuRecommend.get_videoList(log_type=log_type,
                                                     crawler=crawler,
                                                     our_uid=our_uid,
                                                     rule_dict=rule_dict,
                                                     env=env)
                 # Common.del_logs(log_type, crawler)
-                Common.logger(log_type, crawler).info('抓取一轮结束\n')
+                Common.logger(log_type, crawler).info('抓取一轮结束')
+                Common.logging(log_type, crawler, env, '抓取一轮结束')
+                benshanzhufu_end_time = int(time.time())
+                benshanzhufu_duration = benshanzhufu_end_time - benshanzhufu_start_time
+                Common.logger(log_type, crawler).info(f"duration {benshanzhufu_duration}\n")
+                Common.logging(log_type, crawler, env, f"duration {benshanzhufu_duration}\n")
 
         except MQExceptionBase as err:
             # Topic中没有消息可消费。
             if err.type == "MessageNotExist":
                 Common.logger(log_type, crawler).info(f"No new message! RequestId:{err.req_id}\n")
+                Common.logging(log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n")
                 continue
 
             Common.logger(log_type, crawler).info(f"Consume Message Fail! Exception:{err}\n")
+            Common.logging(log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n")
             time.sleep(2)
             continue
 
+
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler')  ## 添加参数
-    parser.add_argument('--topic_name')  ## 添加参数
-    parser.add_argument('--group_id')  ## 添加参数
-    parser.add_argument('--env')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
+    parser = argparse.ArgumentParser()  # 新建参数解释器对象
+    parser.add_argument('--log_type', type=str)  # 添加参数,注明参数类型
+    parser.add_argument('--crawler')  # 添加参数
+    parser.add_argument('--topic_name')  # 添加参数
+    parser.add_argument('--group_id')  # 添加参数
+    parser.add_argument('--env')  # 添加参数
+    args = parser.parse_args()  # 参数赋值,也可以通过终端赋值
     main(log_type=args.log_type,
          crawler=args.crawler,
          topic_name=args.topic_name,
          group_id=args.group_id,
-         env=args.env)
+         env=args.env)
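This commit adds the same start-time / end-time / duration logging by hand to several runners (benshanzhufu above, douyin, ganggangdouchuan, and others below). A small context manager could centralize that pattern; the sketch below is a hypothetical helper, not part of the commit, and only reuses the Common.logger / Common.logging calls already shown in this diff.

```python
# hypothetical helper (e.g. in common/public.py); not part of this commit
import time
from contextlib import contextmanager

from common.common import Common


@contextmanager
def log_duration(log_type, crawler, env):
    """Log how long the wrapped block took, using the existing log helpers."""
    start_time = int(time.time())
    try:
        yield
    finally:
        duration = int(time.time()) - start_time
        Common.logger(log_type, crawler).info(f"duration {duration}")
        Common.logging(log_type, crawler, env, f"duration {duration}")


# usage inside the consume loop would then be:
# with log_duration(log_type, crawler, env):
#     BenshanzhufuRecommend.get_videoList(log_type=log_type, crawler=crawler,
#                                         our_uid=our_uid, rule_dict=rule_dict, env=env)
```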

+ 10 - 10
benshanzhufu/benshanzhufu_recommend/benshanzhufu_recommend_scheduling.py

@@ -13,9 +13,8 @@ from hashlib import md5
 from urllib import parse
 import requests
 import urllib3
-
-from common.mq import MQ
 sys.path.append(os.getcwd())
+from common.mq import MQ
 from common.common import Common
 from common.scheduling_db import MysqlHelper
 from common.feishu import Feishu
@@ -29,7 +28,6 @@ class BenshanzhufuRecommend:
 
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
-        # sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
         sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
         repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)
@@ -64,15 +62,19 @@ class BenshanzhufuRecommend:
             r = requests.get(headers=header, url=url, proxies=proxies, verify=False)
             if r.status_code != 200:
                 Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.text}\n")
+                Common.logging(log_type, crawler, env, f"get_videoList:{r.status_code}, {r.text}\n")
                 return
             elif r.json()['message'] != "list success":
                 Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.json()}\n")
+                Common.logging(log_type, crawler, env, f"get_videoList:{r.status_code}, {r.text}\n")
                 return
             elif "data" not in r.json():
                 Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.json()}\n")
+                Common.logging(log_type, crawler, env, f"get_videoList:{r.status_code}, {r.text}\n")
                 return
             elif len(r.json()['data']["list"]) == 0:
                 Common.logger(log_type, crawler).info(f"没有更多数据了~ {r.json()}\n")
+                Common.logging(log_type, crawler, env, f"没有更多数据了~ {r.json()}\n")
                 return
             else:
                 # 翻页
@@ -105,12 +107,15 @@ class BenshanzhufuRecommend:
                     }
                     for k, v in video_dict.items():
                         Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
 
                     # 过滤无效视频
                     if video_dict["video_id"] == "" or video_dict["cover_url"] == "" or video_dict["video_url"] == "":
                         Common.logger(log_type, crawler).info("无效视频\n")
+                        Common.logging(log_type, crawler, env, "无效视频\n")
                     elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                         Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                        Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                     elif any(str(word) if str(word) in video_dict["video_title"] else False
                              for word in get_config_from_mysql(log_type=log_type,
                                                                source=crawler,
@@ -118,15 +123,11 @@ class BenshanzhufuRecommend:
                                                                text="filter",
                                                                action="")) is True:
                         Common.logger(log_type, crawler).info('已中过滤词\n')
+                        Common.logging(log_type, crawler, env, '已中过滤词\n')
                     elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                         Common.logger(log_type, crawler).info('视频已下载\n')
+                        Common.logging(log_type, crawler, env, '视频已下载\n')
                     else:
-                        # cls.download_publish(log_type=log_type,
-                        #                      crawler=crawler,
-                        #                      our_uid=our_uid,
-                        #                      video_dict=video_dict,
-                        #                      rule_dict=rule_dict,
-                        #                      env=env)
                         video_dict["out_user_id"] = video_dict["user_id"]
                         video_dict["platform"] = crawler
                         video_dict["strategy"] = log_type
@@ -138,7 +139,6 @@ class BenshanzhufuRecommend:
                         video_dict["publish_time"] = video_dict["publish_time_str"]
                         video_dict["fans_cnt"] = 0
                         video_dict["videos_cnt"] = 0
-
                         mq.send_msg(video_dict)
 
                         # except Exception as e:

+ 6 - 1
common/common.py

@@ -85,7 +85,12 @@ class Common:
             project = 'crawler-log-prod'
             logstore = 'crawler-log-prod'
             endpoint = 'cn-hangzhou.log.aliyuncs.com'
-        elif crawler == "shipinhao" or crawler == "kanyikan":
+        elif crawler == "shipinhao"\
+            or crawler == "kanyikan"\
+            or crawler == "ganggangdouchuan"\
+            or crawler == "zhiqingtiantiankan"\
+            or crawler == "jixiangxingfu"\
+            or crawler == "zhongmiaoyinxin":
             project = 'crawler-log-prod'
             logstore = 'crawler-log-prod'
             endpoint = 'cn-hangzhou.log.aliyuncs.com'
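Since all six platforms in the new elif branch map to the same project/logstore/endpoint, the membership test can be kept in one place as more mini-program crawlers are added. A behavior-preserving sketch; the constant and function names are illustrative, not part of the commit.

```python
# illustrative refactor of the branch above; names are assumptions, not from this commit
ALIYUN_PROD_LOG_CRAWLERS = {
    "shipinhao", "kanyikan", "ganggangdouchuan",
    "zhiqingtiantiankan", "jixiangxingfu", "zhongmiaoyinxin",
}


def aliyun_log_config(crawler):
    """Return (project, logstore, endpoint) for crawlers that share the prod logstore."""
    if crawler in ALIYUN_PROD_LOG_CRAWLERS:
        return "crawler-log-prod", "crawler-log-prod", "cn-hangzhou.log.aliyuncs.com"
    return None  # let Common.logging fall through to its other branches
```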

+ 14 - 0
common/public.py

@@ -4,6 +4,8 @@
 import requests
 from mq_http_sdk.mq_client import *
 from mq_http_sdk.mq_exception import MQExceptionBase
+from sklearn.feature_extraction.text import TfidfVectorizer
+from sklearn.metrics.pairwise import cosine_similarity
 import os, sys, jieba
 import time
 import random
@@ -26,6 +28,18 @@ def get_user_from_mysql(log_type, crawler, source, env, action=''):
         Common.logger(log_type, crawler).warning(f"爬虫:{crawler},没有查到抓取名单")
         return []
 
+def similarity(title1, title2):
+    # 分词
+    seg1 = jieba.lcut(title1)
+    seg2 = jieba.lcut(title2)
+
+    # 构建TF-IDF向量
+    tfidf_vectorizer = TfidfVectorizer()
+    tfidf_matrix = tfidf_vectorizer.fit_transform(["".join(seg1), "".join(seg2)])
+
+    # 计算余弦相似度
+    similar = cosine_similarity(tfidf_matrix[0], tfidf_matrix[1])[0][0]
+    return similar
 
 def title_like(log_type, crawler, platform, title, env):
     """

+ 84 - 0
dev/dev_script/get_cpu_mem.py

@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/19
+import socket
+import psutil
+
+
+class GetCpuMen:
+    @classmethod
+    def get_ip_address(cls):
+        try:
+            # 创建一个 UDP 套接字
+            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+            # 连接到一个外部 IP 地址
+            sock.connect(("8.8.8.8", 80))
+            # 获取本地 IP 地址
+            local_ip = sock.getsockname()[0]
+            return local_ip
+        except socket.error:
+            return "无法获取本机 IP 地址"
+
+    @classmethod
+    def get_pid(cls, script):
+        # 遍历所有正在运行的进程
+        for proc in psutil.process_iter():
+            try:
+                # 获取进程的命令行参数
+                cmds = proc.cmdline()
+                # 检查命令行参数是否包含爬虫脚本的名称或关键字
+                for cmd in cmds:
+                    if script in cmd:
+
+                        print(f"cmd:{cmd}")
+                        # 获取进程的PID
+                        pid = proc.pid
+                        return pid
+            except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
+                pass
+
+    @classmethod
+    def get_cpu_mem(cls, script):
+        import psutil
+
+        # 获取当前进程的PID
+        pid = cls.get_pid(script)
+        print(f"pid:{pid}")
+        # 获取CPU的使用情况
+        cpu_percent = round(psutil.Process(pid).cpu_percent(), 2)
+
+        # 获取内存的使用情况
+        memory_percent = round(psutil.Process(pid).memory_percent(), 2)
+
+        print(f"CPU使用率:{cpu_percent}")
+        print(f"内存使用率:{memory_percent}")
+
+    @classmethod
+    def get_all_cpu_mem(cls):
+        script_list = ["run_xg_search",
+                       "run_xg_author",
+                       "run_xng_author",
+                       "run_xng_play",
+                       "run_xng_hour",
+                       "run_dy_author",
+                       "run_dy_recommend",
+                       "run_ks_recommend",
+                       "run_ks_author",
+                       "run_bszf_recommend",
+                       "run_ssnnyfq_recommend",
+                       "run_gzh_author",
+                       "run_weixinzhishu_score",
+                       "get_cpu_mem"]
+
+        for scrip in script_list:
+            print(f"scrip:{scrip}")
+            cls.get_cpu_mem(scrip)
+            print("\n")
+
+
+if __name__ == "__main__":
+    # GetCpuMen.get_cpu_mem("get_cpu_mem")
+    # GetCpuMen.get_all_cpu_mem()
+    print(GetCpuMen.get_ip_address())
+
+    pass
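One caveat with the script above: psutil's Process.cpu_percent() returns 0.0 on the first call for a given Process object when no interval is passed, because it compares against the previous call. A sketch of a sampled reading; the function name and the 1-second sample window are assumptions, not part of the commit.

```python
import psutil


def sample_cpu_mem(pid, sample_seconds=1.0):
    """Return (cpu_percent, memory_percent) for a PID using a short blocking sample."""
    proc = psutil.Process(pid)
    # interval=sample_seconds blocks briefly so the CPU reading is meaningful
    cpu_percent = round(proc.cpu_percent(interval=sample_seconds), 2)
    memory_percent = round(proc.memory_percent(), 2)
    return cpu_percent, memory_percent
```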

+ 71 - 0
dev/dev_script/get_intervals.py

@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/18
+import datetime
+
+# 读取日志文件并将每行日志存储到一个列表中
+# with open('../logs/benshanzhufu-recommend-2023-07-17.log', 'r') as file:
+# with open('../logs/douyin-author-2023-07-17.log', 'r') as file:
+with open('../logs/douyin-recommend-2023-07-19.log', 'r') as file:
+# with open('../logs/kuaishou-author-2023-07-17.log', 'r') as file:
+# with open('../logs/kuaishou-recommend-2023-07-17.log', 'r') as file:
+# with open('../logs/xigua-author-2023-07-17.log', 'r') as file:
+# with open('../logs/xigua-search-2023-07-17.log', 'r') as file:
+# with open('../logs/xiaoniangao-author-2023-07-17.log', 'r') as file:
+# with open('../logs/xiaoniangao-play-2023-07-17.log', 'r') as file:
+# with open('../logs/xiaoniangao-hour-2023-07-17.log', 'r') as file:
+# with open('../logs/suisuiniannianyingfuqi-recommend-2023-07-17.log', 'r') as file:
+# with open('../logs/gongzhonghao-author1-2023-07-18.log', 'r') as file:
+# with open('../logs/gongzhonghao-author2-2023-07-18.log', 'r') as file:
+# with open('../logs/gongzhonghao-author3-2023-07-18.log', 'r') as file:
+# with open('../logs/gongzhonghao-author4-2023-07-18.log', 'r') as file:
+# with open('../logs/gongzhonghao-author5-2023-07-18.log', 'r') as file:
+# with open('../logs/gongzhonghao-author6-2023-07-18.log', 'r') as file:
+    log_lines = file.readlines()
+
+# 存储间隔时间的列表
+intervals = []
+
+# 遍历日志列表,计算相邻两条日志的时间间隔
+for i in range(1, len(log_lines)):
+    if "2023-" not in log_lines[i-1] or "2023-" not in log_lines[i]:
+        continue
+    # 解析时间戳
+    timestamp1 = datetime.datetime.strptime(log_lines[i - 1].split(".")[0], '%Y-%m-%d %H:%M:%S')
+    timestamp2 = datetime.datetime.strptime(log_lines[i].split(".")[0], '%Y-%m-%d %H:%M:%S')
+
+    # 计算时间间隔
+    interval = timestamp2 - timestamp1
+
+    # 将时间间隔添加到间隔时间列表中
+    intervals.append(interval)
+
+# 对间隔时间列表进行倒序排序
+intervals.sort(reverse=True)
+
+# 取前10条间隔时间
+top_10_intervals = intervals[:10]
+# 取前10条间隔时间的秒数
+top_10_intervals_seconds = [int(interval.total_seconds()) for interval in top_10_intervals]
+
+# 打印结果
+print(top_10_intervals_seconds)
+
+# benshanzhufu  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+# douyin_author [30, 28, 25, 20, 18, 18, 17, 17, 17, 16]
+# douyin_recommend  [62, 50, 38, 34, 33, 31, 31, 31, 31, 31]
+# kuaishou_author  [31, 27, 23, 21, 21, 21, 19, 19, 17, 15]
+# kuaishou_recommend  [27, 23, 23, 23, 23, 22, 22, 22, 21, 21]
+# xigua_author  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+# xigua_search  [18, 14, 14, 13, 13, 12, 11, 11, 11, 10]
+# xiaoniangao_author  [4, 4, 4, 4, 4, 4, 4, 4, 4, 4]
+# xiaoniangao_play  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+# xiaoniangao_hour  [61, 3, 2, 2, 2, 2, 2, 2, 1, 1]
+# suisuiniannian  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+# gzh1  [26, 26, 26, 25, 25, 25, 25, 24, 24, 24]
+# gzh2  [28, 26, 25, 25, 24, 24, 24, 24, 24, 24]
+# gzh3  [27, 26, 26, 25, 25, 25, 24, 24, 24, 24]
+# gzh4  [26, 26, 25, 24, 24, 24, 24, 24, 24, 24]
+# gzh5  [29, 26, 25, 25, 24, 24, 24, 24, 24, 24]
+# gzh6  [26, 25, 25, 25, 25, 24, 24, 24, 24, 24]
+
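The same gap analysis could be wrapped as a function instead of swapping commented-out open() lines per log file. A sketch under the script's own assumptions (each kept line starts with a "2023-…" timestamp followed by a dot); the function name and defaults are illustrative.

```python
import datetime


def top_log_intervals(log_path, top_n=10):
    """Return the top_n largest gaps (seconds) between consecutive timestamped lines."""
    intervals = []
    prev = None
    with open(log_path, "r") as file:
        for line in file:
            if "2023-" not in line:
                continue
            ts = datetime.datetime.strptime(line.split(".")[0], "%Y-%m-%d %H:%M:%S")
            if prev is not None:
                intervals.append(int((ts - prev).total_seconds()))
            prev = ts
    return sorted(intervals, reverse=True)[:top_n]


# print(top_log_intervals("../logs/douyin-recommend-2023-07-19.log"))
```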

+ 158 - 0
dev/dev_script/mask_watermark.py

@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/21
+import cv2
+import numpy as np
+
+
+class MaskWatermark:
+    @classmethod
+    def find_watermark(cls, image_path):
+        """
+        基于OpenCV自动识别水印并获取其位置
+        :param image_path:水印
+        :return:watermark_area
+        """
+        # 读取图像
+        image = cv2.imread(image_path)
+
+        # 将图像转换为灰度图像
+        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+
+        # 使用边缘检测算法如Canny来检测图像中的边缘信息
+        edges = cv2.Canny(gray, 100, 200)
+
+        # 使用霍夫变换检测直线,以获得边缘中的直线段
+        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=100, minLineLength=100, maxLineGap=10)
+
+        # 对检测到的直线进行筛选,以识别可能表示水印的直线
+        watermark_lines = []
+        if lines is not None:
+            for line in lines:
+                x1, y1, x2, y2 = line[0]
+                # 根据实际情况确定水印直线的特征,例如长度、斜率等
+                # 这里只是一个示例,您需要根据具体情况进行调整
+                if abs(y2 - y1) < 5 and abs(x2 - x1) > 50:
+                    watermark_lines.append(line)
+
+        # 根据检测到的水印直线,计算水印区域的位置和大小
+        if len(watermark_lines) > 1:
+            x_coords = [line[0][0] for line in watermark_lines] + [line[0][2] for line in watermark_lines]
+            y_coords = [line[0][1] for line in watermark_lines] + [line[0][3] for line in watermark_lines]
+            min_x = min(x_coords)
+            max_x = max(x_coords)
+            min_y = min(y_coords)
+            max_y = max(y_coords)
+            watermark_area = (min_x, min_y, max_x - min_x, max_y - min_y)  # 水印区域的位置和大小
+        else:
+            watermark_area = None
+
+        return watermark_area
+
+    @classmethod
+    def mask_watermark(cls, input_path, output_path, watermark_area):
+        # 读取视频
+        video = cv2.VideoCapture(input_path)
+
+        # 获取视频的宽度和高度
+        width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
+        height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+        # 创建输出视频对象
+        fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # 可根据需要更换视频编码器
+        output = cv2.VideoWriter(output_path, fourcc, 30.0, (width, height))
+
+        while True:
+            ret, frame = video.read()
+
+            if not ret:
+                break
+
+            # 在水印区域替换成其他像素或进行遮挡处理
+            x, y, w, h = watermark_area
+            frame[y:y + h, x:x + w] = 0  # 这里将水印区域像素设为0,可根据需要进行更复杂的像素替换或遮挡处理
+
+            # 将处理后的帧写入输出视频
+            output.write(frame)
+
+        # 释放资源
+        video.release()
+        output.release()
+
+        print("成功去除水印,并保存为", output_path)
+
+    @classmethod
+    def remove_watermark(cls, video_path, output_path, threshold_area=1000):
+        # 读取视频
+        video = cv2.VideoCapture(video_path)
+
+        # 获取视频的宽度和高度
+        width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
+        height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+        # 创建输出视频对象
+        fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # 可根据需要更换视频编码器
+        output = cv2.VideoWriter(output_path, fourcc, 30.0, (width, height))
+
+        # 读取第一帧作为背景帧
+        ret, background = video.read()
+        if not ret:
+            print("无法读取背景帧")
+            return
+
+        while True:
+            ret, frame = video.read()
+
+            if not ret:
+                break
+
+            # 计算当前帧与背景帧的差值
+            diff = cv2.absdiff(frame, background)
+
+            # 将差值转换为灰度图像
+            gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
+
+            # 应用阈值二值化,通过调整阈值以过滤差异
+            _, threshold = cv2.threshold(gray, 25, 255, cv2.THRESH_BINARY)
+
+            # 进行形态学操作,填充小区域,平滑边缘
+            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
+            dilated = cv2.dilate(threshold, kernel, iterations=3)
+
+            # 寻找轮廓
+            contours, _ = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+
+            # 去除检测到的水印轮廓区域
+            for contour in contours:
+                # 这里只是一个示例,您可以根据具体情况进行调整,例如根据轮廓面积、形状等进行过滤
+                if cv2.contourArea(contour) > threshold_area:
+                    # 在当前帧上用背景帧进行填充
+                    cv2.drawContours(frame, [contour], -1, (0, 0, 0), cv2.FILLED)
+
+            # 将处理后的帧写入输出视频
+            output.write(frame)
+
+        # 释放资源
+        video.release()
+        output.release()
+
+
+if __name__ == "__main__":
+    # 示例调用
+    image_path = 'image.jpg'  # 替换为待识别水印的图像路径
+    watermark_area = MaskWatermark.find_watermark(image_path)
+    print("水印区域的位置和大小:", watermark_area)
+
+    # 示例调用
+    input_path = 'input.mp4'  # 替换为输入视频文件路径
+    output_path = 'output.mp4'  # 替换为输出视频文件路径
+    watermark_area = (100, 100, 200, 200)  # 替换为水印区域的位置和大小,表示为 (x, y, width, height)
+    MaskWatermark.mask_watermark(input_path, output_path, watermark_area)
+
+    # 示例调用
+    video_path = 'video.mp4'  # 替换为视频文件路径
+    output_path = 'output.mp4'  # 替换为输出视频文件路径
+    threshold_area = 1000  # 轮廓区域的阈值,根据具体情况进行调整
+    MaskWatermark.remove_watermark(video_path, output_path, threshold_area)
+    print("成功去除水印,并保存为", output_path)
+
+    pass

+ 89 - 0
dev/dev_script/mitmproxy_test.py

@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/24
+import os
+import time
+
+from mitmproxy import http, proxy, options, proxyconfig
+from mitmproxy.proxy.config import ProxyConfig
+from mitmproxy.tools.dump import DumpMaster
+from selenium import webdriver
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
+
+
+class ProxyData:
+    requests_data = []
+    response_data = []
+
+    @classmethod
+    def start_proxy(cls):
+        # 创建代理配置选项
+        opts = options.Options(listen_host='0.0.0.0', listen_port=8888)
+
+        # 创建代理配置
+        config = ProxyConfig(opts)
+
+        # 创建DumpMaster实例
+        master = DumpMaster(opts)
+        master.server = config
+
+        # 启动代理
+        print("Proxy started")
+        master.run()
+
+    @classmethod
+    def intercept_request(cls, flow: http.HTTPFlow):
+        # 拦截请求
+        request_data = {
+            'url': flow.request.url,
+            'method': flow.request.method,
+            'headers': dict(flow.request.headers),
+            'content': flow.request.content.decode('utf-8')
+        }
+        cls.requests_data.append(request_data)
+
+    @classmethod
+    def intercept_response(cls, flow: http.HTTPFlow):
+        # 拦截响应
+        response_data = {
+            'url': flow.request.url,
+            'status_code': flow.response.status_code,
+            'headers': dict(flow.response.headers),
+            'content': flow.response.content.decode('utf-8')
+        }
+        cls.response_data.append(response_data)
+
+    @classmethod
+    def start_selenium(cls):
+        quit_cmd = "ps aux | grep Chrome | grep -v grep | awk '{print $2}' | xargs kill -9"
+        os.system(quit_cmd)
+        time.sleep(1)
+        # 启动 Chrome,指定端口号:8888
+        cmd = 'open -a "Google Chrome" --args --remote-debugging-port=8888'
+        os.system(cmd)
+        # 打印请求配置
+        ca = DesiredCapabilities.CHROME
+        ca["goog:loggingPrefs"] = {"performance": "ALL"}
+        # 配置 chromedriver
+        chromedriver = "/Users/wangkun/Downloads/chromedriver/chromedriver_v114/chromedriver"
+        # 初始化浏览器
+        browser = webdriver.ChromeOptions()
+        browser.add_experimental_option("debuggerAddress", "127.0.0.1:8888")
+        # driver初始化
+        driver = webdriver.Chrome(desired_capabilities=ca, options=browser, service=Service(chromedriver))
+        driver.implicitly_wait(10)
+        print("打开抖音推荐页")
+        driver.get(f"https://www.douyin.com/")
+
+
+if __name__ == "__main__":
+    ProxyData.start_proxy()
+    ProxyData.start_selenium()
+    print("requests_data:", ProxyData.requests_data)
+    print("response_data:", ProxyData.response_data)
+
+    # 分析包含链接 www.douyin.com 的响应数据
+    for response in ProxyData.response_data:
+        if "www.douyin.com" in response['url']:
+            print("Douyin response:", response)

+ 33 - 0
dev/dev_script/shipinhao.py

@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/21
+import requests
+from bs4 import BeautifulSoup
+
+class Shipinhao:
+    @classmethod
+    def get_shipinhao(cls):
+        # 微信视频号推荐页面的URL
+        url = 'https://mp.weixin.qq.com/mp/videoplayer?action=get_recommend_video_list&__biz=MzI1OTQxMjE0Nw==&uin=&key=&pass_ticket=&wxtoken=777&devicetype=Windows+10&clientversion=1000&appmsg_token=cc11373ab7db78508003b6d2f46bab1a779666d3&f=json'
+
+        # 发送GET请求并获取响应
+        response = requests.get(url)
+
+        # 解析响应的JSON数据
+        data = response.json()
+        print(f'data: {data}')
+        # 解析推荐视频列表
+        video_list = data['recommend_video_list']
+        for video in video_list:
+            # 获取视频标题
+            title = video['title']
+            # 获取视频URL
+            video_url = video['video_url']
+            # 打印视频标题和URL
+            print(f'Title: {title}')
+            print(f'Video URL: {video_url}')
+            print('---')
+
+
+if __name__ == "__main__":
+    Shipinhao.get_shipinhao()

+ 30 - 0
dev/dev_script/title_like.py

@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/26
+import jieba
+from sklearn.feature_extraction.text import TfidfVectorizer
+from sklearn.metrics.pairwise import cosine_similarity
+
+
+class TitleLike:
+
+    @classmethod
+    def similarity(cls, title1, title2):
+        # 分词
+        seg1 = jieba.lcut(title1)
+        seg2 = jieba.lcut(title2)
+
+        # 构建TF-IDF向量
+        tfidf_vectorizer = TfidfVectorizer()
+        # tfidf_matrix = tfidf_vectorizer.fit_transform([title1, title2])
+        tfidf_matrix = tfidf_vectorizer.fit_transform(["".join(seg1), "".join(seg2)])
+
+        # 计算余弦相似度
+        similarity = cosine_similarity(tfidf_matrix[0], tfidf_matrix[1])[0][0]
+        return similarity
+
+if __name__ == "__main__":
+    t1 = """#发现未来 7月18日(发布)广东(发布)男生满心欢喜准备迎接喜欢的女孩 下一秒"""
+    t2 = "...7月18日(发布)广东(发布)男生满心欢喜准备迎接喜欢的女孩 下一秒其他出"
+    # t2 = "2月23日,广东。男子地铁口挥拳重击抱娃女子。网友:对于家暴零容忍"
+    print(TitleLike.similarity(t1, t2))

+ 0 - 312
dev/dev_script/xg_recommend.py

@@ -1,312 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/7/6
-import os
-import sys
-import time
-import cv2
-from selenium.webdriver.common.keys import Keys
-from selenium.webdriver import DesiredCapabilities
-from selenium import webdriver
-from selenium.webdriver.chrome.service import Service
-from selenium.webdriver.common.by import By
-sys.path.append(os.getcwd())
-from common.common import Common
-
-
-class XGRecommend(object):
-
-    def __init__(self, log_type, crawler, env):
-        """
-        本地启动 Chrome,指定端口号:12306
-        open -a "Google Chrome" --args --remote-debugging-port=12306
-        """
-        Common.logger(log_type, crawler).info("启动 Chrome 浏览器")
-        cmd = 'open -a "Google Chrome" --args --remote-debugging-port=12306'
-        os.system(cmd)
-
-        if env == "dev":
-            chromedriver = "/Users/wangkun/Downloads/chromedriver/chromedriver_v114/chromedriver"
-        else:
-            chromedriver = "/usr/bin/chromedriver"
-
-        # 打印请求配置
-        ca = DesiredCapabilities.CHROME
-        ca["goog:loggingPrefs"] = {"performance": "ALL"}
-        # 初始化浏览器
-        self.browser = webdriver.ChromeOptions()
-        self.browser.add_experimental_option("debuggerAddress", "127.0.0.1:12306")
-        # # 设置user-agent
-        # self.browser.add_argument(
-        #     f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
-        # # 去掉提示:Chrome正收到自动测试软件的控制
-        # self.browser.add_argument('--disable-infobars')
-
-        # # 禁用GPU加速
-        # self.browser.add_argument('--disable-gpu')
-        # # 关闭开发者模式
-        # self.browser.add_experimental_option("useAutomationExtension", False)
-        # # 以键值对的形式加入参数
-        # self.browser.add_experimental_option('excludeSwitches', ['enable-automation'])
-        # # 禁用启用Blink运行时的功能
-        # self.browser.add_argument('--disable-blink-features=AutomationControlled')
-        # 不打开浏览器运行
-        # self.browser.add_argument("--headless")
-        # # linux 环境中,静默模式必须要加的参数
-        # self.browser.add_argument("--no-sandbox")
-        # # 设置浏览器size
-        # self.browser.add_argument("--window-size=1920,1080")
-
-        # driver初始化
-        self.driver = webdriver.Chrome(desired_capabilities=ca, options=self.browser, service=Service(chromedriver))
-        self.driver.implicitly_wait(10)
-        Common.logger(log_type, crawler).info("打开西瓜推荐页")
-        self.driver.get(f"https://www.ixigua.com/")
-        # 在当前页面打开新的标签页
-        self.driver.find_element(By.TAG_NAME, 'body').send_keys(Keys.COMMAND + 't')
-        # 切换到新打开的标签页
-        self.driver.switch_to.window(self.driver.window_handles[-1])
-        self.username = "19831265541"
-        self.password = "Test111111"
-        time.sleep(2)
-
-    def quit(self, log_type, crawler):
-        Common.logger(log_type, crawler).info("退出浏览器")
-        self.driver.quit()
-
-    #  传入滑块背景图片本地路径和滑块本地路径,返回滑块到缺口的距离
-    @staticmethod
-    def findPic(log_type, crawler, img_bg_path, img_slider_path):
-        """
-        找出图像中最佳匹配位置
-        :param log_type: log
-        :param crawler: 爬虫
-        :param img_bg_path: 滑块背景图本地路径
-        :param img_slider_path: 滑块图片本地路径
-        :return: 返回最差匹配、最佳匹配对应的x坐标
-        """
-
-        # 读取滑块背景图片,参数是图片路径,Opencv2默认使用BGR模式
-        # cv2.imread()是 image read的简写
-        # img_bg 是一个numpy库ndarray数组对象
-        img_bg = cv2.imread(img_bg_path)
-
-        # 对滑块背景图片进行处理,由BGR模式转为gray模式(即灰度模式,也就是黑白图片)
-        # 为什么要处理? BGR模式(彩色图片)的数据比黑白图片的数据大,处理后可以加快算法的计算
-        # BGR模式:常见的是RGB模式
-        # R代表红,red; G代表绿,green;  B代表蓝,blue。
-        # RGB模式就是,色彩数据模式,R在高位,G在中间,B在低位。BGR正好相反。
-        # 如红色:RGB模式是(255,0,0),BGR模式是(0,0,255)
-        img_bg_gray = cv2.cvtColor(img_bg, cv2.COLOR_BGR2GRAY)
-
-        # 读取滑块,参数1是图片路径,参数2是使用灰度模式
-        img_slider_gray = cv2.imread(img_slider_path, 0)
-
-        # 在滑块背景图中匹配滑块。参数cv2.TM_CCOEFF_NORMED是opencv2中的一种算法
-        res = cv2.matchTemplate(img_bg_gray, img_slider_gray, cv2.TM_CCOEFF_NORMED)
-
-        Common.logger(log_type, crawler).info(f"{'#' * 50}")
-        Common.logger(log_type, crawler).info(type(res))  # 打印:<class 'numpy.ndarray'>
-        Common.logger(log_type, crawler).info(res)
-        # 打印:一个二维的ndarray数组
-        # [[0.05604218  0.05557462  0.06844381... - 0.1784117 - 0.1811338 - 0.18415523]
-        #  [0.06151756  0.04408009  0.07010461... - 0.18493137 - 0.18440475 - 0.1843424]
-        # [0.0643926    0.06221284  0.0719175... - 0.18742703 - 0.18535161 - 0.1823346]
-        # ...
-        # [-0.07755355 - 0.08177952 - 0.08642308... - 0.16476074 - 0.16210903 - 0.15467581]
-        # [-0.06975575 - 0.07566144 - 0.07783117... - 0.1412715 - 0.15145643 - 0.14800543]
-        # [-0.08476129 - 0.08415948 - 0.0949327... - 0.1371379 - 0.14271489 - 0.14166716]]
-
-        Common.logger(log_type, crawler).info(f"{'#' * 50}")
-
-        # cv22.minMaxLoc() 从ndarray数组中找到最小值、最大值及他们的坐标
-        value = cv2.minMaxLoc(res)
-        # 得到的value,如:(-0.1653602570295334, 0.6102921366691589, (144, 1), (141, 56))
-
-        Common.logger(log_type, crawler).info(f"{value, '#' * 30}")
-
-        # 获取x坐标,如上面的144、141
-        return value[2:][0][0], value[2:][1][0]
-
-    # 返回两个数组:一个用于加速拖动滑块,一个用于减速拖动滑块
-    @staticmethod
-    def generate_tracks(distance):
-        # 给距离加上20,这20像素用在滑块滑过缺口后,减速折返回到缺口
-        distance += 20
-        v = 0
-        t = 0.2
-        forward_tracks = []
-        current = 0
-        mid = distance * 3 / 5  # 减速阀值
-        while current < distance:
-            if current < mid:
-                a = 2  # 加速度为+2
-            else:
-                a = -3  # 加速度-3
-            s = v * t + 0.5 * a * (t ** 2)
-            v = v + a * t
-            current += s
-            forward_tracks.append(round(s))
-
-        back_tracks = [-3, -3, -2, -2, -2, -2, -2, -1, -1, -1, -1]
-        return forward_tracks, back_tracks
-
-    # 获取距离值
-    @staticmethod
-    def get_tracks(distance):
-        """
-        模拟人的滑动行为,先匀加速后匀减速
-        匀变速基本公式
-        v=v0+at
-        s=vot+1/2at2
-        """
-        # 初速度
-        v = 0
-        # 设置时间
-        t = 0.3
-        # 存储每段距离值
-        tracks = []
-        # 当前距离
-        current = 0
-        # 中间位置为4/5距离处
-        mid = distance * 4 / 5
-        while current < distance:
-            if current < mid:
-                # 加速阶段
-                a = 2
-            else:
-                # 减速阶段
-                a = -3
-            # 当前速度
-            v0 = v
-            # 当前位移
-            s = v0 * t + 0.5 * a * t ** 2
-            # 更新当前速度
-            v = v0 + a * t
-            # 更新当前位移
-            current += s
-            # 添加到轨迹列表
-            tracks.append(round(s))
-        return tracks
-
-    @staticmethod
-    def FindPic(log_type, crawler, target, template):
-        """
-        找出图像中最佳匹配位置
-        :param log_type: 日志
-        :param crawler: 爬虫
-        :param target: 目标即背景图
-        :param template: 模板即需要找到的图
-        :return: 返回最佳匹配及其最差匹配和对应的坐标
-        """
-        target_rgb = cv2.imread(target)
-        target_gray = cv2.cvtColor(target_rgb, cv2.COLOR_BGR2GRAY)
-        template_rgb = cv2.imread(template, 0)
-        res = cv2.matchTemplate(target_gray, template_rgb, cv2.TM_CCOEFF_NORMED)
-        value = cv2.minMaxLoc(res)
-        Common.logger(log_type, crawler).info(value)
-        # 计算缺口的 X 轴距离
-        x_val = int(value[3][0])
-        Common.logger(log_type, crawler).info(f"缺口的 X 轴距离:{x_val}")
-        # 获取模板图的宽高
-        template_height, template_width, template_c = cv2.imread(template).shape
-        Common.logger(log_type, crawler).info(f"模板高:{template_height}")
-        Common.logger(log_type, crawler).info(f"模板宽:{template_width}")
-        Common.logger(log_type, crawler).info(f"图片的通道数:{template_c}")
-        # 计算需要滑动的距离
-        move_val = x_val - template_width
-        Common.logger(log_type, crawler).info(f"需要滑动的距离:{move_val}")
-        return x_val
-
-    def login(self, log_type, crawler, env):
-        # Common.logger(log_type, crawler).info("点击登录")
-        # self.driver.find_element(By.XPATH, '//*[@class="xg-button xg-button-primary xg-button-middle loginButton"]').click()
-        # time.sleep(random.randint(1, 2))
-        # Common.logger(log_type, crawler).info("点击密码登录")
-        # self.driver.find_element(By.XPATH, '//*[@class="web-login-link-list__item__text"]').click()
-        # time.sleep(random.randint(1, 2))
-        # Common.logger(log_type, crawler).info("输入手机号")
-        # self.driver.find_element(By.XPATH, '//*[@class="web-login-normal-input__input"]').send_keys(self.username)
-        # time.sleep(random.randint(1, 2))
-        # Common.logger(log_type, crawler).info("输入密码")
-        # self.driver.find_element(By.XPATH, '//*[@class="web-login-button-input__input"]').send_keys(self.password)
-        # time.sleep(random.randint(1, 2))
-        # Common.logger(log_type, crawler).info("点击登录")
-        # self.driver.find_element(By.XPATH, '//*[@class="web-login-account-password__button-wrapper"]/*[1]').click()
-        # time.sleep(random.randint(1, 2))
-
-        # # 获取滑块
-        # Common.logger(log_type, crawler).info("获取滑块")
-        # move_btns = self.driver.find_elements(By.XPATH, '//*[@class="sc-kkGfuU bujTgx"]')
-        # if len(move_btns) == 0:
-        #     Common.logger(log_type, crawler).info("未发现滑块,3-5 秒后重试")
-        #     self.quit(log_type, crawler)
-        #     time.sleep(random.randint(3, 5))
-        #     self.__init__(log_type, crawler, env)
-        #     self.login(log_type, crawler, env)
-        # move_btn = move_btns[0]
-        #
-        # while True:
-        #
-        #     # 使用requests下载滑块
-        #     slide_url = self.driver.find_element(By.XPATH,
-        #                                          '//*[@class="captcha_verify_img_slide react-draggable sc-VigVT ggNWOG"]').get_attribute(
-        #         "src")
-        #     slide_dir = f"./{crawler}/photo/img_slide.png"
-        #     urllib3.disable_warnings()
-        #     slide_url_response = requests.get(slide_url, verify=False)
-        #     with open(slide_dir, "wb") as file:
-        #         file.write(slide_url_response.content)
-        #
-        #     # 使用urllib下载背景图
-        #     bg_image_url = self.driver.find_element(By.XPATH, '//*[@id="captcha-verify-image"]').get_attribute("src")
-        #     bg_image_dir = f"./{crawler}/photo/img_bg.png"
-        #     urllib3.disable_warnings()
-        #     bg_image_url_response = requests.get(bg_image_url, verify=False)
-        #     with open(bg_image_dir, "wb") as file:
-        #         file.write(bg_image_url_response.content)
-        #
-        #     offset = self.FindPic(log_type, crawler, bg_image_dir, slide_dir)
-        #     Common.logger(log_type, crawler).info(f"offset:{offset}")
-        #
-        #     # 在滑块上暂停
-        #     Common.logger(log_type, crawler).info("在滑块上暂停")
-        #     ActionChains(self.driver).click_and_hold(on_element=move_btn).perform()
-        #     # 拖动滑块
-        #     Common.logger(log_type, crawler).info("拖动滑块0.7*距离")
-        #     ActionChains(self.driver).move_to_element_with_offset(to_element=move_btn, xoffset=int(0.5*offset), yoffset=0).perform()
-        #     # 拖动剩余像素
-        #     Common.logger(log_type, crawler).info("拖动剩余像素")
-        #     tracks = self.get_tracks(int(0.15*offset))
-        #     # 遍历梅一段距离
-        #     for track in tracks:
-        #         # 滑块移动响应距离
-        #         ActionChains(self.driver).move_by_offset(xoffset=track, yoffset=0).perform()
-        #     # 休息1s
-        #     Common.logger(log_type, crawler).info("休息1s")
-        #     time.sleep(1)
-        #     # 释放滑块
-        #     Common.logger(log_type, crawler).info("释放滑块")
-        #     ActionChains(self.driver).release().perform()
-        #
-        #     if len(move_btns) != 0:
-        #         time.sleep(1)
-        #         continue
-        #     break
-        Common.logger(log_type, crawler).info("刷新页面")
-        self.driver.refresh()
-
-        Common.logger(log_type, crawler).info("关闭当前标签页")
-        time.sleep(5)
-        # 关闭当前标签页
-        self.driver.find_element(By.TAG_NAME, 'body').send_keys(Keys.COMMAND + 'w')
-        Common.logger(log_type, crawler).info("已关闭")
-        Common.logger(log_type, crawler).info("退出浏览器")
-        self.quit(log_type, crawler)
-
-
-
-if __name__ == "__main__":
-    Recommend = XGRecommend("search", "dev", "dev")
-    Recommend.login("search", "dev", "dev")
-    pass

+ 6 - 1
douyin/douyin_main/run_dy_author.py

@@ -32,6 +32,7 @@ def main(log_type, crawler, topic_name, group_id, env):
             # 长轮询消费消息。
             recv_msgs = consumer.consume_message(batch, wait_seconds)
             for msg in recv_msgs:
+                dy_author_start_time = int(time.time())
                 Common.logger(log_type, crawler).info(f"Receive\n"
                                                       f"MessageId:{msg.message_id}\n"
                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
@@ -73,9 +74,13 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                          rule_dict=rule_dict,
                                                          user_list=user_list,
                                                          env=env)
-                Common.del_logs(log_type, crawler)
+                # Common.del_logs(log_type, crawler)
                 Common.logger(log_type, crawler).info('抓取一轮结束\n')
                 Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+                dy_author_end_time = int(time.time())
+                dy_author_duration = dy_author_end_time - dy_author_start_time
+                Common.logger(log_type, crawler).info(f"duration {dy_author_duration}")
+                Common.logging(log_type, crawler, env, f"duration {dy_author_duration}")
 
         except MQExceptionBase as err:
             # Topic中没有消息可消费。

+ 7 - 2
douyin/douyin_main/run_dy_recommend.py

@@ -33,6 +33,7 @@ def main(log_type, crawler, topic_name, group_id, env):
             # 长轮询消费消息。
             recv_msgs = consumer.consume_message(batch, wait_seconds)
             for msg in recv_msgs:
+                dy_recommend_start_time = int(time.time())
                 Common.logger(log_type, crawler).info(f"Receive\n"
                                                       f"MessageId:{msg.message_id}\n"
                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
@@ -78,12 +79,16 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                         rule_dict=rule_dict,
                                                         our_uid=our_uid,
                                                         env=env)
-                Common.del_logs(log_type, crawler)
+                # Common.del_logs(log_type, crawler)
                 Common.logger(log_type, crawler).info('抓取一轮结束\n')
                 Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+                dy_recommend_end_time = int(time.time())
+                dy_recommend_duration = dy_recommend_end_time - dy_recommend_start_time
+                Common.logger(log_type, crawler).info(f"duration {dy_recommend_duration}")
+                Common.logging(log_type, crawler, env, f"duration {dy_recommend_duration}")
 
         except MQExceptionBase as err:
             # Topic中没有消息可消费。
             if err.type == "MessageNotExist":
                 Common.logger(log_type, crawler).info(f"No new message! RequestId:{err.req_id}\n")
                 Common.logging(log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n")

+ 9 - 1
ganggangdouchuan/ganggangdouchuan_main/run_ganggangdouchuan_recommend.py

@@ -4,6 +4,7 @@
 import argparse
 import os
 import sys
+import time
 sys.path.append(os.getcwd())
 from common.common import Common
 from ganggangdouchuan.ganggangdouchuan_recommend.ganggangdouchuan_recommend import GanggangdouchuanRecommend
@@ -12,9 +13,16 @@ from ganggangdouchuan.ganggangdouchuan_recommend.ganggangdouchuan_recommend impo
 def main(log_type, crawler, env):
     oss_endpoint = "out"
     Common.logger(log_type, crawler).info('开始抓取:刚刚都传小程序\n')
+    Common.logging(log_type, crawler, env, '开始抓取:刚刚都传小程序\n')
+    ganggangdouchuan_start_time = int(time.time())
     GanggangdouchuanRecommend.start_wechat(log_type, crawler, oss_endpoint, env)
-    Common.del_logs(log_type, crawler)
+    # Common.del_logs(log_type, crawler)
     Common.logger(log_type, crawler).info('抓取完一轮\n')
+    Common.logging(log_type, crawler, env, '抓取完一轮\n')
+    ganggangdouchuan_end_time = int(time.time())
+    ganggangdouchuan_duration = ganggangdouchuan_end_time - ganggangdouchuan_start_time
+    Common.logger(log_type, crawler).info(f"duration {ganggangdouchuan_duration}")
+    Common.logging(log_type, crawler, env, f"duration {ganggangdouchuan_duration}")
 
 
 if __name__ == "__main__":

+ 38 - 11
ganggangdouchuan/ganggangdouchuan_recommend/ganggangdouchuan_recommend.py

@@ -12,7 +12,6 @@ from appium.webdriver.extensions.android.nativekey import AndroidKey
 from appium.webdriver.webdriver import WebDriver
 from selenium.common import NoSuchElementException
 from selenium.webdriver.common.by import By
-
 sys.path.append(os.getcwd())
 from common.common import Common
 from common.feishu import Feishu
@@ -33,6 +32,7 @@ class GanggangdouchuanRecommend:
                 chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
 
             Common.logger(log_type, crawler).info('启动微信')
+            Common.logging(log_type, crawler, env, '启动微信')
             caps = {
                 "platformName": "Android",  # 手机操作系统 Android / iOS
                 "deviceName": "a0a65126",  # 连接的设备名(模拟器或真机),安卓可以随便写
@@ -67,28 +67,35 @@ class GanggangdouchuanRecommend:
                     # 发现并关闭系统菜单栏
                     elif driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view'):
                         Common.logger(log_type, crawler).info('发现并关闭系统下拉菜单栏')
+                        Common.logging(log_type, crawler, env, '发现并关闭系统下拉菜单栏')
                         driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view').click()
                     else:
                         pass
                 except NoSuchElementException:
                     time.sleep(1)
             Common.logger(log_type, crawler).info('下滑,展示小程序选择面板')
+            Common.logging(log_type, crawler, env, '下滑,展示小程序选择面板')
             size = driver.get_window_size()
             driver.swipe(int(size['width'] * 0.5), int(size['height'] * 0.2),
                          int(size['width'] * 0.5), int(size['height'] * 0.8), 200)
             # 打开小程序"刚刚都传"
             time.sleep(3)
             Common.logger(log_type, crawler).info('打开小程序"刚刚都传"')
+            Common.logging(log_type, crawler, env, '打开小程序"刚刚都传"')
             driver.find_elements(By.XPATH, '//*[@text="刚刚都传"]')[-1].click()
             time.sleep(10)
             cls.get_videoList(log_type, crawler, oss_endpoint, env, driver)
             driver.quit()
             Common.logger(log_type, crawler).info('退出微信成功\n')
+            Common.logging(log_type, crawler, env, '退出微信成功\n')
         except Exception as e:
             Common.logger(log_type, crawler).error(f'start_wechat异常:{e}\n')
+            Common.logging(log_type, crawler, env, f'start_wechat异常:{e}\n')
+            cmd = "adb kill-server && adb start-server"
+            os.system(cmd)
 
     @classmethod
-    def get_video_url(cls, log_type, crawler, driver: WebDriver, video_element):
+    def get_video_url(cls, log_type, crawler, env, driver: WebDriver, video_element):
         try:
             time.sleep(1)
             # Common.logger(log_type, crawler).info('进入视频详情')
@@ -109,10 +116,11 @@ class GanggangdouchuanRecommend:
                     time.sleep(1)
         except Exception as e:
             Common.logger(log_type, crawler).error(f'get_video_url异常:{e}\n')
+            Common.logging(log_type, crawler, env, f'get_video_url异常:{e}\n')
 
     # 切换 Handle
     @classmethod
-    def search_elements(cls, log_type, crawler, driver: WebDriver, xpath):
+    def search_elements(cls, log_type, crawler, env, driver: WebDriver, xpath):
         try:
             windowHandles = driver.window_handles
             # Common.logger(log_type, crawler).info('windowHandles:{}', windowHandles)
@@ -129,12 +137,14 @@ class GanggangdouchuanRecommend:
                     pass
         except Exception as e:
             Common.logger(log_type, crawler).warning(f'search_elements异常:{e}\n')
+            Common.logging(log_type, crawler, env, f'search_elements异常:{e}\n')
 
     @classmethod
-    def check_to_applet(cls, log_type, crawler, driver: WebDriver):
+    def check_to_applet(cls, log_type, crawler, env, driver: WebDriver):
         while True:
             webview = driver.contexts
             Common.logger(log_type, crawler).info(f"webviews:{webview}")
+            Common.logging(log_type, crawler, env, f"webviews:{webview}")
             driver.switch_to.context(webview[1])
             windowHandles = driver.window_handles
             for handle in windowHandles:
@@ -144,55 +154,64 @@ class GanggangdouchuanRecommend:
                     video_list = driver.find_element(By.XPATH, '//wx-view[text()="视频"]')
                     video_list.click()
                     Common.logger(log_type, crawler).info('切换到小程序视频列表成功\n')
+                    Common.logging(log_type, crawler, env, '切换到小程序视频列表成功\n')
                     return
                 except NoSuchElementException:
                     time.sleep(1)
             Common.logger(log_type, crawler).info("切换到小程序失败\n")
+            Common.logging(log_type, crawler, env, "切换到小程序失败\n")
             break
 
     @classmethod
     def repeat_out_video_id(cls, log_type, crawler, out_video_id, env):
-        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{out_video_id}"; """
+        sql = f""" select * from crawler_video where platform in ("{crawler}", "{cls.platform}") and out_video_id="{out_video_id}"; """
         repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)
 
     @classmethod
     def repeat_video_url(cls, log_type, crawler, video_url, env):
-        sql = f""" select * from crawler_video where platform="{cls.platform}" and video_url="{video_url}"; """
+        # sql = f""" select * from crawler_video where platform="{cls.platform}" and video_url="{video_url}"; """
+        sql = f""" select * from crawler_video where platform in ("{cls.platform}", "{crawler}") and video_url="{video_url}"; """
         repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)
 
     @classmethod
     def get_videoList(cls, log_type, crawler, oss_endpoint, env, driver: WebDriver):
         # 切换到小程序
-        cls.check_to_applet(log_type, crawler, driver)
+        cls.check_to_applet(log_type, crawler, env, driver)
         time.sleep(1)
         index = 0
 
         while True:
             try:
-                if cls.search_elements(log_type, crawler, driver, '//wx-view[@class="lists"]') is None:
+                if cls.search_elements(log_type, crawler, env, driver, '//wx-view[@class="lists"]') is None:
                     Common.logger(log_type, crawler).info('窗口已销毁\n')
+                    Common.logging(log_type, crawler, env, '窗口已销毁\n')
                     return
 
                 Common.logger(log_type, crawler).info('获取视频列表\n')
-                video_elements = cls.search_elements(log_type, crawler, driver, '//wx-view[@class="list"]')
+                Common.logging(log_type, crawler, env, '获取视频列表\n')
+                video_elements = cls.search_elements(log_type, crawler, env, driver, '//wx-view[@class="list"]')
                 if video_elements is None:
                     Common.logger(log_type, crawler).warning(f'video_elements:{video_elements}')
+                    Common.logging(log_type, crawler, env, f'video_elements:{video_elements}')
                     return
 
                 video_element_temp = video_elements[index:]
                 if len(video_element_temp) == 0:
                     Common.logger(log_type, crawler).info('到底啦~~~~~~~~~~~~~\n')
+                    Common.logging(log_type, crawler, env, '到底啦~~~~~~~~~~~~~\n')
                     return
 
                 for i, video_element in enumerate(video_element_temp):
                     if video_element is None:
                         Common.logger(log_type, crawler).info('到底啦~\n')
+                        Common.logging(log_type, crawler, env, '到底啦~\n')
                         return
                     cls.i += 1
-                    cls.search_elements(log_type, crawler, driver, '//wx-view[@class="list"]')
+                    cls.search_elements(log_type, crawler, env, driver, '//wx-view[@class="list"]')
                     Common.logger(log_type, crawler).info(f'拖动"视频"列表第{cls.i}个至屏幕中间')
+                    Common.logging(log_type, crawler, env, f'拖动"视频"列表第{cls.i}个至屏幕中间')
                     time.sleep(3)
                     driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})", video_element)
 
@@ -220,29 +239,37 @@ class GanggangdouchuanRecommend:
                     }
                     for k, v in video_dict.items():
                         Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    Common.logging(log_type, crawler, env, f'video_dict:{video_dict}')
 
                     if video_title is None or cover_url is None:
                         Common.logger(log_type, crawler).info("无效视频\n")
+                        Common.logging(log_type, crawler, env, "无效视频\n")
                     elif cls.repeat_out_video_id(log_type, crawler, out_video_id, env) != 0:
                         Common.logger(log_type, crawler).info('视频已下载\n')
+                        Common.logging(log_type, crawler, env, '视频已下载\n')
                     else:
-                        video_url = cls.get_video_url(log_type, crawler, driver, video_element)
+                        video_url = cls.get_video_url(log_type, crawler, env, driver, video_element)
                         Common.logger(log_type, crawler).info(f'video_url:{video_url}')
+                        Common.logging(log_type, crawler, env, f'video_url:{video_url}')
                         if video_url is None:
                             Common.logger(log_type, crawler).info("未获取到视频播放地址\n")
+                            Common.logging(log_type, crawler, env, "未获取到视频播放地址\n")
                             driver.press_keycode(AndroidKey.BACK)
                         elif cls.repeat_video_url(log_type, crawler, video_url, env) != 0:
                             Common.logger(log_type, crawler).info('视频已下载\n')
+                            Common.logging(log_type, crawler, env, '视频已下载\n')
                             driver.press_keycode(AndroidKey.BACK)
                         else:
                             video_dict["video_url"]=video_url
                             cls.download_publish(log_type, crawler, video_dict, oss_endpoint, env, driver)
                 Common.logger(log_type, crawler).info('已抓取完一组视频,休眠10秒\n')
+                Common.logging(log_type, crawler, env, '已抓取完一组视频,休眠10秒\n')
                 time.sleep(10)
                 index = index + len(video_element_temp)
 
             except Exception as e:
                 Common.logger(log_type, crawler).error(f'get_videoList异常:{e}\n')
+                Common.logging(log_type, crawler, env, f'get_videoList异常:{e}\n')
                 cls.i = 0
                 return
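
Across the hunks above, every Common.logger(...) call gains a matching Common.logging(log_type, crawler, env, msg) call carrying the same message. A minimal sketch of keeping the two channels in step, assuming only the signatures visible in this diff (the log_both helper is hypothetical, not part of the commit):

import os
import sys

sys.path.append(os.getcwd())
from common.common import Common


# hypothetical helper, not in this commit: send one message through both channels.
# Assumes Common.logger(log_type, crawler) returns an object with info/warning/error
# methods and Common.logging(log_type, crawler, env, message) takes a plain string,
# exactly as the added lines above use them.
def log_both(log_type, crawler, env, message, level="info"):
    getattr(Common.logger(log_type, crawler), level)(message)
    Common.logging(log_type, crawler, env, message)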
 

+ 30 - 20
gongzhonghao/gongzhonghao_author/gongzhonghao_author_lock.py

@@ -13,6 +13,7 @@ from selenium.webdriver import DesiredCapabilities
 from selenium.webdriver.chrome.service import Service
 from selenium.webdriver.common.by import By
 from selenium import webdriver
+
 sys.path.append(os.getcwd())
 from common.mq import MQ
 from common.common import Common
@@ -40,7 +41,7 @@ class GongzhonghaoAuthor:
                 Common.logging(log_type, crawler, env, "暂无可用的token\n")
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler, "暂无可用的token,请更新\n")
-                time.sleep(60*15)
+                time.sleep(60 * 15)
                 continue
             token_info = random.choice(token_list)
             lock_time_stamp = cls.lock_token(log_type, crawler, env, token_info["id"])
@@ -71,7 +72,7 @@ class GongzhonghaoAuthor:
         :param token_id: token_id
         :return: None
         """
-        lock_time_stamp = int(time.time()*1000)
+        lock_time_stamp = int(time.time() * 1000)
         lock_sql = f""" update crawler_config set status={1}, update_time={lock_time_stamp} WHERE id ={token_id} and status={0} ; """
         lock_token = MysqlHelper.update_values(log_type, crawler, lock_sql, env, action="")
         # Common.logger(log_type, crawler).info(f"lock_token:{lock_token}")
@@ -91,7 +92,7 @@ class GongzhonghaoAuthor:
         :param status: 0,正常可用状态;1,被占用状态;-2,不可用状态(过期/频控)
         :return: None
         """
-        release_sql = f""" update crawler_config set status={status}, update_time={int(time.time()*1000)} WHERE id ={token_id} ; """
+        release_sql = f""" update crawler_config set status={status}, update_time={int(time.time() * 1000)} WHERE id ={token_id} ; """
         MysqlHelper.update_values(log_type, crawler, release_sql, env, action="")
 
     # 获取腾讯视频下载链接
@@ -178,9 +179,10 @@ class GongzhonghaoAuthor:
             token_dict = cls.get_token(log_type, crawler, env)
             Common.logger(log_type, crawler).info(f"get_user_info_token:{token_dict}")
 
-            if int(time.time()*1000) - token_dict["update_time_stamp"] >= 3600*24*1000:
-            # if int(time.time()*1000) - token_dict["update_time_stamp"] >= 30000:
-                Common.logger(log_type, crawler).info(f"{int(time.time()*1000)}-{token_dict['update_time_stamp']}={(int(time.time()*1000)-token_dict['update_time_stamp'])}")
+            if int(time.time() * 1000) - token_dict["update_time_stamp"] >= 3600 * 24 * 1000:
+                # if int(time.time()*1000) - token_dict["update_time_stamp"] >= 30000:
+                Common.logger(log_type, crawler).info(
+                    f"{int(time.time() * 1000)}-{token_dict['update_time_stamp']}={(int(time.time() * 1000) - token_dict['update_time_stamp'])}")
                 Common.logger(log_type, crawler).info("token使用时长>=24小时,申请释放")
                 Common.logging(log_type, crawler, env, "token使用时长>=24小时,申请释放")
                 cls.release_token(log_type, crawler, env, token_dict["token_id"], 0)
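
The lock_token / release_token hunks above gate each token behind a status flag in crawler_config: locking is a conditional UPDATE that only fires while status=0, and the check here hands a token back once it has been held for 24 hours. A hedged sketch of that flow, assuming MysqlHelper.update_values returns a truthy value when a row was actually updated (the diff does not confirm its return value), with the import path taken from other files in this commit:

import time

from common.scheduling_db import MysqlHelper


# illustration only: optimistic lock via a conditional UPDATE, mirroring lock_token above.
def try_lock_token(log_type, crawler, env, token_id):
    now_ms = int(time.time() * 1000)
    sql = f""" update crawler_config set status=1, update_time={now_ms} WHERE id={token_id} and status=0 ; """
    # assumption: update_values reports whether the conditional UPDATE hit a row
    return now_ms if MysqlHelper.update_values(log_type, crawler, sql, env, action="") else None


# illustration only: release a token that has been locked for >= 24 hours, as checked above.
def release_if_stale(log_type, crawler, env, token_dict, release_fn):
    if int(time.time() * 1000) - token_dict["update_time_stamp"] >= 3600 * 24 * 1000:
        release_fn(log_type, crawler, env, token_dict["token_id"], 0)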
@@ -224,7 +226,7 @@ class GongzhonghaoAuthor:
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler,
                                f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
-                time.sleep(60*15)
+                time.sleep(60 * 15)
                 continue
             if r.json()["base_resp"]["err_msg"] == "freq control":
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
@@ -233,7 +235,7 @@ class GongzhonghaoAuthor:
                 if 20 >= datetime.datetime.now().hour >= 10:
                     Feishu.bot(log_type, crawler,
                                f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
-                time.sleep(60*15)
+                time.sleep(60 * 15)
                 continue
             if r.json()["base_resp"]["err_msg"] == "ok" and len(r.json()["list"]) == 0:
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
@@ -254,7 +256,7 @@ class GongzhonghaoAuthor:
                               'user_id': r.json()["list"][0]["fakeid"],
                               'avatar_url': r.json()["list"][0]["round_head_img"]}
             return user_info_dict
-        
+
     # 获取文章列表
     @classmethod
     def get_videoList(cls, log_type, crawler, task_dict, rule_dict, user_dict, env):
@@ -311,31 +313,36 @@ class GongzhonghaoAuthor:
                 Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                 cls.release_token(log_type, crawler, env, token_dict["token_id"], -2)
                 if 20 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
-                time.sleep(60*15)
+                    Feishu.bot(log_type, crawler,
+                               f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 15)
                 continue
             if r.json()["base_resp"]["err_msg"] == "freq control":
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                 Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                 cls.release_token(log_type, crawler, env, token_dict["token_id"], -2)
                 if 20 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler,f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
-                time.sleep(60*15)
+                    Feishu.bot(log_type, crawler,
+                               f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 15)
                 continue
             if r.json()["base_resp"]["err_msg"] == "invalid args" and r.json()["base_resp"]["ret"] == 200002:
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                 Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
-                task_unbind(log_type=log_type, crawler=crawler, taskid=task_dict['id'], uids=str(user_dict["uid"]), env=env)
+                task_unbind(log_type=log_type, crawler=crawler, taskid=task_dict['id'], uids=str(user_dict["uid"]),
+                            env=env)
                 if 20 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler,f"公众号:{user_dict['link']}, 站内昵称:{user_dict['nick_name']}\n抓取异常, 已取消抓取该公众号\n")
+                    Feishu.bot(log_type, crawler,
+                               f"公众号:{user_dict['link']}, 站内昵称:{user_dict['nick_name']}\n抓取异常, 已取消抓取该公众号\n")
                 return
             if 'app_msg_list' not in r.json():
                 Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                 Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                 cls.release_token(log_type, crawler, env, token_dict["token_id"], -2)
                 if 20 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
-                time.sleep(60*15)
+                    Feishu.bot(log_type, crawler,
+                               f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
+                time.sleep(60 * 15)
                 continue
             if len(r.json()['app_msg_list']) == 0:
                 Common.logger(log_type, crawler).info('没有更多视频了\n')
@@ -371,9 +378,12 @@ class GongzhonghaoAuthor:
                             Common.logger(log_type, crawler).info(f"{k}:{v}")
                         Common.logging(log_type, crawler, env, f'video_dict:{video_dict}')
 
-                        if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('max', 1000)):
-                            Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
-                            Common.logging(log_type, crawler, env, f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
+                        if int(time.time()) - publish_time_stamp > 3600 * 24 * int(
+                                rule_dict.get('period', {}).get('max', 1000)):
+                            Common.logger(log_type, crawler).info(
+                                f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
+                            Common.logging(log_type, crawler, env,
+                                           f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
                             return
 
                         if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:

File diff suppressed because it is too large
+ 0 - 519
gongzhonghao/gongzhonghao_follow/gongzhonghao_follow_2.py


+ 0 - 547
gongzhonghao/gongzhonghao_follow/gongzhonghao_follow_3.py

@@ -1,547 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/3/28
-import datetime
-import difflib
-import json
-import os
-import shutil
-import sys
-import time
-from hashlib import md5
-
-import requests
-import urllib3
-# from requests.adapters import HTTPAdapter
-from selenium.webdriver import DesiredCapabilities
-from selenium.webdriver.chrome.service import Service
-from selenium.webdriver.common.by import By
-from selenium import webdriver
-
-sys.path.append(os.getcwd())
-from common.common import Common
-from common.feishu import Feishu
-from common.public import get_config_from_mysql
-from common.publish import Publish
-from common.scheduling_db import MysqlHelper
-
-
-class GongzhonghaoFollow3:
-    # 翻页参数
-    begin = 0
-    platform = "公众号"
-
-    # 基础门槛规则
-    @staticmethod
-    def download_rule(video_dict):
-        """
-        下载视频的基本规则
-        :param video_dict: 视频信息,字典格式
-        :return: 满足规则,返回 True;反之,返回 False
-        """
-        # 视频时长 20秒 - 45 分钟
-        if 60 * 45 >= int(float(video_dict['duration'])) >= 20:
-            # 宽或高
-            if int(video_dict['video_width']) >= 0 or int(video_dict['video_height']) >= 0:
-                return True
-            else:
-                return False
-        else:
-            return False
-
-    @classmethod
-    def title_like(cls, log_type, crawler, title, env):
-        select_sql = f""" select * from crawler_video where platform="公众号" """
-        video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
-        if len(video_list) == 0:
-            return None
-        for video_dict in video_list:
-            video_title = video_dict["video_title"]
-            if difflib.SequenceMatcher(None, title, video_title).quick_ratio() >= 0.8:
-                return True
-            else:
-                pass
-
-    # 获取 token
-    @classmethod
-    def get_token(cls, log_type, crawler):
-        while True:
-            # try:
-            sheet = Feishu.get_values_batch(log_type, crawler, "l1VZki")
-            if sheet is None:
-                time.sleep(1)
-                continue
-            token = sheet[0][1]
-            cookie = sheet[1][1]
-            gzh_name = sheet[2][1]
-            gzh_time = sheet[3][1]
-            token_dict = {'token': token, 'cookie': cookie, 'gzh_name': gzh_name, 'gzh_time': gzh_time}
-            return token_dict
-            # except Exception as e:
-            #     Common.logger(log_type, crawler).error(f"get_cookie_token异常:{e}\n")
-
-    # 获取用户 fakeid
-    @classmethod
-    def get_fakeid(cls, log_type, crawler, user, index):
-        # try:
-        while True:
-            token_dict = cls.get_token(log_type, crawler)
-            url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
-            headers = {
-                "accept": "*/*",
-                "accept-encoding": "gzip, deflate, br",
-                "accept-language": "zh-CN,zh;q=0.9",
-                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
-                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
-                           "&type=77&createType=5&token=1011071554&lang=zh_CN",
-                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
-                "sec-ch-ua-mobile": "?0",
-                "sec-ch-ua-platform": '"Windows"',
-                "sec-fetch-dest": "empty",
-                "sec-fetch-mode": "cors",
-                "sec-fetch-site": "same-origin",
-                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
-                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
-                "x-requested-with": "XMLHttpRequest",
-                'cookie': token_dict['cookie'],
-            }
-            params = {
-                "action": "search_biz",
-                "begin": "0",
-                "count": "5",
-                "query": str(user),
-                "token": token_dict['token'],
-                "lang": "zh_CN",
-                "f": "json",
-                "ajax": "1",
-            }
-            urllib3.disable_warnings()
-            # s = requests.session()
-            # # max_retries=3 重试3次
-            # s.mount('http://', HTTPAdapter(max_retries=3))
-            # s.mount('https://', HTTPAdapter(max_retries=3))
-            # r = s.get(url=url, headers=headers, params=params, verify=False, proxies=Common.tunnel_proxies(), timeout=5)
-            r = requests.get(url=url, headers=headers, params=params, verify=False)
-            r.close()
-            if r.json()["base_resp"]["err_msg"] == "invalid session":
-                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
-                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
-                Common.logger(log_type, crawler).warning(
-                    f"公众号_3:{token_dict['gzh_name']}, 更换日期:{token_dict['gzh_time']} 过期啦\n")
-                if 20 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler, f"token_3:{token_dict['gzh_name']}\n更换日期:{token_dict['gzh_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
-                time.sleep(60 * 10)
-                continue
-            if r.json()["base_resp"]["err_msg"] == "freq control":
-                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
-                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
-                Common.logger(log_type, crawler).warning(
-                    f"公众号_3:{token_dict['gzh_name']}, 更换日期:{token_dict['gzh_time']} 频控啦\n")
-                if 20 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler, f"公众号_3:{token_dict['gzh_name']}\n更换日期:{token_dict['gzh_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
-                time.sleep(60 * 10)
-                continue
-            if "list" not in r.json() or len(r.json()["list"]) == 0:
-                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
-                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
-                Common.logger(log_type, crawler).warning(
-                    f"公众号_3:{token_dict['gzh_name']}, 更换日期:{token_dict['gzh_time']} 频控啦\n")
-                if 20 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler, f"公众号_3:{token_dict['gzh_name']}\n更换日期:{token_dict['gzh_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
-                time.sleep(60 * 10)
-                continue
-            fakeid = r.json()["list"][int(index) - 1]["fakeid"]
-            head_url = r.json()["list"][int(index) - 1]["round_head_img"]
-            fakeid_dict = {'fakeid': fakeid, 'head_url': head_url}
-            return fakeid_dict
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error(f"get_fakeid异常:{e}\n")
-
-    # 获取腾讯视频下载链接
-    @classmethod
-    def get_tencent_video_url(cls, video_id):
-        # try:
-        url = 'https://vv.video.qq.com/getinfo?vids=' + str(video_id) + '&platform=101001&charge=0&otype=json'
-        response = requests.get(url=url).text.replace('QZOutputJson=', '').replace('"};', '"}')
-        response = json.loads(response)
-        url = response['vl']['vi'][0]['ul']['ui'][0]['url']
-        fvkey = response['vl']['vi'][0]['fvkey']
-        video_url = url + str(video_id) + '.mp4?vkey=' + fvkey
-        return video_url
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error(f"get_tencent_video_url异常:{e}\n")
-
-    @classmethod
-    def get_video_url(cls, article_url, env):
-        # try:
-        # 打印请求配置
-        ca = DesiredCapabilities.CHROME
-        ca["goog:loggingPrefs"] = {"performance": "ALL"}
-
-        # 不打开浏览器运行
-        chrome_options = webdriver.ChromeOptions()
-        chrome_options.add_argument("headless")
-        chrome_options.add_argument(
-            f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
-        chrome_options.add_argument("--no-sandbox")
-
-        # driver初始化
-        if env == "prod":
-            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
-        else:
-            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
-                '/Users/wangkun/Downloads/chromedriver/chromedriver_v111/chromedriver'))
-
-        driver.implicitly_wait(10)
-        # Common.logger(log_type, crawler).info('打开文章链接')
-        driver.get(article_url)
-        time.sleep(1)
-
-        if len(driver.find_elements(By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]')) != 0:
-            video_url = driver.find_element(
-                By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]').get_attribute('src')
-        elif len(driver.find_elements(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]')) != 0:
-            iframe = driver.find_element(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]').get_attribute(
-                'src')
-            video_id = iframe.split('vid=')[-1].split('&')[0]
-            video_url = cls.get_tencent_video_url(video_id)
-        else:
-            video_url = 0
-        driver.quit()
-        return video_url
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).info(f'get_video_url异常:{e}\n')
-
-    # 获取文章列表
-    @classmethod
-    def get_videoList(cls, log_type, crawler, user, index, oss_endpoint, env):
-        # try:
-        while True:
-            fakeid_dict = cls.get_fakeid(log_type, crawler, user, index)
-            token_dict = cls.get_token(log_type, crawler)
-            url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
-            headers = {
-                "accept": "*/*",
-                "accept-encoding": "gzip, deflate, br",
-                "accept-language": "zh-CN,zh;q=0.9",
-                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
-                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
-                           "&type=77&createType=5&token=" + str(token_dict['token']) + "&lang=zh_CN",
-                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
-                "sec-ch-ua-mobile": "?0",
-                "sec-ch-ua-platform": '"Windows"',
-                "sec-fetch-dest": "empty",
-                "sec-fetch-mode": "cors",
-                "sec-fetch-site": "same-origin",
-                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
-                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
-                "x-requested-with": "XMLHttpRequest",
-                'cookie': token_dict['cookie'],
-            }
-            params = {
-                "action": "list_ex",
-                "begin": str(cls.begin),
-                "count": "5",
-                "fakeid": fakeid_dict['fakeid'],
-                "type": "9",
-                "query": "",
-                "token": str(token_dict['token']),
-                "lang": "zh_CN",
-                "f": "json",
-                "ajax": "1",
-            }
-            urllib3.disable_warnings()
-            # s = requests.session()
-            # # max_retries=3 重试3次
-            # s.mount('http://', HTTPAdapter(max_retries=3))
-            # s.mount('https://', HTTPAdapter(max_retries=3))
-            # r = s.get(url=url, headers=headers, params=params, verify=False, proxies=Common.tunnel_proxies(), timeout=5)
-            r = requests.get(url=url, headers=headers, params=params, verify=False)
-            r.close()
-            if r.json()["base_resp"]["err_msg"] == "invalid session":
-                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
-                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
-                Common.logger(log_type, crawler).warning(
-                    f"公众号_3:{token_dict['gzh_name']}, 更换日期:{token_dict['gzh_time']} 过期啦\n")
-                if 20 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler, f"token_3:{token_dict['gzh_name']}\n更换日期:{token_dict['gzh_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
-                time.sleep(60 * 10)
-                continue
-            if r.json()["base_resp"]["err_msg"] == "freq control":
-                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
-                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
-                Common.logger(log_type, crawler).warning(
-                    f"公众号_3:{token_dict['gzh_name']}, 更换日期:{token_dict['gzh_time']} 频控啦\n")
-                if 20 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler, f"公众号_3:{token_dict['gzh_name']}\n更换日期:{token_dict['gzh_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
-                time.sleep(60 * 10)
-                continue
-            if 'app_msg_list' not in r.json():
-                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
-                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
-                Common.logger(log_type, crawler).warning(
-                    f"公众号_3:{token_dict['gzh_name']}, 更换日期:{token_dict['gzh_time']} 频控啦\n")
-                if 20 >= datetime.datetime.now().hour >= 10:
-                    Feishu.bot(log_type, crawler, f"公众号_3:{token_dict['gzh_name']}\n更换日期:{token_dict['gzh_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
-                time.sleep(60 * 10)
-                continue
-            if len(r.json()['app_msg_list']) == 0:
-                Common.logger(log_type, crawler).info('没有更多视频了\n')
-                return
-            else:
-                cls.begin += 5
-                app_msg_list = r.json()['app_msg_list']
-                for article_url in app_msg_list:
-                    # title
-                    if 'title' in article_url:
-                        title = article_url['title'].replace('/', '').replace('\n', '') \
-                            .replace('.', '').replace('“', '').replace('”', '').replace(' ', '')\
-                            .replace('"', '').replace("'", "")
-                    else:
-                        title = 0
-
-                    # aid
-                    if 'aid' in article_url:
-                        aid = article_url['aid']
-                    else:
-                        aid = 0
-
-                    # create_time
-                    if 'create_time' in article_url:
-                        create_time = article_url['create_time']
-                    else:
-                        create_time = 0
-                    publish_time_stamp = int(create_time)
-                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-
-                    avatar_url = fakeid_dict['head_url']
-
-                    # cover_url
-                    if 'cover' in article_url:
-                        cover_url = article_url['cover']
-                    else:
-                        cover_url = 0
-
-                    # article_url
-                    if 'link' in article_url:
-                        article_url = article_url['link']
-                    else:
-                        article_url = 0
-
-                    video_url = cls.get_video_url(article_url, env)
-
-                    video_dict = {
-                        'video_id': aid,
-                        'video_title': title,
-                        'publish_time_stamp': publish_time_stamp,
-                        'publish_time_str': publish_time_str,
-                        'user_name': user,
-                        'play_cnt': 0,
-                        'comment_cnt': 0,
-                        'like_cnt': 0,
-                        'share_cnt': 0,
-                        'user_id': fakeid_dict['fakeid'],
-                        'avatar_url': avatar_url,
-                        'cover_url': cover_url,
-                        'article_url': article_url,
-                        'video_url': video_url,
-                        'session': f'gongzhonghao-follow-{int(time.time())}'
-                    }
-                    for k, v in video_dict.items():
-                        Common.logger(log_type, crawler).info(f"{k}:{v}")
-                    if int(time.time()) - publish_time_stamp >= 3600 * 24 * 3:
-                        Common.logger(log_type, crawler).info(f'发布时间{publish_time_str} > 3 天\n')
-                        cls.begin = 0
-                        return
-                    cls.download_publish(log_type, crawler, video_dict, oss_endpoint, env)
-
-                Common.logger(log_type, crawler).info('休眠 60 秒\n')
-                time.sleep(60)
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error("get_videoList异常:{}\n", e)
-
-    @classmethod
-    def repeat_video(cls, log_type, crawler, video_id, env):
-        sql = f""" select * from crawler_video where platform="公众号" and out_video_id="{video_id}"; """
-        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
-        return len(repeat_video)
-
-    # 下载/上传
-    @classmethod
-    def download_publish(cls, log_type, crawler, video_dict, oss_endpoint, env):
-        # try:
-        if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
-            Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
-        # 标题敏感词过滤
-        elif any(word if word in video_dict['video_title']
-                 else False for word in get_config_from_mysql(log_type=log_type,
-                                                              source=crawler,
-                                                              env=env,
-                                                              text="filter",
-                                                              action="")) is True:
-            Common.logger(log_type, crawler).info("标题已中过滤词\n")
-        # 已下载判断
-        elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
-            Common.logger(log_type, crawler).info("视频已下载\n")
-        # 标题相似度
-        elif cls.title_like(log_type, crawler, video_dict['video_title'], env) is True:
-            Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
-        else:
-            # 下载视频
-            Common.download_method(log_type=log_type, crawler=crawler, text="video",
-                                   title=video_dict["video_title"], url=video_dict["video_url"])
-            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-            # 获取视频时长
-            ffmpeg_dict = Common.ffmpeg(log_type, crawler,
-                                        f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
-            if ffmpeg_dict is None:
-                # 删除视频文件夹
-                shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
-                return
-            video_dict["video_width"] = ffmpeg_dict["width"]
-            video_dict["video_height"] = ffmpeg_dict["height"]
-            video_dict["duration"] = ffmpeg_dict["duration"]
-            video_size = ffmpeg_dict["size"]
-            Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
-            Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
-            Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
-            Common.logger(log_type, crawler).info(f'video_size:{video_size}')
-            # 视频size=0,直接删除
-            if int(video_size) == 0 or cls.download_rule(video_dict) is False:
-                # 删除视频文件夹
-                shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
-                return
-            # 下载封面
-            Common.download_method(log_type=log_type, crawler=crawler, text="cover",
-                                   title=video_dict["video_title"], url=video_dict["cover_url"])
-            # 保存视频信息至 "./videos/{video_title}/info.txt"
-            Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
-
-            # 上传视频
-            Common.logger(log_type, crawler).info("开始上传视频...")
-            strategy = "定向爬虫策略"
-            our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                      crawler=crawler,
-                                                      strategy=strategy,
-                                                      our_uid="follow",
-                                                      oss_endpoint=oss_endpoint,
-                                                      env=env)
-            if env == 'prod':
-                our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
-            else:
-                our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
-            Common.logger(log_type, crawler).info("视频上传完成")
-
-            if our_video_id is None:
-                # 删除视频文件夹
-                shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-                return
-
-            # 视频信息保存数据库
-            rule_dict = {
-                "duration": {"min": 20, "max": 45 * 60},
-                "publish_day": {"min": 3}
-            }
-
-            insert_sql = f""" insert into crawler_video(video_id,
-                                                        out_user_id,
-                                                        platform,
-                                                        strategy,
-                                                        out_video_id,
-                                                        video_title,
-                                                        cover_url,
-                                                        video_url,
-                                                        duration,
-                                                        publish_time,
-                                                        play_cnt,
-                                                        crawler_rule,
-                                                        width,
-                                                        height)
-                                                        values({our_video_id},
-                                                        "{video_dict['user_id']}",
-                                                        "{cls.platform}",
-                                                        "定向爬虫策略",
-                                                        "{video_dict['video_id']}",
-                                                        "{video_dict['video_title']}",
-                                                        "{video_dict['cover_url']}",
-                                                        "{video_dict['video_url']}",
-                                                        {int(video_dict['duration'])},
-                                                        "{video_dict['publish_time_str']}",
-                                                        {int(video_dict['play_cnt'])},
-                                                        '{json.dumps(rule_dict)}',
-                                                        {int(video_dict['video_width'])},
-                                                        {int(video_dict['video_height'])}) """
-            Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-            MysqlHelper.update_values(log_type, crawler, insert_sql, env)
-            Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
-
-            # 视频写入飞书
-            Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
-            # 视频ID工作表,首行写入数据
-            upload_time = int(time.time())
-            values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
-                       "用户主页",
-                       video_dict['video_title'],
-                       video_dict['video_id'],
-                       our_video_link,
-                       int(video_dict['duration']),
-                       f"{video_dict['video_width']}*{video_dict['video_height']}",
-                       video_dict['publish_time_str'],
-                       video_dict['user_name'],
-                       video_dict['user_id'],
-                       video_dict['avatar_url'],
-                       video_dict['cover_url'],
-                       video_dict['article_url'],
-                       video_dict['video_url']]]
-            time.sleep(0.5)
-            Feishu.update_values(log_type, crawler, "47e39d", "F2:Z2", values)
-            Common.logger(log_type, crawler).info('视频下载/上传成功\n')
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error(f"download_publish异常:{e}\n")
-
-    @classmethod
-    def get_users(cls):
-        # user_sheet = Feishu.get_values_batch("follow", 'gongzhonghao', 'Bzv72P')
-        # user_list = []
-        # for i in range(81, len(user_sheet)):
-        #     user_name = user_sheet[i][0]
-        #     index = user_sheet[i][1]
-        #     user_dict = {
-        #         "user_name": user_name,
-        #         "index": index,
-        #     }
-        #     user_list.append(user_dict)
-        # print(len(user_list))
-        # print(user_list)
-
-        user_list = [{'user_name': '方敏爱美食', 'index': 1}, {'user_name': '针灸推拿特色技术', 'index': 1}, {'user_name': '挺进天山', 'index': 1}, {'user_name': '紫陌捻花', 'index': 1}, {'user_name': '巨响养身', 'index': 1}, {'user_name': '荣观世界', 'index': 1}, {'user_name': 'Music音乐世界', 'index': 1}, {'user_name': '微观调查组', 'index': 1}, {'user_name': '用汉方拥抱世界', 'index': 1}, {'user_name': '医学养身秘诀', 'index': 1}, {'user_name': '医学老人养身', 'index': 1}, {'user_name': '热文微观', 'index': 1}, {'user_name': '医学养身秘笈', 'index': 1}, {'user_name': '你未读消息', 'index': 1}, {'user_name': '零点相聚', 'index': 2}, {'user_name': '观念颠覆一切', 'index': 1}, {'user_name': '侯老师说食疗精选', 'index': 1}, {'user_name': '侯老师说食疗', 'index': 1}, {'user_name': '今日看点收集', 'index': 1}, {'user_name': '君拍', 'index': 1}, {'user_name': '惊爆视频', 'index': 3}, {'user_name': '绝美生活', 'index': 2}, {'user_name': '新龙虎局势', 'index': 1}, {'user_name': '行走的足音', 'index': 1}, {'user_name': '月光下小夜曲', 'index': 1}, {'user_name': '罪与罚的言', 'index': 1}, {'user_name': '祝福音画', 'index': 1}, {'user_name': '这年头儿', 'index': 1}, {'user_name': '祝福励志正能量', 'index': 1}, {'user_name': '出借人清查组', 'index': 1}, {'user_name': '强哥来了', 'index': 1}, {'user_name': '绝美相册', 'index': 1}, {'user_name': '绝美立体相册', 'index': 1}, {'user_name': '生活美相册', 'index': 1}, {'user_name': '祝您生活幸福', 'index': 1}, {'user_name': '完美生活', 'index': 3}, {'user_name': '新龙虎局世', 'index': 1}, {'user_name': '精美音画相册', 'index': 1}, {'user_name': '音画场景', 'index': 1}, {'user_name': '出借人投诉处', 'index': 1}]
-        return user_list
-
-    @classmethod
-    def get_all_videos(cls, log_type, crawler, oss_endpoint, env):
-        user_list = cls.get_users()
-        for user_dict in user_list:
-            try:
-                user_name = user_dict['user_name']
-                index = user_dict['index']
-                Common.logger(log_type, crawler).info(f'获取 {user_name} 公众号视频\n')
-                cls.get_videoList(log_type, crawler, user_name, index, oss_endpoint, env)
-                cls.begin = 0
-                Common.logger(log_type, crawler).info('休眠 60 秒\n')
-                time.sleep(60)
-            except Exception as e:
-                Common.logger(log_type, crawler).info(f'get_all_videos异常:{e}\n')
-
-
-if __name__ == "__main__":
-    GongzhonghaoFollow3.get_users()
-    # GongzhonghaoFollow.get_users()
-    # GongzhonghaoFollow.get_videoList(log_type="follow",
-    #                                  crawler="gongzhonghao",
-    #                                  user="香音难忘",
-    #                                  index=1,
-    #                                  oss_endpoint="out",
-    #                                  env="dev")
-    pass

+ 1 - 1
gongzhonghao/gongzhonghao_main/run_gzh_author_old.py

@@ -135,4 +135,4 @@ if __name__ == "__main__":
          crawler=args.crawler,
          topic_name=args.topic_name,
          group_id=args.group_id,
-         env=args.env)
+         env=args.env)

+ 63 - 61
jixiangxingfu/jixiangxingfu_recommend/jixiangxingfu_recommend.py

@@ -49,70 +49,72 @@ class JixiangxingfuRecommend:
 
     @classmethod
     def start_wechat(cls, log_type, crawler, env):
-        # try:
-        if env == "dev":
-            chromedriverExecutable = '/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver'
-        else:
-            chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
-        Common.logger(log_type, crawler).info('启动微信')
-        caps = {
-            "platformName": "Android",  # 手机操作系统 Android / iOS
-            "deviceName": "a0a65126",  # 连接的设备名(模拟器或真机),安卓可以随便写
-            "platforVersion": "11",  # 手机对应的系统版本(Android 11)
-            "appPackage": "com.tencent.mm",  # 被测APP的包名,乐活圈 Android
-            "appActivity": ".ui.LauncherUI",  # 启动的Activity名
-            "autoGrantPermissions": "true",  # 让 appium 自动授权 base 权限,
-            # 如果 noReset 为 True,则该条不生效(该参数为 Android 独有),对应的值为 True 或 False
-            "unicodekeyboard": True,  # 使用自带输入法,输入中文时填True
-            "resetkeyboard": True,  # 执行完程序恢复原来输入法
-            "noReset": True,  # 不重置APP
-            "printPageSourceOnFailure": True,  # 找不到元素时,appium log 会完整记录当前页面的 pagesource
-            "newCommandTimeout": 6000,  # 初始等待时间
-            "automationName": "UiAutomator2",  # 使用引擎,默认为 Appium,
-            # 其中 Appium、UiAutomator2、Selendroid、Espresso 用于 Android,XCUITest 用于 iOS
-            "showChromedriverLog": True,
-            'enableWebviewDetailsCollection': True,
-            'setWebContentsDebuggingEnabled': True,
-            'recreateChromeDriverSessions': True,
-            'chromedriverExecutable': chromedriverExecutable,
-            "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
-            # "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
-            'browserName': ''
-        }
-        driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
-        driver.implicitly_wait(20)
-        # 向下滑动页面,展示出小程序选择面板
-        for i in range(120):
-            try:
-                # 发现微信消息 TAB,代表微信已启动成功
-                if driver.find_elements(By.ID, 'com.tencent.mm:id/f2s'):
-                    break
-                # 发现并关闭系统菜单栏
-                elif driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view'):
-                    Common.logger(log_type, crawler).info('发现并关闭系统下拉菜单栏')
-                    driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view').click()
-                else:
-                    pass
-            except NoSuchElementException:
-                time.sleep(1)
-        Common.logger(log_type, crawler).info('下滑,展示小程序选择面板')
-        size = driver.get_window_size()
-        driver.swipe(int(size['width'] * 0.5), int(size['height'] * 0.2), int(size['width'] * 0.5),
-                     int(size['height'] * 0.8), 200)
-        # 打开小程序"祝福大家好才是真好"
-        time.sleep(5)
-        Common.logger(log_type, crawler).info('打开小程序"祝福大家好才是真好"')
-        driver.find_elements(By.XPATH, '//*[@text="祝福大家好才是真好"]')[-1].click()
+        try:
+            if env == "dev":
+                chromedriverExecutable = '/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver'
+            else:
+                chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
+            Common.logger(log_type, crawler).info('启动微信')
+            caps = {
+                "platformName": "Android",  # 手机操作系统 Android / iOS
+                "deviceName": "a0a65126",  # 连接的设备名(模拟器或真机),安卓可以随便写
+                "platformVersion": "11",  # 手机对应的系统版本(Android 11)
+                "appPackage": "com.tencent.mm",  # 被测APP的包名,乐活圈 Android
+                "appActivity": ".ui.LauncherUI",  # 启动的Activity名
+                "autoGrantPermissions": "true",  # 让 appium 自动授权 base 权限,
+                # 如果 noReset 为 True,则该条不生效(该参数为 Android 独有),对应的值为 True 或 False
+                "unicodekeyboard": True,  # 使用自带输入法,输入中文时填True
+                "resetkeyboard": True,  # 执行完程序恢复原来输入法
+                "noReset": True,  # 不重置APP
+                "printPageSourceOnFailure": True,  # 找不到元素时,appium log 会完整记录当前页面的 pagesource
+                "newCommandTimeout": 6000,  # 初始等待时间
+                "automationName": "UiAutomator2",  # 使用引擎,默认为 Appium,
+                # 其中 Appium、UiAutomator2、Selendroid、Espresso 用于 Android,XCUITest 用于 iOS
+                "showChromedriverLog": True,
+                'enableWebviewDetailsCollection': True,
+                'setWebContentsDebuggingEnabled': True,
+                'recreateChromeDriverSessions': True,
+                'chromedriverExecutable': chromedriverExecutable,
+                "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
+                # "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
+                'browserName': ''
+            }
+            driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
+            driver.implicitly_wait(20)
+            # 向下滑动页面,展示出小程序选择面板
+            for i in range(120):
+                try:
+                    # 发现微信消息 TAB,代表微信已启动成功
+                    if driver.find_elements(By.ID, 'com.tencent.mm:id/f2s'):
+                        break
+                    # 发现并关闭系统菜单栏
+                    elif driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view'):
+                        Common.logger(log_type, crawler).info('发现并关闭系统下拉菜单栏')
+                        driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view').click()
+                    else:
+                        pass
+                except NoSuchElementException:
+                    time.sleep(1)
+            Common.logger(log_type, crawler).info('下滑,展示小程序选择面板')
+            size = driver.get_window_size()
+            driver.swipe(int(size['width'] * 0.5), int(size['height'] * 0.2), int(size['width'] * 0.5),
+                         int(size['height'] * 0.8), 200)
+            # 打开小程序"祝福大家好才是真好"
+            time.sleep(5)
+            Common.logger(log_type, crawler).info('打开小程序"祝福大家好才是真好"')
+            driver.find_elements(By.XPATH, '//*[@text="祝福大家好才是真好"]')[-1].click()
 
-        # 获取视频信息
-        time.sleep(5)
-        cls.get_videoList(log_type, crawler, driver, env)
+            # 获取视频信息
+            time.sleep(5)
+            cls.get_videoList(log_type, crawler, driver, env)
 
-        # 退出微信
-        cls.quit(log_type, crawler, driver)
+            # 退出微信
+            cls.quit(log_type, crawler, driver)
 
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error('start_wechat异常:{},重启 ADB\n', e)
+        except Exception as e:
+            Common.logger(log_type, crawler).error('start_wechat异常:{},重启 ADB\n', e)
+            cmd = "adb kill-server && adb start-server"
+            os.system(cmd)
 
     # 退出 APP
     @classmethod
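
Both recommend crawlers in this commit now catch start_wechat failures and restart ADB with "adb kill-server && adb start-server". A hypothetical wrapper (not part of the commit) that applies the same recovery and retries once before giving up:

import os
import time


# hypothetical retry wrapper around the recovery step added in the except branches above
def with_adb_recovery(start_fn, log_type, crawler, env, retries=1):
    for _ in range(retries + 1):
        try:
            return start_fn(log_type, crawler, env)
        except Exception:
            os.system("adb kill-server && adb start-server")  # same command as the diff uses
            time.sleep(5)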

+ 0 - 0
kanyikan/kanyikan_main/run_kykmv_recommend.py → kanyikan/kanyikan_main/run_kykjk_recommend.py


+ 1 - 2
kanyikan/kanyikan_recommend/kanyikan_recommend.py

@@ -26,8 +26,7 @@ class KanyikanRecommend:
 
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
-        # sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}" """
-        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
+        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and create_time>='2023-06-26' and out_video_id="{video_id}"; """
         repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)
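
The repeat_video query above is narrowed in the same way as the ganggangdouchuan checks earlier in this commit: the platform match accepts either the crawler name or the class-level platform label, and only rows created on or after 2023-06-26 count as duplicates. A minimal sketch of the resulting check, assuming MysqlHelper.get_values returns the matching rows as a list:

from common.scheduling_db import MysqlHelper


# sketch only: True when this out_video_id was already stored since 2023-06-26
def is_duplicate(log_type, crawler, platform, video_id, env):
    sql = f""" select * from crawler_video where platform in ("{crawler}","{platform}") and create_time>='2023-06-26' and out_video_id="{video_id}"; """
    return len(MysqlHelper.get_values(log_type, crawler, sql, env)) != 0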
 

+ 128 - 128
kanyikan/kanyikan_recommend/kanyikan_recommend0627.py

@@ -26,143 +26,143 @@ class KanyikanRecommend:
 
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
-        # sql = f""" select * from crawler_video where platform="{cls.platform}" and strategy="{cls.strategy}" and out_video_id="{video_id}" """
-        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
+        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and create_time>='2023-06-26' and out_video_id="{video_id}"; """
         repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)
 
     @classmethod
     def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
         mq = MQ(topic_name="topic_crawler_etl_" + env)
-        try:
-            Common.logger(log_type, crawler).info(f"正在抓取列表页")
-            Common.logging(log_type, crawler, env, f"正在抓取列表页")
-            session = Common.get_session(log_type, crawler, env)
-            if session is None:
-                time.sleep(1)
-                cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
-            url = 'https://search.weixin.qq.com/cgi-bin/recwxa/recwxavideolist?'
-            header = {
-                "Connection": "keep-alive",
-                "content-type": "application/json",
-                "Accept-Encoding": "gzip,compress,br,deflate",
-                "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) "
-                              "AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.18(0x18001236) "
-                              "NetType/WIFI Language/zh_CN",
-                "Referer": "https://servicewechat.com/wxbb9a805eb4f9533c/234/page-frame.html",
-            }
-            params = {
-                'session': session,
-                "offset": 0,
-                "wxaVersion": "3.9.2",
-                "count": "10",
-                "channelid": "208",
-                "scene": '310',
-                "subscene": '1089',
-                "clientVersion": '8.0.18',
-                "sharesearchid": '0',
-                "nettype": 'wifi',
-                "switchprofile": "0",
-                "switchnewuser": "0",
-            }
-            urllib3.disable_warnings()
-            response = requests.get(url=url, headers=header, params=params, proxies=proxies, verify=False)
-            if "data" not in response.text:
-                Common.logger(log_type, crawler).info("获取视频list时,session过期,随机睡眠 31-50 秒")
-                Common.logging(log_type, crawler, env, "获取视频list时,session过期,随机睡眠 31-50 秒")
-                # 如果返回空信息,则随机睡眠 31-40 秒
-                time.sleep(random.randint(31, 40))
-                cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
-            elif "items" not in response.json()["data"]:
-                Common.logger(log_type, crawler).info(f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
-                Common.logging(log_type, crawler, env, f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
-                # 如果返回空信息,则随机睡眠 1-3 分钟
-                time.sleep(random.randint(60, 180))
-                cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
-            feeds = response.json().get("data", {}).get("items", "")
-            if feeds == "":
-                Common.logger(log_type, crawler).info(f"feeds:{feeds}")
-                Common.logging(log_type, crawler, env, f"feeds:{feeds}")
-                return
-            for i in range(len(feeds)):
-                try:
-                    video_title = feeds[i].get("title", "").strip().replace("\n", "") \
-                        .replace("/", "").replace("\\", "").replace("\r", "") \
-                        .replace(":", "").replace("*", "").replace("?", "") \
-                        .replace("?", "").replace('"', "").replace("<", "") \
-                        .replace(">", "").replace("|", "").replace(" ", "") \
-                        .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
-                        .replace("'", "").replace("#", "").replace("Merge", "")
-                    publish_time_stamp = feeds[i].get("date", 0)
-                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                    # 获取播放地址
-                    if "videoInfo" not in feeds[i]:
-                        video_url = ""
-                    elif "mpInfo" in feeds[i]["videoInfo"]["videoCdnInfo"]:
-                        if len(feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"]) > 2:
-                            video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][2]["url"]
+        for page in range(1, 3):
+            try:
+                Common.logger(log_type, crawler).info(f"正在抓取第{page}页")
+                Common.logging(log_type, crawler, env, f"正在抓取第{page}页")
+                session = Common.get_session(log_type, crawler, env)
+                if session is None:
+                    time.sleep(1)
+                    cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
+                url = 'https://search.weixin.qq.com/cgi-bin/recwxa/recwxavideolist?'
+                header = {
+                    "Connection": "keep-alive",
+                    "content-type": "application/json",
+                    "Accept-Encoding": "gzip,compress,br,deflate",
+                    "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) "
+                                  "AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.18(0x18001236) "
+                                  "NetType/WIFI Language/zh_CN",
+                    "Referer": "https://servicewechat.com/wxbb9a805eb4f9533c/234/page-frame.html",
+                }
+                params = {
+                    'session': session,
+                    "offset": 0,
+                    "wxaVersion": "3.9.2",
+                    "count": "10",
+                    "channelid": "208",
+                    "scene": '310',
+                    "subscene": '1089',
+                    "clientVersion": '8.0.18',
+                    "sharesearchid": '0',
+                    "nettype": 'wifi',
+                    "switchprofile": "0",
+                    "switchnewuser": "0",
+                }
+                urllib3.disable_warnings()
+                response = requests.get(url=url, headers=header, params=params, proxies=proxies, verify=False)
+                if "data" not in response.text:
+                    Common.logger(log_type, crawler).info("获取视频list时,session过期,随机睡眠 31-50 秒")
+                    Common.logging(log_type, crawler, env, "获取视频list时,session过期,随机睡眠 31-50 秒")
+                    # 如果返回空信息,则随机睡眠 31-40 秒
+                    time.sleep(random.randint(31, 40))
+                    cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
+                elif "items" not in response.json()["data"]:
+                    Common.logger(log_type, crawler).info(f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
+                    Common.logging(log_type, crawler, env, f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
+                    # 如果返回空信息,则随机睡眠 1-3 分钟
+                    time.sleep(random.randint(60, 180))
+                    cls.get_videoList(log_type, crawler, our_uid, rule_dict, env)
+                feeds = response.json().get("data", {}).get("items", "")
+                if feeds == "":
+                    Common.logger(log_type, crawler).info(f"feeds:{feeds}")
+                    Common.logging(log_type, crawler, env, f"feeds:{feeds}")
+                    return
+                for i in range(len(feeds)):
+                    try:
+                        video_title = feeds[i].get("title", "").strip().replace("\n", "") \
+                            .replace("/", "").replace("\\", "").replace("\r", "") \
+                            .replace(":", "").replace("*", "").replace("?", "") \
+                            .replace("?", "").replace('"', "").replace("<", "") \
+                            .replace(">", "").replace("|", "").replace(" ", "") \
+                            .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
+                            .replace("'", "").replace("#", "").replace("Merge", "")
+                        publish_time_stamp = feeds[i].get("date", 0)
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                        # 获取播放地址
+                        if "videoInfo" not in feeds[i]:
+                            video_url = ""
+                        elif "mpInfo" in feeds[i]["videoInfo"]["videoCdnInfo"]:
+                            if len(feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"]) > 2:
+                                video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][2]["url"]
+                            else:
+                                video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][0]["url"]
+                        elif "ctnInfo" in feeds[i]["videoInfo"]["videoCdnInfo"]:
+                            video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["ctnInfo"]["urlInfo"][0]["url"]
                         else:
-                            video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][0]["url"]
-                    elif "ctnInfo" in feeds[i]["videoInfo"]["videoCdnInfo"]:
-                        video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["ctnInfo"]["urlInfo"][0]["url"]
-                    else:
-                        video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["urlInfo"][0]["url"]
-                    video_dict = {
-                        "video_title": video_title,
-                        "video_id": feeds[i].get("videoId", ""),
-                        "play_cnt": feeds[i].get("playCount", 0),
-                        "like_cnt": feeds[i].get("liked_cnt", 0),
-                        "comment_cnt": feeds[i].get("comment_cnt", 0),
-                        "share_cnt": feeds[i].get("shared_cnt", 0),
-                        "duration": feeds[i].get("mediaDuration", 0),
-                        "video_width": feeds[i].get("short_video_info", {}).get("width", 0),
-                        "video_height": feeds[i].get("short_video_info", {}).get("height", 0),
-                        "publish_time_stamp": publish_time_stamp,
-                        "publish_time_str": publish_time_str,
-                        "user_name": feeds[i].get("source", "").strip().replace("\n", ""),
-                        "user_id": feeds[i].get("openid", ""),
-                        "avatar_url": feeds[i].get("bizIcon", ""),
-                        "cover_url": feeds[i].get("thumbUrl", ""),
-                        "video_url": video_url,
-                        "session": session,
-                    }
-                    for k, v in video_dict.items():
-                        Common.logger(log_type, crawler).info(f"{k}:{v}")
-                    Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
+                            video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["urlInfo"][0]["url"]
+                        video_dict = {
+                            "video_title": video_title,
+                            "video_id": feeds[i].get("videoId", ""),
+                            "play_cnt": feeds[i].get("playCount", 0),
+                            "like_cnt": feeds[i].get("liked_cnt", 0),
+                            "comment_cnt": feeds[i].get("comment_cnt", 0),
+                            "share_cnt": feeds[i].get("shared_cnt", 0),
+                            "duration": feeds[i].get("mediaDuration", 0),
+                            "video_width": feeds[i].get("short_video_info", {}).get("width", 0),
+                            "video_height": feeds[i].get("short_video_info", {}).get("height", 0),
+                            "publish_time_stamp": publish_time_stamp,
+                            "publish_time_str": publish_time_str,
+                            "user_name": feeds[i].get("source", "").strip().replace("\n", ""),
+                            "user_id": feeds[i].get("openid", ""),
+                            "avatar_url": feeds[i].get("bizIcon", ""),
+                            "cover_url": feeds[i].get("thumbUrl", ""),
+                            "video_url": video_url,
+                            "session": session,
+                        }
+                        for k, v in video_dict.items():
+                            Common.logger(log_type, crawler).info(f"{k}:{v}")
+                        Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
 
-                    if video_dict["video_id"] == "" or video_dict["video_title"] == "" or video_dict["video_url"] == "":
-                        Common.logger(log_type, crawler).info("无效视频\n")
-                        Common.logging(log_type, crawler, env, "无效视频\n")
-                    elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
-                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
-                        Common.logging(log_type, crawler, env, "不满足抓取规则\n")
-                    elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
-                        Common.logger(log_type, crawler).info('视频已下载\n')
-                        Common.logging(log_type, crawler, env, '视频已下载\n')
-                    else:
-                        # cls.download_publish(log_type=log_type,
-                        #                      crawler=crawler,
-                        #                      our_uid=our_uid,
-                        #                      video_dict=video_dict,
-                        #                      rule_dict=rule_dict,
-                        #                      env=env)
-                        video_dict["out_user_id"] = video_dict["user_id"]
-                        video_dict["platform"] = crawler
-                        video_dict["strategy"] = log_type
-                        video_dict["out_video_id"] = video_dict["video_id"]
-                        video_dict["width"] = video_dict["video_width"]
-                        video_dict["height"] = video_dict["video_height"]
-                        video_dict["crawler_rule"] = json.dumps(rule_dict)
-                        video_dict["user_id"] = our_uid
-                        video_dict["publish_time"] = video_dict["publish_time_str"]
+                        if video_dict["video_id"] == "" or video_dict["video_title"] == "" or video_dict["video_url"] == "":
+                            Common.logger(log_type, crawler).info("无效视频\n")
+                            Common.logging(log_type, crawler, env, "无效视频\n")
+                        elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                            Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                            Common.logging(log_type, crawler, env, "不满足抓取规则\n")
+                        elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                            Common.logger(log_type, crawler).info('视频已下载\n')
+                            Common.logging(log_type, crawler, env, '视频已下载\n')
+                        else:
+                            # cls.download_publish(log_type=log_type,
+                            #                      crawler=crawler,
+                            #                      our_uid=our_uid,
+                            #                      video_dict=video_dict,
+                            #                      rule_dict=rule_dict,
+                            #                      env=env)
+                            video_dict["out_user_id"] = video_dict["user_id"]
+                            video_dict["platform"] = crawler
+                            video_dict["strategy"] = log_type
+                            video_dict["out_video_id"] = video_dict["video_id"]
+                            video_dict["width"] = video_dict["video_width"]
+                            video_dict["height"] = video_dict["video_height"]
+                            video_dict["crawler_rule"] = json.dumps(rule_dict)
+                            video_dict["user_id"] = our_uid
+                            video_dict["publish_time"] = video_dict["publish_time_str"]
 
-                        mq.send_msg(video_dict)
-                except Exception as e:
-                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
-                    Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f"抓取列表页时异常:{e}\n")
-            Common.logging(log_type, crawler, env, f"抓取列表页时异常:{e}\n")
+                            mq.send_msg(video_dict)
+                    except Exception as e:
+                        Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+                        Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"抓取第{page}页时异常:{e}\n")
+                Common.logging(log_type, crawler, env, f"抓取第{page}页时异常:{e}\n")
 
     @classmethod
     def download_publish(cls, log_type, crawler, our_uid, video_dict, rule_dict, env):

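Note: the video_title handling above chains more than a dozen .replace() calls to strip newlines, path separators, Windows-reserved characters, whitespace and a few fixed substrings. Below is a hedged sketch of the same cleanup as a single regular expression; the character set is reconstructed from the chain above and may not match it exactly (the original also maps "." to "。", kept here as a separate step).

```python
# -*- coding: utf-8 -*-
# Sketch only: regex equivalent of the .replace() chain used on video_title above.
# Multi-character tokens come first in the pattern so they are removed whole.
import re

_BLACKLIST = re.compile(r"""&NBSP|Merge|[\n\r/\\:*?？"<>|#'\s]""")


def clean_title(raw_title: str) -> str:
    title = raw_title.strip().replace(".", "。")  # same full-width substitution as above
    return _BLACKLIST.sub("", title)


if __name__ == "__main__":
    print(clean_title(' 测试/标题?&NBSP"demo".mp4 '))  # -> 测试标题demo。mp4
```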
+ 2 - 128
kanyikan/kanyikan_recommend/kanyikan_recommend0705.py

@@ -4,17 +4,13 @@
 import json
 import os
 import random
-import shutil
 import sys
 import time
-from hashlib import md5
 import requests
 import urllib3
 sys.path.append(os.getcwd())
 from common.mq import MQ
 from common.common import Common
-from common.feishu import Feishu
-from common.publish import Publish
 from common.scheduling_db import MysqlHelper
 from common.public import get_config_from_mysql, download_rule
 proxies = {"http": None, "https": None}
@@ -26,8 +22,7 @@ class KanyikanRecommend:
 
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
-        # sql = f""" select * from crawler_video where platform="{cls.platform}" and strategy="{cls.strategy}" and out_video_id="{video_id}" """
-        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
+        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and create_time>='2023-06-26' and out_video_id="{video_id}"; """
         repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)
 
@@ -140,16 +135,10 @@ class KanyikanRecommend:
                         Common.logger(log_type, crawler).info('视频已下载\n')
                         Common.logging(log_type, crawler, env, '视频已下载\n')
                     else:
-                        # cls.download_publish(log_type=log_type,
-                        #                      crawler=crawler,
-                        #                      our_uid=our_uid,
-                        #                      video_dict=video_dict,
-                        #                      rule_dict=rule_dict,
-                        #                      env=env)
                         video_dict["out_user_id"] = video_dict["user_id"]
                         video_dict["platform"] = crawler
                         video_dict["strategy"] = log_type
-                        video_dict["strategy_type"] = "mv"
+                        video_dict["strategy_type"] = "jk"
                         video_dict["out_video_id"] = video_dict["video_id"]
                         video_dict["width"] = video_dict["video_width"]
                         video_dict["height"] = video_dict["video_height"]
@@ -165,121 +154,6 @@ class KanyikanRecommend:
             Common.logger(log_type, crawler).error(f"抓取列表页时异常:{e}\n")
             Common.logging(log_type, crawler, env, f"抓取列表页时异常:{e}\n")
 
-    @classmethod
-    def download_publish(cls, log_type, crawler, our_uid, video_dict, rule_dict, env):
-        # 下载视频
-        Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
-        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-        try:
-            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
-                # 删除视频文件夹
-                shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
-                Common.logging(log_type, crawler, env, "视频size=0,删除成功\n")
-                return
-        except FileNotFoundError:
-            # 删除视频文件夹
-            shutil.rmtree(f"./{crawler}/videos/{md_title}")
-            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
-            Common.logging(log_type, crawler, env, "视频文件不存在,删除文件夹成功\n")
-            return
-        # 下载封面
-        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
-        # 保存视频信息至txt
-        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
-
-        # 上传视频
-        Common.logger(log_type, crawler).info("开始上传视频...")
-        Common.logging(log_type, crawler, env, "开始上传视频...")
-        if env == "dev":
-            oss_endpoint = "out"
-            our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                      crawler=crawler,
-                                                      strategy=cls.strategy,
-                                                      our_uid=our_uid,
-                                                      env=env,
-                                                      oss_endpoint=oss_endpoint)
-            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-        else:
-            our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                      crawler=crawler,
-                                                      strategy=cls.strategy,
-                                                      our_uid=our_uid,
-                                                      env=env,
-                                                      oss_endpoint="out")
-
-            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-
-        if our_video_id is None:
-            try:
-                # 删除视频文件夹
-                shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                return
-            except FileNotFoundError:
-                return
-
-        # 视频信息保存数据库
-        insert_sql = f""" insert into crawler_video(video_id,
-                                                user_id,
-                                                out_user_id,
-                                                platform,
-                                                strategy,
-                                                out_video_id,
-                                                video_title,
-                                                cover_url,
-                                                video_url,
-                                                duration,
-                                                publish_time,
-                                                play_cnt,
-                                                crawler_rule,
-                                                width,
-                                                height)
-                                                values({our_video_id},
-                                                {our_uid},
-                                                "{video_dict['user_id']}",
-                                                "{cls.platform}",
-                                                "{cls.strategy}",
-                                                "{video_dict['video_id']}",
-                                                "{video_dict['video_title']}",
-                                                "{video_dict['cover_url']}",
-                                                "{video_dict['video_url']}",
-                                                {int(video_dict['duration'])},
-                                                "{video_dict['publish_time_str']}",
-                                                {int(video_dict['play_cnt'])},
-                                                '{json.dumps(rule_dict)}',
-                                                {int(video_dict['video_width'])},
-                                                {int(video_dict['video_height'])}) """
-        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
-        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action="")
-        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
-        Common.logging(log_type, crawler, env, '视频信息写入数据库成功')
-
-        # 保存视频信息到云文档:
-        Feishu.insert_columns(log_type, crawler, "20ce0c", "ROWS", 1, 2)
-        # 看一看+ ,视频ID工作表,首行写入数据
-        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
-                   "推荐榜",
-                   str(video_dict["video_id"]),
-                   str(video_dict["video_title"]),
-                   our_video_link,
-                   video_dict["play_cnt"],
-                   video_dict["comment_cnt"],
-                   video_dict["like_cnt"],
-                   video_dict["share_cnt"],
-                   video_dict["duration"],
-                   f'{video_dict["video_width"]}*{video_dict["video_height"]}',
-                   video_dict["publish_time_str"],
-                   video_dict["user_name"],
-                   video_dict["user_id"],
-                   video_dict["avatar_url"],
-                   video_dict["cover_url"],
-                   video_dict["video_url"]]]
-        time.sleep(0.5)
-        Feishu.update_values(log_type, crawler, "20ce0c", "F2:Z2", values)
-        Common.logger(log_type, crawler).info("视频信息保存至云文档成功\n")
-        Common.logging(log_type, crawler, env, "视频信息保存至云文档成功\n")
-
 
 if __name__ == "__main__":
     print(get_config_from_mysql(log_type="recommend",

+ 6 - 1
kuaishou/kuaishou_main/run_ks_author.py

@@ -32,6 +32,7 @@ def main(log_type, crawler, topic_name, group_id, env):
             # 长轮询消费消息。
             recv_msgs = consumer.consume_message(batch, wait_seconds)
             for msg in recv_msgs:
+                kuaishou_author_start_time = int(time.time())
                 Common.logger(log_type, crawler).info(f"Receive\n"
                                                       f"MessageId:{msg.message_id}\n"
                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
@@ -73,9 +74,13 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                            rule_dict=rule_dict,
                                                            user_list=user_list,
                                                            env=env)
-                Common.del_logs(log_type, crawler)
+                # Common.del_logs(log_type, crawler)
                 Common.logger(log_type, crawler).info('抓取一轮结束\n')
                 Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+                kuaishou_author_end_time = int(time.time())
+                kuaishou_author_duration = kuaishou_author_end_time - kuaishou_author_start_time
+                Common.logger(log_type, crawler).info(f"duration {kuaishou_author_duration}")
+                Common.logging(log_type, crawler, env, f"duration {kuaishou_author_duration}")
 
         except MQExceptionBase as err:
             # Topic中没有消息可消费。

+ 6 - 1
kuaishou/kuaishou_main/run_ks_recommend.py

@@ -33,6 +33,7 @@ def main(log_type, crawler, topic_name, group_id, env):
             # 长轮询消费消息。
             recv_msgs = consumer.consume_message(batch, wait_seconds)
             for msg in recv_msgs:
+                kuaishou_recommend_start_time = int(time.time())
                 Common.logger(log_type, crawler).info(f"Receive\n"
                                                       f"MessageId:{msg.message_id}\n"
                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
@@ -78,9 +79,13 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                           rule_dict=rule_dict,
                                                           our_uid=our_uid,
                                                           env=env)
-                Common.del_logs(log_type, crawler)
+                # Common.del_logs(log_type, crawler)
                 Common.logger(log_type, crawler).info('抓取一轮结束\n')
                 Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+                kuaishou_recommend_end_time = int(time.time())
+                kuaishou_recommend_duration = kuaishou_recommend_end_time - kuaishou_recommend_start_time
+                Common.logger(log_type, crawler).info(f"duration {kuaishou_recommend_duration}")
+                Common.logging(log_type, crawler, env, f"duration {kuaishou_recommend_duration}")
 
         except MQExceptionBase as err:
             # Topic中没有消息可消费。

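Note: both Kuaishou runners above now log a per-message duration computed from two int(time.time()) stamps. Below is a small sketch of the same measurement as a reusable context manager, so the start/end bookkeeping is not repeated in every runner; log_duration is a hypothetical helper, not something that exists in common.common.

```python
# -*- coding: utf-8 -*-
# Sketch only: reusable timer for the "duration" log lines added above.
# `log_duration` is a hypothetical helper, not part of the existing codebase.
import time
from contextlib import contextmanager


@contextmanager
def log_duration(logger, label):
    start = time.monotonic()  # monotonic clock is immune to system clock changes
    try:
        yield
    finally:
        logger.info(f"{label} duration {int(time.monotonic() - start)}s")
```

Usage would wrap the per-message crawl, e.g. `with log_duration(Common.logger(log_type, crawler), "kuaishou_author"): ...`.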
+ 14 - 4
main/process_mq.sh

@@ -15,14 +15,24 @@ elif [ ${env} = "hk" ];then
   profile_path=/etc/profile
   python=python3
   log_path=${piaoquan_crawler_dir}main/main_logs/process-mq-$(date +%Y-%m-%d).log
-elif [ ${crawler} = "kyk" ];then
+elif [ ${crawler} = "kykjk" ];then
+  piaoquan_crawler_dir=/Users/kanyikan/Desktop/crawler/piaoquan_crawler/
+  profile_path=/.base_profile
+  python=python3
+  log_path=${piaoquan_crawler_dir}main/main_logs/process-mq-$(date +%Y-%m-%d).log
+elif [ ${crawler} = "sph" ] && [ ${log_type} = "search" ];then
+  piaoquan_crawler_dir=/Users/piaoquan/Desktop/piaoquan_crawler/
+  profile_path=/etc/profile
+  python=python3
+  log_path=${piaoquan_crawler_dir}main/main_logs/process-mq-$(date +%Y-%m-%d).log
+elif [ ${crawler} = "kyk" ] || [ ${crawler} = "sph" ];then
   piaoquan_crawler_dir=/Users/lieyunye/Desktop/crawler/piaoquan_crawler/
   profile_path=./base_profile
   python=python3
   log_path=${piaoquan_crawler_dir}main/main_logs/process-mq-$(date +%Y-%m-%d).log
-elif [ ${crawler} = "kykmv" ];then
-  piaoquan_crawler_dir=/Users/kanyikan/Desktop/crawler/piaoquan_crawler/
-  profile_path=/.base_profile
+elif [ ${crawler} = "xgms" ];then
+  piaoquan_crawler_dir=/Users/piaoquan/Desktop/piaoquan_crawler/
+  profile_path=/etc/profile
   python=python3
   log_path=${piaoquan_crawler_dir}main/main_logs/process-mq-$(date +%Y-%m-%d).log
 elif [ ${crawler} = "xg" ] && [ ${log_type} = "recommend" ];then

+ 11 - 0
main/process_offline.sh

@@ -32,6 +32,8 @@ fi
 # 知青天天看
 if [[ "$time" > "00:00:00" ]] && [[ "$time" < "08:59:59" ]]; then
   echo "$(date "+%Y-%m-%d %H:%M:%S") 开始启动 知青天天看 爬虫脚本任务" >> ${log_path}
+#  adb kill-server
+#  adb start-server
   ps aux | grep run_zhongmiaoyinxin | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_ganggangdouchuan | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_jixiangxingfu | grep -v grep | awk '{print $2}' | xargs kill -9
@@ -51,6 +53,8 @@ fi
 # 刚刚都传
 if [[ "$time" > "09:00:00" ]] && [[ "$time" < "12:59:59" ]]; then
   echo "$(date "+%Y-%m-%d %H:%M:%S") 开始启动 刚刚都传 爬虫脚本任务" >> ${log_path}
+#  adb kill-server
+#  adb start-server
   ps aux | grep run_zhongmiaoyinxin | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_zhiqingtiantiankan | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_jixiangxingfu | grep -v grep | awk '{print $2}' | xargs kill -9
@@ -70,6 +74,11 @@ fi
 # 吉祥幸福
 if [[ "$time" > "13:00:00" ]] && [[ "$time" < "16:59:59" ]]; then
   echo "$(date "+%Y-%m-%d %H:%M:%S") 开始启动 吉祥幸福 爬虫脚本任务" >> ${log_path}
+#  adb kill-server
+#  adb start-server
   ps aux | grep run_zhongmiaoyinxin | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_zhiqingtiantiankan | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_ganggangdouchuan | grep -v grep | awk '{print $2}' | xargs kill -9
@@ -89,6 +98,8 @@ fi
 # 众妙音信
 if [[ "$time" > "17:00:00" ]] && [[ "$time" < "23:59:59" ]]; then
   echo "$(date "+%Y-%m-%d %H:%M:%S") 开始启动 众妙音信 爬虫脚本任务" >> ${log_path}
+#  adb kill-server
+#  adb start-server
   ps aux | grep run_ganggangdouchuan | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_zhiqingtiantiankan | grep -v grep | awk '{print $2}' | xargs kill -9
   ps aux | grep run_jixiangxingfu | grep -v grep | awk '{print $2}' | xargs kill -9

+ 53 - 0
main/start_appium.sh

@@ -0,0 +1,53 @@
+#! /bin/bash
+log_type=$1   # 爬虫策略
+crawler=$2    # 哪款爬虫
+env=$3        # 爬虫运行环境,正式环境: prod / 测试环境: dev
+
+if [ ${crawler} = "shipinhao" ] && [ ${log_type} = "recommend" ] && [ ${env} = "prod" ];then
+  piaoquan_crawler_dir=/Users/lieyunye/Desktop/crawler/piaoquan_crawler/
+  profile_path=~/.bash_profile
+  node_path=/usr/local/bin/node
+  log_path=${piaoquan_crawler_dir}main/main_logs/start-appium-$(date +%Y-%m-%d).log
+elif [ ${crawler} = "shipinhao" ] && [ ${log_type} = "search" ] && [ ${env} = "prod" ];then
+  piaoquan_crawler_dir=/Users/piaoquan/Desktop/piaoquan_crawler/
+  profile_path=~/.bash_profile
+  node_path=/usr/local/bin/node
+  log_path=${piaoquan_crawler_dir}main/main_logs/start-appium-$(date +%Y-%m-%d).log
+elif [ ${crawler} = "jixiangxingfu" ] || [ ${crawler} = "zhongmiaoyinxin" ] || [ ${crawler} = "zhiqingtiantiankan" ] || [ ${crawler} = "ganggangdouchuan" ];then
+  piaoquan_crawler_dir=/Users/piaoquan/Desktop/piaoquan_crawler/
+  profile_path=./base_profile
+  node_path=/usr/local/bin/node
+  log_path=${piaoquan_crawler_dir}main/main_logs/start-appium-$(date +%Y-%m-%d).log
+elif [ ${crawler} = "xigua" ] || [ ${log_type} = "recommend" ];then
+  piaoquan_crawler_dir=/Users/kanyikan/Desktop/crawler/piaoquan_crawler/
+  profile_path=/etc/profile
+  node_path=/usr/local/bin/node
+  log_path=${piaoquan_crawler_dir}main/main_logs/start-appium-$(date +%Y-%m-%d).log
+elif [ ${crawler} = "shipinhao" ] || [ ${log_type} = "search" ];then
+  piaoquan_crawler_dir=/Users/piaoquan/Desktop/piaoquan_crawler/
+  profile_path=/etc/profile
+  node_path=/usr/local/bin/node
+  log_path=${piaoquan_crawler_dir}main/main_logs/start-appium-$(date +%Y-%m-%d).log
+else
+  piaoquan_crawler_dir=/Users/wangkun/Desktop/crawler/piaoquan_crawler/
+  profile_path=/etc/profile
+  node_path=/opt/homebrew/bin/node
+  log_path=${piaoquan_crawler_dir}main/main_logs/start-appium-$(date +%Y-%m-%d).log
+fi
+
+echo "$(date "+%Y-%m-%d %H:%M:%S") 正在检测 Appium 运行状态 ..." >> ${log_path}
+ps -ef | grep "/Applications/Appium.app/Contents/Resources/app/node_modules/appium/build/lib/main.js" | grep -v "grep"
+if [ "$?" -eq 1 ];then
+  echo "$(date "+%Y-%m-%d %H:%M:%S") Appium异常停止,正在重启!" >> ${log_path}
+  cd ${piaoquan_crawler_dir} && nohup ${node_path} /Applications/Appium.app/Contents/Resources/app/node_modules/appium/build/lib/main.js >> main/main_logs/Appium.log 2>&1 &
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启Appium完毕!" >> ${log_path}
+else
+  cd ${piaoquan_crawler_dir} && rm -f main/main_logs/Appium.log
+  echo "$(date "+%Y-%m-%d %H:%M:%S") Appium 运行状态正常。" >> ${log_path}
+fi
+
+# 删除日志
+echo "$(date "+%Y-%m-%d %H:%M:%S") 开始清理 10 天前的日志文件" >> ${log_path}
+find ${piaoquan_crawler_dir}main/main_logs/ -mtime +10 -name "*.log" -exec rm -rf {} \;
+echo "$(date "+%Y-%m-%d %H:%M:%S") 日志文件清理完毕" >> ${log_path}
+exit 0

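Note: start_appium.sh above and run_monitor.sh further down both clean logs with `find ... -mtime +10 -name "*.log" -exec rm -rf {} \;`. For reference, a minimal sketch of the same cleanup in Python; the directory and age mirror the shell scripts, and the helper name is illustrative.

```python
# -*- coding: utf-8 -*-
# Sketch only: delete *.log files older than 10 days, mirroring the
# `find ... -mtime +10 -name "*.log" -exec rm -rf {} \;` step above.
import time
from pathlib import Path


def clean_old_logs(log_dir: str, max_age_days: int = 10) -> None:
    cutoff = time.time() - max_age_days * 24 * 3600
    for log_file in Path(log_dir).glob("*.log"):
        if log_file.stat().st_mtime < cutoff:
            log_file.unlink()


if __name__ == "__main__":
    clean_old_logs("main/main_logs")
```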
+ 1 - 1
kanyikan/kanyikan_moment/__init__.py → monitor/__init__.py

@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
-# @Time: 2023/6/21
+# @Time: 2023/7/20

+ 1 - 1
dev/logs/__init__.py → monitor/cpu_memory/__init__.py

@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
-# @Time: 2023/7/4
+# @Time: 2023/7/20

+ 111 - 0
monitor/cpu_memory/cpu_memory.py

@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/20
+import json
+import os
+import socket
+import sys
+import psutil
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+
+
+class MonitorCpuMemory:
+    @classmethod
+    def get_script_list(cls, log_type, crawler, env):
+        script_list = []
+        select_sql = f""" SELECT DISTINCT `key` FROM crawler_enum WHERE `key` LIKE "%run%";  """
+        sql_response = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
+        for response in sql_response:
+            script_list.append(response["key"])
+        script_list.append("run_cpu_memory")
+        return script_list
+
+    @classmethod
+    def get_ip_address(cls):
+        try:
+            # 创建一个 UDP 套接字
+            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+            # 连接到一个外部 IP 地址
+            sock.connect(("8.8.8.8", 80))
+            # 获取本地 IP 地址
+            local_ip = sock.getsockname()[0]
+            return local_ip
+        except socket.error:
+            return None
+
+    @classmethod
+    def get_pid_path(cls, script_name):
+        # 遍历所有正在运行的进程
+        for proc in psutil.process_iter():
+            try:
+                # 获取进程的命令行参数
+                cmds = proc.cmdline()
+                # 检查命令行参数是否包含爬虫脚本的名称或关键字
+                for cmd in cmds:
+                    if script_name in cmd:
+                        # Common.logger(log_type, crawler).info(f"cmd:{cmd}")
+                        # 获取进程的PID
+                        pid = proc.pid
+                        pid_path = {
+                            "pid": pid,
+                            "path": cmd,
+                        }
+                        return pid_path
+            except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
+                pass
+
+    @classmethod
+    def get_cpu_memory(cls, script_name):
+        # 获取当前进程的PID
+        pid_path = cls.get_pid_path(script_name)
+        # print(f"pid_path:{pid_path}")
+        if pid_path is None:
+            return None
+        # 获取CPU的使用情况
+        cpu_percent = round(psutil.Process(pid_path["pid"]).cpu_percent(), 2)
+        # 获取内存的使用情况
+        memory_percent = round(psutil.Process(pid_path["pid"]).memory_percent(), 2)
+        cpu_memory = {
+            "pid": pid_path["pid"],
+            "path": pid_path["path"],
+            "cpu": cpu_percent,
+            "memory": memory_percent,
+        }
+
+        return cpu_memory
+
+    @classmethod
+    def get_all_script_cpu_memory(cls, log_type, crawler, env):
+        script_list = cls.get_script_list(log_type, crawler, env)
+        for script_name in script_list:
+            try:
+                Common.logger(log_type, crawler).info(f"开始监控:{script_name}")
+                Common.logging(log_type, crawler, env, f"开始监控:{script_name}")
+                crawler_info = cls.get_cpu_memory(script_name)
+                if crawler_info is None:
+                    Common.logger(log_type, crawler).info(f"脚本未运行\n")
+                    Common.logging(log_type, crawler, env, f"脚本未运行\n")
+                    continue
+
+                script_info_dict = json.dumps({
+                    "crawler_name": script_name,
+                    "crawler_ip": cls.get_ip_address(),
+                    "crawler_pid": crawler_info["pid"],
+                    "crawler_path": crawler_info["path"],
+                    "crawler_cpu": crawler_info["cpu"],
+                    "crawler_memory": crawler_info["memory"]
+                })
+                Common.logger(log_type, crawler).info(f'script_info:{script_info_dict}\n')
+                Common.logging(log_type, crawler, env, f'{script_info_dict}')
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"监控{script_name}时异常:{e}\n")
+                Common.logging(log_type, crawler, env, f"监控{script_name}时异常:{e}\n")
+
+
+if __name__ == "__main__":
+    ipaddress = MonitorCpuMemory.get_ip_address()
+    print(ipaddress)
+
+    pass

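Note: one detail worth flagging in get_cpu_memory() above is that psutil.Process(pid).cpu_percent() called without an interval returns 0.0 the first time it is invoked on a freshly created Process object, because it needs two samples to compute a percentage. Below is a minimal sketch that samples over a short blocking interval instead; the 1-second interval is an arbitrary choice.

```python
# -*- coding: utf-8 -*-
# Sketch only: sample CPU over a short interval so the first reading is meaningful;
# psutil.Process(...).cpu_percent() with no interval returns 0.0 on its first call.
import psutil


def sample_cpu_memory(pid: int, interval: float = 1.0) -> dict:
    proc = psutil.Process(pid)
    return {
        "pid": pid,
        "cpu": round(proc.cpu_percent(interval=interval), 2),  # blocks for `interval` seconds
        "memory": round(proc.memory_percent(), 2),
    }
```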
+ 1 - 1
shipinhao/logs/__init__.py → monitor/monitor_main/__init__.py

@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
 # @Author: wangkun
-# @Time: 2023/5/9
+# @Time: 2023/7/20

+ 28 - 0
monitor/monitor_main/run_cpu_memory.py

@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/20
+import argparse
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.common import Common
+from monitor.cpu_memory.cpu_memory import MonitorCpuMemory
+
+
+class MonitorMain:
+    @classmethod
+    def monitor_main(cls, log_type, crawler, env):
+        Common.logger(log_type, crawler).info(f"开始监控脚本 cpu、memory 状态\n")
+        Common.logging(log_type, crawler, env, f"开始监控脚本 cpu、memory 状态\n")
+        MonitorCpuMemory.get_all_script_cpu_memory(log_type, crawler, env)
+        Common.logger(log_type, crawler).info("监控一轮结束\n")
+        Common.logging(log_type, crawler, env, "监控一轮结束\n")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
+    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
+    parser.add_argument('--crawler')  ## 添加参数
+    parser.add_argument('--env')  ## 添加参数
+    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
+    MonitorMain.monitor_main(log_type=args.log_type, crawler=args.crawler, env=args.env)

+ 39 - 0
monitor/monitor_main/run_monitor.sh

@@ -0,0 +1,39 @@
+#!/bin/bash
+
+path=$1     # 爬虫路径
+log_type=$2 # 爬虫策略
+crawler=$3  # 哪款爬虫
+env=$4      # 环境
+
+if [ ${env} = "dev" ];then
+  piaoquan_crawler_dir=/Users/wangkun/Desktop/crawler/piaoquan_crawler/
+  profile_path=/etc/profile
+  python=python3
+  log_path=${piaoquan_crawler_dir}monitor/logs/$(date +%Y-%m-%d)-shell.log
+else
+  piaoquan_crawler_dir=/root/piaoquan_crawler/
+  profile_path=/etc/profile
+  python=python3
+  log_path=${piaoquan_crawler_dir}monitor/logs/$(date +%Y-%m-%d)-shell.log
+fi
+
+echo "开始..." >> ${log_path}
+
+# ====================进程心跳检测====================
+echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 run_cpu_memory 进程状态" >> ${log_path}
+# 使用pgrep命令匹配进程名称为run_cpu_memory.py的进程数量
+process_count=$(pgrep -f "run_cpu_memory.py" | wc -l)
+if [[ "$process_count" -gt 0 ]]; then
+  echo "$(date "+%Y-%m-%d %H:%M:%S") run_cpu_memory 进程状态正常。" >> ${log_path}
+else
+  echo "$(date "+%Y-%m-%d_%H:%M:%S") 未运行, 正在启动..." >> ${log_path}
+  cd ${piaoquan_crawler_dir} && nohup ${python} -u ${path} --log_type="${log_type}" --crawler="${crawler}" --env="${env}" >> ${log_path}  &
+  echo "$(date "+%Y-%m-%d %H:%M:%S") 启动完成!" >> ${log_path}
+fi
+# ===================================================
+
+# 删除日志
+echo "$(date "+%Y-%m-%d %H:%M:%S") 开始清理 10 天前的日志文件" >> ${log_path}
+find ${piaoquan_crawler_dir}monitor/logs/ -mtime +10 -name "*.log" -exec rm -rf {} \;
+echo "$(date "+%Y-%m-%d %H:%M:%S") 日志文件清理完毕" >> ${log_path}
+exit 0

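Note: run_monitor.sh above checks the heartbeat with `pgrep -f "run_cpu_memory.py" | wc -l`. Since the project already depends on psutil, the same check can also be expressed in Python; below is a hedged sketch that mirrors MonitorCpuMemory.get_pid_path(), with an illustrative function name.

```python
# -*- coding: utf-8 -*-
# Sketch only: psutil version of the pgrep heartbeat check in run_monitor.sh above.
import psutil


def is_script_running(script_name: str) -> bool:
    for proc in psutil.process_iter():
        try:
            if any(script_name in part for part in proc.cmdline()):
                return True
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            continue
    return False


if __name__ == "__main__":
    print(is_script_running("run_cpu_memory.py"))
```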
+ 8 - 1
requirements.txt

@@ -13,4 +13,11 @@ urllib3==1.26.9
 workalendar==17.0.0
 opencv-python~=4.8.0.74
 Appium-Python-Client~=2.8.1
-crontab~=1.0.1
+crontab~=1.0.1
+mitmproxy~=9.0.1
+bs4~=0.0.1
+beautifulsoup4~=4.11.1
+scikit-learn~=1.3.0

+ 0 - 45
shipinhao/shipinhao_main/run_shipinhao_search_scheduling.py

@@ -1,45 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/5/5
-import argparse
-import os
-import sys
-sys.path.append(os.getcwd())
-from common.public import task_fun
-from common.common import Common
-from common.scheduling_db import MysqlHelper
-from shipinhao.shipinhao_search.shipinhao_search_scheduling import ShipinhaoSearchScheduling
-
-
-def main(log_type, crawler, task, oss_endpoint, env):
-    task_dict = task_fun(task)['task_dict']
-    rule_dict = task_fun(task)['rule_dict']
-    task_id = task_dict['task_id']
-    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
-    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
-    Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
-    Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
-    Common.logger(log_type, crawler).info(f"用户列表:\n{user_list}")
-    Common.logger(log_type, crawler).info('开始抓取 视频号 搜索爬虫策略\n')
-    ShipinhaoSearchScheduling.get_search_videos(log_type=log_type,
-                                                crawler=crawler,
-                                                rule_dict=rule_dict,
-                                                oss_endpoint=oss_endpoint,
-                                                env=env)
-    Common.del_logs(log_type, crawler)
-    Common.logger(log_type, crawler).info('抓取完一轮\n')
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler')  ## 添加参数
-    parser.add_argument('--task')  ## 添加参数
-    parser.add_argument('--oss_endpoint')  ## 添加参数
-    parser.add_argument('--env')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
-    main(log_type=args.log_type,
-         crawler=args.crawler,
-         task=args.task,
-         oss_endpoint=args.oss_endpoint,
-         env=args.env)

+ 124 - 0
shipinhao/shipinhao_main/run_sph_recommend.py

@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/27
+import argparse
+import random
+from mq_http_sdk.mq_client import *
+from mq_http_sdk.mq_consumer import *
+from mq_http_sdk.mq_exception import MQExceptionBase
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.public import get_consumer, ack_message, task_fun_mq
+from common.scheduling_db import MysqlHelper
+from shipinhao.shipinhao_recommend.recommend_h5 import RecommendH5
+from shipinhao.shipinhao_recommend.shipinhao_recommend import ShipinhaoRecommend
+
+
+class ShipinhaoRecommendMain:
+    @classmethod
+    def shipinhao_recommend_main(cls, log_type, crawler, topic_name, group_id, env):
+        consumer = get_consumer(topic_name, group_id)
+        # 长轮询表示如果Topic没有消息,则客户端请求会在服务端挂起最多30秒,期间如果有消息可以消费则立即返回响应。
+        # 长轮询时间30秒(最多可设置为30秒)。
+        wait_seconds = 30
+        # 一次最多消费1条(最多可设置为16条)。
+        batch = 1
+        Common.logger(log_type, crawler).info(f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                              f'WaitSeconds:{wait_seconds}\n'
+                                              f'TopicName:{topic_name}\n'
+                                              f'MQConsumer:{group_id}')
+        Common.logging(log_type, crawler, env, f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                               f'WaitSeconds:{wait_seconds}\n'
+                                               f'TopicName:{topic_name}\n'
+                                               f'MQConsumer:{group_id}')
+        while True:
+            try:
+                # 长轮询消费消息。
+                recv_msgs = consumer.consume_message(batch, wait_seconds)
+                for msg in recv_msgs:
+                    Common.logger(log_type, crawler).info(f"Receive\n"
+                                                          f"MessageId:{msg.message_id}\n"
+                                                          f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                          f"MessageTag:{msg.message_tag}\n"
+                                                          f"ConsumedTimes:{msg.consumed_times}\n"
+                                                          f"PublishTime:{msg.publish_time}\n"
+                                                          f"Body:{msg.message_body}\n"
+                                                          f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                          f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                          f"Properties:{msg.properties}")
+                    Common.logging(log_type, crawler, env, f"Receive\n"
+                                                           f"MessageId:{msg.message_id}\n"
+                                                           f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                           f"MessageTag:{msg.message_tag}\n"
+                                                           f"ConsumedTimes:{msg.consumed_times}\n"
+                                                           f"PublishTime:{msg.publish_time}\n"
+                                                           f"Body:{msg.message_body}\n"
+                                                           f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                           f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                           f"Properties:{msg.properties}")
+                    # ack_mq_message
+                    ack_message(log_type=log_type, crawler=crawler, recv_msgs=recv_msgs, consumer=consumer)
+
+                    # 处理爬虫业务
+                    task_dict = task_fun_mq(msg.message_body)['task_dict']
+                    rule_dict = task_fun_mq(msg.message_body)['rule_dict']
+                    task_id = task_dict['id']
+                    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+                    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+                    our_uid_list = []
+                    for user in user_list:
+                        our_uid_list.append(user["uid"])
+                    our_uid = random.choice(our_uid_list)
+                    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+                    Common.logging(log_type, crawler, env, f"调度任务:{task_dict}")
+                    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
+                    Common.logging(log_type, crawler, env, f"抓取规则:{rule_dict}")
+                    Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
+                    Common.logging(log_type, crawler, env, f"用户列表:{user_list}\n")
+                    Common.logger(log_type, crawler).info(f'开始抓取:{task_dict["taskName"]}\n')
+                    Common.logging(log_type, crawler, env, f'开始抓取:{task_dict["taskName"]}\n')
+
+                    # 每轮扫描视频数
+                    scan_count = 20
+                    # 抓取符合规则的视频列表
+                    ShipinhaoRecommend.get_recommend_list(log_type=log_type,
+                                                          crawler=crawler,
+                                                          rule_dict=rule_dict,
+                                                          scan_count=scan_count,
+                                                          env=env)
+                    # 抓取符合规则视频的 URL,并发送 MQ 消息给 ETL
+                    RecommendH5.download_videos(log_type=log_type,
+                                                crawler=crawler,
+                                                env=env,
+                                                rule_dict=rule_dict,
+                                                our_uid=our_uid)
+                    ShipinhaoRecommend.download_video_list = []
+                    Common.logger(log_type, crawler).info('抓取一轮结束\n')
+                    Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+
+            except MQExceptionBase as err:
+                # Topic中没有消息可消费。
+                if err.type == "MessageNotExist":
+                    Common.logger(log_type, crawler).info(f"No new message! RequestId:{err.req_id}\n")
+                    Common.logging(log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n")
+                    continue
+
+                Common.logger(log_type, crawler).info(f"Consume Message Fail! Exception:{err}\n")
+                Common.logging(log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n")
+                time.sleep(2)
+                continue
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
+    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
+    parser.add_argument('--crawler')  ## 添加参数
+    parser.add_argument('--topic_name')  ## 添加参数
+    parser.add_argument('--group_id')  ## 添加参数
+    parser.add_argument('--env')  ## 添加参数
+    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
+    ShipinhaoRecommendMain.shipinhao_recommend_main(log_type=args.log_type,
+                                                    crawler=args.crawler,
+                                                    topic_name=args.topic_name,
+                                                    group_id=args.group_id,
+                                                    env=args.env)

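Note: run_sph_recommend.py above and run_sph_search.py below share the same consume / ack / logging boilerplate around the MQ message loop. Below is a hedged sketch of how that loop could be factored into one generic helper that takes a per-crawler callback; consume_forever and handle_task are illustrative names, not existing functions, while get_consumer, ack_message and task_fun_mq are the helpers already imported from common.public.

```python
# -*- coding: utf-8 -*-
# Sketch only: a generic MQ consume loop for the run_sph_* entry points.
# `consume_forever` / `handle_task` are hypothetical; get_consumer, ack_message
# and task_fun_mq are the existing helpers from common.public.
import time
from mq_http_sdk.mq_exception import MQExceptionBase
from common.common import Common
from common.public import get_consumer, ack_message, task_fun_mq


def consume_forever(log_type, crawler, topic_name, group_id, env, handle_task,
                    wait_seconds=30, batch=1):
    consumer = get_consumer(topic_name, group_id)
    while True:
        try:
            for msg in consumer.consume_message(batch, wait_seconds):
                ack_message(log_type=log_type, crawler=crawler, recv_msgs=[msg], consumer=consumer)
                task = task_fun_mq(msg.message_body)
                handle_task(task["task_dict"], task["rule_dict"])
        except MQExceptionBase as err:
            if err.type == "MessageNotExist":
                Common.logger(log_type, crawler).info(f"No new message! RequestId:{err.req_id}\n")
                continue
            Common.logger(log_type, crawler).info(f"Consume Message Fail! Exception:{err}\n")
            Common.logging(log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n")
            time.sleep(2)
```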
+ 41 - 0
shipinhao/shipinhao_main/run_sph_recommend_dev.py

@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/26
+import os
+import sys
+import time
+sys.path.append(os.getcwd())
+from common.common import Common
+from shipinhao.shipinhao_recommend.recommend_h5 import RecommendH5
+from shipinhao.shipinhao_recommend.shipinhao_recommend import ShipinhaoRecommend
+
+
+class ShipinhaoRecommendMain:
+    @classmethod
+    def shipinhao_recommend_main(cls, log_type, crawler, env):
+        while True:
+            Common.logger(log_type, crawler).info("开始抓取视频号推荐\n")
+            Common.logging(log_type, crawler, env, "开始抓取视频号推荐\n")
+            scan_count = 20
+            rule_dict = {"period": {"min": 365, "max": 365},
+                         "duration": {"min": 10, "max": 1800},
+                         "favorite_cnt": {"min": 5000, "max": 0},
+                         "share_cnt": {"min": 1000, "max": 0}}
+            ShipinhaoRecommend.get_recommend_list(log_type=log_type,
+                                                  crawler=crawler,
+                                                  rule_dict=rule_dict,
+                                                  scan_count = scan_count,
+                                                  env=env)
+            RecommendH5.download_videos(log_type=log_type,
+                                        crawler=crawler,
+                                        env=env,
+                                        rule_dict=rule_dict,
+                                        our_uid="6267140")
+            Common.logger(log_type, crawler).info("抓取一轮结束\n")
+            Common.logging(log_type, crawler, env, "抓取一轮结束\n")
+            ShipinhaoRecommend.download_video_list = []
+            time.sleep(5)
+
+
+if __name__ == "__main__":
+    ShipinhaoRecommendMain.shipinhao_recommend_main("recommend", "shipinhao", "prod")

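Note: the dev runner above hard-codes a rule_dict of {"min": x, "max": y} blocks for period, duration, favorite_cnt and share_cnt. Below is a minimal sketch of how such a rule can be evaluated against a video's stats, assuming the convention that max == 0 means "no upper bound"; the authoritative logic lives in common.public.download_rule and may differ.

```python
# -*- coding: utf-8 -*-
# Sketch only: evaluate a {"min": x, "max": y} rule_dict against video stats.
# Assumes max == 0 means "no upper limit"; the real check is common.public.download_rule.
def passes_rule(video_stats: dict, rule_dict: dict) -> bool:
    for field, bounds in rule_dict.items():
        value = video_stats.get(field, 0)
        if value < bounds.get("min", 0):
            return False
        upper = bounds.get("max", 0)
        if upper and value > upper:
            return False
    return True


if __name__ == "__main__":
    rule = {"duration": {"min": 10, "max": 1800}, "share_cnt": {"min": 1000, "max": 0}}
    print(passes_rule({"duration": 65, "share_cnt": 2400}, rule))  # True
```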
+ 109 - 0
shipinhao/shipinhao_main/run_sph_search.py

@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/5/5
+import argparse
+from mq_http_sdk.mq_client import *
+from mq_http_sdk.mq_consumer import *
+from mq_http_sdk.mq_exception import MQExceptionBase
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.public import get_consumer, ack_message, task_fun_mq
+from common.scheduling_db import MysqlHelper
+from shipinhao.shipinhao_search.shipinhao_search_scheduling import ShipinhaoSearchScheduling
+
+
+class ShipinhaoSearchMain:
+    @classmethod
+    def shipinhao_search_main(cls, log_type, crawler, topic_name, group_id, env):
+        consumer = get_consumer(topic_name, group_id)
+        # 长轮询表示如果Topic没有消息,则客户端请求会在服务端挂起最多30秒,期间如果有消息可以消费则立即返回响应。
+        # 长轮询时间30秒(最多可设置为30秒)。
+        wait_seconds = 30
+        # 一次最多消费1条(最多可设置为16条)。
+        batch = 1
+        Common.logger(log_type, crawler).info(f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                              f'WaitSeconds:{wait_seconds}\n'
+                                              f'TopicName:{topic_name}\n'
+                                              f'MQConsumer:{group_id}')
+        Common.logging(log_type, crawler, env, f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                               f'WaitSeconds:{wait_seconds}\n'
+                                               f'TopicName:{topic_name}\n'
+                                               f'MQConsumer:{group_id}')
+        while True:
+            try:
+                # 长轮询消费消息。
+                recv_msgs = consumer.consume_message(batch, wait_seconds)
+                for msg in recv_msgs:
+                    Common.logger(log_type, crawler).info(f"Receive\n"
+                                                          f"MessageId:{msg.message_id}\n"
+                                                          f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                          f"MessageTag:{msg.message_tag}\n"
+                                                          f"ConsumedTimes:{msg.consumed_times}\n"
+                                                          f"PublishTime:{msg.publish_time}\n"
+                                                          f"Body:{msg.message_body}\n"
+                                                          f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                          f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                          f"Properties:{msg.properties}")
+                    Common.logging(log_type, crawler, env, f"Receive\n"
+                                                           f"MessageId:{msg.message_id}\n"
+                                                           f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                           f"MessageTag:{msg.message_tag}\n"
+                                                           f"ConsumedTimes:{msg.consumed_times}\n"
+                                                           f"PublishTime:{msg.publish_time}\n"
+                                                           f"Body:{msg.message_body}\n"
+                                                           f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                           f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                           f"Properties:{msg.properties}")
+                    # ack_mq_message
+                    ack_message(log_type=log_type, crawler=crawler, recv_msgs=recv_msgs, consumer=consumer)
+
+                    # 处理爬虫业务
+                    task_dict = task_fun_mq(msg.message_body)['task_dict']
+                    rule_dict = task_fun_mq(msg.message_body)['rule_dict']
+                    task_id = task_dict['id']
+                    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+                    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+                    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+                    Common.logging(log_type, crawler, env, f"调度任务:{task_dict}")
+                    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
+                    Common.logging(log_type, crawler, env, f"抓取规则:{rule_dict}")
+                    # Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
+                    # Common.logging(log_type, crawler, env, f"用户列表:{user_list}\n")
+                    Common.logger(log_type, crawler).info(f'开始抓取:{task_dict["taskName"]}\n')
+                    Common.logging(log_type, crawler, env, f'开始抓取:{task_dict["taskName"]}\n')
+
+                    # 抓取符合规则的视频列表
+                    ShipinhaoSearchScheduling.get_search_videos(log_type=log_type,
+                                                                crawler=crawler,
+                                                                rule_dict=rule_dict,
+                                                                user_list=user_list,
+                                                                env=env)
+                    Common.logger(log_type, crawler).info('抓取一轮结束\n')
+                    Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+
+            except MQExceptionBase as err:
+                # Topic中没有消息可消费。
+                if err.type == "MessageNotExist":
+                    Common.logger(log_type, crawler).info(f"No new message! RequestId:{err.req_id}\n")
+                    Common.logging(log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n")
+                    continue
+
+                Common.logger(log_type, crawler).info(f"Consume Message Fail! Exception:{err}\n")
+                Common.logging(log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n")
+                time.sleep(2)
+                continue
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # 新建参数解释器对象
+    parser.add_argument('--log_type', type=str)  # 添加参数,注明参数类型
+    parser.add_argument('--crawler')  # 添加参数
+    parser.add_argument('--topic_name')  # 添加参数
+    parser.add_argument('--group_id')  # 添加参数
+    parser.add_argument('--env')  # 添加参数
+    args = parser.parse_args()  # 参数赋值,也可以通过终端赋值
+    ShipinhaoSearchMain.shipinhao_search_main(log_type=args.log_type,
+                                              crawler=args.crawler,
+                                              topic_name=args.topic_name,
+                                              group_id=args.group_id,
+                                              env=args.env)

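The loop above long-polls the MQ topic, logs each message, acks it through the project's ack_message helper, and only then parses the task and runs the crawl. A minimal sketch of that consume/ack pattern, assuming a consumer object exposing consume_message(batch, wait_seconds) as used above and an ack_message(receipt_handles) call (an assumption about the underlying SDK wrapped by the helper); handle_task is a hypothetical stand-in for the task dispatch:

    import time

    def consume_forever(consumer, handle_task, batch=1, wait_seconds=30):
        # Long-poll the topic; ack each message, then run the task it carries.
        while True:
            try:
                msgs = consumer.consume_message(batch, wait_seconds)
            except Exception as err:
                # "MessageNotExist" only means the queue is empty right now.
                if getattr(err, "type", "") == "MessageNotExist":
                    continue
                time.sleep(2)
                continue
            for msg in msgs:
                # Ack before processing, as the runner above does: a task that
                # crashes mid-crawl is not redelivered (at-most-once handling).
                consumer.ack_message([msg.receipt_handle])
                handle_task(msg.message_body)

Moving the ack after handle_task would instead retry a crashed task, at the cost of possible duplicate crawls.
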
+ 3 - 0
shipinhao/shipinhao_recommend/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/25

+ 270 - 0
shipinhao/shipinhao_recommend/recommend_h5.py

@@ -0,0 +1,270 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/26
+import json
+import os
+import sys
+import time
+from appium import webdriver
+from selenium.common import NoSuchElementException
+from appium.webdriver.webdriver import WebDriver
+from hashlib import md5
+from selenium.webdriver.common.by import By
+sys.path.append(os.getcwd())
+from common.mq import MQ
+from common.public import similarity
+from common.common import Common
+from shipinhao.shipinhao_recommend.shipinhao_recommend import ShipinhaoRecommend
+
+
+class RecommendH5:
+    platform = "视频号"
+
+    @classmethod
+    def start_wechat(cls, log_type, crawler, env):
+        Common.logger(log_type, crawler).info('启动微信')
+        Common.logging(log_type, crawler, env, '启动微信')
+        if env == "dev":
+            chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver"
+        else:
+            chromedriverExecutable = '/Users/lieyunye/Downloads/chromedriver/chromedriver_v111/chromedriver'
+        caps = {
+            "platformName": "Android",  # 手机操作系统 Android / iOS
+            "deviceName": "Android",  # 连接的设备名(模拟器或真机),安卓可以随便写
+            "platformVersion": "13",  # 手机对应的系统版本(Android 13)
+            "appPackage": "com.tencent.mm",  # 被测APP的包名,乐活圈 Android
+            "appActivity": ".ui.LauncherUI",  # 启动的Activity名
+            "autoGrantPermissions": True,  # 让 appium 自动授权 base 权限,
+            # 如果 noReset 为 True,则该条不生效(该参数为 Android 独有),对应的值为 True 或 False
+            "unicodeKeyboard": True,  # 使用自带输入法,输入中文时填True
+            "resetKeyboard": True,  # 执行完程序恢复原来输入法
+            "noReset": True,  # 不重置APP
+            "recreateChromeDriverSessions": True,  # 切换到非 chrome-Driver 会 kill 掉 session,就不需要手动 kill 了
+            "printPageSourceOnFailure": True,  # 找不到元素时,appium log 会完整记录当前页面的 pagesource
+            "newCommandTimeout": 6000,  # 初始等待时间
+            "automationName": "UiAutomator2",  # 使用引擎,默认为 Appium,
+            # 其中 Appium、UiAutomator2、Selendroid、Espresso 用于 Android,XCUITest 用于 iOS
+            "showChromedriverLog": True,
+            # "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
+            "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
+            # "chromeOptions": {"androidProcess": "com.tencent.mm:toolsmp"},
+            # "chromeOptions": {"androidProcess": "com.tencent.mm"},
+            'enableWebviewDetailsCollection': True,
+            'setWebContentsDebuggingEnabled': True,
+            'chromedriverExecutable': chromedriverExecutable,
+        }
+        driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
+        driver.implicitly_wait(10)
+        time.sleep(5)
+        return driver
+
+    # 查找元素
+    @classmethod
+    def search_elements(cls, driver: WebDriver, xpath):
+        time.sleep(1)
+        windowHandles = driver.window_handles
+        for handle in windowHandles:
+            driver.switch_to.window(handle)
+            time.sleep(1)
+            try:
+                elements = driver.find_elements(By.XPATH, xpath)
+                if elements:
+                    return elements
+            except NoSuchElementException:
+                pass
+
+    # noinspection PyBroadException
+    @classmethod
+    def check_to_webview(cls, log_type, crawler, env, driver: WebDriver):
+        webviews = driver.contexts
+        Common.logger(log_type, crawler).info(f"webviews:{webviews}")
+        Common.logging(log_type, crawler, env, f"webviews:{webviews}")
+        driver.switch_to.context(webviews[1])
+        Common.logger(log_type, crawler).info(driver.current_context)
+        Common.logging(log_type, crawler, env, driver.current_context)
+        time.sleep(1)
+        windowHandles = driver.window_handles
+        for handle in windowHandles:
+            try:
+                driver.switch_to.window(handle)
+                time.sleep(1)
+                driver.find_element(By.XPATH, '//div[@class="unit"]')
+                Common.logger(log_type, crawler).info('切换 webview 成功')
+                Common.logging(log_type, crawler, env, '切换 webview 成功')
+                return "成功"
+            except Exception:
+                Common.logger(log_type, crawler).info("切换 webview 失败")
+                Common.logging(log_type, crawler, env, "切换 webview 失败")
+
+    @classmethod
+    def search_video(cls, log_type, crawler, env, video_dict, rule_dict, our_uid):
+        mq = MQ(topic_name="topic_crawler_etl_" + env)
+        driver = cls.start_wechat(log_type, crawler, env)
+        # 点击微信搜索框,并输入搜索词
+        driver.implicitly_wait(10)
+        Common.logger(log_type, crawler).info("点击搜索框")
+        Common.logging(log_type, crawler, env, "点击搜索框")
+        driver.find_element(By.ID, 'com.tencent.mm:id/j5t').click()  # 微信8.0.30版本
+        time.sleep(0.5)
+        driver.find_element(By.ID, 'com.tencent.mm:id/cd7').clear().send_keys(
+            video_dict['video_title'].replace('"', "").replace('“', "").replace('”', "").replace('#', ""))  # 微信8.0.30版本
+        # driver.press_keycode(AndroidKey.ENTER)
+        Common.logger(log_type, crawler).info("进入搜索词页面")
+        Common.logging(log_type, crawler, env, "进入搜索词页面")
+        driver.find_element(By.ID, 'com.tencent.mm:id/m94').click()  # 微信8.0.30版本
+
+        # 切换到微信搜索结果页 webview
+        check_to_webview = cls.check_to_webview(log_type, crawler, env, driver)
+        if check_to_webview is None:
+            Common.logger(log_type, crawler).info("切换到视频号 webview 失败\n")
+            Common.logging(log_type, crawler, env, "切换到视频号 webview 失败\n")
+            return
+        time.sleep(1)
+
+        # 切换到"视频号"分类
+        shipinhao_tags = cls.search_elements(driver, '//div[@class="unit"]/*[2]')
+        Common.logger(log_type, crawler).info('点击"视频号"分类')
+        Common.logging(log_type, crawler, env, '点击"视频号"分类')
+        shipinhao_tags[0].click()
+        time.sleep(5)
+
+        global h5_page
+        for i in range(3):
+            h5_page = cls.search_elements(driver, '//*[@class="mixed-box__bd"]')
+            if h5_page is None:
+                Common.logger(log_type, crawler).info('未发现H5页面')
+                Common.logging(log_type, crawler, env, '未发现H5页面')
+                driver.refresh()
+            else:
+                break
+
+        if h5_page is None:
+            driver.quit()
+            return
+
+        Common.logger(log_type, crawler).info('获取视频列表\n')
+        Common.logging(log_type, crawler, env, '获取视频列表\n')
+        video_elements = cls.search_elements(driver, '//div[@class="rich-media active__absolute"]')
+        if video_elements is None:
+            Common.logger(log_type, crawler).warning(f'video_elements:{video_elements}')
+            Common.logging(log_type, crawler, env, f'video_elements:{video_elements}')
+            return
+
+        for i, video_element in enumerate(video_elements):
+            try:
+                if video_element is None:
+                    Common.logger(log_type, crawler).info('到底啦~\n')
+                    Common.logging(log_type, crawler, env, '到底啦~\n')
+                    return
+
+                Common.logger(log_type, crawler).info(f'拖动"视频"列表第{i + 1}条至屏幕中间')
+                Common.logging(log_type, crawler, env, f'拖动"视频"列表第{i + 1}条至屏幕中间')
+                time.sleep(3)
+                driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})",
+                                      video_element)
+                if len(video_element.find_elements(By.XPATH, "//*[@text='没有更多的搜索结果']")) != 0:
+                    Common.logger(log_type, crawler).info("没有更多的搜索结果\n")
+                    Common.logging(log_type, crawler, env, "没有更多的搜索结果\n")
+                    return
+                h5_video_title = \
+                video_element.find_elements(By.XPATH, '//div[@class="rich-media__title ellipsis_2"]/span')[i].text[:40]
+                h5_user_name = video_element.find_elements(By.XPATH, '//div[@class="rich-media__source__title"]')[
+                    i].text
+                h5_video_url = video_element.find_elements(By.XPATH, '//div[@class="video-player"]')[i].get_attribute(
+                    'src')
+                cover_url = video_element.find_elements(By.XPATH, '//div[@class="video-player__bd"]')[i].get_attribute(
+                    'style')
+                h5_cover_url = cover_url.split('url("')[-1].split('")')[0]
+                avatar_url = video_element.find_elements(By.XPATH,
+                                                         '//div[@class="ui-image-image ui-image rich-media__source__thumb"]')[
+                    i].get_attribute('style')
+                h5_avatar_url = avatar_url.split('url("')[-1].split('")')[0]
+                h5_out_video_id = md5(h5_video_title.encode('utf8')).hexdigest()
+                h5_out_user_id = md5(h5_user_name.encode('utf8')).hexdigest()
+
+                title_similarity = similarity(video_dict['video_title'], h5_video_title)
+                user_name_similarity = similarity(video_dict['user_name'], h5_user_name)
+
+                if title_similarity >= 0.5 and user_name_similarity >= 1.0:
+                    video_dict['cover_url'] = h5_cover_url
+                    video_dict['avatar_url'] = h5_avatar_url
+                    video_dict['out_video_id'] = h5_out_video_id
+                    video_dict['video_url'] = h5_video_url
+
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
+
+                    video_dict["out_user_id"] = h5_out_user_id
+                    video_dict["platform"] = crawler
+                    video_dict["strategy"] = log_type
+                    video_dict["strategyType"] = "recommend"
+                    video_dict["out_video_id"] = h5_out_video_id
+                    video_dict["width"] = 0
+                    video_dict["height"] = 0
+                    video_dict["crawler_rule"] = json.dumps(rule_dict)
+                    video_dict["user_id"] = our_uid
+                    video_dict["publish_time"] = video_dict["publish_time_str"]
+                    mq.send_msg(video_dict)
+                    Common.logger(log_type, crawler).info("已抓取到目标视频\n")
+                    Common.logging(log_type, crawler, env, "已抓取到目标视频\n")
+                    driver.quit()
+                    return
+                else:
+                    Common.logger(log_type, crawler).info(f"video_dict['video_title']:{video_dict['video_title']}")
+                    Common.logging(log_type, crawler, env, f"video_dict['video_title']:{video_dict['video_title']}")
+                    Common.logger(log_type, crawler).info(f"h5_video_title:{h5_video_title}")
+                    Common.logging(log_type, crawler, env, f"h5_video_title:{h5_video_title}")
+                    Common.logger(log_type, crawler).info(f"title_similarity:{title_similarity}")
+                    Common.logging(log_type, crawler, env, f"title_similarity:{title_similarity}")
+                    Common.logger(log_type, crawler).info(f"video_dict['user_name']:{video_dict['user_name']}")
+                    Common.logging(log_type, crawler, env, f"video_dict['user_name']:{video_dict['user_name']}")
+                    Common.logger(log_type, crawler).info(f"h5_user_name:{h5_user_name}")
+                    Common.logging(log_type, crawler, env, f"h5_user_name:{h5_user_name}")
+                    Common.logger(log_type, crawler).info(f"user_name_similarity:{user_name_similarity}")
+                    Common.logging(log_type, crawler, env, f"user_name_similarity:{user_name_similarity}")
+            except Exception as e:
+                Common.logger(log_type, crawler).info(f"抓取单条H5视频时异常:{e}\n")
+                Common.logging(log_type, crawler, env, f"抓取单条H5视频时异常:{e}\n")
+        Common.logger(log_type, crawler).info("未找到目标视频\n")
+        Common.logging(log_type, crawler, env, "未找到目标视频\n")
+
+    @classmethod
+    def download_videos(cls, log_type, crawler, env, rule_dict, our_uid):
+        try:
+            Common.logger(log_type, crawler).info(f'共{len(ShipinhaoRecommend.download_video_list)}条视频待抓取')
+            Common.logging(log_type, crawler, env, f'共{len(ShipinhaoRecommend.download_video_list)}条视频待抓取')
+            Common.logger(log_type, crawler).info(f'download_video_list:{ShipinhaoRecommend.download_video_list}\n')
+            Common.logging(log_type, crawler, env, f'download_video_list:{ShipinhaoRecommend.download_video_list}\n')
+            if len(ShipinhaoRecommend.download_video_list) == 0:
+                Common.logger(log_type, crawler).info("没有待下载的视频\n")
+                Common.logging(log_type, crawler, env, "没有待下载的视频\n")
+                return
+            for video_dict in ShipinhaoRecommend.download_video_list:
+                try:
+                    cls.search_video(log_type, crawler, env, video_dict, rule_dict, our_uid)
+                except Exception as e:
+                    Common.logger(log_type, crawler).info(f"抓取视频异常:{e}\n")
+                    Common.logging(log_type, crawler, env, f"抓取视频异常:{e}\n")
+        except Exception as e:
+            Common.logger(log_type, crawler).info(f"download_videos异常:{e}\n")
+            Common.logging(log_type, crawler, env, f"download_videos异常:{e}\n")
+
+
+if __name__ == "__main__":
+    ShipinhaoRecommend.download_video_list = [
+        {'video_title': '网友:不知道此时此刻黑车司机在想什么', 'video_id': '96bfb8b86965df7365f02373ce37fe87', 'duration': 21, 'user_name': '沂蒙晚报', 'like_cnt': 9575, 'share_cnt': 11000, 'favorite_cnt': 25000, 'comment_cnt': 5026, 'publish_time_str': '2023-07-25', 'publish_time_stamp': 1690214400, 'publish_time': 1690214400000, 'period': 1},
+        {'video_title': '女朋友这不就来了么', 'video_id': 'b1892886dca8c38dd6d72848ae4fd565', 'duration': 10, 'user_name': '向往的火焰蓝', 'like_cnt': 11000, 'share_cnt': 3701, 'favorite_cnt': 26000, 'comment_cnt': 1426, 'publish_time_str': '2023-07-26', 'publish_time_stamp': 1690300800, 'publish_time': 1690300800000, 'period': 0},
+        {'video_title': '近日,在韩国举办的2023世界跆拳道大赛上,中国选手出“奇招”,引网友点赞。关注', 'video_id': 'ebe8637a152c58bac2f1d875b257f9b5', 'duration': 10, 'user_name': '搜狐新闻', 'like_cnt': 9475, 'share_cnt': 9134, 'favorite_cnt': 18000, 'comment_cnt': 1770, 'publish_time_str': '2023-07-26', 'publish_time_stamp': 1690300800, 'publish_time': 1690300800000, 'period': 0},
+        {'video_title': '与愚者争论,自己就是愚者 #动画小故事  #哲理故事', 'video_id': '629abeb79f0de7a4dc45fadffc8ebc2b', 'duration': 32, 'user_name': '陈搞搞', 'like_cnt': 23000, 'share_cnt': 49000, 'favorite_cnt': 67000, 'comment_cnt': 1336, 'publish_time_str': '2023-07-24', 'publish_time_stamp': 1690128000, 'publish_time': 1690128000000, 'period': 2},
+        {'video_title': '我看不懂这种行为的意义在哪里,所以我决定坚持反复观看试图参悟其中的深意,', 'video_id': 'd7e6e1eeb519183d5e8665c92a101378', 'duration': 15, 'user_name': '蜡笔小星丶', 'like_cnt': 20000, 'share_cnt': 100000, 'favorite_cnt': 51000, 'comment_cnt': 9836, 'publish_time_str': '2023-07-25', 'publish_time_stamp': 1690214400, 'publish_time': 1690214400000, 'period': 1},
+        {'video_title': '女子一回家就开始脱衣服,不料老公的弟弟还在家里,女子下一秒的反应亮了!', 'video_id': 'c75472e887f2641acd34138b705cf8b9', 'duration': 11, 'user_name': '西米七七', 'like_cnt': 4335, 'share_cnt': 1107, 'favorite_cnt': 13000, 'comment_cnt': 1068, 'publish_time_str': '2023-07-26', 'publish_time_stamp': 1690300800, 'publish_time': 1690300800000, 'period': 0}]
+    RecommendH5.download_videos(log_type="recommend",
+                                crawler="shipinhao",
+                                env="dev",
+                                rule_dict={"period": {"min": 365, "max": 365},
+                                           "duration": {"min": 10, "max": 1800},
+                                           "favorite_cnt": {"min": 50000, "max": 0},
+                                           "share_cnt": {"min": 10000, "max": 0}},
+                                our_uid=6267140
+                                )

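recommend_h5 re-locates a feed video inside the H5 search results by fuzzy-matching the title and the account name, and reads cover/avatar addresses out of each element's inline style attribute. A small sketch of both steps, using difflib as a stand-in for the project's common.public.similarity helper (whose implementation is not shown here); the 0.5 / 1.0 thresholds mirror the check in search_video:

    from difflib import SequenceMatcher

    def style_url(style: str) -> str:
        # Extract the address from an inline style such as
        # 'background-image: url("https://example.com/cover.jpg");'
        return style.split('url("')[-1].split('")')[0]

    def is_same_video(feed_title, h5_title, feed_user, h5_user,
                      title_threshold=0.5, user_threshold=1.0):
        # Accept a candidate only when the titles are close enough and the
        # account names match exactly (stand-in for similarity()).
        title_sim = SequenceMatcher(None, feed_title, h5_title).ratio()
        user_sim = SequenceMatcher(None, feed_user, h5_user).ratio()
        return title_sim >= title_threshold and user_sim >= user_threshold
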
+ 287 - 0
shipinhao/shipinhao_recommend/shipinhao_recommend.py

@@ -0,0 +1,287 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/25
+import datetime
+import os
+import sys
+import time
+from datetime import date, timedelta
+from hashlib import md5
+from appium import webdriver
+from appium.webdriver.webdriver import WebDriver
+from selenium.common import NoSuchElementException
+from selenium.webdriver.common.by import By
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.public import download_rule
+from common.scheduling_db import MysqlHelper
+
+
+class ShipinhaoRecommend:
+    platform = "视频号"
+    download_video_list = []
+
+    @classmethod
+    def repeat_out_video_id(cls, log_type, crawler, out_video_id, env):
+        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{out_video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def start_wechat(cls, log_type, crawler, env):
+        Common.logger(log_type, crawler).info('启动微信')
+        Common.logging(log_type, crawler, env, '启动微信')
+        if env == "dev":
+            chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver"
+        else:
+            chromedriverExecutable = '/Users/lieyunye/Downloads/chromedriver/chromedriver_v111/chromedriver'
+        caps = {
+            "platformName": "Android",  # 手机操作系统 Android / iOS
+            "deviceName": "Android",  # 连接的设备名(模拟器或真机),安卓可以随便写
+            "platformVersion": "13",  # 手机对应的系统版本(Android 13)
+            "appPackage": "com.tencent.mm",  # 被测APP的包名,乐活圈 Android
+            "appActivity": ".ui.LauncherUI",  # 启动的Activity名
+            "autoGrantPermissions": True,  # 让 appium 自动授权 base 权限,
+            # 如果 noReset 为 True,则该条不生效(该参数为 Android 独有),对应的值为 True 或 False
+            "unicodeKeyboard": True,  # 使用自带输入法,输入中文时填True
+            "resetKeyboard": True,  # 执行完程序恢复原来输入法
+            "noReset": True,  # 不重置APP
+            "recreateChromeDriverSessions": True,  # 切换到非 chrome-Driver 会 kill 掉 session,就不需要手动 kill 了
+            "printPageSourceOnFailure": True,  # 找不到元素时,appium log 会完整记录当前页面的 pagesource
+            "newCommandTimeout": 6000,  # 初始等待时间
+            "automationName": "UiAutomator2",  # 使用引擎,默认为 Appium,
+            # 其中 Appium、UiAutomator2、Selendroid、Espresso 用于 Android,XCUITest 用于 iOS
+            "showChromedriverLog": True,
+            # "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
+            "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
+            # "chromeOptions": {"androidProcess": "com.tencent.mm:toolsmp"},
+            # "chromeOptions": {"androidProcess": "com.tencent.mm"},
+            'enableWebviewDetailsCollection': True,
+            'setWebContentsDebuggingEnabled': True,
+            'chromedriverExecutable': chromedriverExecutable,
+        }
+        driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
+        driver.implicitly_wait(10)
+        time.sleep(5)
+        return driver
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, rule_dict, env, scan_count, driver: WebDriver):
+        Common.logger(log_type, crawler).info("进入发现页")
+        Common.logging(log_type, crawler, env, "进入发现页")
+        tabs = driver.find_elements(By.ID, "com.tencent.mm:id/f2s")
+        for tab in tabs:
+            if tab.text == "发现":
+                tab.click()
+                time.sleep(0.5)
+                break
+
+        Common.logger(log_type, crawler).info('点击"视频号"')
+        Common.logging(log_type, crawler, env, '点击"视频号"')
+        textviews = driver.find_elements(By.ID, "android:id/title")
+        for textview in textviews:
+            if textview.text == "视频号":
+                textview.click()
+                time.sleep(0.5)
+                break
+
+        # 关闭青少年模式弹框
+        Common.logger(log_type, crawler).info("尝试关闭青少年模式弹框\n")
+        Common.logging(log_type, crawler, env, "尝试关闭青少年模式弹框\n")
+        try:
+            driver.find_element(By.ID, "com.tencent.mm:id/lqz").click()
+        except NoSuchElementException:
+            pass
+
+        for i in range(scan_count):
+            try:
+                Common.logger(log_type, crawler).info(f"第{i + 1}条视频")
+                Common.logging(log_type, crawler, env, f"第{i + 1}条视频")
+                if len(driver.find_elements(By.ID, "com.tencent.mm:id/dkf")) != 0:
+                    Common.logger(log_type, crawler).info("这是一个直播间,滑动至下一个视频\n")
+                    Common.logging(log_type, crawler, env, "这是一个直播间,滑动至下一个视频\n")
+                    driver.swipe(10, 1600, 10, 300, 200)
+                    continue
+                video_dict = cls.get_video_info(driver)
+                for k, v in video_dict.items():
+                    Common.logger(log_type, crawler).info(f"{k}:{v}")
+                Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
+
+                if video_dict["video_title"] is None:
+                    Common.logger(log_type, crawler).info("无效视频")
+                    Common.logging(log_type, crawler, env, "无效视频")
+                elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                    Common.logger(log_type, crawler).info("不满足抓取规则")
+                    Common.logging(log_type, crawler, env, "不满足抓取规则\n")
+                elif cls.repeat_out_video_id(log_type, crawler, video_dict["video_id"], env) != 0:
+                    Common.logger(log_type, crawler).info('视频已下载')
+                    Common.logging(log_type, crawler, env, '视频已下载\n')
+                else:
+                    cls.download_video_list.append(video_dict)
+                if i+1 == scan_count:
+                    Common.logger(log_type, crawler).info("扫描一轮结束\n")
+                    Common.logging(log_type, crawler, env, "扫描一轮结束\n")
+                    return
+                Common.logger(log_type, crawler).info(f"已抓取符合规则视频{len(cls.download_video_list)}条,滑动至下一个视频\n")
+                Common.logging(log_type, crawler, env, f"已抓取符合规则视频{len(cls.download_video_list)}条,滑动至下一个视频\n")
+                driver.swipe(10, 1600, 10, 300, 200)
+            except Exception as e:
+                Common.logger(log_type, crawler).info(f"扫描单条视频时异常:{e}\n")
+                Common.logging(log_type, crawler, env, f"扫描单条视频时异常:{e}\n")
+
+
+    @classmethod
+    def is_contain_chinese(cls, strword):
+        for ch in strword:
+            if u'\u4e00' <= ch <= u'\u9fff':
+                return True
+        return False
+
+    @classmethod
+    def get_video_info(cls, driver: WebDriver):
+
+        # 点击暂停
+        global duration
+        for i in range(3):
+            pause_elements = driver.find_elements(By.ID, "com.tencent.mm:id/gpx")
+            if len(pause_elements) != 0:
+                pause_elements[0].click()
+                try:
+                    duration_str = driver.find_element(By.ID, "com.tencent.mm:id/l7i").text
+                    duration = int(duration_str.split(":")[0]) * 60 + int(duration_str.split(":")[-1])
+                    break
+                except NoSuchElementException:
+                    duration = 0
+            else:
+                duration = 0
+
+        # user_name
+        user_name = driver.find_element(By.ID, "com.tencent.mm:id/hft").text
+
+        # 点赞
+        like_cnt = driver.find_element(By.ID, 'com.tencent.mm:id/k04').text
+        if like_cnt == "" or like_cnt == "喜欢" or like_cnt == "火":
+            like_cnt = 0
+        elif '万+' in like_cnt:
+            like_cnt = int(float(like_cnt.split('万+')[0]) * 10000)
+        elif '万' in like_cnt:
+            like_cnt = int(float(like_cnt.split('万')[0]) * 10000)
+        else:
+            like_cnt = int(float(like_cnt))
+
+        # 分享
+        share_cnt = driver.find_element(By.ID, 'com.tencent.mm:id/jhv').text
+        if share_cnt == "" or share_cnt == "转发":
+            share_cnt = 0
+        elif '万+' in share_cnt:
+            share_cnt = int(float(share_cnt.split('万+')[0]) * 10000)
+        elif '万' in share_cnt:
+            share_cnt = int(float(share_cnt.split('万')[0]) * 10000)
+        else:
+            share_cnt = int(float(share_cnt))
+
+        # 收藏
+        favorite_cnt = driver.find_element(By.ID, 'com.tencent.mm:id/fnp').text
+        if favorite_cnt == "" or favorite_cnt == "收藏" or favorite_cnt == "推荐" or favorite_cnt == "火":
+            favorite_cnt = 0
+        elif '万+' in favorite_cnt:
+            favorite_cnt = int(float(favorite_cnt.split('万+')[0]) * 10000)
+        elif '万' in favorite_cnt:
+            favorite_cnt = int(float(favorite_cnt.split('万')[0]) * 10000)
+        else:
+            favorite_cnt = int(float(favorite_cnt))
+
+        # 评论
+        comment_id = driver.find_element(By.ID, 'com.tencent.mm:id/bje')
+        comment_cnt = comment_id.text
+        if comment_cnt == "" or comment_cnt == "评论":
+            comment_cnt = 0
+        elif '万+' in comment_cnt:
+            comment_cnt = int(float(comment_cnt.split('万+')[0]) * 10000)
+        elif '万' in comment_cnt:
+            comment_cnt = int(float(comment_cnt.split('万')[0]) * 10000)
+        else:
+            comment_cnt = int(float(comment_cnt))
+
+        comment_id.click()
+        time.sleep(1)
+
+        # title
+        title_elements = driver.find_elements(By.ID, "com.tencent.mm:id/bga")
+        if len(title_elements) == 0:
+            title = ""
+        else:
+            title = title_elements[0].text.replace("\n", " ")[:40]
+
+        # 发布时间
+        publish_time = driver.find_element(By.ID, "com.tencent.mm:id/bre").get_attribute("name")
+        if "秒" in publish_time or "分钟" in publish_time or "小时" in publish_time:
+            publish_time_str = (date.today() + timedelta(days=0)).strftime("%Y-%m-%d")
+        elif "天前" in publish_time:
+            days = int(publish_time.replace("天前", ""))
+            publish_time_str = (date.today() + timedelta(days=-days)).strftime("%Y-%m-%d")
+        elif "年" in publish_time:
+            year_str = publish_time.split("年")[0]
+            month_str = publish_time.split("年")[-1].split("月")[0]
+            day_str = publish_time.split("月")[-1].split("日")[0]
+            if int(month_str) < 10:
+                month_str = f"0{month_str}"
+            if int(day_str) < 10:
+                day_str = f"0{day_str}"
+            publish_time_str = f"{year_str}-{month_str}-{day_str}"
+        else:
+            year_str = str(datetime.datetime.now().year)
+            month_str = publish_time.split("月")[0]
+            day_str = publish_time.split("月")[-1].split("日")[0]
+            if int(month_str) < 10:
+                month_str = f"0{month_str}"
+            if int(day_str) < 10:
+                day_str = f"0{day_str}"
+            publish_time_str = f"{year_str}-{month_str}-{day_str}"
+        publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y-%m-%d")))
+
+        # 收起评论
+        driver.find_element(By.ID, "com.tencent.mm:id/be_").click()
+        time.sleep(0.5)
+
+        video_id = md5(title.encode('utf8')).hexdigest()
+        video_dict = {
+            "video_title": title,
+            "video_id": video_id,
+            "duration": duration,
+            "user_name": user_name,
+            "like_cnt": like_cnt,
+            "share_cnt": share_cnt,
+            "favorite_cnt": favorite_cnt,
+            "comment_cnt": comment_cnt,
+            "publish_time_str": publish_time_str+" 00:00:00",
+            "publish_time_stamp": publish_time_stamp,
+        }
+        return video_dict
+
+    @classmethod
+    def get_recommend_list(cls, log_type, crawler, rule_dict, scan_count, env):
+        try:
+            driver = cls.start_wechat(log_type, crawler, env)
+            cls.get_videoList(log_type=log_type,
+                              crawler=crawler,
+                              rule_dict=rule_dict,
+                              env=env,
+                              scan_count=scan_count,
+                              driver=driver)
+            driver.quit()
+            Common.logger(log_type, crawler).info(f"微信退出成功\n")
+            Common.logging(log_type, crawler, env, f"微信退出成功\n")
+        except Exception as e:
+            Common.logger(log_type, crawler).info(f"扫描视频列表异常:{e}\n")
+            Common.logging(log_type, crawler, env, f"扫描视频列表异常:{e}\n")
+
+
+if __name__ == "__main__":
+    rule_dict1 = {"period": {"min": 365, "max": 365},
+                  "duration": {"min": 10, "max": 1800},
+                  "favorite_cnt": {"min": 50000, "max": 0},
+                  "share_cnt": {"min": 10000, "max": 0}}
+    ShipinhaoRecommend.get_recommend_list("recommend", "shipinhao", rule_dict1, 5, "dev")
+    print(ShipinhaoRecommend.download_video_list)
+    pass

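get_video_info above normalizes the on-screen publish time ('…秒/分钟/小时', 'N天前', '7月24日', '2022年7月24日') into a plain 'YYYY-MM-DD' string before converting it to a timestamp. A compact sketch of the same normalization, with the zero-padding folded into the format string; it handles exactly the date shapes covered by the branches above:

    from datetime import date, timedelta

    def parse_publish_date(text, today=None):
        # Normalize the Channels publish label to 'YYYY-MM-DD'.
        today = today or date.today()
        if "秒" in text or "分钟" in text or "小时" in text:
            return today.strftime("%Y-%m-%d")  # posted earlier today
        if "天前" in text:
            days = int(text.replace("天前", ""))
            return (today - timedelta(days=days)).strftime("%Y-%m-%d")
        year = int(text.split("年")[0]) if "年" in text else today.year
        month = int(text.split("年")[-1].split("月")[0])
        day = int(text.split("月")[-1].split("日")[0])
        return f"{year}-{month:02d}-{day:02d}"

    # parse_publish_date("7月24日", date(2023, 7, 26)) -> "2023-07-24"
    # parse_publish_date("3天前", date(2023, 7, 26))  -> "2023-07-23"
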
+ 13 - 13
shipinhao/shipinhao_search/shipinhao_search.py

@@ -169,7 +169,7 @@ class ShipinhaoSearch:
         if env == "dev":
             chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver"
         else:
-            chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
+            chromedriverExecutable = "/Users/piaoquan/Downloads/chromedriver/chromedriver_v111/chromedriver"
         caps = {
             "platformName": "Android",  # 手机操作系统 Android / iOS
             "deviceName": "Android",  # 连接的设备名(模拟器或真机),安卓可以随便写
@@ -395,48 +395,48 @@ class ShipinhaoSearch:
         # 点赞
         like_id = driver.find_element(By.ID, 'com.tencent.mm:id/k04')  # 微信版本 8.0.30
         like_cnt = like_id.get_attribute('name')
-        if like_cnt == "" or like_cnt == "喜欢" or like_cnt == "火" or cls.is_contain_chinese(like_cnt) is True:
-            like_cnt = 0
-        elif '万' in like_cnt:
+        if '万' in like_cnt:
             like_cnt = int(float(like_cnt.split('万')[0]) * 10000)
         elif '万+' in like_cnt:
             like_cnt = int(float(like_cnt.split('万+')[0]) * 10000)
+        elif like_cnt == "" or like_cnt == "喜欢" or like_cnt == "火" or cls.is_contain_chinese(like_cnt) is True:
+            like_cnt = 0
         else:
             like_cnt = int(float(like_cnt))
 
         # 分享
         share_id = driver.find_element(By.ID, 'com.tencent.mm:id/jhv')
         share_cnt = share_id.get_attribute('name')
-        if share_cnt == "" or share_cnt == "转发" or cls.is_contain_chinese(share_cnt) is True:
-            share_cnt = 0
-        elif '万' in share_cnt:
+        if '万' in share_cnt:
             share_cnt = int(float(share_cnt.split('万')[0]) * 10000)
         elif '万+' in share_cnt:
             share_cnt = int(float(share_cnt.split('万+')[0]) * 10000)
+        elif share_cnt == "" or share_cnt == "转发" or cls.is_contain_chinese(share_cnt) is True:
+            share_cnt = 0
         else:
             share_cnt = int(float(share_cnt))
 
         # 收藏
         favorite_id = driver.find_element(By.ID, 'com.tencent.mm:id/fnp')
         favorite_cnt = favorite_id.get_attribute('name')
-        if favorite_cnt == "" or favorite_cnt == "收藏" or favorite_cnt == "推荐" or favorite_cnt == "火" or cls.is_contain_chinese(favorite_cnt) is True:
-            favorite_cnt = 0
-        elif '万' in favorite_cnt:
+        if '万' in favorite_cnt:
             favorite_cnt = int(float(favorite_cnt.split('万')[0]) * 10000)
         elif '万+' in favorite_cnt:
             favorite_cnt = int(float(favorite_cnt.split('万+')[0]) * 10000)
+        elif favorite_cnt == "" or favorite_cnt == "收藏" or favorite_cnt == "推荐" or favorite_cnt == "火" or cls.is_contain_chinese(favorite_cnt) is True:
+            favorite_cnt = 0
         else:
             favorite_cnt = int(float(favorite_cnt))
 
         # 评论
         comment_id = driver.find_element(By.ID, 'com.tencent.mm:id/bje')
         comment_cnt = comment_id.get_attribute('name')
-        if comment_cnt == "" or comment_cnt == "评论" or cls.is_contain_chinese(comment_cnt) is True:
-            comment_cnt = 0
-        elif '万' in comment_cnt:
+        if '万' in comment_cnt:
             comment_cnt = int(float(comment_cnt.split('万')[0]) * 10000)
         elif '万+' in comment_cnt:
             comment_cnt = int(float(comment_cnt.split('万+')[0]) * 10000)
+        elif comment_cnt == "" or comment_cnt == "评论" or cls.is_contain_chinese(comment_cnt) is True:
+            comment_cnt = 0
         else:
             comment_cnt = int(float(comment_cnt))
 

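The reordered branches above make sure a value like '1.2万' is converted to a number before the contains-Chinese fallback can zero it out; and since '万' is a substring of '万+', the single '万' branch already handles both suffixes. A small sketch of that normalization collapsed into one helper (the label examples are taken from the branches above):

    def parse_count(text):
        # Turn an on-screen counter such as '1.2万+' or '8999' into an int;
        # empty strings and labels like '喜欢'/'转发'/'收藏'/'评论'/'火' become 0.
        text = (text or "").strip()
        if "万" in text:  # covers both '1.2万' and '1.2万+'
            return int(float(text.split("万")[0]) * 10000)
        try:
            return int(float(text))
        except ValueError:
            return 0
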
+ 122 - 347
shipinhao/shipinhao_search/shipinhao_search_scheduling.py

@@ -4,7 +4,6 @@
 import datetime
 import json
 import os
-import shutil
 import sys
 import time
 from datetime import date, timedelta
@@ -15,10 +14,9 @@ from appium.webdriver.webdriver import WebDriver
 from selenium.common import NoSuchElementException
 from selenium.webdriver.common.by import By
 sys.path.append(os.getcwd())
-from common.feishu import Feishu
-from common.publish import Publish
+from common.public import download_rule
+from common.mq import MQ
 from common.common import Common
-from common.getuser import getUser
 from common.scheduling_db import MysqlHelper
 
 
@@ -27,116 +25,14 @@ class ShipinhaoSearchScheduling:
     i = 0
     download_cnt = 0
 
-    # 基础门槛规则
-    @staticmethod
-    def download_rule(log_type, crawler, video_dict, rule_dict):
-        """
-        下载视频的基本规则
-        :param log_type: 日志
-        :param crawler: 哪款爬虫
-        :param video_dict: 视频信息,字典格式
-        :param rule_dict: 规则信息,字典格式
-        :return: 满足规则,返回 True;反之,返回 False
-        """
-        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
-        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
-        if rule_play_cnt_max == 0:
-            rule_play_cnt_max = 100000000
-
-        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
-        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
-        if rule_duration_max == 0:
-            rule_duration_max = 100000000
-
-        # rule_period_min = rule_dict.get('period', {}).get('min', 0)
-        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
-        # if rule_period_max == 0:
-        #     rule_period_max = 100000000
-
-        # rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
-        # rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
-        # if rule_fans_cnt_max == 0:
-        #     rule_fans_cnt_max = 100000000
-
-        # rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
-        # rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
-        # if rule_videos_cnt_max == 0:
-        #     rule_videos_cnt_max = 100000000
-
-        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
-        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
-        if rule_like_cnt_max == 0:
-            rule_like_cnt_max = 100000000
-
-        rule_width_min = rule_dict.get('width', {}).get('min', 0)
-        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
-        if rule_width_max == 0:
-            rule_width_max = 100000000
-
-        rule_height_min = rule_dict.get('height', {}).get('min', 0)
-        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
-        if rule_height_max == 0:
-            rule_height_max = 100000000
-
-        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
-        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
-        if rule_share_cnt_max == 0:
-            rule_share_cnt_max = 100000000
-
-        rule_favorite_cnt_min = rule_dict.get('favorite_cnt', {}).get('min', 0)
-        rule_favorite_cnt_max = rule_dict.get('favorite_cnt', {}).get('max', 100000000)
-        if rule_favorite_cnt_max == 0:
-            rule_favorite_cnt_max = 100000000
-
-        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
-        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
-        if rule_comment_cnt_max == 0:
-            rule_comment_cnt_max = 100000000
-
-        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
-        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 0)
-        if rule_publish_time_max == 0:
-            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
-
-        Common.logger(log_type, crawler).info(
-            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_favorite_cnt_max:{int(rule_favorite_cnt_max)} >= favorite_cnt:{int(video_dict["favorite_cnt"])} >= rule_favorite_cnt_min:{int(rule_favorite_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])*1000} >= rule_publish_time_min:{int(rule_publish_time_min)}')
-
-        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
-                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
-                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
-                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
-                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
-                and int(rule_favorite_cnt_max) >= int(video_dict['favorite_cnt']) >= int(rule_favorite_cnt_min) \
-                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
-                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
-                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp'])*1000 >= int(rule_publish_time_min):
-            return True
-        else:
-            return False
-
     @classmethod
-    def start_wechat(cls, log_type, crawler, word, rule_dict, our_uid, oss_endpoint, env):
+    def start_wechat(cls, log_type, crawler, rule_dict, user_dict, env):
         Common.logger(log_type, crawler).info('启动微信')
+        Common.logging(log_type, crawler, env, '启动微信')
         if env == "dev":
             chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver"
         else:
-            chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
+            chromedriverExecutable = "/Users/piaoquan/Downloads/chromedriver/chromedriver_v111/chromedriver"
         caps = {
             "platformName": "Android",  # 手机操作系统 Android / iOS
             "deviceName": "Android",  # 连接的设备名(模拟器或真机),安卓可以随便写
@@ -156,31 +52,35 @@ class ShipinhaoSearchScheduling:
             "showChromedriverLog": True,
             # "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
             "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
+            # "chromeOptions": {"androidProcess": "com.tencent.mm:toolsmp"},
+            # "chromeOptions": {"androidProcess": "com.tencent.mm"},
             'enableWebviewDetailsCollection': True,
             'setWebContentsDebuggingEnabled': True,
             'chromedriverExecutable': chromedriverExecutable,
         }
         driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
         driver.implicitly_wait(10)
-        if len(driver.find_elements(By.ID, 'android:id/text1')) != 0:
-            driver.find_elements(By.ID, 'android:id/text1')[0].click()
+        # Common.logger(log_type, crawler).info("点击微信")
+        # if len(driver.find_elements(By.ID, 'android:id/text1')) != 0:
+        #     driver.find_elements(By.ID, 'android:id/text1')[0].click()
+        # Common.logger(log_type, crawler).info("等待 5s")
         time.sleep(5)
         cls.search_video(log_type=log_type,
                          crawler=crawler,
-                         word=word,
                          rule_dict=rule_dict,
-                         our_uid=our_uid,
-                         oss_endpoint=oss_endpoint,
+                         user_dict=user_dict,
                          driver=driver,
                          env=env)
         cls.close_wechat(log_type=log_type,
                          crawler=crawler,
+                         env=env,
                          driver=driver)
 
     @classmethod
-    def close_wechat(cls, log_type, crawler, driver: WebDriver):
+    def close_wechat(cls, log_type, crawler, env, driver: WebDriver):
         driver.quit()
         Common.logger(log_type, crawler).info(f"微信退出成功\n")
+        Common.logging(log_type, crawler, env, f"微信退出成功\n")
 
     @classmethod
     def is_contain_chinese(cls, strword):
@@ -206,108 +106,132 @@ class ShipinhaoSearchScheduling:
 
     @classmethod
     def check_to_webview(cls, log_type, crawler, driver: WebDriver):
-        # Common.logger(log_type, crawler).info('切换到webview')
         webviews = driver.contexts
+        Common.logger(log_type, crawler).info(f"webviews:{webviews}")
         driver.switch_to.context(webviews[1])
+        Common.logger(log_type, crawler).info(driver.current_context)
         time.sleep(1)
         windowHandles = driver.window_handles
         for handle in windowHandles:
-            driver.switch_to.window(handle)
             try:
-                shipinhao_webview = driver.find_element(By.XPATH, '//div[@class="unit"]')
-                if shipinhao_webview:
-                    Common.logger(log_type, crawler).info('切换到视频号 webview 成功')
-                    return "成功"
-            except Exception as e:
-                Common.logger(log_type, crawler).info(f"{e}\n")
+                driver.switch_to.window(handle)
+                time.sleep(1)
+                driver.find_element(By.XPATH, '//div[@class="unit"]')
+                Common.logger(log_type, crawler).info('切换 webview 成功')
+                return "成功"
+            except Exception:
+                Common.logger(log_type, crawler).info("切换 webview 失败")
 
     @classmethod
     def repeat_out_video_id(cls, log_type, crawler, out_video_id, env):
-        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{out_video_id}"; """
+        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{out_video_id}"; """
         repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)
 
     @classmethod
     def repeat_video_url(cls, log_type, crawler, video_url, env):
-        sql = f""" select * from crawler_video where platform="{cls.platform}" and video_url="{video_url}"; """
+        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and video_url="{video_url}"; """
         repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)
 
     @classmethod
-    def search_video(cls, log_type, crawler, word, rule_dict, driver: WebDriver, our_uid, oss_endpoint, env):
+    def search_video(cls, log_type, crawler, rule_dict, driver: WebDriver, user_dict, env):
+        mq = MQ(topic_name="topic_crawler_etl_" + env)
         # 点击微信搜索框,并输入搜索词
         driver.implicitly_wait(10)
-        driver.find_element(By.ID, 'com.tencent.mm:id/j5t').click()
+        Common.logger(log_type, crawler).info("点击搜索框")
+        Common.logging(log_type, crawler, env, "点击搜索框")
+        driver.find_element(By.ID, 'com.tencent.mm:id/j5t').click()  # 微信8.0.30版本
+        # driver.find_element(By.ID, 'com.tencent.mm:id/he6').click()  # 微信8.0.16版本
         time.sleep(0.5)
-        Common.logger(log_type, crawler).info(f'输入搜索词:{word}')
-        driver.find_element(By.ID, 'com.tencent.mm:id/cd7').clear().send_keys(word)
+        driver.find_element(By.ID, 'com.tencent.mm:id/cd7').clear().send_keys(user_dict["link"])  # 微信8.0.30版本
+        # driver.find_element(By.ID, 'com.tencent.mm:id/bxz').clear().send_keys(word)  # 微信8.0.16版本
         driver.press_keycode(AndroidKey.ENTER)
-        # driver.find_elements(By.ID, 'com.tencent.mm:id/oi4')[0].click()
-        driver.find_element(By.ID, 'com.tencent.mm:id/m94').click()
+        Common.logger(log_type, crawler).info("进入搜索词页面")
+        Common.logging(log_type, crawler, env, "进入搜索词页面")
+        driver.find_elements(By.ID, 'com.tencent.mm:id/br8')[0].click()  # 微信8.0.30版本
+        # driver.find_elements(By.ID, 'com.tencent.mm:id/jkg')[0].click()  # 微信8.0.16版本
         time.sleep(5)
 
         # 切换到微信搜索结果页 webview
         check_to_webview = cls.check_to_webview(log_type, crawler, driver)
         if check_to_webview is None:
             Common.logger(log_type, crawler).info("切换到视频号 webview 失败\n")
+            Common.logging(log_type, crawler, env, "切换到视频号 webview 失败\n")
             return
         time.sleep(1)
 
         # 切换到"视频号"分类
         shipinhao_tags = cls.search_elements(driver, '//div[@class="unit"]/*[2]')
         Common.logger(log_type, crawler).info('点击"视频号"分类')
+        Common.logging(log_type, crawler, env, '点击"视频号"分类')
         shipinhao_tags[0].click()
         time.sleep(5)
 
-        videos_cnt = rule_dict.get('videos_cnt', {}).get('min', 0)
+        videos_cnt = rule_dict.get('videos_cnt', {}).get('min', 30)
         index = 0
         while True:
-
-            if cls.search_elements(driver, '//*[@class="double-rich double-rich_vertical"]') is None:
+            if cls.search_elements(driver, '//*[@class="mixed-box__bd"]') is None:
                 Common.logger(log_type, crawler).info('窗口已销毁\n')
+                Common.logging(log_type, crawler, env, '窗口已销毁\n')
                 return
 
             Common.logger(log_type, crawler).info('获取视频列表\n')
-            video_elements = cls.search_elements(driver, '//div[@class="vc active__mask"]')
+            Common.logging(log_type, crawler, env, '获取视频列表\n')
+            video_elements = cls.search_elements(driver, '//div[@class="rich-media active__absolute"]')
             if video_elements is None:
                 Common.logger(log_type, crawler).warning(f'video_elements:{video_elements}')
+                Common.logging(log_type, crawler, env, f'video_elements:{video_elements}')
                 return
 
             video_element_temp = video_elements[index:]
             if len(video_element_temp) == 0:
                 Common.logger(log_type, crawler).info('到底啦~~~~~~~~~~~~~\n')
+                Common.logging(log_type, crawler, env, '到底啦~~~~~~~~~~~~~\n')
                 return
 
             for i, video_element in enumerate(video_element_temp):
                 try:
                     Common.logger(log_type, crawler).info(f"download_cnt:{cls.download_cnt}")
+                    Common.logging(log_type, crawler, env, f"download_cnt:{cls.download_cnt}")
                     if cls.download_cnt >= int(videos_cnt):
-                        Common.logger(log_type, crawler).info(f'搜索词:"{word}",已抓取视频数:{cls.download_cnt}')
+                        Common.logger(log_type, crawler).info(f'搜索词:{user_dict["link"]},已抓取视频数:{cls.download_cnt}')
+                        Common.logging(log_type, crawler, env, f'搜索词:{user_dict["link"]},已抓取视频数:{cls.download_cnt}')
                         cls.download_cnt = 0
                         return
 
                     if video_element is None:
                         Common.logger(log_type, crawler).info('到底啦~\n')
+                        Common.logging(log_type, crawler, env, '到底啦~\n')
                         return
 
                     cls.i += 1
-                    cls.search_elements(driver, '//div[@class="vc active__mask"]')
+                    cls.search_elements(driver, '//*[@class="rich-media active__absolute"]')
 
                     Common.logger(log_type, crawler).info(f'拖动"视频"列表第{cls.i}个至屏幕中间')
+                    Common.logging(log_type, crawler, env, f'拖动"视频"列表第{cls.i}个至屏幕中间')
                     time.sleep(3)
-                    driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})",
-                                          video_element)
+                    driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})", video_element)
                     if len(video_element.find_elements(By.XPATH, "//*[@text='没有更多的搜索结果']")) != 0:
                         Common.logger(log_type, crawler).info("没有更多的搜索结果\n")
+                        Common.logging(log_type, crawler, env, "没有更多的搜索结果\n")
                         return
-                    video_title = video_element.find_elements(By.XPATH, '//div[@class="title ellipsis_2"]/*[2]')[index + i].text
-                    video_url = video_element.find_elements(By.XPATH, '//div[@class="video-player"]')[index+i].get_attribute('src')
-                    cover_url = video_element.find_elements(By.XPATH, '//div[@class="video-player__bd"]')[index+i].get_attribute('style')
+                    video_title = \
+                    video_element.find_elements(By.XPATH, '//div[@class="rich-media__title ellipsis_2"]/span')[
+                        index + i].text[:40]
+                    video_url = video_element.find_elements(By.XPATH, '//div[@class="video-player"]')[
+                        index + i].get_attribute('src')
+                    cover_url = video_element.find_elements(By.XPATH, '//div[@class="video-player__bd"]')[
+                        index + i].get_attribute('style')
                     cover_url = cover_url.split('url("')[-1].split('")')[0]
-                    duration = video_element.find_elements(By.XPATH, '//div[@class="play-mask__text"]/*[2]')[index+i].text
+                    duration = video_element.find_elements(By.XPATH, '//div[@class="video-player-mask__text"]')[
+                        index + i].text
                     duration = int(duration.split(':')[0]) * 60 + int(duration.split(':')[-1])
-                    user_name = video_element.find_elements(By.XPATH, '//p[@class="vc-source__text"]')[index+i].text
-                    avatar_url = video_element.find_elements(By.XPATH, '//div[@class="ui-image-image ui-image vc-source__thumb"]')[index+i].get_attribute('style')
+                    user_name = video_element.find_elements(By.XPATH, '//div[@class="rich-media__source__title"]')[
+                        index + i].text
+                    avatar_url = video_element.find_elements(By.XPATH,
+                                                             '//div[@class="ui-image-image ui-image rich-media__source__thumb"]')[
+                        index + i].get_attribute('style')
                     avatar_url = avatar_url.split('url("')[-1].split('")')[0]
                     out_video_id = md5(video_title.encode('utf8')).hexdigest()
                     out_user_id = md5(user_name.encode('utf8')).hexdigest()
@@ -317,6 +241,7 @@ class ShipinhaoSearchScheduling:
                         "video_id": out_video_id,
                         "play_cnt": 0,
                         "duration": duration,
+                        # "duration": 60,
                         "user_name": user_name,
                         "user_id": out_user_id,
                         "avatar_url": avatar_url,
@@ -326,12 +251,16 @@ class ShipinhaoSearchScheduling:
                     }
                     for k, v in video_dict.items():
                         Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    Common.logging(log_type, crawler, env, f"{video_dict}")
                     if video_title is None or video_url is None:
                         Common.logger(log_type, crawler).info("无效视频\n")
+                        Common.logging(log_type, crawler, env, "无效视频\n")
                     elif cls.repeat_out_video_id(log_type, crawler, out_video_id, env) != 0:
                         Common.logger(log_type, crawler).info('视频已下载\n')
+                        Common.logging(log_type, crawler, env, '视频已下载\n')
                     elif cls.repeat_video_url(log_type, crawler, video_url, env) != 0:
                         Common.logger(log_type, crawler).info('视频已下载\n')
+                        Common.logging(log_type, crawler, env, '视频已下载\n')
                     else:
                         video_element.click()
                         time.sleep(3)
@@ -340,183 +269,85 @@ class ShipinhaoSearchScheduling:
                         video_dict["share_cnt"] = video_info_dict["share_cnt"]
                         video_dict["favorite_cnt"] = video_info_dict["favorite_cnt"]
                         video_dict["comment_cnt"] = video_info_dict["comment_cnt"]
-                        video_dict["publish_time_str"] = video_info_dict["publish_time_str"]
+                        video_dict["publish_time_str"] = video_info_dict["publish_time_str"] + " 00:00:00"
                         video_dict["publish_time_stamp"] = video_info_dict["publish_time_stamp"]
-
-                        cls.download_publish(log_type=log_type,
-                                             crawler=crawler,
-                                             word=word,
-                                             rule_dict=rule_dict,
-                                             video_dict=video_dict,
-                                             our_uid=our_uid,
-                                             oss_endpoint=oss_endpoint,
-                                             env=env)
+                        Common.logger(log_type, crawler).info(f'publish_time:{video_dict["publish_time_str"]}')
+                        Common.logging(log_type, crawler, env, f'publish_time:{video_dict["publish_time_str"]}')
+                        if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                            Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                            Common.logging(log_type, crawler, env, "不满足抓取规则\n")
+                        else:
+                            video_dict["out_user_id"] = video_dict["user_id"]
+                            video_dict["platform"] = crawler
+                            video_dict["strategy"] = log_type
+                            video_dict["out_video_id"] = video_dict["video_id"]
+                            video_dict["width"] = 0
+                            video_dict["height"] = 0
+                            video_dict["crawler_rule"] = json.dumps(rule_dict)
+                            video_dict["user_id"] = user_dict["uid"]
+                            video_dict["publish_time"] = video_dict["publish_time_str"]
+                            mq.send_msg(video_dict)
+                            cls.download_cnt += 1
                 except Exception as e:
-                    Common.logger(log_type, crawler).error(f"抓取单条视频时异常:{e}\n")
+                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+                    Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
 
             Common.logger(log_type, crawler).info('已抓取完一组视频,休眠1秒\n')
+            Common.logging(log_type, crawler, env, '已抓取完一组视频,休眠1秒\n')
             time.sleep(1)
             index = index + len(video_element_temp)
 
-    @classmethod
-    def download_publish(cls, log_type, crawler, word, rule_dict, video_dict, our_uid, oss_endpoint, env):
-        # 下载视频
-        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
-
-        # ffmpeg 获取视频宽高
-        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
-        if ffmpeg_dict is None:
-            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
-            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
-            return
-        video_dict["video_width"] = ffmpeg_dict["width"]
-        video_dict["video_height"] = ffmpeg_dict["height"]
-
-        # 规则判断
-        if cls.download_rule(log_type=log_type,
-                             crawler=crawler,
-                             video_dict=video_dict,
-                             rule_dict=rule_dict) is False:
-            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
-            Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
-            return
-
-        # 下载封面
-        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_dict["video_title"], url=video_dict["cover_url"])
-        # 保存视频信息至 "./videos/{download_video_title}/info.txt"
-        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
-
-        # 上传视频
-        Common.logger(log_type, crawler).info("开始上传视频...")
-        our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                  crawler=crawler,
-                                                  strategy="搜索爬虫策略",
-                                                  our_uid=our_uid,
-                                                  env=env,
-                                                  oss_endpoint=oss_endpoint)
-        if env == "dev":
-            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-        else:
-            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-        Common.logger(log_type, crawler).info("视频上传完成")
-
-        if our_video_id is None:
-            try:
-                # 删除视频文件夹
-                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-                shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id}, 删除成功\n")
-                return
-            except FileNotFoundError:
-                return
-
-        insert_sql = f""" insert into crawler_video(video_id,
-                                                out_user_id,
-                                                platform,
-                                                strategy,
-                                                out_video_id,
-                                                video_title,
-                                                cover_url,
-                                                video_url,
-                                                duration,
-                                                publish_time,
-                                                play_cnt,
-                                                crawler_rule,
-                                                width,
-                                                height)
-                                                values({our_video_id},
-                                                "{video_dict['user_id']}",
-                                                "{cls.platform}",
-                                                "搜索爬虫策略",
-                                                "{video_dict['video_id']}",
-                                                "{video_dict['video_title']}",
-                                                "{video_dict['cover_url']}",
-                                                "{video_dict['video_url']}",
-                                                {int(video_dict['duration'])},
-                                                "{video_dict['publish_time_str']}",
-                                                {int(video_dict['play_cnt'])},
-                                                '{json.dumps(rule_dict)}',
-                                                {int(video_dict['video_width'])},
-                                                {int(video_dict['video_height'])}) """
-        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
-        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
-
-        # 写飞书
-        Feishu.insert_columns(log_type, crawler, "xYWCzf", "ROWS", 1, 2)
-        time.sleep(0.5)
-        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
-                   "搜索爬虫策略",
-                   word,
-                   video_dict["video_title"],
-                   our_video_link,
-                   video_dict["duration"],
-                   video_dict["like_cnt"],
-                   video_dict["share_cnt"],
-                   video_dict["favorite_cnt"],
-                   video_dict["comment_cnt"],
-                   f'{video_dict["video_width"]}*{video_dict["video_height"]}',
-                   video_dict["publish_time_str"],
-                   video_dict["user_name"],
-                   video_dict["avatar_url"],
-                   video_dict["cover_url"],
-                   video_dict["video_url"]]]
-        Feishu.update_values(log_type, crawler, "xYWCzf", "F2:Z2", values)
-        Common.logger(log_type, crawler).info("写入飞书成功\n")
-        cls.download_cnt += 1
-
     @classmethod
     def get_video_info(cls, driver: WebDriver):
         # Common.logger(log_type, crawler).info('切回NATIVE_APP')
         driver.switch_to.context('NATIVE_APP')
 
         # 点赞
-        like_id = driver.find_element(By.ID, 'com.tencent.mm:id/k04')
+        like_id = driver.find_element(By.ID, 'com.tencent.mm:id/k04')  # 微信版本 8.0.30
         like_cnt = like_id.get_attribute('name')
-        if like_cnt == "" or like_cnt == "喜欢" or like_cnt == "火" or cls.is_contain_chinese(like_cnt) is True:
-            like_cnt = 0
-        elif '万' in like_cnt:
+        if '万' in like_cnt:
             like_cnt = int(float(like_cnt.split('万')[0]) * 10000)
         elif '万+' in like_cnt:
             like_cnt = int(float(like_cnt.split('万+')[0]) * 10000)
+        elif like_cnt == "" or like_cnt == "喜欢" or like_cnt == "火" or cls.is_contain_chinese(like_cnt) is True:
+            like_cnt = 0
         else:
             like_cnt = int(float(like_cnt))
 
         # 分享
         share_id = driver.find_element(By.ID, 'com.tencent.mm:id/jhv')
         share_cnt = share_id.get_attribute('name')
-        if share_cnt == "" or share_cnt == "转发" or cls.is_contain_chinese(share_cnt) is True:
-            share_cnt = 0
-        elif '万' in share_cnt:
+        if '万' in share_cnt:
             share_cnt = int(float(share_cnt.split('万')[0]) * 10000)
         elif '万+' in share_cnt:
             share_cnt = int(float(share_cnt.split('万+')[0]) * 10000)
+        elif share_cnt == "" or share_cnt == "转发" or cls.is_contain_chinese(share_cnt) is True:
+            share_cnt = 0
         else:
             share_cnt = int(float(share_cnt))
 
         # 收藏
         favorite_id = driver.find_element(By.ID, 'com.tencent.mm:id/fnp')
         favorite_cnt = favorite_id.get_attribute('name')
-        if favorite_cnt == "" or favorite_cnt == "收藏" or favorite_cnt == "推荐" or favorite_cnt == "火" or cls.is_contain_chinese(favorite_cnt) is True:
-            favorite_cnt = 0
-        elif '万' in favorite_cnt:
+        if '万' in favorite_cnt:
             favorite_cnt = int(float(favorite_cnt.split('万')[0]) * 10000)
         elif '万+' in favorite_cnt:
             favorite_cnt = int(float(favorite_cnt.split('万+')[0]) * 10000)
+        elif favorite_cnt == "" or favorite_cnt == "收藏" or favorite_cnt == "推荐" or favorite_cnt == "火" or cls.is_contain_chinese(
+                favorite_cnt) is True:
+            favorite_cnt = 0
         else:
             favorite_cnt = int(float(favorite_cnt))
 
         # 评论
         comment_id = driver.find_element(By.ID, 'com.tencent.mm:id/bje')
         comment_cnt = comment_id.get_attribute('name')
-        if comment_cnt == "" or comment_cnt == "评论" or cls.is_contain_chinese(comment_cnt) is True:
-            comment_cnt = 0
-        elif '万' in comment_cnt:
+        if '万' in comment_cnt:
             comment_cnt = int(float(comment_cnt.split('万')[0]) * 10000)
         elif '万+' in comment_cnt:
             comment_cnt = int(float(comment_cnt.split('万+')[0]) * 10000)
+        elif comment_cnt == "" or comment_cnt == "评论" or cls.is_contain_chinese(comment_cnt) is True:
+            comment_cnt = 0
         else:
             comment_cnt = int(float(comment_cnt))
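Note: the four reordered branches above (like / share / favorite / comment) repeat the same string-to-int conversion. A minimal sketch of a shared helper, assuming the same "万" convention and treating any non-numeric placeholder text (喜欢、转发、收藏、评论、火) as zero; the name parse_cnt is hypothetical and not part of this commit:

    def parse_cnt(raw: str) -> int:
        # Convert WeChat-style counters such as "3", "1.2万" or "喜欢" to int.
        if "万" in raw:  # "1.2万" / "1.2万+" -> 12000
            return int(float(raw.split("万")[0]) * 10000)
        if raw == "" or not raw.replace(".", "", 1).isdigit():  # non-numeric placeholder -> 0
            return 0
        return int(float(raw))

Each block would then collapse to e.g. like_cnt = parse_cnt(like_id.get_attribute('name')).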
 
@@ -575,84 +406,28 @@ class ShipinhaoSearchScheduling:
         return video_dict
 
     @classmethod
-    def get_users(cls, log_type, crawler, sheetid, env):
-        while True:
-            user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
-            if user_sheet is None:
-                Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 3秒钟后重试")
-                time.sleep(3)
-                continue
-            our_user_list = []
-            # for i in range(1, len(user_sheet)):
-            for i in range(1, 3):
-                search_word = user_sheet[i][4]
-                our_uid = user_sheet[i][6]
-                tag1 = user_sheet[i][8]
-                tag2 = user_sheet[i][9]
-                tag3 = user_sheet[i][10]
-                tag4 = user_sheet[i][11]
-                tag5 = user_sheet[i][12]
-                tag6 = user_sheet[i][13]
-                tag7 = user_sheet[i][14]
-                Common.logger(log_type, crawler).info(f"正在更新 {search_word} 搜索词信息")
-                if our_uid is None:
-                    default_user = getUser.get_default_user()
-                    # 用来创建our_id的信息
-                    user_dict = {
-                        'recommendStatus': -6,
-                        'appRecommendStatus': -6,
-                        'nickName': default_user['nickName'],
-                        'avatarUrl': default_user['avatarUrl'],
-                        'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5},{tag6},{tag7}',
-                    }
-                    our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
-                    if env == 'prod':
-                        our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
-                    else:
-                        our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
-                    Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
-                                         [[our_uid, our_user_link]])
-                    Common.logger(log_type, crawler).info(f'站内用户主页创建成功:{our_user_link}\n')
-                our_user_dict = {
-                    'out_uid': '',
-                    'search_word': search_word,
-                    'our_uid': our_uid,
-                    'our_user_link': f'https://admin.piaoquantv.com/ums/user/{our_uid}/post',
-                }
-                our_user_list.append(our_user_dict)
-
-            return our_user_list
-
-
-    @classmethod
-    def get_search_videos(cls, log_type, crawler, rule_dict, oss_endpoint, env):
-        user_list = cls.get_users(log_type, crawler, "wNgi6Z", env)
-        for user in user_list:
-            cls.i = 0
-            cls.download_cnt = 0
-            search_word = user["search_word"]
-            our_uid = user["our_uid"]
-            Common.logger(log_type, crawler).info(f"开始抓取搜索词:{search_word}")
+    def get_search_videos(cls, log_type, crawler, rule_dict, user_list, env):
+        Common.logger(log_type, crawler).info(f"搜索词总数:{len(user_list)}\n")
+        Common.logging(log_type, crawler, env, f"搜索词总数:{len(user_list)}\n")
+        if len(user_list) == 0:
+            return
+        for user_dict in user_list:
             try:
+                cls.i = 0
+                cls.download_cnt = 0
+                Common.logger(log_type, crawler).info(f"开始抓取 {user_dict['link']}\n")
+                Common.logging(log_type, crawler, env, f"开始抓取 {user_dict['link']}\n")
                 cls.start_wechat(log_type=log_type,
                                  crawler=crawler,
-                                 word=search_word,
                                  rule_dict=rule_dict,
-                                 our_uid=our_uid,
-                                 oss_endpoint=oss_endpoint,
+                                 user_dict=user_dict,
                                  env=env)
             except Exception as e:
-                Common.logger(log_type, crawler).error(f"抓取{user['search_word']}时异常:{e}\n")
+                Common.logger(log_type, crawler).error(f"抓取 {user_dict['link']} 时异常:{e}\n")
+                Common.logging(log_type, crawler, env, f"抓取 {user_dict['link']} 时异常:{e}\n")
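Note: get_search_videos now takes user_list from the run script instead of reading the Feishu sheet itself; each user_dict is assumed to be a crawler_user_v3 row exposing at least the two fields read above (a sketch, field values are placeholders):

    user_dict = {
        "uid": 123456,                    # in-platform uid, later written into video_dict["user_id"] before mq.send_msg
        "link": "search keyword / link",  # crawl entry point, echoed in the start / exception log lines
    }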
 
 
 if __name__ == '__main__':
-    # ShipinhaoSearchScheduling.get_search_videos(log_type="search",
-    #                                             crawler="shipinhao",
-    #                                             rule_dict='[{"videos_cnt":{"min":10,"max":0}},{"duration":{"min":30,"max":600}},{"share_cnt":{"min":3000,"max":0}},{"favorite_cnt":{"min":1000,"max":0}},{"publish_time":{"min":1672502400000,"max":0}}]',
-    #                                             oss_endpoint="out",
-    #                                             env="dev")
-    # print(ShipinhaoSearchScheduling.get_users("search", "shipinhao", "wNgi6Z", "dev"))
-    # print((date.today() + timedelta(days=0)).strftime("%Y-%m-%d"))
     print(ShipinhaoSearchScheduling.repeat_out_video_id(log_type="search",
                                                         crawler="shipinhao",
                                                         out_video_id="123",

+ 28 - 1
suisuiniannianyingfuqi/suisuiniannianyingfuqi_main/run_ssnnyfq_recommend.py

@@ -25,11 +25,16 @@ def main(log_type, crawler, topic_name, group_id, env):
                                           f'WaitSeconds:{wait_seconds}\n'
                                           f'TopicName:{topic_name}\n'
                                           f'MQConsumer:{group_id}')
+    Common.logging(log_type, crawler, env, f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                           f'WaitSeconds:{wait_seconds}\n'
+                                           f'TopicName:{topic_name}\n'
+                                           f'MQConsumer:{group_id}')
     while True:
         try:
             # 长轮询消费消息。
             recv_msgs = consumer.consume_message(batch, wait_seconds)
             for msg in recv_msgs:
+                ssnnyfq_start_time = int(time.time())
                 Common.logger(log_type, crawler).info(f"Receive\n"
                                                       f"MessageId:{msg.message_id}\n"
                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
@@ -40,6 +45,16 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                       f"NextConsumeTime:{msg.next_consume_time}\n"
                                                       f"ReceiptHandle:{msg.receipt_handle}\n"
                                                       f"Properties:{msg.properties}")
+                Common.logging(log_type, crawler, env, f"Receive\n"
+                                                       f"MessageId:{msg.message_id}\n"
+                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                       f"MessageTag:{msg.message_tag}\n"
+                                                       f"ConsumedTimes:{msg.consumed_times}\n"
+                                                       f"PublishTime:{msg.publish_time}\n"
+                                                       f"Body:{msg.message_body}\n"
+                                                       f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                       f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                       f"Properties:{msg.properties}")
                 # ack_mq_message
                 ack_message(log_type=log_type, crawler=crawler, recv_msgs=recv_msgs, consumer=consumer)
 
@@ -54,9 +69,13 @@ def main(log_type, crawler, topic_name, group_id, env):
                     our_uid_list.append(user["uid"])
                 our_uid = random.choice(our_uid_list)
                 Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
+                Common.logging(log_type, crawler, env, f"调度任务:\n{task_dict}")
                 Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
+                Common.logging(log_type, crawler, env, f"抓取规则:\n{rule_dict}")
                 Common.logger(log_type, crawler).info(f"用户列表:\n{user_list}")
+                Common.logging(log_type, crawler, env, f"用户列表:\n{user_list}")
                 Common.logger(log_type, crawler).info(f'开始抓取:{task_dict["taskName"]}\n')
+                Common.logging(log_type, crawler, env, f'开始抓取:{task_dict["taskName"]}\n')
                 SuisuiniannianyingfuqiRecommendScheduling.get_videoList(log_type=log_type,
                                                                         crawler=crawler,
                                                                         our_uid=our_uid,
@@ -64,17 +83,25 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                                         env=env)
                 Common.del_logs(log_type, crawler)
                 Common.logger(log_type, crawler).info('抓取一轮结束\n')
+                Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+                ssnnyfq_end_time = int(time.time())
+                ssnnyfq_duration = ssnnyfq_end_time - ssnnyfq_start_time
+                Common.logger(log_type, crawler).info(f"duration {ssnnyfq_duration}")
+                Common.logging(log_type, crawler, env, f"duration {ssnnyfq_duration}")
 
         except MQExceptionBase as err:
             # Topic中没有消息可消费。
             if err.type == "MessageNotExist":
                 Common.logger(log_type, crawler).info(f"No new message! RequestId:{err.req_id}\n")
+                Common.logging(log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n")
                 continue
 
             Common.logger(log_type, crawler).info(f"Consume Message Fail! Exception:{err}\n")
+            Common.logging(log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n")
             time.sleep(2)
             continue
 
+
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()  ## 新建参数解释器对象
     parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
@@ -87,4 +114,4 @@ if __name__ == "__main__":
          crawler=args.crawler,
          topic_name=args.topic_name,
          group_id=args.group_id,
-         env=args.env)
+         env=args.env)
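Note: the per-round timing added across these run scripts follows one pattern; a minimal sketch (names are placeholders), with the subtraction written end minus start so the logged duration is the elapsed seconds:

    import time

    round_start = int(time.time())
    # ... consume one message and run one crawl round ...
    round_end = int(time.time())
    duration = round_end - round_start  # elapsed seconds, non-negative
    print(f"duration {duration}")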

+ 9 - 7
suisuiniannianyingfuqi/suisuiniannianyingfuqi_recommend/suisuiniannianyingfuqi_recommend_scheduling.py

@@ -64,12 +64,15 @@ class SuisuiniannianyingfuqiRecommendScheduling:
                 page += 1
                 if response.status_code != 200:
                     Common.logger(log_type, crawler).warning(f'get_videoList:{response.status_code}, {response.text}\n')
+                    Common.logging(log_type, crawler, env, f'get_videoList:{response.status_code}, {response.text}\n')
                     return
                 elif 'data' not in response.json():
                     Common.logger(log_type, crawler).warning(f'get_videoList:{response.status_code}, {response.json()}\n')
+                    Common.logging(log_type, crawler, env, f'get_videoList:{response.status_code}, {response.json()}\n')
                     return
                 elif len(response.json()['data']['video_list']['data']) == 0:
                     Common.logger(log_type, crawler).info(f'没有更多数据啦~ {response.json()}\n')
+                    Common.logging(log_type, crawler, env, f'没有更多数据啦~ {response.json()}\n')
                     return
                 else:
                     feeds = response.json()['data']['video_list']['data']
@@ -93,20 +96,18 @@ class SuisuiniannianyingfuqiRecommendScheduling:
                                           'session': f"suisuiniannianyingfuqi-{int(time.time())}"}
                             for k, v in video_dict.items():
                                 Common.logger(log_type, crawler).info(f"{k}:{v}")
+                            Common.logging(log_type, crawler, env, f'video_dict:{video_dict}')
 
                             if video_dict["video_id"] == '' or video_dict["video_title"] == '' or video_dict["cover_url"] == '' or video_dict["video_url"] == '':
                                 Common.logger(log_type, crawler).info('无效视频\n')
+                                Common.logging(log_type, crawler, env, '无效视频\n')
                             elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                                 Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                                Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                             elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                                 Common.logger(log_type, crawler).info('视频已下载\n')
+                                Common.logging(log_type, crawler, env, '视频已下载\n')
                             else:
-                                # cls.download_publish(log_type=log_type,
-                                #                      crawler=crawler,
-                                #                      our_uid=our_uid,
-                                #                      video_dict=video_dict,
-                                #                      rule_dict=rule_dict,
-                                #                      env=env)
                                 video_dict["out_user_id"] = video_dict["user_id"]
                                 video_dict["platform"] = crawler
                                 video_dict["strategy"] = log_type
@@ -116,12 +117,13 @@ class SuisuiniannianyingfuqiRecommendScheduling:
                                 video_dict["crawler_rule"] = json.dumps(rule_dict)
                                 video_dict["user_id"] = our_uid
                                 video_dict["publish_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-
                                 mq.send_msg(video_dict)
                         except Exception as e:
                             Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+                            Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
             except Exception as e:
                 Common.logger(log_type, crawler).error(f"抓取第{page}页时异常:{e}\n")
+                Common.logging(log_type, crawler, env, f"抓取第{page}页时异常:{e}\n")
 
     # 下载 / 上传
     @classmethod

+ 6 - 1
xiaoniangao/xiaoniangao_main/run_xng_author.py

@@ -32,6 +32,7 @@ def main(log_type, crawler, topic_name, group_id, env):
             # 长轮询消费消息。
             recv_msgs = consumer.consume_message(batch, wait_seconds)
             for msg in recv_msgs:
+                xng_author_start_time = int(time.time())
                 Common.logger(log_type, crawler).info(f"Receive\n"
                                                       f"MessageId:{msg.message_id}\n"
                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
@@ -74,9 +75,13 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                               user_list=user_list,
                                                               rule_dict=rule_dict,
                                                               env=env)
-                Common.del_logs(log_type, crawler)
+                # Common.del_logs(log_type, crawler)
                 Common.logger(log_type, crawler).info('抓取一轮结束\n')
                 Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+                xng_author_end_time = int(time.time())
+                xng_author_duration = xng_author_end_time - xng_author_start_time
+                Common.logger(log_type, crawler).info(f"duration {xng_author_duration}")
+                Common.logging(log_type, crawler, env, f"duration {xng_author_duration}")
 
         except MQExceptionBase as err:
             # Topic中没有消息可消费。

+ 8 - 1
xiaoniangao/xiaoniangao_main/run_xng_hour.py

@@ -4,6 +4,8 @@
 import argparse
 import datetime
 import random
+import time
+
 from mq_http_sdk.mq_client import *
 from mq_http_sdk.mq_consumer import *
 from mq_http_sdk.mq_exception import MQExceptionBase
@@ -34,6 +36,7 @@ def main(log_type, crawler, topic_name, group_id, env):
             # 长轮询消费消息。
             recv_msgs = consumer.consume_message(batch, wait_seconds)
             for msg in recv_msgs:
+                xng_hour_start_time = int(time.time())
                 Common.logger(log_type, crawler).info(f"Receive\n"
                                                       f"MessageId:{msg.message_id}\n"
                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
@@ -109,9 +112,13 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                                rule_dict=rule_dict,
                                                                our_uid=our_uid,
                                                                env=env)
-                Common.del_logs(log_type, crawler)
+                # Common.del_logs(log_type, crawler)
                 Common.logger(log_type, crawler).info('抓取一轮结束\n')
                 Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+                xng_hour_end_time = int(time.time())
+                xng_hour_duration = xng_hour_end_time - xng_hour_start_time
+                Common.logger(log_type, crawler).info(f"duration {xng_hour_duration}")
+                Common.logging(log_type, crawler, env, f"duration {xng_hour_duration}")
 
         except MQExceptionBase as err:
             # Topic中没有消息可消费。

+ 6 - 2
xiaoniangao/xiaoniangao_main/run_xng_play.py

@@ -33,6 +33,7 @@ def main(log_type, crawler, topic_name, group_id, env):
             # 长轮询消费消息。
             recv_msgs = consumer.consume_message(batch, wait_seconds)
             for msg in recv_msgs:
+                xng_play_start_time = int(time.time())
                 Common.logger(log_type, crawler).info(f"Receive\n"
                                                       f"MessageId:{msg.message_id}\n"
                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
@@ -78,10 +79,13 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                         rule_dict=rule_dict,
                                                         our_uid=our_uid,
                                                         env=env)
-                Common.del_logs(log_type, crawler)
+                # Common.del_logs(log_type, crawler)
                 Common.logger(log_type, crawler).info('抓取一轮结束\n')
                 Common.logging(log_type, crawler, env, '抓取一轮结束\n')
-
+                xng_play_end_time = int(time.time())
+                xng_play_duration = xng_play_end_time - xng_play_start_time
+                Common.logger(log_type, crawler).info(f"duration {xng_play_duration}")
+                Common.logging(log_type, crawler, env, f"duration {xng_play_duration}")
         except MQExceptionBase as err:
             # Topic中没有消息可消费。
             if err.type == "MessageNotExist":

+ 5 - 0
xigua/xigua_main/run_xg_author.py

@@ -32,6 +32,7 @@ def main(log_type, crawler, topic_name, group_id, env):
             # 长轮询消费消息。
             recv_msgs = consumer.consume_message(batch, wait_seconds)
             for msg in recv_msgs:
+                xg_author_start_time = int(time.time())
                 Common.logger(log_type, crawler).info(f"Receive\n"
                                                       f"MessageId:{msg.message_id}\n"
                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
@@ -76,6 +77,10 @@ def main(log_type, crawler, topic_name, group_id, env):
                 Common.del_logs(log_type, crawler)
                 Common.logger(log_type, crawler).info('抓取一轮结束\n')
                 Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+                xg_author_end_time = int(time.time())
+                xg_author_duration = xg_author_end_time - xg_author_start_time
+                Common.logger(log_type, crawler).info(f"duration {xg_author_duration}")
+                Common.logging(log_type, crawler, env, f"duration {xg_author_duration}")
 
         except MQExceptionBase as err:
             # Topic中没有消息可消费。

+ 5 - 1
xigua/xigua_main/run_xg_recommend.py

@@ -36,6 +36,7 @@ def main(log_type, crawler, topic_name, group_id, env):
             # 长轮询消费消息。
             recv_msgs = consumer.consume_message(batch, wait_seconds)
             for msg in recv_msgs:
+                xg_recommend_start_time = int(time.time())
                 Common.logger(log_type, crawler).info(f"Receive\n"
                                                       f"MessageId:{msg.message_id}\n"
                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
@@ -85,7 +86,10 @@ def main(log_type, crawler, topic_name, group_id, env):
                 # Common.del_logs(log_type, crawler)
                 Common.logger(log_type, crawler).info('抓取一轮结束\n')
                 Common.logging(log_type, crawler, env, '抓取一轮结束\n')
-
+                xg_recommend_end_time = int(time.time())
+                xg_recommend_duration = xg_recommend_end_time - xg_recommend_start_time
+                Common.logger(log_type, crawler).info(f"duration {xg_recommend_duration}")
+                Common.logging(log_type, crawler, env, f"duration {xg_recommend_duration}")
         except MQExceptionBase as err:
             # Topic中没有消息可消费。
             if err.type == "MessageNotExist":

+ 6 - 1
xigua/xigua_main/run_xg_search.py

@@ -32,6 +32,7 @@ def main(log_type, crawler, topic_name, group_id, env):
             # 长轮询消费消息。
             recv_msgs = consumer.consume_message(batch, wait_seconds)
             for msg in recv_msgs:
+                xg_search_start_time = int(time.time())
                 Common.logger(log_type, crawler).info(f"Receive\n"
                                                       f"MessageId:{msg.message_id}\n"
                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
@@ -75,9 +76,13 @@ def main(log_type, crawler, topic_name, group_id, env):
                                                         env=env)
                 os.system("ps aux | grep Chrome | grep -v grep | awk '{print $2}' | xargs kill -9")
                 os.system("ps aux | grep chromedriver | grep -v grep | awk '{print $2}' | xargs kill -9")
-                Common.del_logs(log_type, crawler)
+                # Common.del_logs(log_type, crawler)
                 Common.logger(log_type, crawler).info('抓取一轮结束\n')
                 Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+                xg_search_end_time = int(time.time())
+                xg_search_duration = xg_search_end_time - xg_search_start_time
+                Common.logger(log_type, crawler).info(f"duration {xg_search_duration}")
+                Common.logging(log_type, crawler, env, f"duration {xg_search_duration}")
 
         except MQExceptionBase as err:
             # Topic中没有消息可消费。

+ 183 - 0
xigua/xigua_main/run_xgms_recommend.py

@@ -0,0 +1,183 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/27
+import argparse
+import random
+import time
+
+from mq_http_sdk.mq_client import *
+from mq_http_sdk.mq_consumer import *
+from mq_http_sdk.mq_exception import MQExceptionBase
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.public import get_consumer, ack_message, task_fun_mq
+from common.scheduling_db import MysqlHelper
+from xigua.xigua_recommend.xgms_recommend import XiguaRecommend
+
+
+def main(log_type, crawler, topic_name, group_id, env):
+    consumer = get_consumer(topic_name, group_id)
+    # 长轮询表示如果Topic没有消息,则客户端请求会在服务端挂起30秒,30秒内如果有消息可以消费则立即返回响应。
+    # 长轮询时间30秒(最多可设置为30秒)。
+    wait_seconds = 30
+    # 一次最多消费1条(最多可设置为16条)。
+    batch = 1
+    Common.logger(log_type, crawler).info(f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                          f'WaitSeconds:{wait_seconds}\n'
+                                          f'TopicName:{topic_name}\n'
+                                          f'MQConsumer:{group_id}')
+    Common.logging(log_type, crawler, env, f'{10 * "="}Consume And Ack Message From Topic{10 * "="}\n'
+                                           f'WaitSeconds:{wait_seconds}\n'
+                                           f'TopicName:{topic_name}\n'
+                                           f'MQConsumer:{group_id}')
+    while True:
+        try:
+            # 长轮询消费消息。
+            recv_msgs = consumer.consume_message(batch, wait_seconds)
+            for msg in recv_msgs:
+                xg_recommend_start_time = int(time.time())
+                Common.logger(log_type, crawler).info(f"Receive\n"
+                                                      f"MessageId:{msg.message_id}\n"
+                                                      f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                      f"MessageTag:{msg.message_tag}\n"
+                                                      f"ConsumedTimes:{msg.consumed_times}\n"
+                                                      f"PublishTime:{msg.publish_time}\n"
+                                                      f"Body:{msg.message_body}\n"
+                                                      f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                      f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                      f"Properties:{msg.properties}")
+                Common.logging(log_type, crawler, env, f"Receive\n"
+                                                       f"MessageId:{msg.message_id}\n"
+                                                       f"MessageBodyMD5:{msg.message_body_md5}\n"
+                                                       f"MessageTag:{msg.message_tag}\n"
+                                                       f"ConsumedTimes:{msg.consumed_times}\n"
+                                                       f"PublishTime:{msg.publish_time}\n"
+                                                       f"Body:{msg.message_body}\n"
+                                                       f"NextConsumeTime:{msg.next_consume_time}\n"
+                                                       f"ReceiptHandle:{msg.receipt_handle}\n"
+                                                       f"Properties:{msg.properties}")
+                # ack_mq_message
+                ack_message(log_type=log_type, crawler=crawler, recv_msgs=recv_msgs, consumer=consumer)
+
+                # 解析 task_dict
+                task_dict = task_fun_mq(msg.message_body)['task_dict']
+                rule_dict = task_fun_mq(msg.message_body)['rule_dict']
+                task_id = task_dict['id']
+                select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+                user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+                our_uid_list = []
+                for user in user_list:
+                    our_uid_list.append(user["uid"])
+                our_uid = random.choice(our_uid_list)
+                Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+                Common.logging(log_type, crawler, env, f"调度任务:{task_dict}")
+                Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
+                Common.logging(log_type, crawler, env, f"抓取规则:{rule_dict}")
+                Common.logger(log_type, crawler).info(f"共{len(user_list)}个用户:\n{user_list}\n")
+                Common.logging(log_type, crawler, env, f"共{len(user_list)}个用户:\n{user_list}\n")
+                Common.logger(log_type, crawler).info(f'开始抓取:{task_dict["taskName"]}\n')
+                Common.logging(log_type, crawler, env, f'开始抓取:{task_dict["taskName"]}\n')
+                XiguaRecommend.get_videoList(log_type=log_type,
+                                             crawler=crawler,
+                                             rule_dict=rule_dict,
+                                             our_uid=our_uid,
+                                             env=env)
+                # Common.del_logs(log_type, crawler)
+                Common.logger(log_type, crawler).info('抓取一轮结束\n')
+                Common.logging(log_type, crawler, env, '抓取一轮结束\n')
+                xg_recommend_end_time = int(time.time())
+                xg_recommend_duration = xg_recommend_end_time - xg_recommend_start_time
+                Common.logger(log_type, crawler).info(f"duration {xg_recommend_duration}")
+                Common.logging(log_type, crawler, env, f"duration {xg_recommend_duration}")
+        except MQExceptionBase as err:
+            # Topic中没有消息可消费。
+            if err.type == "MessageNotExist":
+                Common.logger(log_type, crawler).info(f"No new message! RequestId:{err.req_id}\n")
+                Common.logging(log_type, crawler, env, f"No new message! RequestId:{err.req_id}\n")
+                continue
+
+            Common.logger(log_type, crawler).info(f"Consume Message Fail! Exception:{err}\n")
+            Common.logging(log_type, crawler, env, f"Consume Message Fail! Exception:{err}\n")
+            time.sleep(2)
+            continue
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
+    parser.add_argument('--log_type', type=str)  ## 添加参数,注明参数类型
+    parser.add_argument('--crawler')  ## 添加参数
+    parser.add_argument('--topic_name')  ## 添加参数
+    parser.add_argument('--group_id')  ## 添加参数
+    parser.add_argument('--env')  ## 添加参数
+    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         topic_name=args.topic_name,
+         group_id=args.group_id,
+         env=args.env)

+ 323 - 0
xigua/xigua_recommend/xgms_recommend.py

@@ -0,0 +1,323 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/7/10
+import base64
+import datetime
+import json
+import os
+import random
+import string
+import sys
+import time
+import requests
+import urllib3
+import re
+from requests.adapters import HTTPAdapter
+from selenium import webdriver
+from selenium.webdriver import DesiredCapabilities
+from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.chrome.webdriver import WebDriver
+from selenium.webdriver.common.by import By
+sys.path.append(os.getcwd())
+from common.mq import MQ
+from common.feishu import Feishu
+from common.public import download_rule, get_config_from_mysql
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from common.userAgent import get_random_user_agent
+
+
+class XiguaRecommend:
+    platform = "xigua"
+
+    @classmethod
+    def random_signature(cls):
+        src_digits = string.digits  # string_数字
+        src_uppercase = string.ascii_uppercase  # string_大写字母
+        src_lowercase = string.ascii_lowercase  # string_小写字母
+        digits_num = random.randint(1, 6)
+        uppercase_num = random.randint(1, 26 - digits_num - 1)
+        lowercase_num = 26 - (digits_num + uppercase_num)
+        password = random.sample(src_digits, digits_num) + random.sample(src_uppercase, uppercase_num) + random.sample(
+            src_lowercase, lowercase_num)
+        random.shuffle(password)
+        new_password = 'AAAAAAAAAA' + ''.join(password)[10:-4] + 'AAAB'
+        new_password_start = new_password[0:18]
+        new_password_end = new_password[-7:]
+        if new_password[18] == '8':
+            new_password = new_password_start + 'w' + new_password_end
+        elif new_password[18] == '9':
+            new_password = new_password_start + 'x' + new_password_end
+        elif new_password[18] == '-':
+            new_password = new_password_start + 'y' + new_password_end
+        elif new_password[18] == '.':
+            new_password = new_password_start + 'z' + new_password_end
+        else:
+            new_password = new_password_start + 'y' + new_password_end
+        return new_password
+
+    @classmethod
+    def get_video_url(cls, video_info):
+        video_url_dict = {}
+
+        video_resource = video_info.get('videoResource', {})
+        dash_120fps = video_resource.get('dash_120fps', {})
+        normal = video_resource.get('normal', {})
+
+        # 从dash_120fps和normal字典中获取video_list字典
+        video_list = dash_120fps.get('video_list', {}) or normal.get('video_list', {})
+        # 获取video_list字典中的video_4、video_3、video_2或video_1的值。如果找到非空视频URL,则将其赋值给变量video_url。否则,将赋值为空字符串。
+        video = video_list.get('video_4') or video_list.get('video_3') or video_list.get('video_2') or video_list.get('video_1')
+
+        video_url = video.get('backup_url_1', '') if video else ''
+        audio_url = video.get('backup_url_1', '') if video else ''
+        video_width = video.get('vwidth', 0) if video else 0
+        video_height = video.get('vheight', 0) if video else 0
+
+        video_url = re.sub(r'[^a-zA-Z0-9+/=]', '', video_url)  # 从视频URL中删除特殊字符
+        audio_url = re.sub(r'[^a-zA-Z0-9+/=]', '', audio_url)  # 从音频URL中删除特殊字符
+
+        video_url = base64.b64decode(video_url).decode('utf8')  # 解码视频URL
+        audio_url = base64.b64decode(audio_url).decode('utf8')  # 解码音频URL
+
+        video_url_dict["video_url"] = video_url
+        video_url_dict["audio_url"] = audio_url
+        video_url_dict["video_width"] = video_width
+        video_url_dict["video_height"] = video_height
+
+        return video_url_dict
+
+    @classmethod
+    def get_comment_cnt(cls, item_id):
+        url = "https://www.ixigua.com/tlb/comment/article/v5/tab_comments/?"
+        params = {
+            "tab_index": "0",
+            "count": "10",
+            "offset": "10",
+            "group_id": str(item_id),
+            "item_id": str(item_id),
+            "aid": "1768",
+            "msToken": "50-JJObWB07HfHs-BMJWT1eIDX3G-6lPSF_i-QwxBIXE9VVa-iN0jbEXR5pG2DKjXBmP299n6ZTuXzY-GAy968CCvouSAYIS4GzvGQT3pNlKNejr5G4-1g==",
+            "X-Bogus": "DFSzswVOyGtANVeWtCLMqR/F6q9U",
+            "_signature": cls.random_signature(),
+        }
+        headers = {
+            'authority': 'www.ixigua.com',
+            'accept': 'application/json, text/plain, */*',
+            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
+            'cache-control': 'no-cache',
+            'cookie': 'MONITOR_WEB_ID=67cb5099-a022-4ec3-bb8e-c4de6ba51dd0; passport_csrf_token=72b2574f3c99f8ba670e42df430218fd; passport_csrf_token_default=72b2574f3c99f8ba670e42df430218fd; sid_guard=c7472b508ea631823ba765a60cf8757f%7C1680867422%7C3024002%7CFri%2C+12-May-2023+11%3A37%3A04+GMT; uid_tt=c13f47d51767f616befe32fb3e9f485a; uid_tt_ss=c13f47d51767f616befe32fb3e9f485a; sid_tt=c7472b508ea631823ba765a60cf8757f; sessionid=c7472b508ea631823ba765a60cf8757f; sessionid_ss=c7472b508ea631823ba765a60cf8757f; sid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; ssid_ucp_v1=1.0.0-KGUzNWYxNmRkZGJiZjgxY2MzZWNkMTEzMTkwYjY1Yjg5OTY5NzVlNmMKFQiu3d-eqQIQ3oDAoQYYGCAMOAhACxoCaGwiIGM3NDcyYjUwOGVhNjMxODIzYmE3NjVhNjBjZjg3NTdm; odin_tt=b893608d4dde2e1e8df8cd5d97a0e2fbeafc4ca762ac72ebef6e6c97e2ed19859bb01d46b4190ddd6dd17d7f9678e1de; SEARCH_CARD_MODE=7168304743566296612_0; support_webp=true; support_avif=false; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; tt_scid=7Pux7s634-z8DYvCM20y7KigwH5u7Rh6D9C-RROpnT.aGMEcz6Vsxp.oai47wJqa4f86; ttwid=1%7CHHtv2QqpSGuSu8r-zXF1QoWsvjmNi1SJrqOrZzg-UCY%7C1683858689%7Ca5223fe1500578e01e138a0d71d6444692018296c4c24f5885af174a65873c95; ixigua-a-s=3; msToken=50-JJObWB07HfHs-BMJWT1eIDX3G-6lPSF_i-QwxBIXE9VVa-iN0jbEXR5pG2DKjXBmP299n6ZTuXzY-GAy968CCvouSAYIS4GzvGQT3pNlKNejr5G4-1g==; __ac_nonce=0645dcbf0005064517440; __ac_signature=_02B4Z6wo00f01FEGmAwAAIDBKchzCGqn-MBRJpyAAHAjieFC5GEg6gGiwz.I4PRrJl7f0GcixFrExKmgt6QI1i1S-dQyofPEj2ugWTCnmKUdJQv-wYuDofeKNe8VtMtZq2aKewyUGeKU-5Ud21; ixigua-a-s=3',
+            'pragma': 'no-cache',
+            'referer': f'https://www.ixigua.com/{item_id}?logTag=3c5aa86a8600b9ab8540',
+            'sec-ch-ua': '"Microsoft Edge";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'tt-anti-token': 'cBITBHvmYjEygzv-f9c78c1297722cf1f559c74b084e4525ce4900bdcf9e8588f20cc7c2e3234422',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35',
+            'x-secsdk-csrf-token': '000100000001f8e733cf37f0cd255a51aea9a81ff7bc0c09490cfe41ad827c3c5c18ec809279175e4d9f5553d8a5'
+        }
+        urllib3.disable_warnings()
+        s = requests.session()
+        # max_retries=3 重试3次
+        s.mount('http://', HTTPAdapter(max_retries=3))
+        s.mount('https://', HTTPAdapter(max_retries=3))
+        response = s.get(url=url, headers=headers, params=params, verify=False, proxies=Common.tunnel_proxies(),
+                         timeout=5)
+        response.close()
+        if response.status_code != 200 or 'total_number' not in response.json() or response.json() == {}:
+            return 0
+        return response.json().get("total_number", 0)
+
+    # 获取视频详情
+    @classmethod
+    def get_video_info(cls, log_type, crawler, item_id):
+        url = 'https://www.ixigua.com/api/mixVideo/information?'
+        headers = {
+            "accept-encoding": "gzip, deflate",
+            "accept-language": "zh-CN,zh-Hans;q=0.9",
+            "user-agent": get_random_user_agent('pc'),
+            "referer": "https://www.ixigua.com/7102614741050196520?logTag=0531c88ac04f38ab2c62",
+        }
+        params = {
+            'mixId': str(item_id),
+            'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfC'
+                       'NVVIOBNjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
+            'X-Bogus': 'DFSzswVupYTANCJOSBk0P53WxM-r',
+            '_signature': '_02B4Z6wo0000119LvEwAAIDCuktNZ0y5wkdfS7jAALThuOR8D9yWNZ.EmWHKV0WSn6Px'
+                          'fPsH9-BldyxVje0f49ryXgmn7Tzk-swEHNb15TiGqa6YF.cX0jW8Eds1TtJOIZyfc9s5emH7gdWN94',
+        }
+        cookies = {
+            'ixigua-a-s': '1',
+            'msToken': 'IlG0wd0Pylyw9ghcYiB2YseUmTwrsrqqhXrbIcsSaTcLTJyVlbYJzk20zw3UO-CfrfCNVVIOB'
+                       'NjIl7vfBoxnVUwO9ZyzAI3umSKsT5-pef_RRfQCJwmA',
+            'ttwid': '1%7C_yXQeHWwLZgCsgHClOwTCdYSOt_MjdOkgnPIkpi-Sr8%7C1661241238%7Cf57d0c5ef3f1d7'
+                     '6e049fccdca1ac54887c34d1f8731c8e51a49780ff0ceab9f8',
+            'tt_scid': 'QZ4l8KXDG0YAEaMCSbADdcybdKbUfG4BC6S4OBv9lpRS5VyqYLX2bIR8CTeZeGHR9ee3',
+            'MONITOR_WEB_ID': '0a49204a-7af5-4e96-95f0-f4bafb7450ad',
+            '__ac_nonce': '06304878000964fdad287',
+            '__ac_signature': '_02B4Z6wo00f017Rcr3AAAIDCUVxeW1tOKEu0fKvAAI4cvoYzV-wBhq7B6D8k0no7lb'
+                              'FlvYoinmtK6UXjRIYPXnahUlFTvmWVtb77jsMkKAXzAEsLE56m36RlvL7ky.M3Xn52r9t1IEb7IR3ke8',
+            'ttcid': 'e56fabf6e85d4adf9e4d91902496a0e882',
+            '_tea_utm_cache_1300': 'undefined',
+            'support_avif': 'false',
+            'support_webp': 'false',
+            'xiguavideopcwebid': '7134967546256016900',
+            'xiguavideopcwebid.sig': 'xxRww5R1VEMJN_dQepHorEu_eAc',
+        }
+        urllib3.disable_warnings()
+        s = requests.session()
+        # max_retries=3 重试3次
+        s.mount('http://', HTTPAdapter(max_retries=3))
+        s.mount('https://', HTTPAdapter(max_retries=3))
+        response = s.get(url=url, headers=headers, params=params, cookies=cookies, verify=False,
+                         proxies=Common.tunnel_proxies(), timeout=5)
+        response.close()
+        if response.status_code != 200 or 'data' not in response.json() or response.json()['data'] == {}:
+            Common.logger(log_type, crawler).warning(f"get_video_info:{response.status_code}, {response.text}\n")
+            return None
+        else:
+            video_info = response.json()['data'].get("gidInformation", {}).get("packerData", {}).get("video", {})
+            if video_info == {}:
+                return None
+            video_dict = {
+                "video_title": video_info.get("title", ""),
+                "video_id": video_info.get("videoResource", {}).get("vid", ""),
+                "gid": str(item_id),
+                "play_cnt": int(video_info.get("video_watch_count", 0)),
+                "like_cnt": int(video_info.get("video_like_count", 0)),
+                "comment_cnt": int(cls.get_comment_cnt(item_id)),
+                "share_cnt": 0,
+                "favorite_cnt": 0,
+                "duration": int(video_info.get("video_duration", 0)),
+                "video_width": int(cls.get_video_url(video_info)["video_width"]),
+                "video_height": int(cls.get_video_url(video_info)["video_height"]),
+                "publish_time_stamp": int(video_info.get("video_publish_time", 0)),
+                "publish_time_str": time.strftime("%Y-%m-%d %H:%M:%S",
+                                                  time.localtime(int(video_info.get("video_publish_time", 0)))),
+                "user_name": video_info.get("user_info", {}).get("name", ""),
+                "user_id": str(video_info.get("user_info", {}).get("user_id", "")),
+                "avatar_url": str(video_info.get("user_info", {}).get("avatar_url", "")),
+                "cover_url": video_info.get("poster_url", ""),
+                "audio_url": cls.get_video_url(video_info)["audio_url"],
+                "video_url": cls.get_video_url(video_info)["video_url"],
+                "session": f"xigua-search-{int(time.time())}"
+            }
+            return video_dict
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def quit(cls, log_type, crawler, env, driver: WebDriver):
+        Common.logger(log_type, crawler).info("退出浏览器")
+        Common.logging(log_type, crawler, env, "退出浏览器")
+        driver.quit()
+        quit_cmd = "ps aux | grep Chrome | grep -v grep | awk '{print $2}' | xargs kill -9"
+        os.system(quit_cmd)
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
+        mq = MQ(topic_name="topic_crawler_etl_" + env)
+        Common.logger(log_type, crawler).info("启动 Chrome 浏览器")
+        Common.logging(log_type, crawler, env, "启动 Chrome 浏览器")
+        # kill all running Chrome processes
+        quit_cmd = "ps aux | grep Chrome | grep -v grep | awk '{print $2}' | xargs kill -9"
+        os.system(quit_cmd)
+        time.sleep(1)
+        # launch Chrome with remote debugging on port 12306
+        cmd = 'open -a "Google Chrome" --args --remote-debugging-port=12306'
+        os.system(cmd)
+        # set capabilities: enable performance logging so network requests can be inspected
+        ca = DesiredCapabilities.CHROME
+        ca["goog:loggingPrefs"] = {"performance": "ALL"}
+        # configure the chromedriver path
+        if env == "dev":
+            chromedriver = "/Users/wangkun/Downloads/chromedriver/chromedriver_v114/chromedriver"
+        else:
+            # chromedriver = "/usr/bin/chromedriver"
+            chromedriver = "/Users/piaoquan/Downloads/chromedriver/chromedriver_v114/chromedriver"
+        # initialize browser options
+        browser = webdriver.ChromeOptions()
+        # browser.add_argument(f'--proxy-server={Common.tunnel_proxies()}')  # proxy IP address and port
+        browser.add_experimental_option("debuggerAddress", "127.0.0.1:12306")
+        # initialize the driver
+        driver = webdriver.Chrome(desired_capabilities=ca, options=browser, service=Service(chromedriver))
+        driver.implicitly_wait(10)
+        Common.logger(log_type, crawler).info("打开西瓜推荐页")
+        Common.logging(log_type, crawler, env, "打开西瓜推荐页")
+        driver.get(f"https://www.ixigua.com/")
+        time.sleep(2)
+
+        # check login status
+        if len(driver.find_elements(By.XPATH, '//*[@class="BU-Component-Header-Avatar__image"]')) == 0:
+            Common.logger(log_type, crawler).info("登录失效")
+            Common.logging(log_type, crawler, env, "登录失效")
+            driver.get_screenshot_as_file(f"./{crawler}/photos/logon_err.png")
+            # login expired: send an alert
+            if 20 >= datetime.datetime.now().hour >= 10:
+                Feishu.bot(log_type, crawler, "西瓜推荐,登录失效")
+            return
+
+        videoList_elements = driver.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard HorizontalChannelBlockList__item"]')
+        if len(videoList_elements) == 0:
+            Common.logger(log_type, crawler).info("到底啦~~~~~~~~~~\n")
+            Common.logging(log_type, crawler, env, "到底啦~~~~~~~~~~\n")
+            cls.quit(log_type, crawler, env, driver)
+            return
+        for i, video_element in enumerate(videoList_elements):
+            Common.logger(log_type, crawler).info(f"正在抓取第{i+1}条视频")
+            Common.logging(log_type, crawler, env, f"正在抓取第{i+1}条视频")
+            item_id = video_element.find_elements(By.XPATH, '//*[@class="HorizontalFeedCard__coverWrapper disableZoomAnimation"]')[i].get_attribute("href")
+            item_id = item_id.replace("https://www.ixigua.com/", "").replace("?&", "")
+            Common.logger(log_type, crawler).info(f"item_id:{item_id}")
+            video_dict = cls.get_video_info(log_type, crawler, item_id)
+            if video_dict is None:
+                Common.logger(log_type, crawler).info("无效视频\n")
+                Common.logging(log_type, crawler, env, "无效视频\n")
+                continue
+            for k, v in video_dict.items():
+                Common.logger(log_type, crawler).info(f"{k}:{v}")
+            Common.logging(log_type, crawler, env, f"{video_dict}")
+
+            if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                Common.logging(log_type, crawler, env, "不满足抓取规则\n")
+            elif any(str(word) in video_dict["video_title"]
+                     for word in get_config_from_mysql(log_type=log_type,
+                                                       source=crawler,
+                                                       env=env,
+                                                       text="filter",
+                                                       action="")):
+                Common.logger(log_type, crawler).info('已中过滤词\n')
+                Common.logging(log_type, crawler, env, '已中过滤词\n')
+            elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                Common.logger(log_type, crawler).info('视频已下载\n')
+                Common.logging(log_type, crawler, env, '视频已下载\n')
+            else:
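+                # rename and augment fields into the schema the ETL consumer expects, then publish to MQ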
+                video_dict["out_user_id"] = video_dict["user_id"]
+                video_dict["platform"] = crawler
+                video_dict["strategy"] = log_type
+                video_dict["strategy_type"] = "ms"
+                video_dict["out_video_id"] = video_dict["video_id"]
+                video_dict["width"] = video_dict["video_width"]
+                video_dict["height"] = video_dict["video_height"]
+                video_dict["crawler_rule"] = json.dumps(rule_dict)
+                video_dict["user_id"] = our_uid
+                video_dict["publish_time"] = video_dict["publish_time_str"]
+                mq.send_msg(video_dict)
+        cls.quit(log_type, crawler, env, driver)
+
+
+if __name__ == "__main__":
+    XiguaRecommend.get_videoList(log_type="recommend2",
+                                 crawler="xigua",
+                                 our_uid=6267140,
+                                 rule_dict={},
+                                 env="dev")
+    pass
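
The get_videoList code above enables DevTools performance logging ("goog:loggingPrefs") and attaches Selenium to a Chrome that was launched with --remote-debugging-port=12306, but nothing in this excerpt reads those logs back. Below is a minimal sketch of that read-back pattern, assuming Selenium 4; the function name, the chromedriver path argument, and the Network.responseReceived filter are illustrative and not part of this commit.

```python
import json

from selenium import webdriver
from selenium.webdriver.chrome.service import Service


def attach_and_dump_requests(chromedriver_path, port=12306):
    """Attach to a Chrome started with --remote-debugging-port and print captured network responses."""
    options = webdriver.ChromeOptions()
    # attach to the already-running Chrome instance instead of launching a new one
    options.add_experimental_option("debuggerAddress", f"127.0.0.1:{port}")
    # ask chromedriver to collect DevTools events so they can be read via get_log()
    options.set_capability("goog:loggingPrefs", {"performance": "ALL"})
    driver = webdriver.Chrome(options=options, service=Service(chromedriver_path))
    driver.get("https://www.ixigua.com/")
    for entry in driver.get_log("performance"):
        # every entry wraps a JSON-encoded DevTools event
        message = json.loads(entry["message"])["message"]
        if message.get("method") == "Network.responseReceived":
            print(message["params"]["response"]["url"])
    driver.quit()
```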

+ 65 - 63
zhiqingtiantiankan/zhiqingtiantiankan_recommend/zhiqingtiantiankan_recommend.py

@@ -49,69 +49,71 @@ class ZhiqingtiantiankanRecommend:
 
     @classmethod
     def start_wechat(cls, log_type, crawler, env):
-        # try:
-        Common.logger(log_type, crawler).info('启动微信')
-        if env == "dev":
-            chromedriverExecutable = '/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver'
-        else:
-            chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
-        caps = {
-            "platformName": "Android",  # 手机操作系统 Android / iOS
-            "deviceName": "Android",  # 连接的设备名(模拟器或真机),安卓可以随便写
-            "platforVersion": "11",  # 手机对应的系统版本(Android 11)
-            "appPackage": "com.tencent.mm",  # 被测APP的包名,乐活圈 Android
-            "appActivity": ".ui.LauncherUI",  # 启动的Activity名
-            "autoGrantPermissions": "true",  # 让 appium 自动授权 base 权限,
-            # 如果 noReset 为 True,则该条不生效(该参数为 Android 独有),对应的值为 True 或 False
-            "unicodekeyboard": True,  # 使用自带输入法,输入中文时填True
-            "resetkeyboard": True,  # 执行完程序恢复原来输入法
-            "noReset": True,  # 不重置APP
-            "printPageSourceOnFailure": True,  # 找不到元素时,appium log 会完整记录当前页面的 pagesource
-            "newCommandTimeout": 6000,  # 初始等待时间
-            "automationName": "UiAutomator2",  # 使用引擎,默认为 Appium,
-            # 其中 Appium、UiAutomator2、Selendroid、Espresso 用于 Android,XCUITest 用于 iOS
-            "showChromedriverLog": True,
-            'enableWebviewDetailsCollection': True,
-            'setWebContentsDebuggingEnabled': True,
-            'recreateChromeDriverSessions': True,
-            'chromedriverExecutable': chromedriverExecutable,
-            "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
-            # "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
-            'browserName': ''
-        }
-        driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
-        driver.implicitly_wait(20)
-        # 向下滑动页面,展示出小程序选择面板
-        for i in range(120):
-            try:
-                # 发现微信消息 TAB,代表微信已启动成功
-                if driver.find_elements(By.ID, 'com.tencent.mm:id/f2s'):
-                    break
-                # 发现并关闭系统菜单栏
-                elif driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view'):
-                    Common.logger(log_type, crawler).info('发现并关闭系统下拉菜单栏')
-                    driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view').click()
-                else:
-                    pass
-            except NoSuchElementException:
-                time.sleep(1)
-        Common.logger(log_type, crawler).info('下滑,展示小程序选择面板')
-        size = driver.get_window_size()
-        driver.swipe(int(size['width'] * 0.5), int(size['height'] * 0.2), int(size['width'] * 0.5),
-                     int(size['height'] * 0.8), 200)
-        # 打开小程序"知青天天看"
-        time.sleep(5)
-        Common.logger(log_type, crawler).info('打开小程序"知青天天看"')
-        driver.find_elements(By.XPATH, '//*[@text="知青天天看"]')[-1].click()
-
-        # 获取视频信息
-        time.sleep(5)
-        cls.get_videoList(log_type, crawler, driver, env)
-
-        # 退出微信
-        cls.quit(log_type, crawler, driver)
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error('start_wechat异常:{}\n', e)
+        try:
+            Common.logger(log_type, crawler).info('启动微信')
+            if env == "dev":
+                chromedriverExecutable = '/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver'
+            else:
+                chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
+            caps = {
+                "platformName": "Android",  # 手机操作系统 Android / iOS
+                "deviceName": "Android",  # 连接的设备名(模拟器或真机),安卓可以随便写
+                "platformVersion": "11",  # 手机对应的系统版本(Android 11)
+                "appPackage": "com.tencent.mm",  # 被测APP的包名,乐活圈 Android
+                "appActivity": ".ui.LauncherUI",  # 启动的Activity名
+                "autoGrantPermissions": "true",  # 让 appium 自动授权 base 权限,
+                # 如果 noReset 为 True,则该条不生效(该参数为 Android 独有),对应的值为 True 或 False
+                "unicodekeyboard": True,  # 使用自带输入法,输入中文时填True
+                "resetkeyboard": True,  # 执行完程序恢复原来输入法
+                "noReset": True,  # 不重置APP
+                "printPageSourceOnFailure": True,  # 找不到元素时,appium log 会完整记录当前页面的 pagesource
+                "newCommandTimeout": 6000,  # 初始等待时间
+                "automationName": "UiAutomator2",  # 使用引擎,默认为 Appium,
+                # 其中 Appium、UiAutomator2、Selendroid、Espresso 用于 Android,XCUITest 用于 iOS
+                "showChromedriverLog": True,
+                'enableWebviewDetailsCollection': True,
+                'setWebContentsDebuggingEnabled': True,
+                'recreateChromeDriverSessions': True,
+                'chromedriverExecutable': chromedriverExecutable,
+                "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
+                # "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
+                'browserName': ''
+            }
+            driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
+            driver.implicitly_wait(20)
+            # 向下滑动页面,展示出小程序选择面板
+            for i in range(120):
+                try:
+                    # 发现微信消息 TAB,代表微信已启动成功
+                    if driver.find_elements(By.ID, 'com.tencent.mm:id/f2s'):
+                        break
+                    # 发现并关闭系统菜单栏
+                    elif driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view'):
+                        Common.logger(log_type, crawler).info('发现并关闭系统下拉菜单栏')
+                        driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view').click()
+                    else:
+                        pass
+                except NoSuchElementException:
+                    time.sleep(1)
+            Common.logger(log_type, crawler).info('下滑,展示小程序选择面板')
+            size = driver.get_window_size()
+            driver.swipe(int(size['width'] * 0.5), int(size['height'] * 0.2), int(size['width'] * 0.5),
+                         int(size['height'] * 0.8), 200)
+            # 打开小程序"知青天天看"
+            time.sleep(5)
+            Common.logger(log_type, crawler).info('打开小程序"知青天天看"')
+            driver.find_elements(By.XPATH, '//*[@text="知青天天看"]')[-1].click()
+
+            # 获取视频信息
+            time.sleep(5)
+            cls.get_videoList(log_type, crawler, driver, env)
+
+            # 退出微信
+            cls.quit(log_type, crawler, driver)
+        except Exception as e:
+            Common.logger(log_type, crawler).error('start_wechat异常:{}\n', e)
+            cmd = "adb kill-server && adb start-server"
+            os.system(cmd)
 
     # 退出 APP
     @classmethod
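
This hunk and the zhongmiaoyinxin one below add the same recovery step: when start_wechat fails, the adb server is restarted so the next round can reconnect to the device. If the pattern keeps spreading, it could live in one shared retry helper. The sketch below assumes exactly that; the helper name, retry count, and the env value in the usage comment are hypothetical, not code from this commit.

```python
import os
import time


def run_with_adb_recovery(task, retries=3, wait_seconds=5):
    """Run an Appium task; on failure restart the adb server before the next attempt."""
    for attempt in range(1, retries + 1):
        try:
            return task()
        except Exception as e:
            print(f"start_wechat attempt {attempt} failed: {e}")
            # a wedged adb connection is the usual culprit, so bounce the server
            os.system("adb kill-server && adb start-server")
            time.sleep(wait_seconds)
    return None


# usage sketch, reusing the crawler class from this commit:
# run_with_adb_recovery(lambda: ZhiqingtiantiankanRecommend.start_wechat("recommend", "zhiqingtiantiankan", "prod"))
```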

+ 61 - 59
zhongmiaoyinxin/zhongmiaoyinxin_recommend/zhongmiaoyinxin_recommend.py

@@ -49,67 +49,69 @@ class ZhongmiaoyinxinRecommend:
 
     @classmethod
     def start_wechat(cls, log_type, crawler, env):
-        # try:
-        if env == "dev":
-            chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver"
-        else:
-            chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
-        Common.logger(log_type, crawler).info('启动微信')
-        caps = {
-            "platformName": "Android",  # 手机操作系统 Android / iOS
-            "deviceName": "a0a65126",  # 连接的设备名(模拟器或真机),安卓可以随便写
-            "platforVersion": "11",  # 手机对应的系统版本(Android 11)
-            "appPackage": "com.tencent.mm",  # 被测APP的包名,乐活圈 Android
-            "appActivity": ".ui.LauncherUI",  # 启动的Activity名
-            "autoGrantPermissions": "true",  # 让 appium 自动授权 base 权限,
-            # 如果 noReset 为 True,则该条不生效(该参数为 Android 独有),对应的值为 True 或 False
-            "unicodekeyboard": True,  # 使用自带输入法,输入中文时填True
-            "resetkeyboard": True,  # 执行完程序恢复原来输入法
-            "noReset": True,  # 不重置APP
-            "printPageSourceOnFailure": True,  # 找不到元素时,appium log 会完整记录当前页面的 pagesource
-            "newCommandTimeout": 6000,  # 初始等待时间
-            "automationName": "UiAutomator2",  # 使用引擎,默认为 Appium,
-            # 其中 Appium、UiAutomator2、Selendroid、Espresso 用于 Android,XCUITest 用于 iOS
-            "showChromedriverLog": True,
-            'enableWebviewDetailsCollection': True,
-            'setWebContentsDebuggingEnabled': True,
-            'recreateChromeDriverSessions': True,
-            'chromedriverExecutable': chromedriverExecutable,
-            "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
-            # "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
-            'browserName': ''
-        }
-        driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
-        driver.implicitly_wait(20)
-        # 向下滑动页面,展示出小程序选择面板
-        for i in range(120):
-            try:
-                # 发现微信消息 TAB,代表微信已启动成功
-                if driver.find_elements(By.ID, 'com.tencent.mm:id/f2s'):
-                    break
-                # 发现并关闭系统菜单栏
-                elif driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view'):
-                    Common.logger(log_type, crawler).info('发现并关闭系统下拉菜单栏')
-                    driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view').click()
-                else:
-                    pass
-            except NoSuchElementException:
-                time.sleep(1)
-        Common.logger(log_type, crawler).info('下滑,展示小程序选择面板')
-        size = driver.get_window_size()
-        driver.swipe(int(size['width'] * 0.5), int(size['height'] * 0.2),
-                     int(size['width'] * 0.5), int(size['height'] * 0.8), 200)
-        # 打开小程序"众妙之上"
-        time.sleep(5)
-        Common.logger(log_type, crawler).info('打开小程序"西瓜说说"')
-        driver.find_elements(By.XPATH, '//*[@text="西瓜说说"]')[-1].click()
+        try:
+            if env == "dev":
+                chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver"
+            else:
+                chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
+            Common.logger(log_type, crawler).info('启动微信')
+            caps = {
+                "platformName": "Android",  # 手机操作系统 Android / iOS
+                "deviceName": "a0a65126",  # 连接的设备名(模拟器或真机),安卓可以随便写
+                "platformVersion": "11",  # 手机对应的系统版本(Android 11)
+                "appPackage": "com.tencent.mm",  # 被测APP的包名,乐活圈 Android
+                "appActivity": ".ui.LauncherUI",  # 启动的Activity名
+                "autoGrantPermissions": "true",  # 让 appium 自动授权 base 权限,
+                # 如果 noReset 为 True,则该条不生效(该参数为 Android 独有),对应的值为 True 或 False
+                "unicodekeyboard": True,  # 使用自带输入法,输入中文时填True
+                "resetkeyboard": True,  # 执行完程序恢复原来输入法
+                "noReset": True,  # 不重置APP
+                "printPageSourceOnFailure": True,  # 找不到元素时,appium log 会完整记录当前页面的 pagesource
+                "newCommandTimeout": 6000,  # 初始等待时间
+                "automationName": "UiAutomator2",  # 使用引擎,默认为 Appium,
+                # 其中 Appium、UiAutomator2、Selendroid、Espresso 用于 Android,XCUITest 用于 iOS
+                "showChromedriverLog": True,
+                'enableWebviewDetailsCollection': True,
+                'setWebContentsDebuggingEnabled': True,
+                'recreateChromeDriverSessions': True,
+                'chromedriverExecutable': chromedriverExecutable,
+                "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
+                # "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
+                'browserName': ''
+            }
+            driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
+            driver.implicitly_wait(20)
+            # 向下滑动页面,展示出小程序选择面板
+            for i in range(120):
+                try:
+                    # 发现微信消息 TAB,代表微信已启动成功
+                    if driver.find_elements(By.ID, 'com.tencent.mm:id/f2s'):
+                        break
+                    # 发现并关闭系统菜单栏
+                    elif driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view'):
+                        Common.logger(log_type, crawler).info('发现并关闭系统下拉菜单栏')
+                        driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view').click()
+                    else:
+                        pass
+                except NoSuchElementException:
+                    time.sleep(1)
+            Common.logger(log_type, crawler).info('下滑,展示小程序选择面板')
+            size = driver.get_window_size()
+            driver.swipe(int(size['width'] * 0.5), int(size['height'] * 0.2),
+                         int(size['width'] * 0.5), int(size['height'] * 0.8), 200)
+            # 打开小程序"众妙之上"
+            time.sleep(5)
+            Common.logger(log_type, crawler).info('打开小程序"西瓜说说"')
+            driver.find_elements(By.XPATH, '//*[@text="西瓜说说"]')[-1].click()
 
-        time.sleep(5)
-        cls.get_videoList(log_type, crawler, driver, env)
+            time.sleep(5)
+            cls.get_videoList(log_type, crawler, driver, env)
 
-        cls.quit(log_type, crawler, driver)
-        # except Exception as e:
-        #     Common.logger(log_type, crawler).error('start_wechat异常:{}\n', e)
+            cls.quit(log_type, crawler, driver)
+        except Exception as e:
+            Common.logger(log_type, crawler).error('start_wechat异常:{}\n', e)
+            cmd = "adb kill-server && adb start-server"
+            os.system(cmd)
 
     @classmethod
     def quit(cls, log_type, crawler, driver: WebDriver):
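
The 120-iteration polling loop (wait for the WeChat messages tab, dismiss the system pull-down if it appears) is repeated verbatim here and in zhiqingtiantiankan_recommend.py above. A possible shared helper is sketched below, using find_elements so no NoSuchElementException handling is needed; the helper name and timeout parameter are illustrative, not part of this commit.

```python
import time

from selenium.webdriver.common.by import By


def wait_for_wechat_home(driver, timeout=120):
    """Poll until the WeChat messages tab appears, dismissing the system pull-down menu if it covers the screen."""
    for _ in range(timeout):
        # the messages-tab id only exists once WeChat has finished launching
        if driver.find_elements(By.ID, 'com.tencent.mm:id/f2s'):
            return True
        dismiss = driver.find_elements(By.ID, 'com.android.systemui:id/dismiss_view')
        if dismiss:
            # a notification shade is in the way; close it and keep polling
            dismiss[0].click()
        time.sleep(1)
    return False
```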

Some files were not shown because too many files changed in this diff