
祝好事多磨 (zhuhaoshiduomo): test passed

罗俊辉 1 year ago
parent
commit
4808bcf2a9

+ 68 - 0
app/main.py

@@ -0,0 +1,68 @@
+import asyncio
+import json
+import os
+import sys
+
+from mq_http_sdk.mq_consumer import *
+from mq_http_sdk.mq_exception import MQExceptionBase
+
+sys.path.append(os.getcwd())
+
+from application.common import MysqlHelper, AliyunLogger, get_consumer, ack_message
+from application.config import TopicGroup
+
+
+async def run(task_id, mode, platform):
+    """
+    Receive the task parameters and launch the corresponding spider.
+    :return: None
+    """
+    # Spawn a subprocess running the online spider script (this awaits creation, not completion)
+    await asyncio.create_subprocess_shell(
+        "python3 scheduler/run_spider_online.py --task_id {} --mode {} --platform {}".format(task_id, mode, platform)
+    )
+
+
+async def consume_single_message(spider):
+    topic = spider['topic']
+    group = spider['group']
+    consumer = get_consumer(topic, group)
+    try:
+        messages = consumer.consume_message(wait_seconds=10, batch_size=1)
+        if messages:
+            # Consume the message here and do the data processing / analysis
+            for single_message in messages:
+                ack_message(mode=spider['mode'], platform=spider['platform'], recv_msgs=messages,
+                            consumer=consumer)
+                message_body = single_message.message_body
+                task_id = json.loads(message_body)['id']
+                print("成功消费消息,正在准备启动爬虫任务")
+                print(message_body)
+                # 创建爬虫task
+                await asyncio.create_task(run(task_id, spider['mode'], spider['platform']))
+                print("爬虫任务启动完成")
+        else:
+            message = "Messages Queue is Empty"
+            print(message)
+
+    except MQExceptionBase as err:
+        # No messages available to consume in the topic.
+        if err.type == "MessageNotExist":
+            message = f"No new message! RequestId:{err.req_id}\n"
+            print(message)
+        else:
+            message = f"Consume Message Fail! Exception:{err}\n"
+            print(message)
+
+
+async def main():
+    spider_list = TopicGroup().produce()
+    while spider_list:
+        async_tasks = []
+        for spider in spider_list:
+            task = asyncio.create_task(consume_single_message(spider))
+            async_tasks.append(task)
+        await asyncio.gather(*async_tasks)
+
+
+if __name__ == '__main__':
+    # Run the main event loop
+    asyncio.run(main())
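Note that asyncio.create_subprocess_shell returns once the child process has been spawned, so run() above does not wait for the spider to finish. A minimal sketch of a variant that blocks until the subprocess exits, shown for illustration only and not part of this commit (same CLI flags as above):

async def run(task_id, mode, platform):
    # Spawn the online spider script and wait for it to terminate,
    # reporting a non-zero exit code.
    proc = await asyncio.create_subprocess_shell(
        "python3 scheduler/run_spider_online.py "
        "--task_id {} --mode {} --platform {}".format(task_id, mode, platform)
    )
    return_code = await proc.wait()
    if return_code != 0:
        print(f"Spider process exited with code {return_code}")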

+ 0 - 0
off_line_controler.py → app/off_line_controler.py


+ 0 - 0
tt.py → app/tt.py


+ 5 - 0
application/common/__init__.py

@@ -0,0 +1,5 @@
+from .feishu import Feishu
+from .log import *
+from .messageQueue import *
+from .mysql import *
+from .proxies import *

+ 1 - 1
application/common/log/local_log.py

@@ -23,7 +23,7 @@ class Local(object):
         Generate logs using the logger module
         """
         # Log directory path
-        log_dir = f"./{platform}/logs/"
+        log_dir = f"./log_store/{platform}/"
         log_path = os.getcwd() + os.sep + log_dir
         if not os.path.isdir(log_path):
             os.makedirs(log_path)

+ 8 - 1
application/common/mysql/mysql_helper.py

@@ -6,12 +6,19 @@
 """
 import redis
 import pymysql
+import os
+import sys
+
+sys.path.append(os.getcwd())
+
 from application.common.log import Local
 from application.config.mysql_config import env_dict
 
 
 class MysqlHelper(object):
-
+    """
+    MySQL helper: wraps a pymysql connection configured from env_dict[env].
+    """
     def __init__(self, env, mode, platform, action=''):
         mysql_config = env_dict[env]
         self.connection = pymysql.connect(
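For reference, a minimal construction sketch based only on the __init__ signature visible in this hunk; the helper's query methods are not part of the diff:

helper = MysqlHelper(env="prod", mode="recommend", platform="test")
# helper.connection is a live pymysql connection built from env_dict["prod"]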

+ 0 - 1
application/config/__init__.py

@@ -1,4 +1,3 @@
 from .ipconfig import ip_config
 from .mysql_config import env_dict
 from .topic_group_queue import TopicGroup
-from .spider_map import spider_map

+ 0 - 15
application/config/mobile_config.py

@@ -1,15 +0,0 @@
-"""
-key: the phone's id
-value: {
-    "wechat_account": 123456789,
-    "machine_ip":""
-}
-"""
-platform_config = {
-    "xiaoniangao_plus": {
-        "adb_ip": "192.168.100.19:5555",
-        "machine_id": "150",
-        "local_port": "4750"
-    },
-    "zhufuquanzi": {},
-}

+ 3 - 3
application/config/topic_group_queue.py

@@ -1,8 +1,8 @@
 class TopicGroup(object):
     def __init__(self):
         self.spider_list = [
-            ("test", "recommend")
-            # ("zwwfs", "recommend"),
+            ("test", "recommend", "test"),
+            ("zhsdm", "recommend", "zhuhaoshiduomo"),
             # ("zchqs", "recommend"),
         ]
 
@@ -12,7 +12,7 @@ class TopicGroup(object):
                 "topic": "{}_{}_prod".format(i[0], i[1]),
                 "group": "{}_{}_prod".format(i[0], i[1]),
                 "mode": i[1],
-                "platform": i[0]
+                "platform": i[2]
             } for i in self.spider_list
         ]
         return result
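With the three-element tuples, the spider code name (first element) still builds the topic and group names, while the platform now comes from the third element. This is what produce() yields after the change:

spiders = TopicGroup().produce()
# [{'topic': 'test_recommend_prod', 'group': 'test_recommend_prod',
#   'mode': 'recommend', 'platform': 'test'},
#  {'topic': 'zhsdm_recommend_prod', 'group': 'zhsdm_recommend_prod',
#   'mode': 'recommend', 'platform': 'zhuhaoshiduomo'}]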

+ 0 - 0
application/spider/ad_click/__init__.py → application/functions/crypt.py


+ 6 - 2
application/pipeline/pipeline.py

@@ -1,7 +1,11 @@
 import re
+import sys
+import os
 import time
-from application.common.log import AliyunLogger
-from application.common.mysql import MysqlHelper
+
+sys.path.append(os.getcwd())
+
+from application.common import MysqlHelper, AliyunLogger
 
 
 class PiaoQuanPipeline:

+ 0 - 1
application/spider/crawler_online/__init__.py

@@ -1 +0,0 @@
-from .test import TestClass

+ 0 - 63
main.py

@@ -1,63 +0,0 @@
-import asyncio
-import json
-
-from mq_http_sdk.mq_consumer import *
-from mq_http_sdk.mq_exception import MQExceptionBase
-
-sys.path.append(os.getcwd())
-
-from application.common.messageQueue import get_consumer, ack_message
-from application.common.log import AliyunLogger
-from application.common.mysql import MysqlHelper
-from application.config import TopicGroup
-
-
-async def run(task_id, mode, platform):
-    """
-    Receive the task parameters and launch the corresponding spider.
-    :return: None
-    """
-    # Create and wait for a subprocess
-    await asyncio.create_subprocess_shell(
-        "python3 scheduler/run_spider_online.py --task_id {} --mode {} --platform {}".format(task_id, mode, platform)
-    )
-
-
-async def main():
-    spider_list = TopicGroup().produce()
-    while spider_list:
-        for spider in spider_list:
-            # Fetch messages
-            topic = spider['topic']
-            group = spider['group']
-            consumer = get_consumer(topic, group)
-            try:
-                messages = consumer.consume_message(wait_seconds=10, batch_size=1)
-                if messages:
-                    # Consume the message here and do the data processing / analysis
-                    for single_message in messages:
-                        ack_message(mode=spider['mode'], platform=spider['platform'], recv_msgs=messages,
-                                    consumer=consumer)
-                        message_body = single_message.message_body
-                        task_id = json.loads(message_body)['id']
-                        print(message_body)
-                        # Create the spider task
-                        await asyncio.create_task(run(task_id, spider['mode'], spider['platform']))
-                else:
-                    message = "Messages Queue is Empty"
-                    print(message)
-
-            except MQExceptionBase as err:
-                # No messages available to consume in the topic.
-                if err.type == "MessageNotExist":
-                    message = f"No new message! RequestId:{err.req_id}\n"
-                    print(message)
-                    continue
-                else:
-                    message = f"Consume Message Fail! Exception:{err}\n"
-                    print(message)
-
-
-if __name__ == '__main__':
-    # Run the main event loop
-    asyncio.run(main())

+ 5 - 1
scheduler/run_spider_online.py

@@ -6,10 +6,14 @@ import argparse
 sys.path.append(os.getcwd())
 
 from application.common.mysql import MysqlHelper
-from application.config import spider_map
+from spider.spider_map import spider_map
 
 
 class OnlineManager(object):
+    """
+    Online spider template
+    Todo: add Aliyun logging
+    """
     def __init__(self, task_id, mode, platform):
         self.env = "prod"
         self.task_id = task_id

+ 1 - 1
scheduler/spider_scheduler.py

@@ -3,7 +3,7 @@ import sys
 
 sys.path.append(os.getcwd())
 
-from application.spider.crawler_offline import *
+from spider.crawler_offline import *
 
 
 class SpiderHome(object):

+ 0 - 0
spider/ad_click/__init__.py


+ 0 - 0
application/spider/ad_click/piaoquan_tv_v1.py → spider/ad_click/piaoquan_tv_v1.py


+ 0 - 0
application/spider/crawler_offline/__init__.py → spider/crawler_offline/__init__.py


+ 0 - 0
application/spider/crawler_offline/piaopiaoquan.py → spider/crawler_offline/piaopiaoquan.py


+ 0 - 0
application/spider/crawler_offline/shipinshuashua.py → spider/crawler_offline/shipinshuashua.py


+ 0 - 0
application/spider/crawler_offline/xiaoniangao_plus.py → spider/crawler_offline/xiaoniangao_plus.py


+ 0 - 0
application/spider/crawler_offline/zhufuquanzi.py → spider/crawler_offline/zhufuquanzi.py


+ 2 - 0
spider/crawler_online/__init__.py

@@ -0,0 +1,2 @@
+from .test import TestClass
+from .zhuhaoshiduomo import ZhuHaoShiDuoMoRecommend

+ 1 - 1
application/spider/crawler_online/test.py → spider/crawler_online/test.py

@@ -7,7 +7,7 @@ class TestClass(object):
         self.user_list = user_list
 
     def run(self):
-        print("爬虫成功启动")
+        print("子任务爬虫成功启动")
         print(self.platform)
         print(self.mode)
         print(self.env)

+ 139 - 0
spider/crawler_online/zhuhaoshiduomo.py

@@ -0,0 +1,139 @@
+import os
+import json
+import random
+import sys
+import time
+import uuid
+import requests
+from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
+from cryptography.hazmat.backends import default_backend
+
+sys.path.append(os.getcwd())
+
+from application.items import VideoItem
+from application.pipeline import PiaoQuanPipelineTest
+from application.common.messageQueue import MQ
+from application.common.proxies import tunnel_proxies
+
+
+class AESCipher:
+    def __init__(self):
+        self.key = b'50102fa64073ad76'  # defined directly as a byte string (or convert as appropriate)
+        self.iv = b'173d023138824bb0'   # same as above
+
+    def aes_encrypt(self, data):
+        cipher = Cipher(algorithms.AES(self.key), modes.CBC(self.iv), backend=default_backend())
+        encryptor = cipher.encryptor()
+        ct = encryptor.update(self._pad(data).encode()) + encryptor.finalize()
+        return ct.hex().upper()
+
+    def aes_decrypt(self, data):
+        cipher = Cipher(algorithms.AES(self.key), modes.CBC(self.iv), backend=default_backend())
+        decryptor = cipher.decryptor()
+        decrypted_data = decryptor.update(bytes.fromhex(data)) + decryptor.finalize()
+        return self._unpad(decrypted_data).decode()
+
+    def _pad(self, s):
+        return s + (16 - len(s) % 16) * chr(16 - len(s) % 16)
+
+    def _unpad(self, s):
+        return s[:-ord(s[len(s) - 1:])]
+
+
+class ZhuHaoShiDuoMoRecommend(object):
+    def __init__(self, platform, mode, rule_dict, user_list, env):
+        self.platform = platform
+        self.mode = mode
+        self.rule_dict = rule_dict
+        self.user_list = user_list
+        self.env = env
+        self.download_cnt = 0
+        self.mq = MQ(topic_name="topic_crawler_etl_" + self.env)
+        self.expire_flag = False
+        self.cryptor = AESCipher()
+
+    def get_recommend_list(self):
+        url = "https://api.lidongze.cn/jeecg-boot/ugc/getVideoListsEn2"
+        headers = {
+            'Host': 'api.lidongze.cn',
+            'xweb_xhr': '1',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 MicroMessenger/6.8.0(0x16080000) NetType/WIFI MiniProgramEnv/Mac MacWechat/WMPF MacWechat/3.8.4(0x13080410)XWEB/31009',
+            'token': '',
+            'content-type': 'application/json',
+            'accept': '*/*',
+            'referer': 'https://servicewechat.com/wx0afdc2669ed8df2f/3/page-frame.html',
+            'accept-language': 'en-US,en;q=0.9'
+        }
+        page_index = 1
+        total_page = 2
+        while page_index <= total_page:
+            query = {
+                "pageNo": page_index,
+                "pageSize": 10,
+                "groupId": "1650323161797439489",  # 推荐流的 ID
+                "vn": 1,
+                "gx": 1,
+                "appid": "wx0afdc2669ed8df2f",
+                "type": 0
+            }
+            params = {
+                "v": self.cryptor.aes_encrypt(data=json.dumps(query))
+            }
+            response = requests.request("GET", url, headers=headers, params=params, proxies=tunnel_proxies())
+            result = json.loads(self.cryptor.aes_decrypt(response.text))
+            total_page = result['list']['pages']
+            page_index = result['list']['current'] + 1
+            for index, video_obj in enumerate(result['list']['records']):
+                self.process_video_obj(video_obj)
+
+    def process_video_obj(self, video_obj):
+        trace_id = self.platform + str(uuid.uuid1())
+        # playnum like "3万+" becomes 30000 ("万+" -> "0000"); plain numbers are cast directly
+        play_cnt = int(video_obj['playnum'].replace("万+", "0000")) if "万+" in video_obj['playnum'] else int(
+            video_obj['playnum'])
+        item = VideoItem()
+        user_dict = random.choice(self.user_list)
+        item.add_video_info("video_id", video_obj['id'])
+        item.add_video_info("video_title", video_obj['vname'])
+        item.add_video_info("play_cnt", play_cnt)
+        item.add_video_info("publish_time_stamp", int(time.time()))
+        item.add_video_info("out_user_id", video_obj['authid'])
+        item.add_video_info("cover_url", video_obj['shareimg'])
+        item.add_video_info("like_cnt", int(video_obj['likenum']))
+        item.add_video_info("video_url", video_obj['videoaddr'])
+        item.add_video_info("out_video_id", video_obj['id'])
+        item.add_video_info("platform", self.platform)
+        item.add_video_info("strategy", self.mode)
+        item.add_video_info("session", "{}-{}".format(self.platform, int(time.time())))
+        item.add_video_info("user_id", user_dict['uid'])
+        item.add_video_info("user_name", user_dict['link'])
+
+        mq_obj = item.produce_item()
+        pipeline = PiaoQuanPipelineTest(
+            platform=self.platform,
+            mode=self.mode,
+            rule_dict=self.rule_dict,
+            env=self.env,
+            item=mq_obj,
+            trace_id=trace_id,
+        )
+        if pipeline.process_item():
+            print(json.dumps(mq_obj, ensure_ascii=False, indent=4))
+            self.download_cnt += 1
+            print(self.download_cnt)
+
+    def run(self):
+        """
+        Entry point: fetch the recommend feed and process each video.
+        """
+        self.get_recommend_list()
+
+
+if __name__ == '__main__':
+    Z = ZhuHaoShiDuoMoRecommend(
+        platform="zhuwanwufusu",
+        mode="recommend",
+        rule_dict={},
+        # __init__ expects user_list (a list of user dicts); 'uid' and 'link' are read in process_video_obj
+        user_list=[{"uid": 123456, "nick_name": "luojunhuishuaige", "link": "luojunhuishuaige"}],
+        env="prod"
+    )
+    Z.get_recommend_list()
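The AESCipher helper above is AES-128-CBC with PKCS#7-style padding and upper-case hex output: the query dict is serialized to JSON, encrypted into the "v" request parameter, and the response body is decrypted the same way. A small round-trip sketch using only the class as defined above:

cipher = AESCipher()
payload = json.dumps({"pageNo": 1, "pageSize": 10, "appid": "wx0afdc2669ed8df2f"})
token = cipher.aes_encrypt(payload)           # upper-case hex ciphertext, sent as the "v" query parameter
assert cipher.aes_decrypt(token) == payload   # decryption strips the padding and restores the JSON string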

+ 4 - 1
application/config/spider_map.py → spider/spider_map.py

@@ -4,7 +4,7 @@ key is the spider's platform;
 sub_key is the mode (recommend, author, ...);
 value is the wrapped spider class
 """
-from application.spider.crawler_online import *
+from spider.crawler_online import *
 
 spider_map = {
     # 祝万物复苏 (zhuwanwufusu)
@@ -14,5 +14,8 @@ spider_map = {
     # Test script
     "test": {
         "recommend": TestClass
+    },
+    "zhuhaoshiduomo": {
+        "recommend": ZhuHaoShiDuoMoRecommend
     }
 }
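run_spider_online.py now imports spider_map from the spider package, so the dispatcher is expected to resolve the spider class as spider_map[platform][mode]. A hypothetical sketch of that lookup (the resolve_spider helper is an illustration, not code from this commit):

from spider.spider_map import spider_map

def resolve_spider(platform, mode):
    # e.g. spider_map["zhuhaoshiduomo"]["recommend"] -> ZhuHaoShiDuoMoRecommend
    return spider_map[platform][mode]

SpiderClass = resolve_spider("zhuhaoshiduomo", "recommend")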