@@ -6,6 +6,7 @@ import os
 import random
 import sys
 import time
+import uuid
 import requests
 from Crypto.Cipher import AES
 from Crypto.Hash import MD5
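Reviewer note: uuid is pulled in for the per-item trace id built in process_video_obj below. uuid1() is timestamp-based, so trace ids sort roughly by creation time; a quick sketch of the resulting shape (platform prefix taken from this patch):

    import uuid
    trace_id = "ganggangdouchuan" + str(uuid.uuid1())  # e.g. "ganggangdouchuan8f3c...-..."
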
@@ -16,34 +17,9 @@ from common.mq import MQ

 sys.path.append(os.getcwd())
 from common.common import Common
-from common.scheduling_db import MysqlHelper
-from common.public import get_config_from_mysql, download_rule_v2
-
-proxies = {"http": None, "https": None}
-
-
-def clean_title(strings):
-    return (
-        strings.strip()
-        .replace("\n", "")
-        .replace("/", "")
-        .replace("\r", "")
-        .replace("#", "")
-        .replace(".", "。")
-        .replace("\\", "")
-        .replace("&NBSP", "")
-        .replace(":", "")
-        .replace("*", "")
-        .replace("?", "")
-        .replace("?", "")
-        .replace('"', "")
-        .replace("<", "")
-        .replace(">", "")
-        .replace("|", "")
-        .replace(" ", "")
-        .replace('"', "")
-        .replace("'", "")
-    )
+from common.aliyun_log import AliyunLogger
+from common.pipeline import PiaoQuanPipeline
+from common.public import clean_title


 def decrypt(a, e, n):
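Reviewer note: the module-local clean_title and the unused proxies constant are dropped in favor of the shared helper in common.public, alongside the new AliyunLogger and PiaoQuanPipeline imports. Assuming the shared helper preserves the deleted behavior, it presumably looks like:

    # Assumed shape of common.public.clean_title, mirroring the deleted local version
    def clean_title(strings):
        strings = strings.strip().replace(".", "。")  # swap ASCII dots for the full-width form
        # Drop whitespace and characters unsafe in titles/file names
        for ch in ["\n", "\r", "/", "#", "\\", "&NBSP", ":", "*", "?", "？", '"', "<", ">", "|", " ", "'"]:
            strings = strings.replace(ch, "")
        return strings
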
@@ -106,13 +82,6 @@ class GGDCScheduling:
         self.mq = MQ(topic_name="topic_crawler_etl_" + self.env)
         self.download_count = 0

-    def repeat_video(self, video_id):
-        sql = f""" select * from crawler_video where platform in ("{self.crawler}","{self.platform}") and out_video_id="{video_id}"; """
-        repeat_video = MysqlHelper.get_values(
-            self.log_type, self.crawler, sql, self.env
-        )
-        return len(repeat_video)
-
     # Fetch the list of video ids
     def get_videoList(self, page_id):
         time.sleep(random.randint(5, 10))
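Reviewer note: deleting repeat_video removes this module's last direct MysqlHelper dependency; duplicate detection is presumably now one of the checks inside PiaoQuanPipeline.process_item() (introduced further down). A hypothetical sketch of the equivalent check, mirroring the deleted query:

    # Hypothetical: dedup check assumed to live in PiaoQuanPipeline now
    def is_duplicate(log_type, crawler, platform, env, out_video_id):
        sql = f""" select * from crawler_video where platform in ("{crawler}","{platform}") and out_video_id="{out_video_id}"; """
        return len(MysqlHelper.get_values(log_type, crawler, sql, env)) > 0
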
@@ -122,15 +91,11 @@ class GGDCScheduling:
             "page": page_id,
             "timeline": 0,
             "version": "9.0.2",
-            # "timestamp": 1697700674000,
-            # "openid": "oNnpe5SwkfGtD5aJAaRbsIKIEdjc",
         }
         headers = {
             "Host": "ganggangdouchuan2.mengniu99.com",
-            # "Authorization": "oNnpe5SwkfGtD5aJAaRbsIKIEdjc",
             "xweb_xhr": "1",
             "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 MicroMessenger/6.8.0(0x16080000) NetType/WIFI MiniProgramEnv/Mac MacWechat/WMPF XWEB/30817",
-            # "Sign": "88ab143a9401ebac3525562073248231",
             "Content-Type": "application/json",
             "Accept": "*/*",
             "Sec-Fetch-Site": "cross-site",
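Reviewer note: the commented-out timestamp/openid/Authorization/Sign entries look like captured session credentials; deleting dead credentials from source is the right call. If signed requests are ever needed again, injecting them from the environment would be safer, e.g. (hypothetical variable name, not part of this patch):

    import os
    sign = os.environ.get("GGDC_SIGN")  # hypothetical env var
    if sign:
        headers["Sign"] = sign
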
@@ -144,42 +109,82 @@ class GGDCScheduling:
             try:
                 response = requests.get(url, headers=headers, params=params)
                 decrypted_data = decrypt(
-                    response.json()["data"], response.json()["_yyy"], True
+                    response.json()["data"][:-2], response.json()["_yyy"], True
                 )
                 result = json.loads(decrypted_data)
+                AliyunLogger.logging(
+                    code="1000",
+                    platform=self.crawler,
+                    mode=self.log_type,
+                    env=self.env,
+                    data={},
+                    message="Start crawling page {}".format(page_id),
+                )
                 break
             except:
-                Common.logger(self.log_type, self.crawler).info("Encoding mismatch, decryption failed, retrying in 10 seconds\n")
-                # print("Encoding mismatch, decryption failed, waiting 10 seconds")
-                time.sleep(10)
+                AliyunLogger.logging(
+                    code="2000",
+                    platform=self.crawler,
+                    mode=self.log_type,
+                    env=self.env,
+                    data={},
+                    message="Page {}: no data fetched, encoding error".format(page_id),
+                )
+                Common.logger(self.log_type, self.crawler).info("Encoding mismatch, decryption failed\n")
+                return

-        if "totalCount" not in result or response.status_code != 200:
+        if "totalCount" not in result:
             Common.logger(self.log_type, self.crawler).info(
                 f"get_videoList:{response.text}\n"
             )
-            Common.logging(
-                self.log_type,
-                self.crawler,
-                self.env,
-                f"get_videoList:{response.text}\n",
+            AliyunLogger.logging(
+                code="2000",
+                platform=self.crawler,
+                mode=self.log_type,
+                env=self.env,
+                data={},
+                message="Page {}: no data fetched".format(page_id),
             )
             return
         elif len(result["videos"]) == 0:
             Common.logger(self.log_type, self.crawler).info(f"No more data\n")
-            Common.logging(self.log_type, self.crawler, self.env, f"No more data\n")
+            AliyunLogger.logging(
+                code="2000",
+                platform=self.crawler,
+                mode=self.log_type,
+                env=self.env,
+                data={},
+                message="Page {}: no more data".format(page_id),
+            )
             return
         else:
             data_list = result["videos"]
-            for video_obj in data_list:
+            for index, video_obj in enumerate(data_list):
                 try:
                     self.process_video_obj(video_obj)
                 except Exception as e:
                     Common.logger(self.log_type, self.crawler).error(f"Exception crawling single video: {e}\n")
-                    Common.logging(
-                        self.log_type, self.crawler, self.env, f"Exception crawling single video: {e}\n"
+                    AliyunLogger.logging(
+                        code="3000",
+                        platform=self.crawler,
+                        mode=self.log_type,
+                        env=self.env,
+                        data=video_obj,
+                        message="Exception crawling single video, error: {}, on page {} item {}".format(
+                            e, page_id, index + 1
+                        ),
                     )
+            AliyunLogger.logging(
+                code="1000",
+                platform=self.crawler,
+                mode=self.log_type,
+                env=self.env,
+                data={},
+                message="Finished crawling page {}".format(page_id),
+            )

     def process_video_obj(self, video_obj):
+        trace_id = self.platform + str(uuid.uuid1())
         video_id = video_obj.get("videoid", 0)
         video_title = clean_title(video_obj.get("title", "no title"))
         video_time = video_obj.get("v_time", 0)
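Reviewer note: two behavior changes here beyond the logging migration. First, the encrypted payload is now sliced with [:-2] before decryption, presumably trimming a two-character suffix the endpoint appends. Second, a decryption failure used to sleep 10 s and retry inside the loop; it now logs code 2000 and returns, abandoning the page after a single attempt. The structured AliyunLogger.logging call sites in this patch imply roughly this interface (hypothetical local stub, handy for runs without the Aliyun SDK):

    # Hypothetical stub matching the call sites above; the real class presumably ships to Aliyun SLS
    class AliyunLogger:
        @staticmethod
        def logging(code, platform, mode, env, data, message, trace_id=None):
            print(f"[{code}] {platform}/{mode}/{env} trace={trace_id} {message} data={data}")
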
@@ -199,6 +204,7 @@ class GGDCScheduling:
             "user_name": user_name,
             "publish_time_stamp": publish_time_stamp,
             "publish_time_str": publish_time_str,
+            "update_time_stamp": int(time.time()),
             "video_width": 0,
             "video_height": 0,
             "profile_id": 0,
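Reviewer note: update_time_stamp records the crawl time in epoch seconds, distinct from publish_time_stamp, presumably so downstream ETL can judge freshness, e.g. (hypothetical consumer-side check):

    age_seconds = int(time.time()) - video_dict["update_time_stamp"]  # hypothetical freshness check
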
@@ -206,48 +212,20 @@ class GGDCScheduling:
             "cover_url": video_obj["cover"],
             "session": f"ganggangdouchuan-{int(time.time())}",
         }
-        for k, v in video_dict.items():
-            Common.logger(self.log_type, self.crawler).info(f"{k}:{v}")
-        Common.logging(self.log_type, self.crawler, self.env, f"{video_dict}")
-        # Filter out invalid videos
-        if video_title == "" or video_dict["video_id"] == "":
-            Common.logger(self.log_type, self.crawler).info("Invalid video\n")
-            Common.logging(self.log_type, self.crawler, self.env, "Invalid video\n")
-        # Filter by the base crawl rules
-        elif (
-            download_rule_v2(
-                log_type=self.log_type,
-                crawler=self.crawler,
-                video_dict=video_dict,
-                rule_dict=self.rule_dict,
-            )
-            is False
-        ):
-            Common.logger(self.log_type, self.crawler).info("Crawl rules not met\n")
-            Common.logging(self.log_type, self.crawler, self.env, "Crawl rules not met\n")
-        elif (
-            any(
-                str(word) if str(word) in video_dict["video_title"] else False
-                for word in get_config_from_mysql(
-                    log_type=self.log_type,
-                    source=self.crawler,
-                    env=self.env,
-                    text="filter",
-                    action="",
-                )
-            )
-            is True
-        ):
-            Common.logger(self.log_type, self.crawler).info("Hit a filtered word\n")
-            Common.logging(self.log_type, self.crawler, self.env, "Hit a filtered word\n")
-        elif self.repeat_video(video_dict["video_id"]) != 0:
-            Common.logger(self.log_type, self.crawler).info("Video already downloaded\n")
-            Common.logging(self.log_type, self.crawler, self.env, "Video already downloaded\n")
-        else:
+        video_dict["out_video_id"] = str(video_dict["video_id"])
+        rule_pipeline = PiaoQuanPipeline(
+            platform=self.platform,
+            mode=self.log_type,
+            rule_dict=self.rule_dict,
+            env=self.env,
+            item=video_dict,
+            trace_id=trace_id,
+        )
+        flag = rule_pipeline.process_item()
+        if flag:
             video_dict["out_user_id"] = video_dict["profile_id"]
             video_dict["platform"] = self.crawler
             video_dict["strategy"] = self.log_type
-            video_dict["out_video_id"] = str(video_dict["video_id"])
             video_dict["width"] = video_dict["video_width"]
             video_dict["height"] = video_dict["video_height"]
             video_dict["crawler_rule"] = json.dumps(self.rule_dict)
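Reviewer note: the inline filter chain (empty title/id, download_rule_v2 thresholds, MySQL filter words, repeat_video dedup) collapses into a single PiaoQuanPipeline.process_item() boolean gate, and out_video_id is now set before validation so the pipeline can dedup on it. A sketch of the contract this call site relies on, assuming the pipeline reproduces the deleted checks:

    # Hypothetical outline of PiaoQuanPipeline, inferred from the checks it replaces
    class PiaoQuanPipeline:
        def __init__(self, platform, mode, rule_dict, env, item, trace_id):
            self.platform, self.mode, self.rule_dict = platform, mode, rule_dict
            self.env, self.item, self.trace_id = env, item, trace_id

        def process_item(self):
            # Presumably: reject empty title/out_video_id, apply rule_dict thresholds,
            # drop titles containing configured filter words, dedup against crawler_video.
            if not self.item.get("video_title") or not self.item.get("out_video_id"):
                return False
            # ... rule / filter-word / duplicate checks elided ...
            return True
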
@@ -256,19 +234,27 @@ class GGDCScheduling:
             video_dict["video_url"] = find_tencent_url(video_obj["txvid"])
             video_dict["avatar_url"] = video_obj["avatarurl"]
             video_dict["cover_url"] = video_obj["cover"]
-            # print(json.dumps(video_dict, ensure_ascii=False, indent=4))
             self.download_count += 1
             self.mq.send_msg(video_dict)
+            AliyunLogger.logging(
+                code="1002",
+                platform=self.crawler,
+                mode=self.log_type,
+                env=self.env,
+                data=video_dict,
+                trace_id=trace_id,
+                message="Successfully sent MQ message to ETL",
+            )


 if __name__ == "__main__":
     ZL = GGDCScheduling(
         log_type="recommend",
-        crawler="ggdc",
+        crawler="ganggangdouchuan",
         rule_dict={},
         our_uid="luojunhuihaoshuai",
-        env="dev",
+        env="prod",
     )
-    for i in range(5):
+    for i in range(1):
         ZL.get_videoList(page_id=i + 1)
     print(ZL.download_count)
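Reviewer note: the entry point flips from dev to prod and the smoke run drops from five pages to one; the crawler slug also changes from the abbreviation "ggdc" to the full "ganggangdouchuan", which must match what the ETL topic and Aliyun logs key on. If the page count should vary per run, a small argv override keeps the one-page default (hypothetical):

    import sys
    pages = int(sys.argv[1]) if len(sys.argv) > 1 else 1  # hypothetical CLI override
    for i in range(pages):
        ZL.get_videoList(page_id=i + 1)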