@@ -6,10 +6,9 @@
 # @Time: 2022/4/25
 import json
 import os
-import shutil
 import sys
 import time
-from hashlib import md5
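+# uuid provides the time-based uuid1 values used for the per-item trace ids below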
+import uuid
 from urllib import parse
 import requests
 import urllib3
@@ -17,9 +16,8 @@ sys.path.append(os.getcwd())
 from common.mq import MQ
 from common.common import Common
 from common.scheduling_db import MysqlHelper
-from common.feishu import Feishu
-from common.publish import Publish
 from common.public import get_config_from_mysql, download_rule
+from common.aliyun_log import AliyunLogger
 
 proxies = {"http": None, "https": None}
 
@@ -82,13 +80,20 @@ class BenshanzhufuRecommend:
             page += 1
             feeds = r.json()["data"]["list"]
             for i in range(len(feeds)):
-                # try:
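+                # Build a per-item trace id (platform name + time-based uuid1) so the
+                # scan / duplicate / ETL log entries for one item can be correlated.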
+                trace_id = crawler + str(uuid.uuid1())
+                AliyunLogger.logging(
+                    code="1001",
+                    platform=crawler,
+                    mode=log_type,
+                    env=env,
+                    data=feeds[i],
+                    message="扫描到一条视频"
+                )
                 publish_time_stamp = feeds[i].get("update_time", 0)
                 publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                 video_url = feeds[i].get("video_url", "")
                 if ".mp4" not in video_url:
                     video_url = ""
-
                 video_dict = {
                     'video_title': feeds[i].get("title", "").replace(" ", "").replace("'", "").replace('"', ""),
                     'video_id': str(feeds[i].get("nid", "")),
@@ -127,6 +132,15 @@ class BenshanzhufuRecommend:
                 elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                     Common.logger(log_type, crawler).info('视频已下载\n')
                     Common.logging(log_type, crawler, env, '视频已下载\n')
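+                    # Report the duplicate hit to Aliyun Log under the same trace id.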
+                    AliyunLogger.logging(
+                        code="2002",
+                        platform=crawler,
+                        mode=log_type,
+                        message="重复的视频",
+                        data=video_dict,
+                        trace_id=trace_id,
+                        env=env
+                    )
                 else:
                     video_dict["out_user_id"] = video_dict["user_id"]
                     video_dict["platform"] = crawler
@@ -140,127 +154,20 @@ class BenshanzhufuRecommend:
                     video_dict["fans_cnt"] = 0
                     video_dict["videos_cnt"] = 0
                     mq.send_msg(video_dict)
-
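+                    # Record the successful handoff to the ETL queue under this trace id.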
+                    AliyunLogger.logging(
+                        code="1002",
+                        platform=crawler,
+                        mode=log_type,
+                        message="成功发送至 ETL",
+                        data=video_dict,
+                        trace_id=trace_id,
+                        env=env
+                    )
                 # except Exception as e:
                 #     Common.logger(log_type, crawler).info(f"抓取单条视频异常:{e}\n")
             # except Exception as e:
             #     Common.logger(log_type, crawler).error(f"抓取第{page}页时异常:{e}\n")
 
-    # Download / upload
-    @classmethod
-    def download_publish(cls, log_type, crawler, our_uid, video_dict, rule_dict, env):
-        # Download the video
-        Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
-        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-        try:
-            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
-                # Delete the video folder
-                shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
-                return
-        except FileNotFoundError:
-            # Delete the video folder
-            shutil.rmtree(f"./{crawler}/videos/{md_title}")
-            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
-            return
-
-        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
-        video_dict["duration"] = ffmpeg_dict["duration"]
-        video_dict["video_width"] = ffmpeg_dict["width"]
-        video_dict["video_height"] = ffmpeg_dict["height"]
-
-        # Download the cover image
-        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
-        # Save the video info to a txt file
-        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
-
-        # Upload the video
-        Common.logger(log_type, crawler).info("开始上传视频...")
-        if env == "dev":
-            oss_endpoint = "out"
-            our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                      crawler=crawler,
-                                                      strategy="推荐榜爬虫策略",
-                                                      our_uid=our_uid,
-                                                      env=env,
-                                                      oss_endpoint=oss_endpoint)
-            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-        else:
-            oss_endpoint = "inner"
-            our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                      crawler=crawler,
-                                                      strategy="推荐榜爬虫策略",
-                                                      our_uid=our_uid,
-                                                      env=env,
-                                                      oss_endpoint=oss_endpoint)
-
-            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-
-        if our_video_id is None:
-            try:
-                # Delete the video folder
-                shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                return
-            except FileNotFoundError:
-                return
-
-        # Save the video info to the database
-        insert_sql = f""" insert into crawler_video(video_id,
-                                                    out_user_id,
-                                                    platform,
-                                                    strategy,
-                                                    out_video_id,
-                                                    video_title,
-                                                    cover_url,
-                                                    video_url,
-                                                    duration,
-                                                    publish_time,
-                                                    play_cnt,
-                                                    crawler_rule,
-                                                    width,
-                                                    height)
-                                                    values({our_video_id},
-                                                    "{video_dict['user_id']}",
-                                                    "{cls.platform}",
-                                                    "推荐榜爬虫策略",
-                                                    "{video_dict['video_id']}",
-                                                    "{video_dict['video_title']}",
-                                                    "{video_dict['cover_url']}",
-                                                    "{video_dict['video_url']}",
-                                                    {int(video_dict['duration'])},
-                                                    "{video_dict['publish_time_str']}",
-                                                    {int(video_dict['play_cnt'])},
-                                                    '{json.dumps(rule_dict)}',
-                                                    {int(video_dict['video_width'])},
-                                                    {int(video_dict['video_height'])}) """
-        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action='')
-        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
-
-        # Write the video to the Feishu sheet
-        Feishu.insert_columns(log_type, crawler, "440018", "ROWS", 1, 2)
-        upload_time = int(time.time())
-        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
-                   "推荐榜爬虫策略",
-                   video_dict['video_id'],
-                   video_dict['video_title'],
-                   our_video_link,
-                   video_dict['play_cnt'],
-                   video_dict['comment_cnt'],
-                   video_dict['like_cnt'],
-                   video_dict['share_cnt'],
-                   video_dict['duration'],
-                   f"{video_dict['video_width']}*{video_dict['video_height']}",
-                   video_dict['publish_time_str'],
-                   video_dict['user_name'],
-                   video_dict['user_id'],
-                   video_dict['avatar_url'],
-                   video_dict['cover_url'],
-                   video_dict['video_url']]]
-        time.sleep(0.5)
-        Feishu.update_values(log_type, crawler, "440018", "E2:Z2", values)
-        Common.logger(log_type, crawler).info(f"视频信息已保存至云文档\n")
-
 
 if __name__ == "__main__":
     print(get_config_from_mysql("recommend", "benshanzhufu", "dev", "filter"))