# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/4/13
import json
import os
import shutil
import sys
import time

import requests
import urllib3

sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.publish import Publish
from common.scheduling_db import MysqlHelper


class SuisuiniannianyingfuqiRecommend:
    page = 0
    platform = "岁岁年年迎福气"

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        # Check whether this out_video_id has already been crawled
        sql = f""" select * from crawler_video where platform="岁岁年年迎福气" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    @classmethod
    def get_videoList(cls, log_type, crawler, oss_endpoint, env):
        # Paginate through the recommend feed until the API returns no more data
        while True:
            url = 'https://www.jzkksp.com/index/home/get_home_list.html'
            headers = {
                'content-type': 'application/x-www-form-urlencoded',
                'Accept-Encoding': 'gzip,compress,br,deflate',
                'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_3_1 like Mac OS X) '
                              'AppleWebKit/605.1.15 (KHTML, like Gecko) '
                              'Mobile/15E148 MicroMessenger/8.0.25(0x1800192b) NetType/WIFI Language/zh_CN',
                'Referer': 'https://servicewechat.com/wxd4c54f60812f6f36/1/page-frame.html',
            }
            cls.page += 1
            # Request parameters; token / openid / str_data are hard-coded values
            data = {
                'token': '851ae159fd33f955bf433e7c47a4a298',
                'time': '1667905857000',
                'str_data': 'uT551tU8',
                'page': str(cls.page),
                'limit': '10',
                'appid': 'wxd4c54f60812f6f36',
                'version': '1.4.1',
                'openid': 'oDAjy5SCFe7Ml3PNgiow3ncozL1o'
            }
            urllib3.disable_warnings()
            response = requests.post(url=url, headers=headers, data=data, verify=False)
            if response.status_code != 200:
                Common.logger(log_type, crawler).warning(f'get_videoList:{response.status_code}, {response.text}\n')
                cls.page = 0
                return
            if 'data' not in response.json():
                Common.logger(log_type, crawler).warning(f'get_videoList:{response.status_code}, {response.json()}\n')
                cls.page = 0
                return
            elif len(response.json()['data']['video_list']['data']) == 0:
                Common.logger(log_type, crawler).info(f'No more data. {response.json()}\n')
                cls.page = 0
                return
            else:
                feeds = response.json()['data']['video_list']['data']
                for i in range(len(feeds)):
                    video_title = feeds[i].get('title', "").replace("'", "").replace('"', '')
                    video_id = str(feeds[i].get('id', ''))
                    play_cnt = feeds[i].get('browse', 0)
                    comment_cnt = 0
                    like_cnt = 0
                    share_cnt = 0
                    publish_time_str = feeds[i].get('createtime', '')
                    publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y-%m-%d")))
                    user_name = "岁岁年年迎福气"
                    user_id = "suisuiniannianyingfuqi"
                    cover_url = feeds[i].get('thumb', '')
                    video_url = feeds[i].get('url', '')
                    video_dict = {'video_title': video_title,
                                  'video_id': video_id,
                                  'play_cnt': play_cnt,
                                  'comment_cnt': comment_cnt,
                                  'like_cnt': like_cnt,
                                  'share_cnt': share_cnt,
                                  'publish_time_stamp': publish_time_stamp,
                                  'publish_time_str': publish_time_str,
                                  'user_name': user_name,
                                  'user_id': user_id,
                                  'avatar_url': cover_url,
                                  'cover_url': cover_url,
                                  'video_url': video_url,
                                  'session': f"suisuiniannianyingfuqi-{int(time.time())}"}
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    # Skip invalid items and duplicates before downloading
                    if video_id == '' or video_title == '' or cover_url == '' or video_url == '':
                        Common.logger(log_type, crawler).info('Invalid video\n')
                    elif cls.repeat_video(log_type, crawler, video_id, env) != 0:
                        Common.logger(log_type, crawler).info('Video already downloaded\n')
                    else:
                        cls.download_publish(log_type, crawler, video_dict, oss_endpoint, env)
    # Download and publish
    @classmethod
    def download_publish(cls, log_type, crawler, video_dict, oss_endpoint, env):
        # Download the video
        Common.download_method(log_type=log_type, crawler=crawler, text='video',
                               title=video_dict['video_title'], url=video_dict['video_url'])
        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
        if ffmpeg_dict is None:
            # ffmpeg reports an empty file: remove the download directory,
            # which is named after the video title throughout this file
            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}/")
            Common.logger(log_type, crawler).info("Video size is 0, directory deleted\n")
            return
        video_dict["duration"] = ffmpeg_dict["duration"]
        video_dict["video_width"] = ffmpeg_dict["width"]
        video_dict["video_height"] = ffmpeg_dict["height"]
        # Download the cover
        Common.download_method(log_type=log_type, crawler=crawler, text='cover',
                               title=video_dict['video_title'], url=video_dict['cover_url'])
        # Save the video info to a txt file
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
        # Upload the video
        Common.logger(log_type, crawler).info("Uploading video...")
        our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                  crawler=crawler,
                                                  strategy="推荐榜爬虫策略",
                                                  our_uid="recommend",
                                                  env=env,
                                                  oss_endpoint=oss_endpoint)
        if our_video_id is None:
            # Upload failed: delete the local video directory
            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
            return
        if env == 'dev':
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        Common.logger(log_type, crawler).info("Video upload finished")

        # Write the video info to the Feishu sheet
        Feishu.insert_columns(log_type, crawler, "290bae", "ROWS", 1, 2)
        upload_time = int(time.time())
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "推荐榜爬虫策略",
                   video_dict['video_title'],
                   video_dict['video_id'],
                   our_video_link,
                   video_dict['play_cnt'],
                   video_dict['duration'],
                   f"{video_dict['video_width']}*{video_dict['video_height']}",
                   video_dict['cover_url'],
                   video_dict['video_url']]]
        time.sleep(0.5)
        Feishu.update_values(log_type, crawler, "290bae", "F2:Z2", values)
        Common.logger(log_type, crawler).info("Video saved to the Feishu doc\n")

        # Save the video info to the database
        rule_dict = {}
        insert_sql = f""" insert into crawler_video(video_id,
                                                    out_user_id,
                                                    platform,
                                                    strategy,
                                                    out_video_id,
                                                    video_title,
                                                    cover_url,
                                                    video_url,
                                                    duration,
                                                    publish_time,
                                                    play_cnt,
                                                    crawler_rule,
                                                    width,
                                                    height)
                          values({our_video_id},
                                 "{video_dict['user_id']}",
                                 "{cls.platform}",
                                 "推荐榜爬虫策略",
                                 "{video_dict['video_id']}",
                                 "{video_dict['video_title']}",
                                 "{video_dict['cover_url']}",
                                 "{video_dict['video_url']}",
                                 {int(video_dict['duration'])},
                                 "{video_dict['publish_time_str']}",
                                 {int(video_dict['play_cnt'])},
                                 '{json.dumps(rule_dict)}',
                                 {int(video_dict['video_width'])},
                                 {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action='')
        Common.logger(log_type, crawler).info('Video info inserted into the database\n')


if __name__ == '__main__':
    SuisuiniannianyingfuqiRecommend.get_videoList('recommend', 'suisuiniannianyingfuqi', 'out', 'dev')