import configparser
import json
import os
import random
import time

import requests

from common import Material, Oss, Common
from common.sql_help import sqlCollect
from data_channel.piaoquan import PQ
from data_channel.shipinhao import SPH

config = configparser.ConfigParser()
config.read('./config.ini')


class SphHistory:
    """Crawler that pulls the complete posting history of 视频号 (WeChat
    Channels) accounts, uploads each video and cover image to OSS, and
    records the metadata through ``sqlCollect``.
    """

    @classmethod
    def remove_files(cls, video_path_url):
        """Delete every file and sub-directory under ``video_path_url``.

        The directory itself is preserved.  The walk is bottom-up
        (``topdown=False``) so every directory is already empty when
        ``os.rmdir`` runs; a top-down walk would fail on nested,
        non-empty directories.
        """
        if os.path.exists(video_path_url) and os.path.isdir(video_path_url):
            for root, dirs, files in os.walk(video_path_url, topdown=False):
                for file_name in files:
                    os.remove(os.path.join(root, file_name))
                for dir_name in dirs:
                    os.rmdir(os.path.join(root, dir_name))

    @classmethod
    def create_folders(cls):
        """Ensure the crawler working directory exists and return its path."""
        video_path_url = config['PATHS']['VIDEO_PATH'] + "/sph_crawling/"
        if not os.path.exists(video_path_url):
            os.makedirs(video_path_url)
        return video_path_url

    @classmethod
    def sph_data_info(cls):
        """Crawl every configured 视频号 account and persist all of its videos.

        For each user: resolve the account id, page through the account's
        timeline, push video + cover to OSS, and insert one row per video
        via ``sqlCollect.sph_data_info``.  Temporary downloads are cleaned
        up after each user, including on error.

        Returns:
            "完成" once every user has been processed, or ``None`` when no
            user list is available.
        """
        user_list = cls.get_sph_user()
        video_path_url = cls.create_folders()
        if user_list is None:
            return
        # Keep the two service endpoints in distinct names: the original
        # reused one ``url`` variable, so after the first inner iteration
        # the paging POST was sent to the download-address endpoint.
        page_url = "http://61.48.133.26:30001/FinderGetUpMasterNextPage"
        address_url = "http://61.48.133.26:30001/GetFinderDownloadAddress"
        for user in user_list:
            Common.logger("sph_crawling").info(f"{user}开始获取数据")
            account_id = SPH.get_account_id(user)
            if not account_id:
                print(f"{account_id}:没有获取到视频account_id,无法抓取数据")
                continue
            last_buffer = ""
            try:
                while True:
                    payload = json.dumps({
                        "username": account_id,
                        "last_buffer": last_buffer
                    })
                    response = requests.request(
                        "POST", page_url,
                        headers={'Content-Type': 'application/json'},
                        data=payload)
                    time.sleep(random.randint(1, 5))
                    res_json = response.json()
                    # An explicitly empty DownloadAddress means nothing more
                    # to fetch.  Only break when the key exists with length
                    # 0 — a missing key (the normal paging response) and a
                    # None value never terminated the loop in the original.
                    download_address = res_json.get("DownloadAddress")
                    if download_address is not None and len(download_address) == 0:
                        break
                    if "objectId" not in response.text or response.status_code != 200:
                        break
                    if not res_json["UpMasterHomePage"]:
                        break
                    last_buffer = res_json.get('last_buffer')
                    for obj in res_json["UpMasterHomePage"]:
                        Common.logger("sph_crawling").info(f"{user}扫描到一条数据")
                        objectId = obj['objectId']
                        objectNonceId = obj['objectNonceId']
                        addr_payload = json.dumps({
                            "objectId": objectId,
                            "objectNonceId": objectNonceId
                        })
                        addr_response = requests.request(
                            "POST", address_url,
                            headers={'Content-Type': 'text/plain'},
                            data=addr_payload)
                        time.sleep(random.randint(0, 1))
                        video_obj = addr_response.json()
                        video_url = video_obj.get('DownloadAddress')
                        if not video_url:  # missing or empty address: skip this item
                            continue
                        v_id = f"sph/{objectId}"
                        Common.logger("sph_crawling").info(
                            f"{user}视频ID:{v_id},视频链接:{video_url}开始发送oss")
                        # Upload the video to OSS and keep only the object key.
                        oss_video_key = Oss.channel_upload_oss(video_url, v_id)
                        oss_video_key = oss_video_key.get("oss_object_key")
                        Common.logger("sph_crawling").info(
                            f"{user}视频发送oss成功,视频oss地址{oss_video_key}")
                        share_cnt = int(obj['forward_count'])  # share count
                        like_cnt = int(obj['like_count'])  # like count
                        video_title = video_obj.get('title').split("\n")[0].split("#")[0]
                        cover = video_obj.get('thumb_url')
                        # Download the cover image locally, then upload it to OSS.
                        jpg_path = PQ.download_video_jpg(cover, video_path_url, v_id)
                        if os.path.isfile(jpg_path):
                            oss_jpg_key = Oss.stitching_fm_upload_oss(jpg_path, v_id)
                            oss_cover_key = oss_jpg_key.get("oss_object_key")
                            # Fixed: the original logged oss_video_key here.
                            Common.logger("sph_crawling").info(
                                f"{user}封面发送oss成功,封面oss地址{oss_cover_key}")
                        else:
                            oss_cover_key = ''
                            Common.logger("sph_crawling").info(f"{user}封面发送oss失败")
                        create_time = obj['createtime']  # publish timestamp
                        user_name = obj['username']  # account identifier
                        nick_name = obj['nickname']  # display name
                        comment_count = obj['comment_count']
                        fav_count = obj['fav_count']  # thumbs-up count
                        sqlCollect.sph_data_info(
                            '视频号', objectId, video_url, cover, video_title,
                            str(share_cnt), str(like_cnt), oss_video_key,
                            oss_cover_key, nick_name, user_name,
                            comment_count, fav_count, create_time)
                        Common.logger("sph_crawling").info(f"{nick_name}插入数据成功")
                cls.remove_files(video_path_url)
            except Exception as e:
                Common.logger("sph_crawling").info(f"{user}异常,异常信息{e}")
                cls.remove_files(video_path_url)
                continue
        # Fixed: the original returned "完成" inside the user loop, so only
        # the first user was ever crawled.
        return "完成"

    @classmethod
    def get_sph_user(cls):
        """Return the flat list of 视频号 users to crawl.

        Prefers the list stored in the DB.  When the DB has no rows, seeds
        it from ``Material.get_sph_user`` and returns that freshly fetched
        list (the original fell through and iterated ``None``, raising
        ``TypeError``).  Returns ``None`` when no users exist anywhere.
        """
        data = sqlCollect.sph_channel_user_list()
        if data is None:
            user_list = Material.get_sph_user()
            if not user_list:
                return None
            for user in user_list:
                sqlCollect.insert_sph_channel_user("视频号", user)
            return user_list
        # DB rows come back as single-column tuples; flatten them.
        return [item for sublist in data for item in sublist]


if __name__ == '__main__':
    SphHistory.sph_data_info()