# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/1/13
# Crawl videos from followed Haokan (好看视频) authors, download them, and publish them to Piaoquan.
import datetime
import os
import sys
import time

import requests
import urllib3

sys.path.append(os.getcwd())
from main.common import Common
from main.feishu_lib import Feishu
from main.haokan_publish import Publish


class Follow:
    # Pagination cursor returned by the listall API; reset to '' between authors.
    ctime = ''

    # Read the filter-word list from the Feishu sheet.
    @classmethod
    def filter_words(cls, log_type):
        try:
            filter_words_sheet = Feishu.get_values_batch(log_type, 'haokan', 'nKgHzp')
            filter_words_list = []
            for x in filter_words_sheet:
                for y in x:
                    if y is not None:
                        filter_words_list.append(y)
            return filter_words_list
        except Exception as e:
            Common.logger(log_type).error(f'filter_words exception: {e}')

    # Read the user list from the Feishu sheet: {user_name: "out_id,our_id"}.
    @classmethod
    def get_users_from_feishu(cls, log_type):
        try:
            user_sheet = Feishu.get_values_batch(log_type, 'haokan', 'x4nb7H')
            user_dict = {}
            for i in range(1, len(user_sheet)):
                user_name = user_sheet[i][0]
                out_id = user_sheet[i][1]
                our_id = user_sheet[i][3]
                if user_name is None or out_id is None or our_id is None:
                    continue
                user_dict[user_name] = str(out_id) + ',' + str(our_id)
            return user_dict
        except Exception as e:
            Common.logger(log_type).error(f'get_users_from_feishu exception: {e}\n')

    # Download rule: duration >= 60s and at least one side >= 720px.
    @classmethod
    def follow_download_rule(cls, duration, width, height):
        if int(duration) >= 60:
            if int(width) >= 720 or int(height) >= 720:
                return True
            else:
                return False
        else:
            return False

    # Page through an author's video feed and hand each video to download_publish.
    @classmethod
    def get_follow_feed(cls, log_type, out_id, our_id, user_name, env):
        try:
            while True:
                url = 'https://haokan.baidu.com/web/author/listall?'
                headers = {
                    'Accept': '*/*',
                    'Accept-Encoding': 'gzip, deflate',
                    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
                    'Cache-Control': 'no-cache',
                    'Connection': 'keep-alive',
                    'Content-Type': 'application/x-www-form-urlencoded',
                    'Cookie': Feishu.get_values_batch(log_type, 'haokan', '5LksMx')[0][0],
                    'Referer': 'https://haokan.baidu.com/author/' + str(out_id),
                    'Pragma': 'no-cache',
                    'sec-ch-ua': '"Not?A_Brand";v="8", "Chromium";v="108", "Microsoft Edge";v="108"',
                    'sec-ch-ua-mobile': '?0',
                    'sec-ch-ua-platform': '"macOS"',
                    'Sec-Fetch-Dest': 'empty',
                    'Sec-Fetch-Mode': 'cors',
                    'Sec-Fetch-Site': 'same-origin',
                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36 Edg/108.0.1462.76'
                }
                params = {
                    'app_id': str(out_id),
                    'ctime': cls.ctime,
                    'rn': '10',
                    'searchAfter': '',
                    '_api': '1'
                }
                urllib3.disable_warnings()
                response = requests.get(url=url, headers=headers, params=params, verify=False)
                if '"errno":0,' not in response.text:
                    Common.logger(log_type).warning(f'get_follow_feed:{response.text}\n')
                elif len(response.json()['data']['results']) == 0:
                    Common.logger(log_type).info(f'get_follow_feed:{response.json()}\n')
                    # Feed exhausted: reset the cursor and stop paging this author.
                    cls.ctime = ''
                    return
                else:
                    cls.ctime = response.json()['data']['ctime']
                    follow_feeds = response.json()['data']['results']
                    for i in range(len(follow_feeds)):
                        content = follow_feeds[i]['content']
                        # video_title
                        if 'title' not in content:
                            video_title = ''
                        else:
                            video_title = content['title']
                        # video_id
                        if 'vid' not in content:
                            video_id = ''
                        else:
                            video_id = content['vid']
                        # is_top
                        if 'is_show_feature' not in content:
                            is_top = ''
                        else:
                            is_top = content['is_show_feature']
                        # play_cnt
                        if 'playcnt' not in content:
                            play_cnt = ''
                        else:
                            play_cnt = content['playcnt']
                        # duration ("mm:ss") and duration_stamp (seconds)
                        if 'duration' not in content:
                            duration = ''
                            duration_stamp = ''
                        else:
                            duration = content['duration']
                            duration_stamp = int(duration.split(':')[0]) * 60 + int(duration.split(':')[-1])
                        # publish_time: human-readable string, e.g. "刚刚", "3小时前", "2022年01月13日"
                        if 'publish_time' not in content:
                            publish_time = ''
                        else:
                            publish_time = content['publish_time']
                        # publish_time_stamp: convert the relative/absolute string to a unix timestamp
                        if '刚刚' in publish_time:  # "just now"
                            publish_time_stamp = int(time.time())
                        elif '分钟前' in publish_time:  # "N minutes ago"
                            publish_time_stamp = int(time.time()) - int(publish_time.replace('分钟前', '')) * 60
                        elif '小时前' in publish_time:  # "N hours ago"
                            publish_time_stamp = int(time.time()) - int(publish_time.replace('小时前', '')) * 3600
                        elif '昨天' in publish_time:  # "yesterday"
                            publish_time_str = (datetime.date.today() + datetime.timedelta(days=-1)).strftime("%Y/%m/%d")
                            publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y/%m/%d")))
                        elif '天前' in publish_time:  # "N days ago"
                            today = datetime.date.today()
                            publish_time_date = today - datetime.timedelta(days=int(publish_time.replace('天前', '')))
                            publish_time_stamp = int(time.mktime(publish_time_date.timetuple()))
                        elif '年' in publish_time:  # full date, e.g. "2022年01月13日"
                            publish_time_str = publish_time.replace('年', '/').replace('月', '/').replace('日', '')
                            publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y/%m/%d")))
                        else:  # month/day of the current year, e.g. "01月13日"
                            publish_time_str = publish_time.replace('月', '/').replace('日', '')
                            this_year = datetime.datetime.now().year
                            publish_time_stamp = int(time.mktime(time.strptime(f"{this_year}/{publish_time_str}", "%Y/%m/%d")))
                        # cover_url
                        if 'cover_src' in content:
                            cover_url = content['cover_src']
                        elif 'cover_src_pc' in content:
                            cover_url = content['cover_src_pc']
                        elif 'poster' in content:
                            cover_url = content['poster']
                        else:
                            cover_url = ''

                        if is_top is True and int(time.time()) - publish_time_stamp >= 3600 * 24 * 30:
                            # Pinned videos may be old; skip them without ending pagination.
                            Common.logger(log_type).info(f'video_title:{video_title}')
                            Common.logger(log_type).info(f'Pinned video, published more than 30 days ago: {publish_time}\n')
                        elif int(time.time()) - publish_time_stamp >= 3600 * 24 * 30:
                            Common.logger(log_type).info(f'video_title:{video_title}')
                            Common.logger(log_type).info(f'Published more than 30 days ago: {publish_time}\n')
                            cls.ctime = ''
                            return
                        else:
                            video_info_dict = cls.get_video_url(log_type, video_id)
                            video_url = video_info_dict['video_url']
                            video_width = video_info_dict['video_width']
                            video_height = video_info_dict['video_height']
                            Common.logger(log_type).info(f'video_title:{video_title}')
                            # Common.logger(log_type).info(f'user_name:{user_name}')
                            # Common.logger(log_type).info(f'out_id:{out_id}')
                            # Common.logger(log_type).info(f'our_id:{our_id}')
                            # Common.logger(log_type).info(f'duration_stamp:{duration_stamp}')
                            Common.logger(log_type).info(f'duration:{duration}')
                            Common.logger(log_type).info(f'video_width:{video_width}')
                            Common.logger(log_type).info(f'video_height:{video_height}')
                            Common.logger(log_type).info(f'publish_time:{publish_time}')
                            Common.logger(log_type).info(f'video_url:{video_url}\n')

                            video_dict = {
                                'video_title': video_title,
                                'video_id': video_id,
                                'play_cnt': play_cnt,
                                'duration': duration,
                                'duration_stamp': duration_stamp,
                                'publish_time': publish_time,
                                'video_width': video_width,
                                'video_height': video_height,
                                'user_name': user_name,
                                'cover_url': cover_url,
                                'video_url': video_url
                            }
                            cls.download_publish(log_type, video_dict, our_id, env)
        except Exception as e:
            Common.logger(log_type).error(f'get_follow_feed exception: {e}\n')

    # Fetch the playable URL and resolution for a single video id.
    @classmethod
    def get_video_url(cls, log_type, video_id):
        try:
            url = 'https://haokan.hao123.com/v?'
            params = {
                'vid': str(video_id),
                '_format': 'json',
            }
            headers = {
                'Accept': '*/*',
                'Accept-Encoding': 'gzip, deflate, br',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
                'Cache-Control': 'no-cache',
                'Connection': 'keep-alive',
                'Content-Type': 'application/x-www-form-urlencoded',
                'Cookie': 'PC_TAB_LOG=video_details_page; COMMON_LID=b0be69dd9fcae328d06935bd40f615cd; Hm_lvt_4aadd610dfd2f5972f1efee2653a2bc5=1669029953; hkpcvideolandquery=%u82CF%u5DDE%u6700%u5927%u7684%u4E8C%u624B%u8F66%u8D85%u5E02%uFF0C%u8F6C%u4E00%u8F6C%u91CC%u8FB9%u8C6A%u8F66%u592A%u591A%u4E86%uFF0C%u4EF7%u683C%u66F4%u8BA9%u6211%u5403%u60CA%uFF01; Hm_lpvt_4aadd610dfd2f5972f1efee2653a2bc5=1669875695; ariaDefaultTheme=undefined; reptileData=%7B%22data%22%3A%22636c55e0319da5169a60acec4a264a35c10862f8abfe2f2cc32c55eb6b0ab4de0efdfa115ea522d6d4d361dea07feae2831d3e2c16ed6b051c611ffe5aded6c9f852501759497b9fbd2132a2160e1e40e5845b41f78121ddcc3288bd077ae4e8%22%2C%22key_id%22%3A%2230%22%2C%22sign%22%3A%22f6752aac%22%7D; RT="z=1&dm=hao123.com&si=uc0q7wnm4w&ss=lb4otu71&sl=j&tt=av0&bcn=https%3A%2F%2Ffclog.baidu.com%2Flog%2Fweirwood%3Ftype%3Dperf&ld=1rdw&cl=7v6c"',
                'Pragma': 'no-cache',
                'Referer': 'https://haokan.hao123.com/v?vid=10623278258033022286&pd=pc&context=',
                'sec-ch-ua': '"Microsoft Edge";v="107", "Chromium";v="107", "Not=A?Brand";v="24"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"macOS"',
                'Sec-Fetch-Dest': 'empty',
                'Sec-Fetch-Mode': 'cors',
                'Sec-Fetch-Site': 'same-origin',
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.62',
            }
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=params, verify=False)
            if r.status_code != 200:
                video_url = ''
                video_width = ''
                video_height = ''
                Common.logger(log_type).info(f'get_video_url_response:{r.text}')
            elif r.json()['errno'] != 0 or len(r.json()['data']) == 0:
                video_url = ''
                video_width = ''
                video_height = ''
                Common.logger(log_type).info(f'get_video_url_response:{r.json()}')
            else:
                # Take the last (highest) clarity entry; 'vodVideoHW' is "height$$width".
                clarity_url = r.json()['data']['apiData']['curVideoMeta']['clarityUrl']
                video_url = clarity_url[-1]['url']
                video_width = clarity_url[-1]['vodVideoHW'].split('$$')[-1]
                video_height = clarity_url[-1]['vodVideoHW'].split('$$')[0]
            video_info_dict = {
                'video_url': video_url,
                'video_width': video_width,
                'video_height': video_height
            }
            return video_info_dict
        except Exception as e:
            Common.logger(log_type).error(f'get_video_url exception: {e}\n')

    # Filter, download, upload and record a single video.
    @classmethod
    def download_publish(cls, log_type, video_dict, our_id, env):
        try:
            if video_dict['video_title'] == '' or video_dict['video_id'] == '' or video_dict['video_url'] == '':
                Common.logger(log_type).info('Invalid video\n')
            elif int(video_dict['duration_stamp']) < 60:
                Common.logger(log_type).info(f'Duration: {video_dict["duration_stamp"]}s < 60s\n')
            elif int(video_dict['video_width']) < 720 or int(video_dict['video_height']) < 720:
                Common.logger(log_type).info(f'{int(video_dict["video_width"])}*{int(video_dict["video_height"])} < 720P\n')
            elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type)):
                Common.logger(log_type).info('Title hit the filter-word list\n')
            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, 'haokan', '5pWipX') for x in y]:
                Common.logger(log_type).info('Video already downloaded\n')
            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, 'haokan', '7f05d8') for x in y]:
                Common.logger(log_type).info('Video already downloaded\n')
            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, 'haokan', 'kVaSjf') for x in y]:
                Common.logger(log_type).info('Video already downloaded\n')
            elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, 'haokan', 'A5VCbq') for x in y]:
                Common.logger(log_type).info('Video already downloaded\n')
            else:
                # Download cover and video files
                Common.download_method(log_type, 'cover', video_dict['video_title'], video_dict['cover_url'])
                Common.download_method(log_type, 'video', video_dict['video_title'], video_dict['video_url'])
                with open("./videos/" + video_dict['video_title'] + "/" + "info.txt", "a", encoding="UTF-8") as f_a:
                    f_a.write(str(video_dict['video_id']) + "\n" +
                              str(video_dict['video_title']) + "\n" +
                              str(video_dict['duration_stamp']) + "\n" +
                              '100000' + "\n" +
                              '100000' + "\n" +
                              '100000' + "\n" +
                              '100000' + "\n" +
                              '1920*1080' + "\n" +
                              str(int(time.time())) + "\n" +
                              str(video_dict['user_name']) + "\n" +
                              str(video_dict['cover_url']) + "\n" +
                              str(video_dict['video_url']) + "\n" +
                              str(video_dict['cover_url']) + "\n" +
                              "HAOKAN" + str(int(time.time())))
                Common.logger(log_type).info("========== Video info saved to info.txt ==========")

                # Upload
                Common.logger(log_type).info(f"Start uploading video: {video_dict['video_title']}")
                our_video_id = Publish.upload_and_publish(log_type, our_id, env)
                if env == 'dev':
                    our_video_link = "https://testadmin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info"
                else:
                    our_video_link = "https://admin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info"
                Common.logger(log_type).info(f"Video upload finished: {video_dict['video_title']}\n")

                # Save video info to the Feishu spreadsheet
                Common.logger(log_type).info(f"Saving video to the downloaded sheet: {video_dict['video_title']}")
                Feishu.insert_columns(log_type, "haokan", "kVaSjf", "ROWS", 1, 2)
                upload_time = int(time.time())
                values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(upload_time)),
                           '定向榜',  # sheet category value: "targeted list"
                           video_dict['video_title'],
                           video_dict['video_id'],
                           our_video_link,
                           int(video_dict['play_cnt']),
                           video_dict['duration'],
                           video_dict['publish_time'],
                           video_dict['video_width'] + "*" + video_dict['video_height'],
                           video_dict['user_name'],
                           video_dict['cover_url'],
                           video_dict['video_url']]]
                time.sleep(1)
                Feishu.update_values(log_type, "haokan", "kVaSjf", "F2:Z2", values)
                Common.logger(log_type).info(f"Video: {video_dict['video_title']} downloaded/uploaded successfully\n")
        except Exception as e:
            Common.logger(log_type).error(f'download_publish exception: {e}\n')

    # Iterate over every user from the Feishu sheet and crawl each one's feed.
    @classmethod
    def get_user_videos(cls, log_type, env):
        try:
            user_dict = cls.get_users_from_feishu(log_type)
            if len(user_dict) == 0:
                Common.logger(log_type).warning('User ID list is empty\n')
            else:
                for user_name, ids in user_dict.items():
                    out_id = ids.split(',')[0]
                    our_id = ids.split(',')[1]
                    Common.logger(log_type).info(f"Crawling videos from {user_name}'s profile\n")
                    cls.get_follow_feed(log_type, out_id, our_id, user_name, env)
                    Common.logger(log_type).info('Sleeping for 30 seconds\n')
                    time.sleep(30)
                    cls.ctime = ''
        except Exception as e:
            Common.logger(log_type).error(f'get_user_videos exception: {e}\n')


if __name__ == '__main__':
    print(Follow.get_users_from_feishu('follow'))