# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2022/11/23
import os
import sys
import time

import requests
import urllib3

sys.path.append(os.getcwd())
from main.common import Common
from main.feishu_lib import Feishu
from main.haokan_publish import Publish

# Bypass any system-level HTTP(S) proxy for every request in this module.
proxies = {'http': None, 'https': None}


class Channel:
    """Crawl videos from haokan.baidu.com channel feeds, download the ones
    that pass the play-count/duration rule, publish them, and record them
    in Feishu sheets."""

    @classmethod
    def download_rule(cls, play_cnt, duration):
        """Return True when the video passes the crawl thresholds.

        :param play_cnt: play count (int or numeric str); must be >= 50000
        :param duration: duration in seconds (int or numeric str); must be >= 60
        """
        # Original nested ifs collapsed into one boolean with the same truth table.
        return int(play_cnt) >= 50000 and int(duration) >= 60

    @classmethod
    def get_channel_from_feishu(cls, log_type):
        """Read the channel list from the Feishu sheet 'TaQXk3'.

        Returns a dict mapping user_name -> "out_id,our_id".
        On error returns an empty dict (the original returned None, which made
        get_all_channel_videos crash on len(None) with a TypeError).
        """
        try:
            user_sheet = Feishu.get_values_batch(log_type, 'haokan', 'TaQXk3')
            user_dict = {}
            # NOTE(review): deliberately limited to the first 10 data rows;
            # the full-sheet loop is kept for reference:
            # for i in range(1, len(user_sheet)):
            for i in range(1, 11):
                user_name = user_sheet[i][0]
                out_id = user_sheet[i][1]
                our_id = user_sheet[i][3]
                # Skip incomplete rows. (The original also skipped i == 13,
                # unreachable with the current range; kept for parity.)
                if user_name is None or out_id is None or our_id is None or i == 13:
                    continue
                user_dict[user_name] = str(out_id) + ',' + str(our_id)
            return user_dict
        except Exception as e:
            Common.logger(log_type).error(f'get_tab_from_feishu异常:{e}\n')
            return {}

    @classmethod
    def get_channel_feeds(cls, log_type, tab):
        """Fetch one page (20 items) of the PC feed for channel *tab*.

        Returns the list of video dicts, or None on error/empty response
        (errors are logged, not raised).
        """
        try:
            url = "https://haokan.baidu.com/web/video/feed?"
            params = {
                'tab': str(tab),
                'act': 'pcFeed',
                'pd': 'pc',
                'num': '20',
                'shuaxin_id': '16698987960000',
            }
            headers = {
                'Accept': '*/*',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
                'Cache-Control': 'no-cache',
                'Connection': 'keep-alive',
                'Content-Type': 'application/x-www-form-urlencoded',
                'Cookie': 'BIDUPSID=0C817797C726E2312710D870ECDAE8A2; PSTM=1669001132; BAIDUID=AB7069CAF9ECB7AA43E400D164119733:FG=1; Hm_lvt_4aadd610dfd2f5972f1efee2653a2bc5=1669029805; PC_TAB_LOG=video_details_page; COMMON_LID=88bc9b0fbce964fbb6a76cfd7927d02b; hkpcSearch=%u517B%u751F; hkpcvideolandquery=%u80D6%u5B50%u56E0%u54BD%u5589%u5F02%u7269%u5C31%u8BCA%u80C3%u98DF%u7BA1%u53CD%u6D41%u79D1%uFF0C%u533B%u751F%u5EFA%u8BAE%u5176%u4FEE%u8EAB%u517B%u6027%uFF0C%u5F53%u573A%u61F5%u4E86; BDSFRCVID=rlFOJeCmHRhWneoj7IiJtKf1EeKK0gOTHllnoMiRAPOFYGCVJeC6EG0Ptf8g0KubuTkzogKK0gOTH6KF_2uxOjjg8UtVJeC6EG0Ptf8g0M5; H_BDCLCKID_SF=tR333R7oKRu_HRjYbb__-P4DePAttURZ56bHWh0M3b61qRcIh4ob5MPEDto-BMPj52OnKUT13lc5h4jX0P7_KRtr346-35543bRTLn76LRv0Kj6HybOfhP-UyN3LWh37bJblMKoaMp78jR093JO4y4Ldj4oxJpOJ5JbMopCafD8ahI86D6K2entebl8Xt6_Df4o2WDv-apQcOR5Jhf7CMh-HbfnBtMc0Wjni0DjN5lvvhb3O3M7Sh-CeXfrN3lO93H5x5MQF5l8-sq0x0bOte-bQypoa-l3WMDOMahkM5h7xOKQoQlPK5JkgMx6MqpQJQeQ-5KQN3KJmfbL9bT3YjjISKx-_t60OtxK; H_PS_PSSID=37857_36557_37769_37841_37766_37866_36806_37760_37759_26350_37787; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; BAIDUID_BFESS=AB7069CAF9ECB7AA43E400D164119733:FG=1; BDSFRCVID_BFESS=rlFOJeCmHRhWneoj7IiJtKf1EeKK0gOTHllnoMiRAPOFYGCVJeC6EG0Ptf8g0KubuTkzogKK0gOTH6KF_2uxOjjg8UtVJeC6EG0Ptf8g0M5; H_BDCLCKID_SF_BFESS=tR333R7oKRu_HRjYbb__-P4DePAttURZ56bHWh0M3b61qRcIh4ob5MPEDto-BMPj52OnKUT13lc5h4jX0P7_KRtr346-35543bRTLn76LRv0Kj6HybOfhP-UyN3LWh37bJblMKoaMp78jR093JO4y4Ldj4oxJpOJ5JbMopCafD8ahI86D6K2entebl8Xt6_Df4o2WDv-apQcOR5Jhf7CMh-HbfnBtMc0Wjni0DjN5lvvhb3O3M7Sh-CeXfrN3lO93H5x5MQF5l8-sq0x0bOte-bQypoa-l3WMDOMahkM5h7xOKQoQlPK5JkgMx6MqpQJQeQ-5KQN3KJmfbL9bT3YjjISKx-_t60OtxK; BD_SVTK=WteO3EWWnfkYlotW3WZa108dO0fwiY849lYv08f7ZWAoQnfD9fOg0k0Dj0te0O0w0kgA1AQaTA59QnfD9tFqJtlhPiS5pxiXSpxglSpmPlHdlOlSSlO90f0GYlfnljpZS8OSf8tleWEAQgiY0lTU8flWPltllT3ok0nS0eIbkaamB; RT="sl=4&ss=lb52jzr9&tt=a8n&bcn=https%3A%2F%2Ffclog.baidu.com%2Flog%2Fweirwood%3Ftype%3Dperf&z=1&dm=baidu.com&si=3pq65xfunw1"; Hm_lpvt_4aadd610dfd2f5972f1efee2653a2bc5=1669898797; ariaDefaultTheme=undefined; ab_sr=1.0.1_N2FiMzcwMTJjMGYxMGU3MDEyOGJmMjAyODExMjlhZWVjMWZiY2MzYmJkY2NhNWQ1NDlhN2U0MDlkODU0NmIxOGJjNDA5ODlmMzJkMzg1MTE1OWNhOGI4MWViNGZkZmFlYjRjOTM1NjhiMjFkNTJjYmY4MTBjNmFjNzVlMTc5YzJiN2E4OThlMTUzNTg3Y2Q3ZWMxOGY4NDc3YTA3N2I5OA==; reptileData=%7B%22data%22%3A%22636c55e0319da5169a60acec4a264a35c10862f8abfe2f2cc32c55eb6b0ab4de0efdfa115ea522d6d4d361dea07feae2831d3e2c16ed6b051c611ffe5aded6c9024ac4e54798b9fa3db23f72cf5ac856e4ae6c93cef5a6a27cc527a65db8d720%22%2C%22key_id%22%3A%2230%22%2C%22sign%22%3A%228c6a2016%22%7D',
                'Pragma': 'no-cache',
                'Referer': 'https://haokan.baidu.com/tab/recommend',
                'Sec-Fetch-Dest': 'empty',
                'Sec-Fetch-Mode': 'cors',
                'Sec-Fetch-Site': 'same-origin',
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.52',
                'sec-ch-ua': '"Microsoft Edge";v="107", "Chromium";v="107", "Not=A?Brand";v="24"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"macOS"'
            }
            # verify=False below -> suppress InsecureRequestWarning spam.
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=params, proxies=proxies, verify=False)
            # Parse the body once (the original called r.json() up to 4 times).
            resp = r.json()
            if resp['errno'] != 0 or resp['errmsg'] != '成功':
                Common.logger(log_type).error(f'feeds_response:{resp}\n')
            elif len(resp['data']['response']['videos']) == 0:
                Common.logger(log_type).warning(f'feeds_response:{resp}\n')
            else:
                return resp['data']['response']['videos']
        except Exception as e:
            Common.logger(log_type).error(f'get_channel_feeds异常:{e}\n')

    @classmethod
    def get_video_url(cls, log_type, video_id):
        """Resolve the playable URL for *video_id* via haokan.hao123.com.

        Picks the last entry of clarityUrl (presumably the highest clarity —
        TODO confirm against the API). Returns the URL string, or None on
        any error (logged, not raised).
        """
        try:
            url = 'https://haokan.hao123.com/v?'
            params = {
                'vid': str(video_id),
                '_format': 'json',
            }
            headers = {
                'Accept': '*/*',
                'Accept-Encoding': 'gzip, deflate, br',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
                'Cache-Control': 'no-cache',
                'Connection': 'keep-alive',
                'Content-Type': 'application/x-www-form-urlencoded',
                # Cookie is maintained in Feishu sheet '5LksMx', cell A1.
                'Cookie': Feishu.get_values_batch(log_type, 'haokan', '5LksMx')[0][0],
                'Pragma': 'no-cache',
                'Referer': 'https://haokan.hao123.com/v?vid=10623278258033022286&pd=pc&context=',
                'sec-ch-ua': '"Microsoft Edge";v="107", "Chromium";v="107", "Not=A?Brand";v="24"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"macOS"',
                'Sec-Fetch-Dest': 'empty',
                'Sec-Fetch-Mode': 'cors',
                'Sec-Fetch-Site': 'same-origin',
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.62',
            }
            r = requests.get(url=url, headers=headers, params=params, proxies=proxies, verify=False)
            if r.status_code != 200:
                Common.logger(log_type).info(f'get_video_url_response:{r.text}')
                return None
            resp = r.json()
            if resp['errno'] != 0 or len(resp['data']) == 0:
                Common.logger(log_type).info(f'get_video_url_response:{resp}')
                return None
            clarity_list = resp['data']['apiData']['curVideoMeta']['clarityUrl']
            # [-1] replaces the original clarity_list[len(clarity_list) - 1].
            return clarity_list[-1]['url']
        except Exception as e:
            Common.logger(log_type).info(f'get_video_url异常:{e}\n')

    @classmethod
    def get_channel_videos(cls, log_type, tab, our_id, env):
        """Fetch one feed page for *tab* and run each item through
        download_publish."""
        try:
            feeds = cls.get_channel_feeds(log_type, tab)
            # get_channel_feeds returns None on error/empty response; the
            # original crashed here on len(None).
            if not feeds:
                return
            for feed in feeds:
                # Missing fields default to 0 (the sentinel download_publish
                # checks for).
                video_title = feed.get('title', 0)
                video_id = feed.get('id', 0)
                play_cnt = feed.get('playcnt', 0)
                # duration arrives as "mm:ss"; convert to seconds.
                if 'duration' not in feed:
                    duration = 0
                else:
                    parts = feed['duration'].split(':')
                    duration = int(parts[0]) * 60 + int(parts[-1])
                publish_time = feed.get('publish_time', 0)
                user_name = feed.get('source_name', 0)
                head_url = feed.get('author_avatar', 0)
                # Prefer the largest poster available.
                if 'poster_big' in feed:
                    cover_url = feed['poster_big']
                elif 'poster_pc' in feed:
                    cover_url = feed['poster_pc']
                elif 'poster_small' in feed:
                    cover_url = feed['poster_small']
                else:
                    cover_url = 0
                # Prefer the resolved URL; fall back to the feed's play_url.
                fetched_url = cls.get_video_url(log_type, video_id)
                if fetched_url is not None:
                    video_url = fetched_url
                elif 'play_url' in feed:
                    video_url = feed['play_url']
                else:
                    video_url = 0
                Common.logger(log_type).info(f'video_title:{video_title}')
                Common.logger(log_type).info(f'play_cnt:{play_cnt}')
                Common.logger(log_type).info(f'duration:{duration}')
                Common.logger(log_type).info(f'video_url:{video_url}')
                video_dict = {'video_title': video_title,
                              'video_id': video_id,
                              'play_cnt': play_cnt,
                              'duration': duration,
                              'publish_time': publish_time,
                              'user_name': user_name,
                              'head_url': head_url,
                              'cover_url': cover_url,
                              'video_url': video_url}
                cls.download_publish(log_type, tab, our_id, video_dict, env)
        except Exception as e:
            Common.logger(log_type).error(f'get_channel_videos异常:{e}\n')

    @classmethod
    def download_publish(cls, log_type, tab, our_id, video_dict, env):
        """Validate one video, download its cover+file, publish it, and log
        the result row into the Feishu '7f05d8' sheet."""
        try:
            # Guard clauses replace the original if/elif ladder; order and
            # short-circuiting are preserved.
            if video_dict['video_title'] == 0 or video_dict['video_url'] == 0:
                Common.logger(log_type).info('无效视频\n')
                return
            if cls.download_rule(video_dict['play_cnt'], video_dict['duration']) is False:
                Common.logger(log_type).info('不满足抓取规则\n')
                return
            # Already-downloaded check against three Feishu sheets (was three
            # copy-pasted elif branches); stops at the first sheet that hits.
            for sheet_id in ('5pWipX', '7f05d8', 'A5VCbq'):
                downloaded_ids = [x for y in Feishu.get_values_batch(log_type, 'haokan', sheet_id) for x in y]
                if video_dict['video_id'] in downloaded_ids:
                    Common.logger(log_type).info('视频已下载\n')
                    return

            # 下载 (download cover + video, then write the sidecar info.txt)
            Common.download_method(log_type, 'cover', video_dict['video_title'], video_dict['cover_url'])
            Common.download_method(log_type, 'video', video_dict['video_title'], video_dict['video_url'])
            with open("./videos/" + video_dict['video_title'] + "/" + "info.txt", "a", encoding="UTF-8") as f_a:
                f_a.write(str(video_dict['video_id']) + "\n" +
                          str(video_dict['video_title']) + "\n" +
                          str(video_dict['duration']) + "\n" +
                          '0' + "\n" +
                          '0' + "\n" +
                          '0' + "\n" +
                          '0' + "\n" +
                          '1920*1080' + "\n" +
                          str(int(time.time())) + "\n" +
                          str(video_dict['user_name']) + "\n" +
                          str(video_dict['head_url']) + "\n" +
                          str(video_dict['video_url']) + "\n" +
                          str(video_dict['cover_url']) + "\n" +
                          "HAOKAN" + str(int(time.time())))
            Common.logger(log_type).info("==========视频信息已保存至info.txt==========")

            # 上传 (publish; identical call in both envs, only the admin link differs)
            Common.logger(log_type).info(f"开始上传视频:{video_dict['video_title']}")
            our_video_id = Publish.upload_and_publish(log_type, our_id, env)
            if env == 'dev':
                our_video_link = "https://testadmin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info"
            else:
                our_video_link = "https://admin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info"
            Common.logger(log_type).info(f"视频上传完成:{video_dict['video_title']}\n")

            # 保存视频信息至云文档 (append the result row to Feishu)
            Common.logger(log_type).info(f"保存视频至已下载表:{video_dict['video_title']}")
            Feishu.insert_columns(log_type, "haokan", "7f05d8", "ROWS", 1, 2)
            upload_time = int(time.time())
            # Human-readable ranking-sheet name per tab (was a 10-branch
            # elif chain with identical mapping).
            tab_names = {
                'recommend': '播放量榜_首页频道',
                'yinyue_new': '播放量榜_音乐频道',
                'gaoxiao_new': '播放量榜_搞笑频道',
                'zongyi_new': '播放量榜_综艺频道',
                'shenghuo_new': '播放量榜_生活频道',
                'meishi_new': '播放量榜_美食频道',
                'sannong_new': '播放量榜_三农频道',
                'junshi_new': '播放量榜_军事频道',
                'shehui_new': '播放量榜_社会频道',
                'keji_new': '播放量榜_科技频道',
            }
            tab = tab_names.get(tab, '播放量榜')
            values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(upload_time)),
                       tab,
                       video_dict['video_title'],
                       video_dict['video_id'],
                       our_video_link,
                       int(video_dict['play_cnt']),
                       video_dict['duration'],
                       video_dict['publish_time'],
                       video_dict['user_name'],
                       video_dict['head_url'],
                       video_dict['cover_url'],
                       video_dict['video_url']]]
            time.sleep(1)
            Feishu.update_values(log_type, "haokan", "7f05d8", "F2:Z2", values)
            Common.logger(log_type).info(f"视频:{video_dict['video_title']},下载/上传成功\n")
        except Exception as e:
            Common.logger(log_type).error(f'download_publish异常:{e}\n')

    @classmethod
    def get_all_channel_videos(cls, log_type, env):
        """Iterate every channel from the Feishu sheet and crawl it, pausing
        10s between channels."""
        try:
            channel_dict = cls.get_channel_from_feishu(log_type)
            # Falsy check covers both {} and a None from older callers.
            if not channel_dict:
                Common.logger(log_type).warning('频道数量为空\n')
            else:
                for user_name, ids in channel_dict.items():
                    Common.logger(log_type).info(f'正在获取 {user_name} 频道视频\n')
                    # ids is "tab,our_id" as packed by get_channel_from_feishu.
                    tab, our_id = ids.split(',')[0], ids.split(',')[1]
                    cls.get_channel_videos(log_type, tab, our_id, env)
                    time.sleep(10)
        except Exception as e:
            Common.logger(log_type).error(f'get_all_channel_videos异常:{e}\n')


if __name__ == '__main__':
    Channel.get_channel_videos('channel', 'keji_new', '6267140', 'dev')