# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/1/31
"""
看一看+小程序: 朋友圈榜单
Crawls the recommendation list of a Kanyikan mini-program "moments" video,
filters the entries, then downloads/uploads the survivors.
"""
import time

import requests
import urllib3

from common.common import Common
from common.feishu import Feishu
from common.publish import Publish

# Explicitly bypass any system/environment HTTP(S) proxies.
proxies = {"http": None, "https": None}


class Moment:
    """Kanyikan moments-ranking crawler: fetch, filter, download, publish."""

    # 抓取基础规则
    @staticmethod
    def download_rule(video_dict):
        """Return True when the video passes the basic crawl rule.

        Effective rule: duration >= 60 seconds AND play count >= 100000.
        The width/height/like/share comparisons against 0 can only reject
        negative values; they are kept for parity with the original checks.
        """
        return (
            int(float(video_dict['duration'])) >= 60
            and (int(video_dict['video_width']) >= 0 or int(video_dict['video_height']) >= 0)
            and int(video_dict['play_cnt']) >= 100000
            and int(video_dict['like_cnt']) >= 0
            and int(video_dict['share_cnt']) >= 0
        )

    # 获取推荐视频列表
    @classmethod
    def get_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine, moment_video_id):
        """Fetch the recommendation list for ``moment_video_id`` and crawl each entry.

        Each recommended feed item is normalized into a ``video_dict``
        (missing fields default to the sentinel 0), then filtered by
        validity, the basic rule, publish time, the Feishu word blacklist,
        and the already-downloaded sheet before ``download_publish`` runs.
        ``machine`` is accepted for interface compatibility but unused here.
        """
        url = "https://search.weixin.qq.com/cgi-bin/recwxa/snsgetvideoinfo?"
        headers = {
            "content-type": "application/json",
            "Accept-Encoding": "gzip,compress,br,deflate",
            "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)"
                          " AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148"
                          " MicroMessenger/8.0.20(0x18001442) NetType/WIFI Language/zh_CN",
            "Referer": "https://servicewechat.com/wxbb9a805eb4f9533c/236/page-frame.html"
        }
        # videoid = random.choice(Feishu.get_sheet_content(log_type, crawler, 'iK58HX'))
        params = {
            "vid": moment_video_id,
            "openid": "1924336296754305",
            "model": "iPhone 1114.7.1",
            "sharesearchid": "8406805193800900989",
            "shareOpenid": "oh_m45YffSEGxvDH--6s6g9ZkPxg",
        }
        try:
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=params, proxies=proxies, verify=False)
            if r.status_code != 200:
                Common.logger(log_type, crawler).warning(f"response.status_code:{r.status_code}")
                Common.logger(log_type, crawler).warning(f"response.text:{r.text}\n")
            elif r.json()["errcode"] != 0:
                Common.logger(log_type, crawler).warning(f"msg:{r.json()['msg']}\n")
            elif "rec_video_list" not in r.json()["data"]:
                Common.logger(log_type, crawler).warning(f"该视频没有推荐列表\n")
            else:
                feeds = r.json()["data"]["rec_video_list"]
                for feed in feeds:
                    # Missing fields default to 0 — the sentinel later treated as "无效视频".
                    video_id = feed.get("vid", 0)
                    # video_title: strip characters unsafe for filenames/sheets.
                    if "title" in feed:
                        video_title = feed["title"].strip().replace("\n", "") \
                            .replace("/", "").replace("\\", "").replace("\r", "") \
                            .replace(":", "").replace("*", "").replace("?", "") \
                            .replace("?", "").replace('"', "").replace("<", "") \
                            .replace(">", "").replace("|", "").replace(" ", "") \
                            .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
                            .replace("小年糕", "").replace("#", "").replace("Merge", "")
                    else:
                        video_title = 0
                    video_play_cnt = feed.get("played_cnt", 0)
                    video_comment_cnt = feed.get("comment_cnt", 0)
                    video_liked_cnt = feed.get("liked_cnt", 0)
                    video_share_cnt = feed.get("shared_cnt", 0)
                    video_duration = feed.get("duration", 0)
                    # FIX: the old code indexed both keys whenever EITHER was
                    # present, raising KeyError (and aborting the loop) when
                    # only one of width/height existed.
                    video_width = feed.get("width", 0)
                    video_height = feed.get("height", 0)
                    publish_time = feed.get("upload_time", 0)
                    # Author info lives under an optional "user_info" object.
                    user_info = feed.get("user_info", {})
                    if "nickname" in user_info:
                        user_name = user_info["nickname"].strip().replace("\n", "")
                    else:
                        user_name = 0
                    user_id = user_info.get("openid", 0)
                    avatar_url = user_info.get("headimg_url", 0)
                    cover_url = feed.get("cover_url", 0)
                    # Highest-quality stream is the last item of play_info.items.
                    play_info = feed.get("play_info", {})
                    if "items" in play_info:
                        video_url = play_info["items"][-1]["play_url"]
                    else:
                        video_url = 0
                    video_dict = {
                        'video_id': video_id,
                        'video_title': video_title,
                        'duration': video_duration,
                        'play_cnt': video_play_cnt,
                        'comment_cnt': video_comment_cnt,
                        'like_cnt': video_liked_cnt,
                        'share_cnt': video_share_cnt,
                        'video_width': video_width,
                        'video_height': video_height,
                        'publish_time': publish_time,
                        'user_name': user_name,
                        'user_id': user_id,
                        'avatar_url': avatar_url,
                        'video_url': video_url,
                        'cover_url': cover_url,
                        'session': f'kanyikan_moment_{int(time.time())}',
                    }
                    Common.logger(log_type, crawler).info(f"video_title:{video_title}")
                    Common.logger(log_type, crawler).info(f"video_play_cnt:{video_play_cnt}")
                    Common.logger(log_type, crawler).info(f"video_duration:{video_duration}")
                    Common.logger(log_type, crawler).info(f"video_url:{video_url}")
                    # 过滤无效视频
                    if video_id == 0 or video_title == 0 or video_duration == 0 or publish_time == 0 or user_id == 0 \
                            or avatar_url == 0 or cover_url == 0 or video_url == 0:
                        Common.logger(log_type, crawler).warning("无效视频\n")
                    # 抓取基础规则
                    elif cls.download_rule(video_dict) is False:
                        Common.logger(log_type, crawler).info("不满足基础规则\n")
                    # 视频发布时间不早于 2022-08-01
                    elif int(publish_time) < 1659283200:
                        Common.logger(log_type, crawler).info(f'发布时间{time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(publish_time))} < 2022-08-01\n')
                    # 过滤词库 (`word and ...` skips empty blacklist cells, as before)
                    elif any(word and word in video_title for word in Feishu.get_sheet_content(log_type, crawler, 'rofdM5')):
                        Common.logger(log_type, crawler).info("视频已中过滤词\n")
                    # 已下载视频表去重
                    elif video_id in [j for m in Feishu.get_values_batch(log_type, crawler, "20ce0c") for j in m]:
                        Common.logger(log_type, crawler).info("视频已下载\n")
                    else:
                        cls.download_publish(log_type, crawler, strategy, oss_endpoint, env, video_dict)
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_videos异常:{e}\n")

    # 下载/上传视频
    @classmethod
    def download_publish(cls, log_type, crawler, strategy, oss_endpoint, env, video_dict):
        """Download cover+video for ``video_dict``, upload it, then record the row in Feishu."""
        try:
            # 过滤空行及空标题视频
            if video_dict['video_id'] == 0 \
                    or video_dict['video_title'] == 0 \
                    or video_dict['video_url'] == 0:
                Common.logger(log_type, crawler).info("无效视频\n")
            # # 视频的抓取时间小于 2 天
            # elif int(time.time()) - v_push_time > 172800:
            #     Common.logger(log_type, crawler).info("抓取时间超过2天:{}", video_dict['video_title'])
            #     # 删除行或列,可选 ROWS、COLUMNS
            #     Feishu.dimension_range("tGqZMX", "ROWS", i + 1, i + 1)
            #     return
            # 视频发布时间不小于 2021-06-01 00:00:00
            elif video_dict['publish_time'] < 1622476800:
                Common.logger(log_type, crawler).info(f'发布时间{time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(video_dict["publish_time"]))} < 2021-06-01\n')
            else:
                # 下载封面
                Common.download_method(log_type=log_type, crawler=crawler, text="cover",
                                       title=video_dict['video_title'], url=video_dict['cover_url'])
                # 下载视频
                Common.download_method(log_type=log_type, crawler=crawler, text="video",
                                       title=video_dict['video_title'], url=video_dict['video_url'])
                # 保存视频信息至 "./{crawler}/videos/{video_dict['video_title']}/info.txt"
                Common.save_video_info(log_type, crawler, video_dict)
                # 上传视频
                Common.logger(log_type, crawler).info(f"开始上传视频:{video_dict['video_title']}")
                our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                          crawler=crawler,
                                                          strategy=strategy,
                                                          oss_endpoint=oss_endpoint,
                                                          our_uid="kanyikan_moment",
                                                          env=env)
                if env == 'dev':
                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                else:
                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                Common.logger(log_type, crawler).info(f"视频上传完成:{video_dict['video_title']}")
                # 保存视频 ID 到云文档
                Common.logger(log_type, crawler).info(f"保存视频ID至云文档:{video_dict['video_title']}")
                # 视频ID工作表,插入首行
                Feishu.insert_columns(log_type, crawler, "20ce0c", "ROWS", 1, 2)
                # 视频ID工作表,首行写入数据
                upload_time = int(time.time())
                values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(upload_time)),
                           "朋友圈",
                           str(video_dict['video_id']),
                           str(video_dict['video_title']),
                           our_video_link,
                           video_dict['play_cnt'],
                           video_dict['comment_cnt'],
                           video_dict['like_cnt'],
                           video_dict['share_cnt'],
                           video_dict['duration'],
                           f"{video_dict['video_width']}*{video_dict['video_height']}",
                           time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(video_dict["publish_time"])),
                           video_dict['user_name'],
                           video_dict['user_id'],
                           # FIX: the dict built by get_videos uses key 'avatar_url';
                           # the old 'head_url' lookup raised KeyError, so this row
                           # write always failed silently inside the broad except.
                           video_dict['avatar_url'],
                           video_dict['cover_url'],
                           video_dict['video_url']]]
                time.sleep(1)
                Feishu.update_values(log_type, crawler, "20ce0c", "F2:W2", values)
                Common.logger(log_type, crawler).info('下载/上传成功\n')
        except Exception as e:
            Common.logger(log_type, crawler).error(f"download_publish异常:{e}\n")


if __name__ == "__main__":
    kanyikan_moment = Moment()
    pass