# -*- coding: utf-8 -*- # @Author: wangkun # @Time: 2022/8/1 # import time # import base64 import json import os import time # import urllib.parse import requests import urllib3 from crawler_gzh.main.common import Common from crawler_gzh.main.feishu_lib import Feishu from crawler_gzh.main.publish import Publish proxies = {"http": None, "https": None} class Recommend: # 获取 token,保存至飞书云文档 @classmethod def get_token(cls, log_type): # charles 抓包文件保存目录 charles_file_dir = "./crawler-kanyikan-recommend/chlsfiles/" if int(len(os.listdir(charles_file_dir))) == 1: Common.logger(log_type).info("未找到chlsfile文件,等待60s") time.sleep(60) else: try: # 目标文件夹下所有文件 all_file = sorted(os.listdir(charles_file_dir)) # 获取到目标文件 old_file = all_file[-1] # 分离文件名与扩展名 new_file = os.path.splitext(old_file) # 重命名文件后缀 os.rename(os.path.join(charles_file_dir, old_file), os.path.join(charles_file_dir, new_file[0] + ".txt")) with open(charles_file_dir + new_file[0] + ".txt", encoding='utf-8-sig', errors='ignore') as f: contents = json.load(f, strict=False) # 定义需要返回的列表 request_info = [] for content in contents: if "mp.weixin.qq.com" in content['host']: if content["path"] == r"/mp/getappmsgext": headers = content["request"]["header"]["headers"] title = content["request"]["body"]["text"].split("title=")[-1].split("&ct=")[0] vid = content["request"]["body"]["text"].split("vid=")[-1].split("&is_pay_subscribe")[0] request_info.append(title) request_info.append(vid) for h in headers: if h["name"] == "cookie" and "pass_ticket" in h["value"]: pass_ticket = h["value"].split("pass_ticket=")[-1] # print(f"pass_ticket:{pass_ticket}") request_info.append(pass_ticket) if h["name"] == "referer": __biz = h["value"].split("__biz=")[-1].split("&mid=")[0] # print(f"__biz:{__biz}") request_info.append(__biz) if h["name"] == "cookie" and "appmsg_token" in h["value"]: appmsg_token = h["value"].split("appmsg_token=")[-1] # print(f"appmsg_token:{appmsg_token}") request_info.append(appmsg_token) if h["name"] == "cookie" and "wap_sid2" in h["value"]: wap_sid2 = h["value"].split("wap_sid2=")[-1] # print(f"wap_sid2:{wap_sid2}") request_info.append(wap_sid2) return request_info except Exception as e: Common.logger(log_type).error("获取session异常,30s后重试:{}", e) time.sleep(30) cls.get_token(log_type) # 获取推荐列表 @classmethod def get_recommend(cls, log_type): try: params = cls.get_token(log_type) if params is None: Common.logger(log_type).info("未获取到token等信息,30s后重试") time.sleep(30) cls.get_recommend(log_type) else: title = params[0] vid = params[1] __biz = params[2] appmsg_token = params[3] pass_ticket = params[4] wap_sid2 = params[5] url = "https://mp.weixin.qq.com/mp/getappmsgext?" headers = { "content-type": "application/x-www-form-urlencoded; charset=UTF-8", "accept": "*/*", "x-requested-with": "XMLHttpRequest", "accept-language": "zh-cn", "accept-encoding": "gzip, deflate, br", "origin": "https://mp.weixin.qq.com", "user-agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 " "(KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.26(0x18001a29)" " NetType/WIFI Language/zh_CN", "referer": "https://mp.weixin.qq.com/s?" "__biz=Mzg5MDY2NzY5Nw==" "&mid=2247484710" "&idx=1" "&sn=657a341da42ed071aaa4d3ce853f64f2" "&chksm=cfd852f8f8afdbeef513340dec8702433bd78137e7b4afb665d1de5014dc6837ed4dcc979684" "&sessionid=1659509075" "&channel_session_id=" "&scene=136" "&subscene=" "&exptype=" "&reloadid=1659509075" "&reloadseq=2" "&related_video_source=10" "&ascene=1" "&devicetype=iOS14.7.1" "&version=18001a29" "&nettype=WIFI" "&abtest_cookie=AAACAA%3D%3D" "&lang=zh_CN" "&session_us=gh_7364edd0ca9f" "&fontScale=100" "&exportkey=AdT9lhjADCG9r69d1meNZ5c%3D" "&pass_ticket=" + pass_ticket + "&wx_header=3", } query_string = { "f": "json", "mock": "", "uin": "777", "key": "777", "pass_ticket": pass_ticket, "wxtoken": "", "devicetype": "iOS14.7.1", "clientversion": "18001a29", "__biz": __biz, "appmsg_token": appmsg_token, "x5": "0", # "f": "json" } cookies = { "appmsg_token": appmsg_token, "devicetype": "iOS14.7.1", "lang": "zh_CN", "pass_ticket": pass_ticket, "rewardsn": "", "version": "18001a29", "wap_sid2": wap_sid2, "wxtokenkey": "777", "wxuin": "2010747860" } form = { "r": "0.13440037781889225", "__biz": __biz, "appmsg_type": "9", "mid": "2247484710", "sn": "657a341da42ed071aaa4d3ce853f64f2", "idx": "1", "scene": "136", "title": title, "ct": "1654824718", "abtest_cookie": "AAACAA==", "devicetype": "iOS14.7.1", "version": "18001a29", "is_need_ticket": "0", "is_need_ad": "1", "comment_id": "0", "is_need_reward": "0", "both_ad": "0", "reward_uin_count": "0", "send_time": "", "msg_daily_idx": "1", "is_original": "0", "is_only_read": "1", "req_id": "0314yH9rphN660ejUCz1hRVD", "pass_ticket": pass_ticket, "is_temp_url": "0", "item_show_type": "5", "tmp_version": "1", "more_read_type": "0", "appmsg_like_type": "2", "related_video_sn": "", "related_video_num": "5", "vid": vid, "is_pay_subscribe": "0", "pay_subscribe_uin_count": "0", "has_red_packet_cover": "0", "album_id": "1296223588617486300", "album_video_num": "5", "cur_album_id": "", "is_public_related_video": "0", "encode_info_by_base64": "0", "exptype": "" } urllib3.disable_warnings() response = requests.post(url=url, headers=headers, cookies=cookies, params=query_string, data=form, verify=False) if "related_tag_video" not in response.json(): Common.logger(log_type).warning("response:{}\n", response.text) elif len(response.json()["related_tag_video"]) == 0: Common.logger(log_type).warning("response:{}\n", response.text) time.sleep(10) cls.get_recommend(log_type) else: feeds = response.json()["related_tag_video"] for m in range(len(feeds)): # video_title if "title" not in feeds[m]: video_title = 0 else: video_title = feeds[m]["title"] # video_title = base64.b64decode(video_title).decode("utf-8") # video_id if "vid" not in feeds[m]: video_id = 0 else: video_id = feeds[m]["vid"] # play_cnt if "read_num" not in feeds[m]: play_cnt = 0 else: play_cnt = feeds[m]["read_num"] # like_cnt if "like_num" not in feeds[m]: like_cnt = 0 else: like_cnt = feeds[m]["like_num"] # duration if "duration" not in feeds[m]: duration = 0 else: duration = feeds[m]["duration"] # video_width / video_height if "videoWidth" not in feeds[m] or "videoHeight" not in feeds[m]: video_width = 0 video_height = 0 else: video_width = feeds[m]["videoWidth"] video_height = feeds[m]["videoHeight"] # send_time if "pubTime" not in feeds[m]: send_time = 0 else: send_time = feeds[m]["pubTime"] # user_name if "srcDisplayName" not in feeds[m]: user_name = 0 else: user_name = feeds[m]["srcDisplayName"] # user_name = base64.b64decode(user_name).decode("utf-8") # user_id if "srcUserName" not in feeds[m]: user_id = 0 else: user_id = feeds[m]["srcUserName"] # head_url if "head_img_url" not in feeds[m]: head_url = 0 else: head_url = feeds[m]["head_img_url"] # cover_url if "cover" not in feeds[m]: cover_url = 0 else: cover_url = feeds[m]["cover"] # video_url if "url" not in feeds[m]: video_url = 0 else: video_url = feeds[m]["url"] # 下载链接 download_url = cls.get_url(log_type, video_url) Common.logger(log_type).info("video_title:{}", video_title) Common.logger(log_type).info("video_id:{}", video_id) Common.logger(log_type).info("play_cnt:{}", play_cnt) Common.logger(log_type).info("like_cnt:{}", like_cnt) Common.logger(log_type).info("duration:{}", duration) Common.logger(log_type).info("video_width:{}", video_width) Common.logger(log_type).info("video_height:{}", video_height) Common.logger(log_type).info("send_time:{}", send_time) Common.logger(log_type).info("user_name:{}", user_name) Common.logger(log_type).info("user_id:{}", user_id) Common.logger(log_type).info("head_url:{}", head_url) Common.logger(log_type).info("cover_url:{}", cover_url) Common.logger(log_type).info("video_url:{}", video_url) Common.logger(log_type).info("download_url:{}", download_url) if video_id == 0 or video_title == 0 or duration == 0 or video_url == 0: Common.logger(log_type).info("无效视频\n") elif str(video_id) in [x for y in Feishu.get_values_batch(log_type, "gzh", "fCs3BT") for x in y]: Common.logger(log_type).info("该视频已下载\n") elif str(video_id) in [x for y in Feishu.get_values_batch(log_type, "gzh", "zWKFGb") for x in y]: Common.logger(log_type).info("该视频已在feeds中\n") else: Feishu.insert_columns(log_type, "gzh", "zWKFGb", "ROWS", 1, 2) get_feeds_time = int(time.time()) values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(get_feeds_time)), "推荐榜", video_title, str(video_id), play_cnt, like_cnt, duration, str(video_width) + "*" + str(video_height), time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(send_time)), user_name, user_id, head_url, cover_url, video_url, download_url ]] time.sleep(1) Feishu.update_values(log_type, "gzh", "zWKFGb", "D2:T2", values) Common.logger(log_type).info("添加至recommend_feeds成功\n") except Exception as e: Common.logger(log_type).error("get_recommend异常:{}", e) # 获取视频下载链接 @classmethod def get_url(cls, log_type, url): try: payload = {} headers = { 'Cookie': 'rewardsn=; wxtokenkey=777' } urllib3.disable_warnings() response = requests.get(url=url, headers=headers, data=payload, verify=False) response_list = response.text.splitlines() video_url_list = [] for m in response_list: if "mpvideo.qpic.cn" in m: video_url = m.split("url: '")[1].split("',")[0].replace(r"\x26amp;", "&") video_url_list.append(video_url) video_url = video_url_list[0] return video_url except Exception as e: Common.logger(log_type).error("get_url异常:{}", e) # 下载/上传 @classmethod def download_publish(cls, log_type, env): try: recommend_feeds_sheet = Feishu.get_values_batch(log_type, "gzh", "zWKFGb") for i in range(1, len(recommend_feeds_sheet)): download_video_title = recommend_feeds_sheet[i][5] download_video_id = recommend_feeds_sheet[i][6] download_video_play_cnt = recommend_feeds_sheet[i][7] download_video_like_cnt = recommend_feeds_sheet[i][8] download_video_duration = recommend_feeds_sheet[i][9] download_width_height = recommend_feeds_sheet[i][10] download_video_send_time = recommend_feeds_sheet[i][11] download_user_name = recommend_feeds_sheet[i][12] download_user_id = recommend_feeds_sheet[i][13] download_head_url = recommend_feeds_sheet[i][14] download_cover_url = recommend_feeds_sheet[i][15] download_video_url = recommend_feeds_sheet[i][17] download_video_comment_cnt = 0 download_video_share_cnt = 0 Common.logger(log_type).info("正在判断第{}行", i + 1) Common.logger(log_type).info("download_video_title:{}", download_video_title) Common.logger(log_type).info("download_video_id:{}", download_video_id) Common.logger(log_type).info("download_video_play_cnt:{}", download_video_play_cnt) Common.logger(log_type).info("download_video_duration:{}", download_video_duration) Common.logger(log_type).info("download_video_send_time:{}", download_video_send_time) Common.logger(log_type).info("download_video_url:{}\n", download_video_url) # Common.logger(log_type).info("download_video_like_cnt:{}", download_video_like_cnt) # Common.logger(log_type).info("download_width_height:{}", download_width_height) # Common.logger(log_type).info("download_user_name:{}", download_user_name) # Common.logger(log_type).info("download_user_id:{}", download_user_id) # Common.logger(log_type).info("download_head_url:{}", download_head_url) # Common.logger(log_type).info("download_cover_url:{}", download_cover_url) # 过滤空行 if download_video_id is None or download_video_title is None or download_video_play_cnt is None: Common.logger(log_type).warning("空行,略过\n") # # 过滤敏感词 # elif any(word if word in download_video_title else False for word in # cls.sensitive_words(log_type)) is True: # Feishu.dimension_range(log_type, "music_album", "69UxPo", "ROWS", i + 1, i + 1) # Common.logger(log_type).info("视频已中敏感词,删除成功\n") # return # # 下载规则 # elif cls.download_rule(download_video_share_cnt, download_video_play_cnt) is False: # Feishu.dimension_range(log_type, "music_album", "69UxPo", "ROWS", i + 1, i + 1) # Common.logger(log_type).info("不满足下载规则,删除成功\n") # return # 时长小于 60s,删除 elif int(download_video_duration) < 60: Feishu.dimension_range(log_type, "gzh", "zWKFGb", "ROWS", i + 1, i + 1) Common.logger(log_type).info("时长{}<60,删除成功\n", download_video_duration) return # 已下载视频表去重 elif str(download_video_id) in [n for m in Feishu.get_values_batch(log_type, "gzh", "fCs3BT") for n in m]: Feishu.dimension_range(log_type, "gzh", "zWKFGb", "ROWS", i + 1, i + 1) Common.logger(log_type).info("该视频在公众号中已下载,删除成功\n") return # 看一看已下载表去重 elif str(download_video_id) in [n for m in Feishu.get_values_batch(log_type, "kanyikan", "20ce0c") for n in m]: Feishu.dimension_range(log_type, "gzh", "zWKFGb", "ROWS", i + 1, i + 1) Common.logger(log_type).info("该视频在看一看中已下载,删除成功\n") return else: # 下载封面 Common.download_method(log_type=log_type, text="cover", d_name=str(download_video_title), d_url=str(download_cover_url)) # 下载视频 Common.download_method(log_type=log_type, text="video", d_name=str(download_video_title), d_url=str(download_video_url)) # 保存视频信息至 "./videos/{download_video_title}/info.txt" with open("./videos/" + download_video_title + "/" + "info.txt", "a", encoding="UTF-8") as f_a: f_a.write(str(download_video_id) + "\n" + str(download_video_title) + "\n" + str(download_video_duration) + "\n" + str(download_video_play_cnt) + "\n" + str(download_video_comment_cnt) + "\n" + str(download_video_like_cnt) + "\n" + str(download_video_share_cnt) + "\n" + str(download_width_height) + "\n" + str(int(time.mktime( time.strptime(download_video_send_time, "%Y/%m/%d %H:%M:%S")))) + "\n" + str(download_user_name) + "\n" + str(download_head_url) + "\n" + str(download_video_url) + "\n" + str(download_cover_url) + "\n" + "gzh") Common.logger(log_type).info("==========视频信息已保存至info.txt==========") # 上传视频 Common.logger(log_type).info("开始上传视频:{}".format(download_video_title)) our_video_id = Publish.upload_and_publish(log_type, env, "play") our_video_link = "https://admin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info" Common.logger(log_type).info("视频上传完成:{}", download_video_title) # 保存视频 ID 到云文档 Common.logger(log_type).info("保存视频ID至云文档:{}", download_video_title) # 视频ID工作表,插入首行 Feishu.insert_columns(log_type, "gzh", "fCs3BT", "ROWS", 1, 2) # 视频ID工作表,首行写入数据 upload_time = int(time.time()) values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(upload_time)), "推荐榜", str(download_video_title), str(download_video_id), our_video_link, download_video_play_cnt, download_video_like_cnt, download_video_duration, str(download_width_height), str(download_video_send_time), str(download_user_name), str(download_user_id), str(download_head_url), str(download_cover_url), str(download_video_url)]] time.sleep(1) Feishu.update_values(log_type, "gzh", "fCs3BT", "D2:W2", values) # 删除行或列,可选 ROWS、COLUMNS Feishu.dimension_range(log_type, "gzh", "zWKFGb", "ROWS", i + 1, i + 1) Common.logger(log_type).info("视频:{},下载/上传成功\n", download_video_title) return except Exception as e: Common.logger(log_type).error("download_publish异常:{}", e) # 执行下载/上传 @classmethod def run_download_publish(cls, log_type, env): try: while True: recommend_feeds_sheet = Feishu.get_values_batch(log_type, "gzh", "zWKFGb") if len(recommend_feeds_sheet) == 1: Common.logger(log_type).info("下载/上传完成") break else: cls.download_publish(log_type, env) except Exception as e: Common.logger(log_type).error("run_download_publish异常:{}", e) if __name__ == "__main__": Recommend.get_recommend("recommend") # Recommend.download_publish("recommend") # Recommend.run_download_publish("recommend", "dev") # token = Recommend.get_token("recommend") # print(token)