|
@@ -1,808 +0,0 @@
|
|
|
-# -*- coding: utf-8 -*-
|
|
|
-# @Author: wangkun
|
|
|
-# @Time: 2022/8/1
|
|
|
-# import time
|
|
|
-import base64
|
|
|
-import json
|
|
|
-import os
|
|
|
-import sys
|
|
|
-import time
|
|
|
-# import urllib.parse
|
|
|
-import requests
|
|
|
-import urllib3
|
|
|
-
|
|
|
-sys.path.append(os.getcwd())
|
|
|
-from crawler_gzh.main.common import Common
|
|
|
-from crawler_gzh.main.feishu_lib import Feishu
|
|
|
-from crawler_gzh.main.publish import Publish
|
|
|
-
|
|
|
# Explicitly disable any system/environment HTTP(S) proxies for every
# requests call made by this module.
proxies = {"http": None, "https": None}
|
|
|
-
|
|
|
-
|
|
|
-class Recommend:
|
|
|
    # Harvest WeChat auth tokens from the newest Charles capture export and
    # persist them to the Feishu sheet "gzh"/VzrN7E (fixed cells B1..B9).
    @classmethod
    def get_token(cls, log_type):
        """Parse the latest Charles (chlsfile) export and store token fields.

        :param log_type: logger channel name passed to Common.logger().

        Side effects only (writes cells B1..B9 of sheet VzrN7E); returns None.
        On any parse error it sleeps 30s and calls itself again —
        NOTE(review): this retry recursion is unbounded; confirm intended.
        """
        # Directory where the Charles capture files are dropped.
        charles_file_dir = "./crawler-kanyikan-recommend/chlsfiles/"
        # charles_file_dir = "../chlsfiles/"

        # A single directory entry presumably means only a placeholder file
        # exists and no real capture has landed yet — TODO confirm.
        if int(len(os.listdir(charles_file_dir))) == 1:
            Common.logger(log_type).info("未找到chlsfile文件,等待60s")
            time.sleep(60)
        else:
            try:
                # All files in the capture directory, name-sorted.
                all_file = sorted(os.listdir(charles_file_dir))

                # Newest capture (relies on lexicographically sortable names).
                old_file = all_file[-1]

                # Split file name and extension.
                new_file = os.path.splitext(old_file)

                # Rename the capture to .txt so it can be opened as JSON text.
                os.rename(os.path.join(charles_file_dir, old_file),
                          os.path.join(charles_file_dir, new_file[0] + ".txt"))

                with open(charles_file_dir + new_file[0] + ".txt", encoding='utf-8-sig', errors='ignore') as f:
                    contents = json.load(f, strict=False)
                    Common.logger(log_type).info("chlsfile:{}", new_file)
                    # Look for the captured /mp/getappmsgext request from WeChat.
                    for content in contents:
                        if "mp.weixin.qq.com" in content['host']:
                            if content["path"] == r"/mp/getappmsgext":
                                # Raw query string -> B9.
                                query = content["query"]
                                Feishu.update_values("recommend", "gzh", "VzrN7E", "B9:B9", [[query]])

                                # Request body -> B8.
                                headers = content["request"]["header"]["headers"]
                                body = content["request"]["body"]["text"]
                                # time.sleep(1)
                                Feishu.update_values("recommend", "gzh", "VzrN7E", "B8:B8", [[body]])

                                # title -> B1 and vid -> B2, parsed out of the form body.
                                title = content["request"]["body"]["text"].split("title=")[-1].split("&ct=")[0]
                                vid = content["request"]["body"]["text"].split("vid=")[-1].split("&is_pay_subscribe")[0]
                                # time.sleep(1)
                                Feishu.update_values("recommend", "gzh", "VzrN7E", "B1:B1", [[title]])
                                # time.sleep(1)
                                Feishu.update_values("recommend", "gzh", "VzrN7E", "B2:B2", [[vid]])

                                # Walk the request headers for cookie/referer fields.
                                for h in headers:
                                    # pass_ticket cookie -> B5.
                                    if h["name"] == "cookie" and "pass_ticket" in h["value"]:
                                        pass_ticket = h["value"].split("pass_ticket=")[-1]
                                        Feishu.update_values("recommend", "gzh", "VzrN7E", "B5:B5", [[pass_ticket]])

                                    # Full referer URL -> B7.
                                    if h["name"] == "referer":
                                        referer = h["value"]
                                        Feishu.update_values("recommend", "gzh", "VzrN7E", "B7:B7", [[referer]])

                                    # __biz parsed from the same referer -> B3.
                                    if h["name"] == "referer":
                                        __biz = h["value"].split("__biz=")[-1].split("&mid=")[0]
                                        Feishu.update_values("recommend", "gzh", "VzrN7E", "B3:B3", [[__biz]])

                                    # appmsg_token cookie -> B4.
                                    if h["name"] == "cookie" and "appmsg_token" in h["value"]:
                                        appmsg_token = h["value"].split("appmsg_token=")[-1]
                                        Feishu.update_values("recommend", "gzh", "VzrN7E", "B4:B4", [[appmsg_token]])

                                    # wap_sid2 cookie -> B6.
                                    if h["name"] == "cookie" and "wap_sid2" in h["value"]:
                                        wap_sid2 = h["value"].split("wap_sid2=")[-1]
                                        Feishu.update_values("recommend", "gzh", "VzrN7E", "B6:B6", [[wap_sid2]])

            except Exception as e:
                Common.logger(log_type).error("获取session异常,30s后重试:{}", e)
                time.sleep(30)
                cls.get_token(log_type)
|
|
|
-
|
|
|
    @classmethod
    def get_token_v2(cls, log_type):
        """V2 of get_token: also captures wxuin/version and reports success.

        :param log_type: logger channel name passed to Common.logger().
        :return: True once a matching /mp/getappmsgext request has been
                 parsed and saved; implicitly None when no capture is
                 available yet or an exception occurred.

        Writes to Feishu sheet "gzh"/VzrN7E cells B4..B11.
        """
        # Directory where the Charles capture files are dropped.
        charles_file_dir = "./crawler-kanyikan-recommend/chlsfiles/"
        # charles_file_dir = "../chlsfiles/"

        # A single directory entry presumably means no real capture yet —
        # TODO confirm; wait and let the caller retry.
        if int(len(os.listdir(charles_file_dir))) == 1:
            Common.logger(log_type).info("未找到chlsfile文件,等待60s")
            time.sleep(60)
        else:
            try:
                # All files in the capture directory, name-sorted.
                all_file = sorted(os.listdir(charles_file_dir))

                # Newest capture.
                old_file = all_file[-1]

                # Split file name and extension.
                new_file = os.path.splitext(old_file)

                # Rename the capture to .txt so it can be opened as JSON text.
                os.rename(os.path.join(charles_file_dir, old_file),
                          os.path.join(charles_file_dir, new_file[0] + ".txt"))

                with open(charles_file_dir + new_file[0] + ".txt", encoding='utf-8-sig', errors='ignore') as f:
                    contents = json.load(f, strict=False)
                    for content in contents:
                        # Exact-match the token-bearing request this time
                        # (v1 used a substring check on the host).
                        if content["host"] == "mp.weixin.qq.com" and content["path"] == r"/mp/getappmsgext":
                            # Raw query string -> B9.
                            query = content["query"]
                            Feishu.update_values("recommend", "gzh", "VzrN7E", "B9:B9", [[query]])
                            Common.logger(log_type).info("保存query成功\n")

                            # Request body -> B8.
                            body = content["request"]["body"]["text"]
                            Feishu.update_values("recommend", "gzh", "VzrN7E", "B8:B8", [[body]])
                            Common.logger(log_type).info("保存body成功\n")

                            # Scan request headers for referer and cookies.
                            headers = content["request"]["header"]["headers"]
                            for header in headers:
                                # referer -> B7.
                                if header["name"] == "referer":
                                    referer = header["value"]
                                    Feishu.update_values("recommend", "gzh", "VzrN7E", "B7:B7", [[referer]])
                                    Common.logger(log_type).info("保存referer成功\n")

                                # wxuin cookie -> B10.
                                if header["name"] == "cookie" and "wxuin" in header["value"]:
                                    wxuin = header["value"].split("wxuin=")[-1]
                                    Feishu.update_values("recommend", "gzh", "VzrN7E", "B10:B10", [[wxuin]])
                                    Common.logger(log_type).info("保存wxuin成功\n")

                                # version cookie -> B11.
                                if header["name"] == "cookie" and "version" in header["value"]:
                                    version = header["value"].split("version=")[-1]
                                    Common.logger(log_type).info("version:{}\n", version)
                                    Feishu.update_values("recommend", "gzh", "VzrN7E", "B11:B11", [[version]])
                                    Common.logger(log_type).info("保存version成功\n")

                                # pass_ticket cookie -> B5.
                                if header["name"] == "cookie" and "pass_ticket" in header["value"]:
                                    pass_ticket = header["value"].split("pass_ticket=")[-1]
                                    Common.logger(log_type).info("pass_ticket:{}\n", pass_ticket)
                                    Feishu.update_values("recommend", "gzh", "VzrN7E", "B5:B5", [[pass_ticket]])
                                    Common.logger(log_type).info("保存pass_ticket成功\n")

                                # appmsg_token cookie -> B4.
                                if header["name"] == "cookie" and "appmsg_token" in header["value"]:
                                    appmsg_token = header["value"].split("appmsg_token=")[-1]
                                    Common.logger(log_type).info("appmsg_token:{}\n", appmsg_token)
                                    Feishu.update_values("recommend", "gzh", "VzrN7E", "B4:B4", [[appmsg_token]])
                                    Common.logger(log_type).info("保存appmsg_token成功\n")

                                # wap_sid2 cookie -> B6.
                                if header["name"] == "cookie" and "wap_sid2" in header["value"]:
                                    wap_sid2 = header["value"].split("wap_sid2=")[-1]
                                    Common.logger(log_type).info("wap_sid2:{}\n", wap_sid2)
                                    Feishu.update_values("recommend", "gzh", "VzrN7E", "B6:B6", [[wap_sid2]])
                                    Common.logger(log_type).info("保存wap_sid2成功\n")

                            # Signal the caller that tokens were refreshed.
                            # (A disabled variant also saved the x-wechat-key
                            # header to B12 before returning.)
                            return True

            except Exception as e:
                Common.logger(log_type).error("get_token_v2异常:{}", e)
|
|
|
-
|
|
|
- # 获取推荐列表
|
|
|
- @classmethod
|
|
|
- def get_recommend(cls, log_type):
|
|
|
- try:
|
|
|
- token_sheet = Feishu.get_values_batch("recommend", "gzh", "VzrN7E")
|
|
|
- if token_sheet is None:
|
|
|
- Common.logger(log_type).info("未获取到token等信息,30s后重试")
|
|
|
- time.sleep(30)
|
|
|
- cls.get_recommend(log_type)
|
|
|
- else:
|
|
|
- # __biz = token_sheet[2][1]
|
|
|
- appmsg_token = token_sheet[3][1]
|
|
|
- pass_ticket = token_sheet[4][1]
|
|
|
- wap_sid2 = token_sheet[5][1]
|
|
|
- referer = token_sheet[6][1]
|
|
|
- body = token_sheet[7][1]
|
|
|
- query = token_sheet[8][1]
|
|
|
-
|
|
|
- url = "https://mp.weixin.qq.com/mp/getappmsgext?"
|
|
|
- headers = {
|
|
|
- # "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
|
|
|
- "content-type": 'text/plain',
|
|
|
- "accept": "*/*",
|
|
|
- "x-requested-with": "XMLHttpRequest",
|
|
|
- "accept-language": "zh-cn",
|
|
|
- "accept-encoding": "gzip, deflate, br",
|
|
|
- "origin": "https://mp.weixin.qq.com",
|
|
|
- "user-agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 "
|
|
|
- "(KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.26(0x18001a29)"
|
|
|
- " NetType/WIFI Language/zh_CN",
|
|
|
- "referer": referer
|
|
|
- }
|
|
|
- cookies = {
|
|
|
- "appmsg_token": appmsg_token,
|
|
|
- "devicetype": "iOS14.7.1",
|
|
|
- "lang": "zh_CN",
|
|
|
- "pass_ticket": pass_ticket,
|
|
|
- "rewardsn": "",
|
|
|
- "version": "18001a29",
|
|
|
- "wap_sid2": wap_sid2,
|
|
|
- "wxtokenkey": "777",
|
|
|
- "wxuin": "2010747860"
|
|
|
- }
|
|
|
- urllib3.disable_warnings()
|
|
|
- response = requests.post(url=url, headers=headers, cookies=cookies, params=query, data=body,
|
|
|
- verify=False, proxies=proxies)
|
|
|
-
|
|
|
- if "related_tag_video" not in response.json():
|
|
|
- Common.logger(log_type).warning("response:{}\n", response.text)
|
|
|
- elif len(response.json()["related_tag_video"]) == 0:
|
|
|
- Common.logger(log_type).warning("response:{}\n", response.text)
|
|
|
- time.sleep(10)
|
|
|
- cls.get_recommend(log_type)
|
|
|
- else:
|
|
|
- feeds = response.json()["related_tag_video"]
|
|
|
- for m in range(len(feeds)):
|
|
|
- # video_title
|
|
|
- if "title" not in feeds[m]:
|
|
|
- video_title = 0
|
|
|
- else:
|
|
|
- video_title = feeds[m]["title"]
|
|
|
- # video_title = base64.b64decode(video_title).decode("utf-8")
|
|
|
-
|
|
|
- # video_id
|
|
|
- if "vid" not in feeds[m]:
|
|
|
- video_id = 0
|
|
|
- else:
|
|
|
- video_id = feeds[m]["vid"]
|
|
|
-
|
|
|
- # play_cnt
|
|
|
- if "read_num" not in feeds[m]:
|
|
|
- play_cnt = 0
|
|
|
- else:
|
|
|
- play_cnt = feeds[m]["read_num"]
|
|
|
-
|
|
|
- # like_cnt
|
|
|
- if "like_num" not in feeds[m]:
|
|
|
- like_cnt = 0
|
|
|
- else:
|
|
|
- like_cnt = feeds[m]["like_num"]
|
|
|
-
|
|
|
- # duration
|
|
|
- if "duration" not in feeds[m]:
|
|
|
- duration = 0
|
|
|
- else:
|
|
|
- duration = feeds[m]["duration"]
|
|
|
-
|
|
|
- # video_width / video_height
|
|
|
- if "videoWidth" not in feeds[m] or "videoHeight" not in feeds[m]:
|
|
|
- video_width = 0
|
|
|
- video_height = 0
|
|
|
- else:
|
|
|
- video_width = feeds[m]["videoWidth"]
|
|
|
- video_height = feeds[m]["videoHeight"]
|
|
|
-
|
|
|
- # send_time
|
|
|
- if "pubTime" not in feeds[m]:
|
|
|
- send_time = 0
|
|
|
- else:
|
|
|
- send_time = feeds[m]["pubTime"]
|
|
|
-
|
|
|
- # user_name
|
|
|
- if "srcDisplayName" not in feeds[m]:
|
|
|
- user_name = 0
|
|
|
- else:
|
|
|
- user_name = feeds[m]["srcDisplayName"]
|
|
|
- # user_name = base64.b64decode(user_name).decode("utf-8")
|
|
|
-
|
|
|
- # user_id
|
|
|
- if "srcUserName" not in feeds[m]:
|
|
|
- user_id = 0
|
|
|
- else:
|
|
|
- user_id = feeds[m]["srcUserName"]
|
|
|
-
|
|
|
- # head_url
|
|
|
- if "head_img_url" not in feeds[m]:
|
|
|
- head_url = 0
|
|
|
- else:
|
|
|
- head_url = feeds[m]["head_img_url"]
|
|
|
-
|
|
|
- # cover_url
|
|
|
- if "cover" not in feeds[m]:
|
|
|
- cover_url = 0
|
|
|
- else:
|
|
|
- cover_url = feeds[m]["cover"]
|
|
|
-
|
|
|
- # video_url
|
|
|
- if "url" not in feeds[m]:
|
|
|
- video_url = 0
|
|
|
- else:
|
|
|
- video_url = feeds[m]["url"]
|
|
|
-
|
|
|
- # 下载链接
|
|
|
- download_url = cls.get_url(log_type, video_url)
|
|
|
-
|
|
|
- Common.logger(log_type).info("video_title:{}", video_title)
|
|
|
- Common.logger(log_type).info("video_id:{}", video_id)
|
|
|
- Common.logger(log_type).info("play_cnt:{}", play_cnt)
|
|
|
- Common.logger(log_type).info("like_cnt:{}", like_cnt)
|
|
|
- Common.logger(log_type).info("duration:{}", duration)
|
|
|
- Common.logger(log_type).info("video_width:{}", video_width)
|
|
|
- Common.logger(log_type).info("video_height:{}", video_height)
|
|
|
- Common.logger(log_type).info("send_time:{}", send_time)
|
|
|
- Common.logger(log_type).info("user_name:{}", user_name)
|
|
|
- Common.logger(log_type).info("user_id:{}", user_id)
|
|
|
- Common.logger(log_type).info("head_url:{}", head_url)
|
|
|
- Common.logger(log_type).info("cover_url:{}", cover_url)
|
|
|
- Common.logger(log_type).info("video_url:{}", video_url)
|
|
|
- Common.logger(log_type).info("download_url:{}", download_url)
|
|
|
-
|
|
|
- if video_id == 0 or video_title == 0 or duration == 0 or video_url == 0:
|
|
|
- Common.logger(log_type).info("无效视频\n")
|
|
|
- elif str(video_id) in [x for y in Feishu.get_values_batch(log_type, "gzh", "fCs3BT") for x in
|
|
|
- y]:
|
|
|
- Common.logger(log_type).info("该视频已下载\n")
|
|
|
- elif str(video_id) in [x for y in Feishu.get_values_batch(log_type, "gzh", "zWKFGb") for x in
|
|
|
- y]:
|
|
|
- Common.logger(log_type).info("该视频已在feeds中\n")
|
|
|
- else:
|
|
|
- Feishu.insert_columns(log_type, "gzh", "zWKFGb", "ROWS", 1, 2)
|
|
|
- get_feeds_time = int(time.time())
|
|
|
- values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(get_feeds_time)),
|
|
|
- "推荐榜",
|
|
|
- video_title,
|
|
|
- str(video_id),
|
|
|
- play_cnt,
|
|
|
- like_cnt,
|
|
|
- duration,
|
|
|
- str(video_width) + "*" + str(video_height),
|
|
|
- time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(send_time)),
|
|
|
- user_name,
|
|
|
- user_id,
|
|
|
- head_url,
|
|
|
- cover_url,
|
|
|
- video_url,
|
|
|
- download_url
|
|
|
- ]]
|
|
|
- time.sleep(1)
|
|
|
- Feishu.update_values(log_type, "gzh", "zWKFGb", "D2:T2", values)
|
|
|
- Common.logger(log_type).info("添加至recommend_feeds成功\n")
|
|
|
-
|
|
|
- except Exception as e:
|
|
|
- Common.logger(log_type).error("get_recommend异常:{}", e)
|
|
|
-
|
|
|
- # 使用 token 获取推荐列表
|
|
|
- @classmethod
|
|
|
- def get_recommend_by_token(cls, log_type):
|
|
|
- try:
|
|
|
- get_token = cls.get_token_v2(log_type)
|
|
|
- if get_token is not True:
|
|
|
- Common.logger(log_type).warning("未获取到token,10s后重试")
|
|
|
- time.sleep(10)
|
|
|
- cls.get_recommend_by_token(log_type)
|
|
|
- else:
|
|
|
- # 获取公众号token
|
|
|
- token_sheet = Feishu.get_values_batch(log_type, "gzh", "VzrN7E")
|
|
|
- appmsg_token = token_sheet[3][1]
|
|
|
- pass_ticket = token_sheet[4][1]
|
|
|
- wap_sid2 = token_sheet[5][1]
|
|
|
- referer = token_sheet[6][1]
|
|
|
- body = token_sheet[7][1]
|
|
|
- query = token_sheet[8][1]
|
|
|
- wxuin = token_sheet[9][1]
|
|
|
- version = token_sheet[10][1]
|
|
|
- # x_wechat_key = token_sheet[11][1]
|
|
|
-
|
|
|
- url = "https://mp.weixin.qq.com/mp/getappmsgext?"
|
|
|
- headers = {
|
|
|
- "origin": "https://mp.weixin.qq.com",
|
|
|
- "x-requested-with": "XMLHttpRequest",
|
|
|
- "content-type": "application/x-www-form-urlencoded",
|
|
|
- "accept": "*/*",
|
|
|
- "sec-fetch-site": "same-origin",
|
|
|
- "sec-fetch-mode": "cors",
|
|
|
- "sec-fetch-dest": "empty",
|
|
|
- "accept-encoding": "gzip, deflate, br",
|
|
|
- "user-agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) "
|
|
|
- "Chrome/81.0.4044.138 Safari/537.36 NetType/WIFI "
|
|
|
- "MicroMessenger/7.0.20.1781(0x6700143B) WindowsWechat(0x6307001e)",
|
|
|
- "accept-language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
|
|
|
- "referer": str(referer)
|
|
|
- }
|
|
|
- cookies = {
|
|
|
- "rewardsn": "",
|
|
|
- "wxtokenkey": "777",
|
|
|
- "devicetype": "Windows10x64",
|
|
|
- "lang": "zh_CN",
|
|
|
- "wxuin": str(wxuin),
|
|
|
- "version": str(version),
|
|
|
- "pass_ticket": str(pass_ticket),
|
|
|
- "appmsg_token": str(appmsg_token),
|
|
|
- "wap_sid2": str(wap_sid2)
|
|
|
- }
|
|
|
- # query_string = {
|
|
|
- # "f": "json",
|
|
|
- # "mock": "",
|
|
|
- # "fasttmplajax": "1",
|
|
|
- # "uin": "",
|
|
|
- # "key": "",
|
|
|
- # "pass_ticket": "MPA2Yy1dnOo6JSfV1DNWyJcLBO9dwupvcgkQj6sXOo3puAQKD7t4Odst6kRxfmUc",
|
|
|
- # "wxtoken": "",
|
|
|
- # "devicetype": "iOS14.7.1",
|
|
|
- # "clientversion": "18001a2b",
|
|
|
- # "__biz": "MzkwMjM4OTYyMA==",
|
|
|
- # "enterid": "1659926777",
|
|
|
- # "appmsg_token": "",
|
|
|
- # "x5": "0",
|
|
|
- # "wx_header": "1"
|
|
|
- # }
|
|
|
- # form = {
|
|
|
- # "r": "0.2395852290889654",
|
|
|
- # "__biz": "MzkwMjM4OTYyMA==",
|
|
|
- # "appmsg_type": "9",
|
|
|
- # "mid": "2247483674",
|
|
|
- # "sn": "4719d4e269e8923f7cad6c8a1e43d14e",
|
|
|
- # "idx": "1",
|
|
|
- # "scene": "102",
|
|
|
- # "title": "%E4%B8%A4%E5%85%84%E5%BC%9F%E6%95%B4%E5%A4%A9%E5%A5%BD%E5%90%83%E6%87%92%E5%81%9A%EF%BC%8C%E6%97%A0%E6%89%80%E4%BA%8B%E4%BA%8B%E8%80%81%E6%83%B3%E7%9D%80%E4%B8%8D%E5%8A%B3%E8%80%8C%E8%8E%B7%EF%BC%8C%E5%A5%BD%E4%BA%86%E6%8A%A5%E5%BA%94%E6%9D%A5%E4%BA%86",
|
|
|
- # "ct": "1659803693",
|
|
|
- # "abtest_cookie": "",
|
|
|
- # "devicetype": "iOS14.7.1",
|
|
|
- # "version": "18001a2b",
|
|
|
- # "is_need_ticket": "0",
|
|
|
- # "is_need_ad": "1",
|
|
|
- # "comment_id": "0",
|
|
|
- # "is_need_reward": "0",
|
|
|
- # "both_ad": "0",
|
|
|
- # "reward_uin_count": "0",
|
|
|
- # "send_time": "",
|
|
|
- # "msg_daily_idx": "1",
|
|
|
- # "is_original": "0",
|
|
|
- # "is_only_read": "1",
|
|
|
- # "req_id": "",
|
|
|
- # "pass_ticket": "MPA2Yy1dnOo6JSfV1DNWyJcLBO9dwupvcgkQj6sXOo3puAQKD7t4Odst6kRxfmUc",
|
|
|
- # "is_temp_url": "0",
|
|
|
- # "item_show_type": "5",
|
|
|
- # "tmp_version": "1",
|
|
|
- # "more_read_type": "0",
|
|
|
- # "appmsg_like_type": "2",
|
|
|
- # "related_video_sn": "",
|
|
|
- # "related_video_num": "5",
|
|
|
- # "vid": "wxv_2520118281538846720",
|
|
|
- # "is_pay_subscribe": "0",
|
|
|
- # "pay_subscribe_uin_count": "0",
|
|
|
- # "has_red_packet_cover": "0",
|
|
|
- # "album_id": "1296223588617486300",
|
|
|
- # "album_video_num": "5",
|
|
|
- # "cur_album_id": "",
|
|
|
- # "is_public_related_video": "0",
|
|
|
- # "encode_info_by_base64": "1",
|
|
|
- # "exptype": ""
|
|
|
- # }
|
|
|
-
|
|
|
- urllib3.disable_warnings()
|
|
|
- response = requests.post(url=url, headers=headers, cookies=cookies, params=query, data=body,
|
|
|
- proxies=proxies, verify=False)
|
|
|
- if "related_tag_video" not in response.json():
|
|
|
- Common.logger(log_type).warning("response:{}\n", response.text)
|
|
|
- elif len(response.json()["related_tag_video"]) == 0:
|
|
|
- Common.logger(log_type).warning("response:{}\n", response.text)
|
|
|
- # time.sleep(10)
|
|
|
- # cls.get_recommend(log_type)
|
|
|
- else:
|
|
|
- feeds = response.json()["related_tag_video"]
|
|
|
- for m in range(len(feeds)):
|
|
|
- # video_title
|
|
|
- if "title" not in feeds[m]:
|
|
|
- video_title = 0
|
|
|
- else:
|
|
|
- video_title = feeds[m]["title"]
|
|
|
- # video_title = base64.b64decode(video_title).decode("utf-8")
|
|
|
-
|
|
|
- # video_id
|
|
|
- if "vid" not in feeds[m]:
|
|
|
- video_id = 0
|
|
|
- else:
|
|
|
- video_id = feeds[m]["vid"]
|
|
|
-
|
|
|
- # play_cnt
|
|
|
- if "read_num" not in feeds[m]:
|
|
|
- play_cnt = 0
|
|
|
- else:
|
|
|
- play_cnt = feeds[m]["read_num"]
|
|
|
-
|
|
|
- # like_cnt
|
|
|
- if "like_num" not in feeds[m]:
|
|
|
- like_cnt = 0
|
|
|
- else:
|
|
|
- like_cnt = feeds[m]["like_num"]
|
|
|
-
|
|
|
- # duration
|
|
|
- if "duration" not in feeds[m]:
|
|
|
- duration = 0
|
|
|
- else:
|
|
|
- duration = feeds[m]["duration"]
|
|
|
-
|
|
|
- # video_width / video_height
|
|
|
- if "videoWidth" not in feeds[m] or "videoHeight" not in feeds[m]:
|
|
|
- video_width = 0
|
|
|
- video_height = 0
|
|
|
- else:
|
|
|
- video_width = feeds[m]["videoWidth"]
|
|
|
- video_height = feeds[m]["videoHeight"]
|
|
|
-
|
|
|
- # send_time
|
|
|
- if "pubTime" not in feeds[m]:
|
|
|
- send_time = 0
|
|
|
- else:
|
|
|
- send_time = feeds[m]["pubTime"]
|
|
|
-
|
|
|
- # user_name
|
|
|
- if "srcDisplayName" not in feeds[m]:
|
|
|
- user_name = 0
|
|
|
- else:
|
|
|
- user_name = feeds[m]["srcDisplayName"]
|
|
|
- # user_name = base64.b64decode(user_name).decode("utf-8")
|
|
|
-
|
|
|
- # user_id
|
|
|
- if "srcUserName" not in feeds[m]:
|
|
|
- user_id = 0
|
|
|
- else:
|
|
|
- user_id = feeds[m]["srcUserName"]
|
|
|
-
|
|
|
- # head_url
|
|
|
- if "head_img_url" not in feeds[m]:
|
|
|
- head_url = 0
|
|
|
- else:
|
|
|
- head_url = feeds[m]["head_img_url"]
|
|
|
-
|
|
|
- # cover_url
|
|
|
- if "cover" not in feeds[m]:
|
|
|
- cover_url = 0
|
|
|
- else:
|
|
|
- cover_url = feeds[m]["cover"]
|
|
|
-
|
|
|
- # video_url
|
|
|
- if "url" not in feeds[m]:
|
|
|
- video_url = 0
|
|
|
- else:
|
|
|
- video_url = feeds[m]["url"]
|
|
|
-
|
|
|
- # 下载链接
|
|
|
- download_url = cls.get_url(log_type, video_url)
|
|
|
-
|
|
|
- Common.logger(log_type).info("video_title:{}", video_title)
|
|
|
- Common.logger(log_type).info("video_id:{}", video_id)
|
|
|
- Common.logger(log_type).info("play_cnt:{}", play_cnt)
|
|
|
- Common.logger(log_type).info("like_cnt:{}", like_cnt)
|
|
|
- Common.logger(log_type).info("duration:{}", duration)
|
|
|
- Common.logger(log_type).info("video_width:{}", video_width)
|
|
|
- Common.logger(log_type).info("video_height:{}", video_height)
|
|
|
- Common.logger(log_type).info("send_time:{}", send_time)
|
|
|
- Common.logger(log_type).info("user_name:{}", user_name)
|
|
|
- Common.logger(log_type).info("user_id:{}", user_id)
|
|
|
- Common.logger(log_type).info("head_url:{}", head_url)
|
|
|
- Common.logger(log_type).info("cover_url:{}", cover_url)
|
|
|
- Common.logger(log_type).info("video_url:{}", video_url)
|
|
|
- Common.logger(log_type).info("download_url:{}", download_url)
|
|
|
-
|
|
|
- if video_id == 0 or video_title == 0 or duration == 0 or video_url == 0:
|
|
|
- Common.logger(log_type).info("无效视频\n")
|
|
|
- elif str(video_id) in [x for y in Feishu.get_values_batch(log_type, "gzh", "fCs3BT") for x in
|
|
|
- y]:
|
|
|
- Common.logger(log_type).info("该视频已下载\n")
|
|
|
- elif str(video_id) in [x for y in Feishu.get_values_batch(log_type, "gzh", "zWKFGb") for x in
|
|
|
- y]:
|
|
|
- Common.logger(log_type).info("该视频已在feeds中\n")
|
|
|
- else:
|
|
|
- Feishu.insert_columns(log_type, "gzh", "zWKFGb", "ROWS", 1, 2)
|
|
|
- get_feeds_time = int(time.time())
|
|
|
- values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(get_feeds_time)),
|
|
|
- "推荐榜",
|
|
|
- video_title,
|
|
|
- str(video_id),
|
|
|
- play_cnt,
|
|
|
- like_cnt,
|
|
|
- duration,
|
|
|
- str(video_width) + "*" + str(video_height),
|
|
|
- time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(send_time)),
|
|
|
- user_name,
|
|
|
- user_id,
|
|
|
- head_url,
|
|
|
- cover_url,
|
|
|
- video_url,
|
|
|
- download_url
|
|
|
- ]]
|
|
|
- time.sleep(1)
|
|
|
- Feishu.update_values(log_type, "gzh", "zWKFGb", "D2:T2", values)
|
|
|
- Common.logger(log_type).info("添加至recommend_feeds成功\n")
|
|
|
- except Exception as e:
|
|
|
- Common.logger(log_type).error("get_recommend_by_token异常:{}", e)
|
|
|
-
|
|
|
- # 获取视频下载链接
|
|
|
- @classmethod
|
|
|
- def get_url(cls, log_type, url):
|
|
|
- try:
|
|
|
- payload = {}
|
|
|
- headers = {
|
|
|
- 'Cookie': 'rewardsn=; wxtokenkey=777'
|
|
|
- }
|
|
|
- urllib3.disable_warnings()
|
|
|
- response = requests.get(url=url, headers=headers, data=payload, verify=False, proxies=proxies)
|
|
|
- response_list = response.text.splitlines()
|
|
|
- video_url_list = []
|
|
|
- for m in response_list:
|
|
|
- if "mpvideo.qpic.cn" in m:
|
|
|
- video_url = m.split("url: '")[1].split("',")[0].replace(r"\x26amp;", "&")
|
|
|
- video_url_list.append(video_url)
|
|
|
- video_url = video_url_list[0]
|
|
|
- return video_url
|
|
|
- except Exception as e:
|
|
|
- Common.logger(log_type).error("get_url异常:{}", e)
|
|
|
-
|
|
|
    # Download one pending feed row, publish it, then remove it from the sheet.
    @classmethod
    def download_publish(cls, log_type, env):
        """Process the first actionable row of feeds sheet "gzh"/zWKFGb.

        :param log_type: logger channel name passed to Common.logger().
        :param env: environment flag forwarded to Publish.upload_and_publish().

        NOTE(review): every branch except "blank row" ends with ``return``,
        so at most one row is handled per call; run_download_publish()
        loops this method until the sheet is drained.
        """
        try:
            recommend_feeds_sheet = Feishu.get_values_batch(log_type, "gzh", "zWKFGb")
            # Row 0 is the header; data rows start at index 1.
            for i in range(1, len(recommend_feeds_sheet)):
                # Fixed column layout of the feeds sheet (0-based indices).
                download_video_title = recommend_feeds_sheet[i][5]
                download_video_id = recommend_feeds_sheet[i][6]
                download_video_play_cnt = recommend_feeds_sheet[i][7]
                download_video_like_cnt = recommend_feeds_sheet[i][8]
                download_video_duration = recommend_feeds_sheet[i][9]
                download_width_height = recommend_feeds_sheet[i][10]
                download_video_send_time = recommend_feeds_sheet[i][11]
                download_user_name = recommend_feeds_sheet[i][12]
                download_user_id = recommend_feeds_sheet[i][13]
                download_head_url = recommend_feeds_sheet[i][14]
                download_cover_url = recommend_feeds_sheet[i][15]
                download_video_url = recommend_feeds_sheet[i][17]
                # Not tracked by this source; written as 0 to info.txt.
                download_video_comment_cnt = 0
                download_video_share_cnt = 0

                Common.logger(log_type).info("正在判断第{}行", i + 1)
                Common.logger(log_type).info("download_video_title:{}", download_video_title)
                Common.logger(log_type).info("download_video_id:{}", download_video_id)
                Common.logger(log_type).info("download_video_play_cnt:{}", download_video_play_cnt)
                Common.logger(log_type).info("download_video_duration:{}", download_video_duration)
                Common.logger(log_type).info("download_video_send_time:{}", download_video_send_time)
                Common.logger(log_type).info("download_video_url:{}\n", download_video_url)

                # Skip blank rows (continue to the next candidate row).
                if download_video_id is None or download_video_title is None or download_video_play_cnt is None:
                    Common.logger(log_type).warning("空行,略过\n")
                # (Disabled upstream: sensitive-word filter and generic
                # download-rule check that also deleted the row.)
                # Videos shorter than 60s are dropped from the sheet.
                elif int(download_video_duration) < 60:
                    Feishu.dimension_range(log_type, "gzh", "zWKFGb", "ROWS", i + 1, i + 1)
                    Common.logger(log_type).info("时长{}<60,删除成功\n", download_video_duration)
                    return
                # De-dup against the gzh downloaded-videos sheet.
                elif str(download_video_id) in [n for m in Feishu.get_values_batch(log_type, "gzh", "fCs3BT")
                                                for n in m]:
                    Feishu.dimension_range(log_type, "gzh", "zWKFGb", "ROWS", i + 1, i + 1)
                    Common.logger(log_type).info("该视频在公众号中已下载,删除成功\n")
                    return
                # De-dup against the kanyikan downloaded-videos sheet.
                elif str(download_video_id) in [n for m in Feishu.get_values_batch(log_type, "kanyikan", "20ce0c")
                                                for n in m]:
                    Feishu.dimension_range(log_type, "gzh", "zWKFGb", "ROWS", i + 1, i + 1)
                    Common.logger(log_type).info("该视频在看一看中已下载,删除成功\n")
                    return
                else:
                    # Download the cover image.
                    Common.download_method(log_type=log_type, text="cover",
                                           d_name=str(download_video_title), d_url=str(download_cover_url))
                    # Download the video file.
                    Common.download_method(log_type=log_type, text="video",
                                           d_name=str(download_video_title), d_url=str(download_video_url))
                    # Persist metadata to "./videos/{download_video_title}/info.txt"
                    # (one field per line, fixed order expected by Publish).
                    with open("./crawler_gzh/videos/" + download_video_title + "/" + "info.txt",
                              "a", encoding="UTF-8") as f_a:
                        f_a.write(str(download_video_id) + "\n" +
                                  str(download_video_title) + "\n" +
                                  str(download_video_duration) + "\n" +
                                  str(download_video_play_cnt) + "\n" +
                                  str(download_video_comment_cnt) + "\n" +
                                  str(download_video_like_cnt) + "\n" +
                                  str(download_video_share_cnt) + "\n" +
                                  str(download_width_height) + "\n" +
                                  str(int(time.mktime(
                                      time.strptime(download_video_send_time, "%Y/%m/%d %H:%M:%S")))) + "\n" +
                                  str(download_user_name) + "\n" +
                                  str(download_head_url) + "\n" +
                                  str(download_video_url) + "\n" +
                                  str(download_cover_url) + "\n" +
                                  "gzh")
                    Common.logger(log_type).info("==========视频信息已保存至info.txt==========")

                    # Upload the video via the publishing pipeline.
                    Common.logger(log_type).info("开始上传视频:{}".format(download_video_title))
                    our_video_id = Publish.upload_and_publish(log_type, env, "play")
                    our_video_link = "https://admin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info"
                    Common.logger(log_type).info("视频上传完成:{}", download_video_title)

                    # Record the published video in the downloaded-ids sheet.
                    Common.logger(log_type).info("保存视频ID至云文档:{}", download_video_title)
                    # Insert a fresh top row in the video-id sheet.
                    Feishu.insert_columns(log_type, "gzh", "fCs3BT", "ROWS", 1, 2)
                    # Fill the new row D2:W2.
                    upload_time = int(time.time())
                    values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(upload_time)),
                               "推荐榜",
                               str(download_video_title),
                               str(download_video_id),
                               our_video_link,
                               download_video_play_cnt,
                               download_video_like_cnt,
                               download_video_duration,
                               str(download_width_height),
                               str(download_video_send_time),
                               str(download_user_name),
                               str(download_user_id),
                               str(download_head_url),
                               str(download_cover_url),
                               str(download_video_url)]]
                    time.sleep(1)
                    Feishu.update_values(log_type, "gzh", "fCs3BT", "D2:W2", values)

                    # Remove the processed row from the feeds sheet.
                    Feishu.dimension_range(log_type, "gzh", "zWKFGb", "ROWS", i + 1, i + 1)
                    Common.logger(log_type).info("视频:{},下载/上传成功\n", download_video_title)
                    return

        except Exception as e:
            Common.logger(log_type).error("download_publish异常:{}", e)
|
|
|
-
|
|
|
- # 执行下载/上传
|
|
|
- @classmethod
|
|
|
- def run_download_publish(cls, log_type, env):
|
|
|
- try:
|
|
|
- while True:
|
|
|
- recommend_feeds_sheet = Feishu.get_values_batch(log_type, "gzh", "zWKFGb")
|
|
|
- if len(recommend_feeds_sheet) == 1:
|
|
|
- Common.logger(log_type).info("下载/上传完成")
|
|
|
- break
|
|
|
- else:
|
|
|
- cls.download_publish(log_type, env)
|
|
|
- except Exception as e:
|
|
|
- Common.logger(log_type).error("run_download_publish异常:{}", e)
|
|
|
-
|
|
|
-
|
|
|
-if __name__ == "__main__":
|
|
|
- Recommend.get_recommend_by_token("recommend")
|
|
|
- # Recommend.download_publish("recommend")
|
|
|
- # Recommend.run_download_publish("recommend", "dev")
|
|
|
- # print(Recommend.get_token_v2("recommend"))
|
|
|
- # print(token)
|