# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/6/1
import os
import random
import shutil
import sys
import time
import requests
import urllib3

sys.path.append(os.getcwd())
from main.common import Common
from main.feishu_lib import Feishu
from main.publish import Publish

proxies = {"http": None, "https": None}
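

# This module crawls the WeChat 看一看 (Kanyikan) recommend feed: it pulls batches of feed
# items, filters them with download_rule and the filter-word list kept in a Feishu sheet,
# downloads the video and cover, publishes them via Publish.upload_and_publish, and records
# the result back to a Feishu sheet. The sheet IDs ("rofdM5", "ho98Ov", "20ce0c") and the
# column layout expected by Feishu.update_values are taken as-is from the existing setup.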
class Kanyikanrecommend:
    @classmethod
    def get_filter_word(cls, log_type, crawler):
        while True:
            filter_sheet = Feishu.get_values_batch(log_type, crawler, "rofdM5")
            if filter_sheet is None:
                Common.logger(log_type).info(f"filter_sheet:{filter_sheet}")
                time.sleep(1)
                continue
            # Build the filter-word list, skipping empty cells
            word_list = []
            for row in filter_sheet:
                for word in row:
                    if word is not None:
                        word_list.append(word)
            return word_list
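
    # Download rule: duration >= 40s, and play count >= 2,000,000 for videos published 7 or
    # more days ago, or >= 500,000 for videos published within the last 7 days. The
    # width/height check only requires non-negative values.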
    @classmethod
    def download_rule(cls, video_dict):
        now = int(time.time())
        publish_day = int((now - video_dict["publish_time_stamp"]) / (3600 * 24))
        if (int(video_dict["video_width"]) >= 0 or int(video_dict["video_height"]) >= 0) \
                and int(video_dict["duration"]) >= 40 \
                and ((publish_day >= 7 and int(video_dict["play_cnt"]) >= 2000000)
                     or (publish_day < 7 and int(video_dict["play_cnt"]) >= 500000)):
            # and int(video_dict["publish_time_stamp"]) >= 1672502400:  # publish time >= 2023-01-01 00:00:00
            return True
        else:
            return False
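
    # get_videoList requests the recwxavideolist endpoint page by page (offset stays 0, so
    # each call returns a fresh recommendation batch), normalizes every feed item into
    # video_dict, applies download_rule, the filter words and the already-downloaded sheets,
    # and hands qualifying items to download_publish.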
    @classmethod
    def get_videoList(cls, log_type, crawler, env):
        while True:
            for page in range(1, 101):
                Common.logger(log_type).info(f"正在抓取第{page}页")
                try:
                    session = Common.get_session(log_type)
                    if session is None:
                        time.sleep(1)
                        continue
                    url = 'https://search.weixin.qq.com/cgi-bin/recwxa/recwxavideolist?'
                    header = {
                        "Connection": "keep-alive",
                        "content-type": "application/json",
                        "Accept-Encoding": "gzip,compress,br,deflate",
                        "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) "
                                      "AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.18(0x18001236) "
                                      "NetType/WIFI Language/zh_CN",
                        "Referer": "https://servicewechat.com/wxbb9a805eb4f9533c/234/page-frame.html",
                    }
                    params = {
                        'session': session,
                        "offset": 0,
                        "wxaVersion": "3.9.2",
                        "count": "10",
                        "channelid": "208",
                        "scene": '310',
                        "subscene": '1089',
                        "clientVersion": '8.0.18',
                        "sharesearchid": '0',
                        "nettype": 'wifi',
                        "switchprofile": "0",
                        "switchnewuser": "0",
                    }
                    urllib3.disable_warnings()
                    response = requests.get(url=url, headers=header, params=params, proxies=proxies, verify=False)
                    if "data" not in response.text:
                        Common.logger(log_type).info("获取视频list时,session过期,随机睡眠 31-40 秒")
                        # Session expired: back off 31-40 seconds before retrying
                        time.sleep(random.randint(31, 40))
                        continue
                    elif "items" not in response.json()["data"]:
                        Common.logger(log_type).info(f"get_feeds:{response.json()},随机睡眠 1-3 分钟")
                        # Response has no "items": back off 1-3 minutes before retrying
                        time.sleep(random.randint(60, 180))
                        continue
                    feeds = response.json().get("data", {}).get("items", [])
                    if not feeds:
                        Common.logger(log_type).info(f"feeds:{feeds}")
                        time.sleep(random.randint(31, 40))
                        continue
                    for i in range(len(feeds)):
                        try:
                            # Strip newlines and characters that are unsafe in file names / sheet cells
                            video_title = feeds[i].get("title", "").strip().replace("\n", "") \
                                .replace("/", "").replace("\\", "").replace("\r", "") \
                                .replace(":", "").replace("*", "").replace("?", "") \
                                .replace("?", "").replace('"', "").replace("<", "") \
                                .replace(">", "").replace("|", "").replace(" ", "") \
                                .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
                                .replace("'", "").replace("#", "").replace("Merge", "")
                            publish_time_stamp = feeds[i].get("date", 0)
                            publish_time_str = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(publish_time_stamp))
                            # Pick the playback URL from whichever CDN block is present
                            if "videoInfo" not in feeds[i] or "videoCdnInfo" not in feeds[i]["videoInfo"]:
                                video_url = ""
                            elif "mpInfo" in feeds[i]["videoInfo"]["videoCdnInfo"]:
                                if len(feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"]) > 2:
                                    video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][2]["url"]
                                else:
                                    video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][0]["url"]
                            elif "ctnInfo" in feeds[i]["videoInfo"]["videoCdnInfo"]:
                                video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["ctnInfo"]["urlInfo"][0]["url"]
                            else:
                                video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["urlInfo"][0]["url"]
                            video_dict = {
                                "video_title": video_title,
                                "video_id": feeds[i].get("videoId", ""),
                                "play_cnt": feeds[i].get("playCount", 0),
                                "like_cnt": feeds[i].get("liked_cnt", 0),
                                "comment_cnt": feeds[i].get("comment_cnt", 0),
                                "share_cnt": feeds[i].get("shared_cnt", 0),
                                "duration": feeds[i].get("mediaDuration", 0),
                                "video_width": feeds[i].get("short_video_info", {}).get("width", 0),
                                "video_height": feeds[i].get("short_video_info", {}).get("height", 0),
                                "publish_time_stamp": publish_time_stamp,
                                "publish_time_str": publish_time_str,
                                "user_name": feeds[i].get("source", "").strip().replace("\n", ""),
                                "user_id": feeds[i].get("openid", ""),
                                "avatar_url": feeds[i].get("bizIcon", ""),
                                "cover_url": feeds[i].get("thumbUrl", ""),
                                "video_url": video_url,
                                "session": session,
                            }
                            for k, v in video_dict.items():
                                Common.logger(log_type).info(f"{k}:{v}")
                            if video_dict["video_id"] == "" \
                                    or video_dict["video_title"] == "" \
                                    or video_dict["video_url"] == "":
                                Common.logger(log_type).info("无效视频\n")
                            elif cls.download_rule(video_dict) is False:
                                Common.logger(log_type).info("不满足抓取规则\n")
                            elif any(str(word) in video_title for word in cls.get_filter_word(log_type, crawler)):
                                Common.logger(log_type).info("视频已中过滤词\n")
                            elif video_dict["video_id"] in [cell for row in Feishu.get_values_batch(log_type, crawler, "ho98Ov") for cell in row]:
                                Common.logger(log_type).info("视频已下载\n")
                            elif video_dict["video_id"] in [cell for row in Feishu.get_values_batch(log_type, crawler, "20ce0c") for cell in row]:
                                Common.logger(log_type).info("视频已下载\n")
                            else:
                                cls.download_publish(log_type, crawler, video_dict, env)
                        except Exception as e:
                            Common.logger(log_type).error(f"抓取单条视频异常:{e}\n")
                except Exception as e:
                    Common.logger(log_type).error(f"抓取第{page}页时异常:{e}\n")
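
    # download_publish downloads the video and cover into ./videos/<title>/, writes info.txt,
    # uploads through Publish.upload_and_publish, and appends one row to the "20ce0c" Feishu sheet.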
    @classmethod
    def download_publish(cls, log_type, crawler, video_dict, env):
        # Download the video
        Common.download_method(log_type, "video", video_dict["video_title"], video_dict["video_url"])
        try:
            if os.path.getsize(f"./videos/{video_dict['video_title']}/video.mp4") == 0:
                # Empty file: delete the video folder
                shutil.rmtree(f"./videos/{video_dict['video_title']}")
                Common.logger(log_type).info("视频size=0,删除成功\n")
                return
        except FileNotFoundError:
            # Video file missing: delete the video folder
            shutil.rmtree(f"./videos/{video_dict['video_title']}")
            Common.logger(log_type).info("视频文件不存在,删除文件夹成功\n")
            return
        # Download the cover
        Common.download_method(log_type, "cover", video_dict["video_title"], video_dict["cover_url"])
        # Save the video info to info.txt
        with open(f"./videos/{video_dict['video_title']}/info.txt", "a", encoding="utf8") as f_a2:
            f_a2.write(str(video_dict['video_id']) + "\n" +
                       str(video_dict['video_title']) + "\n" +
                       str(video_dict['duration']) + "\n" +
                       str(video_dict['play_cnt']) + "\n" +
                       str(video_dict['comment_cnt']) + "\n" +
                       str(video_dict['like_cnt']) + "\n" +
                       str(video_dict['share_cnt']) + "\n" +
                       f'{video_dict["video_width"]}*{video_dict["video_height"]}' + "\n" +
                       str(video_dict["publish_time_stamp"]) + "\n" +
                       str(video_dict["user_name"]) + "\n" +
                       str(video_dict["avatar_url"]) + "\n" +
                       str(video_dict["video_url"]) + "\n" +
                       str(video_dict["cover_url"]) + "\n" +
                       f"kanyikan-recommend-{int(time.time())}")
        Common.logger(log_type).info("==========视频信息已保存至info.txt==========")
        # Upload and publish the video
        our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                  crawler=crawler,
                                                  strategy="推荐抓取策略",
                                                  our_uid="recommend",
                                                  env=env,
                                                  oss_endpoint="out")
        if our_video_id is None:
            try:
                # Upload failed: delete the video folder
                shutil.rmtree(f"./videos/{video_dict['video_title']}")
                return
            except FileNotFoundError:
                return
        if env == "dev":
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        # Save the video info to the Feishu doc:
        Feishu.insert_columns(log_type, crawler, "20ce0c", "ROWS", 1, 2)
        # 看一看+ video-ID sheet: write the new row at the top
        upload_time = int(time.time())
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "推荐榜",
                   str(video_dict["video_id"]),
                   str(video_dict["video_title"]),
                   our_video_link,
                   video_dict["play_cnt"],
                   video_dict["comment_cnt"],
                   video_dict["like_cnt"],
                   video_dict["share_cnt"],
                   video_dict["duration"],
                   f'{video_dict["video_width"]}*{video_dict["video_height"]}',
                   video_dict["publish_time_str"],
                   video_dict["user_name"],
                   video_dict["user_id"],
                   video_dict["avatar_url"],
                   video_dict["cover_url"],
                   video_dict["video_url"]]]
        time.sleep(0.5)
        Feishu.update_values(log_type, crawler, "20ce0c", "F2:Z2", values)
        Common.logger(log_type).info("视频信息保存至云文档成功\n")


if __name__ == "__main__":
    print(Kanyikanrecommend.get_filter_word("recommend", "kanyikan"))
    print(int(time.mktime(time.strptime("2021-06-01 00:00:00", "%Y-%m-%d %H:%M:%S"))))
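    # A plausible entry point for the full crawl (commented out; the "dev" env value is an
    # assumption based on the env check in download_publish):
    # Kanyikanrecommend.get_videoList("recommend", "kanyikan", "dev")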