xiaoniangao_author_scheduling.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/3/13
import json
import os
import random
import shutil
import sys
import time
import requests
import urllib3
sys.path.append(os.getcwd())
from common.common import Common
from common.scheduling_db import MysqlHelper
from common.publish import Publish
from common.feishu import Feishu
from common.public import get_config_from_mysql

proxies = {"http": None, "https": None}

class XiaoniangaoAuthorScheduling:
    platform = "小年糕"
    # Pagination cursor for the mini-program profile video list
    next_t = None

    # Baseline threshold rules
    @staticmethod
    def download_rule(log_type, crawler, video_dict, rule_dict):
        """
        Basic rules for deciding whether to download a video
        :param log_type: log type
        :param crawler: which crawler
        :param video_dict: video info, as a dict
        :param rule_dict: rule info, as a dict
        :return: True if the rules are satisfied, otherwise False
        """
        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
        if rule_play_cnt_max == 0:
            rule_play_cnt_max = 100000000

        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
        if rule_duration_max == 0:
            rule_duration_max = 100000000

        rule_period_min = rule_dict.get('period', {}).get('min', 0)
        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
        # if rule_period_max == 0:
        #     rule_period_max = 100000000

        rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
        rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
        if rule_fans_cnt_max == 0:
            rule_fans_cnt_max = 100000000

        rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
        rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
        if rule_videos_cnt_max == 0:
            rule_videos_cnt_max = 100000000

        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
        if rule_like_cnt_max == 0:
            rule_like_cnt_max = 100000000

        rule_width_min = rule_dict.get('width', {}).get('min', 0)
        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
        if rule_width_max == 0:
            rule_width_max = 100000000

        rule_height_min = rule_dict.get('height', {}).get('min', 0)
        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
        if rule_height_max == 0:
            rule_height_max = 100000000

        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
        if rule_share_cnt_max == 0:
            rule_share_cnt_max = 100000000

        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
        if rule_comment_cnt_max == 0:
            rule_comment_cnt_max = 100000000

        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 0)
        if rule_publish_time_max == 0:
            # Default is in milliseconds, while publish_time_stamp below is in
            # seconds, so this default upper bound never filters anything out.
            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59

        Common.logger(log_type, crawler).info(
            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
        Common.logger(log_type, crawler).info(
            f'now:{int(time.time())} - publish_time_stamp:{int(video_dict["publish_time_stamp"])} <= {3600 * 24 * int(rule_period_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')

        # Note: period.min acts as a recency window -- the video must have been
        # published within the last period.min days.
        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * int(rule_period_min) \
                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min):
            return True
        else:
            return False
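
    # The nine "max == 0 means unbounded" normalizations above all repeat the
    # same pattern; a minimal sketch of a helper that collapses them
    # (hypothetical, not wired into download_rule):
    @staticmethod
    def _get_rule_range(rule_dict, key, unbounded=100000000):
        """Return (min, max) for one rule key, treating max == 0 as unbounded."""
        rule = rule_dict.get(key, {})
        rule_min = rule.get('min', 0)
        rule_max = rule.get('max', unbounded)
        if rule_max == 0:
            rule_max = unbounded
        return rule_min, rule_max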

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
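
    # NOTE: the f-string SQL in repeat_video is open to injection if
    # out_video_id were ever attacker-controlled. A minimal parameterized
    # sketch, assuming direct access to a pymysql connection (MysqlHelper's
    # internals are not shown in this file):
    @staticmethod
    def _repeat_video_count_sketch(connection, video_id):
        """Hypothetical helper, for illustration only."""
        with connection.cursor() as cursor:
            cursor.execute(
                "select count(*) from crawler_video "
                "where platform=%s and out_video_id=%s;",
                ("小年糕", video_id),
            )
            (count,) = cursor.fetchone()
        return count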

    # Fetch the video list from a user's profile page
    @classmethod
    def get_videoList(cls, log_type, crawler, strategy, p_mid, uid, rule_dict, oss_endpoint, env):
        while True:
            url = "https://api.xiaoniangao.cn/profile/list_album"
            headers = {
                "X-Mid": '1fb47aa7a860d9',
                "X-Token-Id": '9f2cb91f9952c107ecb73642083e1dec-1145266232',
                "content-type": "application/json",
                "uuid": 'f40c2e7c-3cfb-4804-b513-608c0280268c',
                "Accept-Encoding": "gzip,compress,br,deflate",
                "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)"
                              " AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 "
                              "MicroMessenger/8.0.20(0x18001435) NetType/WIFI Language/zh_CN",
                "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/654/page-frame.html'
            }
            json_text = {
                "visited_mid": str(p_mid),
                "start_t": cls.next_t,
                "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!690x385r/crop/690x385/interlace/1/format/jpg",
                "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!120x120r/crop/120x120/interlace/1/format/jpg",
                "limit": 20,
                "token": '54e4c603f7bf3dc009c86b49ed91be36',
                "uid": 'f40c2e7c-3cfb-4804-b513-608c0280268c',
                "proj": "ma",
                "wx_ver": "8.0.23",
                "code_ver": "3.68.0",
                "log_common_params": {
                    "e": [{
                        "data": {
                            "page": "profilePage",
                            "topic": "public"
                        }
                    }],
                    "ext": {
                        "brand": "iPhone",
                        "device": "iPhone 11",
                        "os": "iOS 14.7.1",
                        "weixinver": "8.0.23",
                        "srcver": "2.24.7",
                        "net": "wifi",
                        "scene": "1089"
                    },
                    "pj": "1",
                    "pf": "2",
                    "session_id": "7468cf52-00ea-432e-8505-6ea3ad7ec164"
                }
            }
            urllib3.disable_warnings()
            r = requests.post(url=url, headers=headers, json=json_text, proxies=proxies, verify=False)
            if 'data' not in r.text or r.status_code != 200:
                Common.logger(log_type, crawler).info(f"get_videoList:{r.text}\n")
                cls.next_t = None
                return
            elif 'list' not in r.json()['data']:
                Common.logger(log_type, crawler).info(f"get_videoList:{r.json()}\n")
                cls.next_t = None
                return
            elif len(r.json()['data']['list']) == 0:
                Common.logger(log_type, crawler).info(f"没有更多数据啦~\n")
                cls.next_t = None
                return
            else:
                cls.next_t = r.json()["data"]["next_t"]
                feeds = r.json()["data"]["list"]
                for i in range(len(feeds)):
                    # Title: a random emoji is added at the head or tail, or replaces punctuation inside the sentence
                    xiaoniangao_title = feeds[i].get("title", "").strip().replace("\n", "") \
                        .replace("/", "").replace("\r", "").replace("#", "") \
                        .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
                        .replace(":", "").replace("*", "").replace("?", "") \
                        .replace("？", "").replace('"', "").replace("<", "") \
                        .replace(">", "").replace("|", "").replace(" ", "") \
                        .replace('"', '').replace("'", '')
                    # Pick one emoji/symbol at random
                    emoji = random.choice(get_config_from_mysql(log_type, crawler, env, "emoji"))
                    # Build the final title: choose randomly from [emoji+title, title+emoji]
                    video_title = random.choice([f"{emoji}{xiaoniangao_title}", f"{xiaoniangao_title}{emoji}"])
                    # Video ID
                    video_id = feeds[i].get("vid", "")
                    # Play count
                    play_cnt = feeds[i].get("play_pv", 0)
                    # Like count
                    like_cnt = feeds[i].get("favor", {}).get("total", 0)
                    # Comment count
                    comment_cnt = feeds[i].get("comment_count", 0)
                    # Duration, milliseconds -> seconds
                    share_cnt = feeds[i].get("share", 0)
                    duration = int(feeds[i].get("du", 0) / 1000)
                    # Width and height
                    video_width = int(feeds[i].get("w", 0))
                    video_height = int(feeds[i].get("h", 0))
                    # Publish time
                    publish_time_stamp = int(int(feeds[i].get("t", 0)) / 1000)
                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                    # User name / avatar
                    user_name = feeds[i].get("album_user", {}).get("nick", "").strip().replace("\n", "") \
                        .replace("/", "").replace("快手", "").replace(" ", "") \
                        .replace(" ", "").replace("&NBSP", "").replace("\r", "")
                    avatar_url = feeds[i].get("album_user", {}).get("hurl", "")
                    # User ID
                    profile_id = feeds[i]["id"]
                    # User mid
                    profile_mid = feeds[i]["mid"]
                    # Video cover
                    cover_url = feeds[i].get("url", "")
                    # Video playback URL
                    video_url = feeds[i].get("v_url", "")
                    video_dict = {
                        "video_id": video_id,
                        "video_title": video_title,
                        "duration": duration,
                        "play_cnt": play_cnt,
                        "like_cnt": like_cnt,
                        "comment_cnt": comment_cnt,
                        "share_cnt": share_cnt,
                        "user_name": user_name,
                        "publish_time_stamp": publish_time_stamp,
                        "publish_time_str": publish_time_str,
                        "video_width": video_width,
                        "video_height": video_height,
                        "avatar_url": avatar_url,
                        "profile_id": profile_id,
                        "profile_mid": profile_mid,
                        "cover_url": cover_url,
                        "video_url": video_url,
                        "session": f"xiaoniangao-author-{int(time.time())}"
                    }
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('min', 1000)):
                        Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('min', 1000))}天\n")
                        cls.next_t = None
                        return
                    # Skip invalid videos
                    if video_title == "" or video_id == "" or video_url == "":
                        Common.logger(log_type, crawler).info("无效视频\n")
                    # Filter by the baseline crawl rules
                    elif cls.download_rule(log_type, crawler, video_dict, rule_dict) is False:
                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
                        Common.logger(log_type, crawler).info('视频已下载\n')
                    # Filter words
                    elif any(str(word) in video_title for word in get_config_from_mysql(log_type, crawler, env, "filter", action="")):
                        Common.logger(log_type, crawler).info("视频已中过滤词\n")
                    else:
                        cls.download_publish(log_type=log_type,
                                             crawler=crawler,
                                             strategy=strategy,
                                             video_dict=video_dict,
                                             rule_dict=rule_dict,
                                             uid=uid,
                                             oss_endpoint=oss_endpoint,
                                             env=env)
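
    # get_videoList fires each page request exactly once; a minimal
    # retry-with-backoff sketch around the same requests.post call
    # (hypothetical helper, not used above):
    @staticmethod
    def _post_with_retry(url, headers, payload, retries=3):
        for attempt in range(retries):
            try:
                r = requests.post(url=url, headers=headers, json=payload,
                                  proxies=proxies, verify=False, timeout=10)
                if r.status_code == 200:
                    return r
            except requests.RequestException:
                pass
            time.sleep(2 ** attempt)  # 1s, 2s, 4s between attempts
        return None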

    # Download / upload
    @classmethod
    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, uid, oss_endpoint, env):
        # Download the cover
        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_dict["video_title"], url=video_dict["cover_url"])
        # Download the video
        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
        # Save the video info to "./videos/{download_video_title}/info.txt"
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
        # Upload the video
        Common.logger(log_type, crawler).info("开始上传视频...")
        our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                  crawler=crawler,
                                                  strategy=strategy,
                                                  our_uid=uid,
                                                  env=env,
                                                  oss_endpoint=oss_endpoint)
        Common.logger(log_type, crawler).info("视频上传完成")
        if our_video_id is None:
            # Upload failed: delete the local video folder and stop
            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
            return
        if env == "dev":
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        insert_sql = f""" insert into crawler_video(video_id,
                                                    out_user_id,
                                                    platform,
                                                    strategy,
                                                    out_video_id,
                                                    video_title,
                                                    cover_url,
                                                    video_url,
                                                    duration,
                                                    publish_time,
                                                    play_cnt,
                                                    crawler_rule,
                                                    width,
                                                    height)
                          values({our_video_id},
                                 "{video_dict['profile_id']}",
                                 "{cls.platform}",
                                 "定向爬虫策略",
                                 "{video_dict['video_id']}",
                                 "{video_dict['video_title']}",
                                 "{video_dict['cover_url']}",
                                 "{video_dict['video_url']}",
                                 {int(video_dict['duration'])},
                                 "{video_dict['publish_time_str']}",
                                 {int(video_dict['play_cnt'])},
                                 '{json.dumps(rule_dict)}',
                                 {int(video_dict['video_width'])},
                                 {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
        # Write the video to Feishu
        Feishu.insert_columns(log_type, crawler, "Wu0CeL", "ROWS", 1, 2)
        # Video-ID worksheet: write the data into the first row
        upload_time = int(time.time())
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "用户主页",
                   str(video_dict['video_id']),
                   str(video_dict['video_title']),
                   our_video_link,
                   video_dict['play_cnt'],
                   video_dict['comment_cnt'],
                   video_dict['like_cnt'],
                   video_dict['share_cnt'],
                   video_dict['duration'],
                   f"{video_dict['video_width']}*{video_dict['video_height']}",
                   str(video_dict['publish_time_str']),
                   str(video_dict['user_name']),
                   str(video_dict['profile_id']),
                   str(video_dict['profile_mid']),
                   str(video_dict['avatar_url']),
                   str(video_dict['cover_url']),
                   str(video_dict['video_url'])]]
        time.sleep(1)
        Feishu.update_values(log_type, crawler, "Wu0CeL", "F2:Z2", values)
        Common.logger(log_type, crawler).info('视频信息写入飞书成功\n')

    # Fetch videos for every user on the follow list
    @classmethod
    def get_follow_videos(cls, log_type, crawler, user_list, rule_dict, strategy, oss_endpoint, env):
        if len(user_list) == 0:
            Common.logger(log_type, crawler).warning("抓取用户列表为空\n")
            return
        for user in user_list:
            # Common.logger(log_type, crawler).info(f"user:{user}")
            try:
                user_name = user['nick_name']
                profile_mid = user['link']
                uid = user['uid']
                Common.logger(log_type, crawler).info(f"获取 {user_name} 主页视频")
                cls.get_videoList(log_type=log_type,
                                  crawler=crawler,
                                  strategy=strategy,
                                  p_mid=profile_mid,
                                  rule_dict=rule_dict,
                                  uid=uid,
                                  oss_endpoint=oss_endpoint,
                                  env=env)
                cls.next_t = None
                time.sleep(1)
            except Exception as e:
                Common.logger(log_type, crawler).error(f"get_follow_videos:{e}\n")

if __name__ == "__main__":
    # print(XiaoniangaoAuthorScheduling.repeat_video("follow", "xiaoniangao", "4919087666", "prod", "aliyun"))
    # print(XiaoniangaoAuthorScheduling.repeat_video("follow", "xiaoniangao", "4919087666", "dev"))
    # XiaoniangaoAuthorScheduling.get_users()
    pass
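    # Hedged usage sketch (hypothetical values; in production user_list comes
    # from the scheduling DB and rule_dict from the task config):
    # XiaoniangaoAuthorScheduling.get_follow_videos(
    #     log_type="author",
    #     crawler="xiaoniangao",
    #     user_list=[{"nick_name": "example", "link": "123456", "uid": "654321"}],
    #     rule_dict={"play_cnt": {"min": 100, "max": 0}, "period": {"min": 3, "max": 0}},
    #     strategy="定向爬虫策略",
    #     oss_endpoint="out",
    #     env="dev",
    # )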