xiaoniangao_play_scheduling.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/3/16
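# Scheduled "play count" crawler for 小年糕 (Xiaoniangao): fetches the app's recommend
# feed page by page, filters items against the scheduling rule_dict and the configured
# filter words, de-duplicates against crawler_video, and pushes qualifying videos to
# the ETL MQ topic.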
import json
import os
import random
import shutil
import sys
import time
from hashlib import md5

import requests
import urllib3

from common.mq import MQ

sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.publish import Publish
from common.scheduling_db import MysqlHelper
from common.public import get_config_from_mysql, download_rule

proxies = {"http": None, "https": None}


class XiaoniangaoplayScheduling:
    platform = "小年糕"
    words = "abcdefghijklmnopqrstuvwxyz0123456789"
    # Randomly generated device identity: a UUID-shaped uid plus a 32-character token.
    # Currently unused -- see the commented-out reference in get_videoList.
    uid_token_dict = {
        "uid": f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}""",
        "token": "".join(random.sample(words, 32))
    }

    # Fetch the recommend-feed video list
    @classmethod
    def get_videoList(cls, log_type, crawler, rule_dict, our_uid, env):
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        # uid_token_dict = cls.uid_token_dict
        for page in range(1, 101):
            try:
                Common.logger(log_type, crawler).info(f"正在抓取第{page}页")
                Common.logging(log_type, crawler, env, f"正在抓取第{page}页")
                url = "https://kapi-xng-app.xiaoniangao.cn/v2/trends/recommend"
                payload = "{\"topic_name\":\"recommend\",\"ext\":{\"current_item\":0,\"items\":[]},\"tag_id\":0,\"refresh\":false,\"share_width\":300,\"share_height\":240,\"staggered_style\":0,\"qs\":\"imageMogr2\\/gravity\\/center\\/rotate\\/$\\/thumbnail\\/!750x500r\\/interlace\\/1\\/format\\/jpg\",\"topic_id\":2,\"h_qs\":\"imageMogr2\\/gravity\\/center\\/rotate\\/$\\/thumbnail\\/!80x80r\\/interlace\\/1\\/format\\/jpg\",\"log\":{\"net\":\"wifi\",\"product\":\"xng\",\"uid\":\"2F310D09-5E32-5985-8644-3BCB6920E76F\",\"brand\":\"iPad\",\"page\":\"\",\"session_id\":\"71C77648-3224-4083-894C-B6282131F286\",\"resolution\":\"750*1334\",\"pf\":\"4\",\"app_version\":\"1.22.5\",\"device\":\"iPad Pro (12.9-inch) (3rd generation)\",\"os_version\":\"15.7\",\"idfa\":\"\",\"channel\":\"ios_app_store\"},\"token\":\"\"}"
                headers = {
                    'Host': 'kapi-xng-app.xiaoniangao.cn',
                    'content-type': 'application/json; charset=utf-8',
                    'accept': 'application/json',
                    'authorization': 'PsrUTBCQ5G7UVZdgx+JxymPHcKU=',
                    'verb': 'POST',
                    'content-md5': '08fa0e6bf725fd6ef83c16d2ceb8a544',
                    'x-b3-traceid': '45a6c5b4c471eecc',
                    'accept-language': 'zh-Hans-CN;q=1.0',
                    'date': 'Mon, 19 Jun 2023 09:47:40 GMT',
                    'x-token-id': '',
                    'x-signaturemethod': 'hmac-sha1',
                    'user-agent': 'xngapp/1.22.5 (cn.xiaoniangao.xngapp; build:157; iOS 15.7.0) Alamofire/5.2.2'
                }
                urllib3.disable_warnings()
                r = requests.post(url=url, headers=headers, data=payload, proxies=proxies, verify=False)
                if "data" not in r.text or r.status_code != 200:
                    Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
                    Common.logging(log_type, crawler, env, f"get_videoList:{r.text}\n")
                    return
                elif "data" not in r.json():
                    Common.logger(log_type, crawler).info(f"get_videoList:{r.json()}\n")
                    Common.logging(log_type, crawler, env, f"get_videoList:{r.json()}\n")
                    return
                elif "list" not in r.json()["data"]:
                    Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']}\n")
                    Common.logging(log_type, crawler, env, f"get_videoList:{r.json()['data']}\n")
                    return
                elif len(r.json()["data"]["list"]) == 0:
                    Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']['list']}\n")
                    Common.logging(log_type, crawler, env, f"get_videoList:{r.json()['data']['list']}\n")
                    return
                else:
                    # Video list data
                    feeds = r.json()["data"]["list"]
                    for i in range(len(feeds)):
                        try:
                            # Title: an emoji is randomly added at the beginning or end,
                            # or replaces mid-sentence punctuation
                            xiaoniangao_title = feeds[i].get("title", "").strip().replace("\n", "") \
                                .replace("/", "").replace("\r", "").replace("#", "") \
                                .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
                                .replace(":", "").replace("*", "").replace("?", "") \
                                .replace("?", "").replace('"', "").replace("<", "") \
                                .replace(">", "").replace("|", "").replace(" ", "") \
                                .replace('"', '').replace("'", '')
                            # Pick one emoji/symbol at random from config
                            emoji = random.choice(get_config_from_mysql(log_type, crawler, env, "emoji"))
                            # Build the final title: pick randomly from [emoji+title, title+emoji]
                            video_title = random.choice([f"{emoji}{xiaoniangao_title}", f"{xiaoniangao_title}{emoji}"])
                            # Publish time
                            publish_time_stamp = int(int(feeds[i].get("t", 0)) / 1000)
                            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                            # User name / avatar
                            user_name = feeds[i].get("user", {}).get("nick", "").strip().replace("\n", "") \
                                .replace("/", "").replace("快手", "").replace(" ", "") \
                                .replace(" ", "").replace("&NBSP", "").replace("\r", "")
                            video_dict = {
                                "video_title": video_title,
                                "video_id": feeds[i].get("vid", ""),
                                "duration": int(feeds[i].get("du", 0) / 1000),
                                "play_cnt": feeds[i].get("play_pv", 0),
                                "like_cnt": feeds[i].get("favor", {}).get("total", 0),
                                "comment_cnt": feeds[i].get("comment_count", 0),
                                "share_cnt": feeds[i].get("share", 0),
                                "user_name": user_name,
                                "publish_time_stamp": publish_time_stamp,
                                "publish_time_str": publish_time_str,
                                "video_width": int(feeds[i].get("vw", 0)),
                                "video_height": int(feeds[i].get("vh", 0)),
                                "avatar_url": feeds[i].get("user", {}).get("hurl", ""),
                                "profile_id": feeds[i]["id"],
                                "profile_mid": feeds[i]["user"]["mid"],
                                "cover_url": feeds[i].get("url", ""),
                                "video_url": feeds[i].get("v_url", ""),
                                "session": f"xiaoniangao-play-{int(time.time())}"
                            }
                            for k, v in video_dict.items():
                                Common.logger(log_type, crawler).info(f"{k}:{v}")
                            Common.logging(log_type, crawler, env, f"{video_dict}")
                            # Drop invalid items
                            if video_title == "" or video_dict["video_id"] == "" or video_dict["video_url"] == "":
                                Common.logger(log_type, crawler).warning("无效视频\n")
                                Common.logging(log_type, crawler, env, "无效视频\n")
                            # Basic crawl-rule filtering
                            elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                                Common.logger(log_type, crawler).info("不满足抓取规则\n")
                                Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                            # Filter-word check against the configured word list
                            elif any(str(word) in video_dict["video_title"]
                                     for word in get_config_from_mysql(log_type=log_type,
                                                                       source=crawler,
                                                                       env=env,
                                                                       text="filter",
                                                                       action="")):
                                Common.logger(log_type, crawler).info('已中过滤词\n')
                                Common.logging(log_type, crawler, env, '已中过滤词\n')
                            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
                                Common.logger(log_type, crawler).info('视频已下载\n')
                                Common.logging(log_type, crawler, env, '视频已下载\n')
                            else:
                                # cls.download_publish(log_type=log_type,
                                #                      crawler=crawler,
                                #                      video_dict=video_dict,
                                #                      rule_dict=rule_dict,
                                #                      our_uid=our_uid,
                                #                      env=env)
                                video_dict["out_user_id"] = video_dict["profile_id"]
                                video_dict["platform"] = crawler
                                video_dict["strategy"] = log_type
                                video_dict["out_video_id"] = video_dict["video_id"]
                                video_dict["width"] = video_dict["video_width"]
                                video_dict["height"] = video_dict["video_height"]
                                video_dict["crawler_rule"] = json.dumps(rule_dict)
                                video_dict["user_id"] = our_uid
                                video_dict["publish_time"] = video_dict["publish_time_str"]
                                video_dict["strategy_type"] = "play"
                                mq.send_msg(video_dict)
                        except Exception as e:
                            Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                            Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
            except Exception as e:
                Common.logger(log_type, crawler).error(f"抓取第{page}页时异常:{e}\n")
                Common.logging(log_type, crawler, env, f"抓取第{page}页时异常:{e}\n")
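
    # Dedup check: count existing crawler_video rows for this platform / out_video_id
    # (0 means the video has not been crawled before).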
    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        # sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
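
    # Legacy path: download video and cover locally, publish via Publish.upload_and_publish,
    # then record the result in MySQL and Feishu. get_videoList now sends items to MQ instead
    # (its call to this method is commented out above); the method is kept for reference.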
    @classmethod
    def download_publish(cls, log_type, crawler, video_dict, rule_dict, our_uid, env):
        # Download the video
        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
        try:
            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                # Remove the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                Common.logging(log_type, crawler, env, "视频size=0,删除成功\n")
                return
        except FileNotFoundError:
            # Remove the video folder
            shutil.rmtree(f"./{crawler}/videos/{md_title}")
            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
            Common.logging(log_type, crawler, env, "视频文件不存在,删除文件夹成功\n")
            return
        # Download the cover
        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_dict["video_title"], url=video_dict["cover_url"])
        # Save video info to "./videos/{download_video_title}/info.txt"
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
        # Upload the video
        Common.logger(log_type, crawler).info("开始上传视频...")
        Common.logging(log_type, crawler, env, "开始上传视频...")
        if env == "dev":
            oss_endpoint = "out"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy="播放量抓取策略",
                                                      our_uid=our_uid,
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            oss_endpoint = "inner"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy="播放量抓取策略",
                                                      our_uid=our_uid,
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        if our_video_id is None:
            try:
                # Publish failed: remove the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                return
            except FileNotFoundError:
                return
        insert_sql = f""" insert into crawler_video(video_id,
                                                    out_user_id,
                                                    platform,
                                                    strategy,
                                                    out_video_id,
                                                    video_title,
                                                    cover_url,
                                                    video_url,
                                                    duration,
                                                    publish_time,
                                                    play_cnt,
                                                    crawler_rule,
                                                    width,
                                                    height)
                                                    values({our_video_id},
                                                    "{video_dict['profile_id']}",
                                                    "{cls.platform}",
                                                    "播放量抓取策略",
                                                    "{video_dict['video_id']}",
                                                    "{video_dict['video_title']}",
                                                    "{video_dict['cover_url']}",
                                                    "{video_dict['video_url']}",
                                                    {int(video_dict['duration'])},
                                                    "{video_dict['publish_time_str']}",
                                                    {int(video_dict['play_cnt'])},
                                                    '{json.dumps(rule_dict)}',
                                                    {int(video_dict['video_width'])},
                                                    {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
        Common.logging(log_type, crawler, env, '视频信息写入数据库成功')
        # Write the video to Feishu: insert a new row at the top of the video-ID sheet
        Feishu.insert_columns(log_type, crawler, "c85k1C", "ROWS", 1, 2)
        upload_time = int(time.time())
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "播放量榜爬虫策略",
                   str(video_dict['video_id']),
                   str(video_dict['video_title']),
                   our_video_link,
                   video_dict['play_cnt'],
                   video_dict['comment_cnt'],
                   video_dict['like_cnt'],
                   video_dict['share_cnt'],
                   video_dict['duration'],
                   f"{video_dict['video_width']}*{video_dict['video_height']}",
                   str(video_dict['publish_time_str']),
                   str(video_dict['user_name']),
                   str(video_dict['profile_id']),
                   str(video_dict['profile_mid']),
                   str(video_dict['avatar_url']),
                   str(video_dict['cover_url']),
                   str(video_dict['video_url'])]]
        time.sleep(0.5)
        Feishu.update_values(log_type, crawler, "c85k1C", "F2:Z2", values)
        Common.logger(log_type, crawler).info('视频信息写入飞书成功\n')
        Common.logging(log_type, crawler, env, '视频信息写入飞书成功\n')


if __name__ == '__main__':
    pass
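    # Illustrative invocation only -- the log_type / crawler / our_uid / rule_dict values
    # below are assumptions, not values confirmed by this repo's scheduler config:
    # XiaoniangaoplayScheduling.get_videoList(log_type="play",
    #                                         crawler="xiaoniangao",
    #                                         rule_dict={},
    #                                         our_uid=6267140,
    #                                         env="dev")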