douyin_author_scheduling_new.py

# -*- coding: utf-8 -*-
# @Time: 2023/11/07
import os
import random
import sys
import time
import requests
import json
import urllib3
sys.path.append(os.getcwd())
from datetime import timedelta, date
from common.common import Common
from common import AliyunLogger
from common.mq import MQ
from requests.adapters import HTTPAdapter
from common.scheduling_db import MysqlHelper
from common.public import get_config_from_mysql, download_rule
from douyin.douyin_author.douyin_author_scheduling_help import DouYinHelper
from common.limit import AuthorLimit


class DouyinauthorScheduling:
    platform = "抖音"
    download_cnt = 0
    limiter = AuthorLimit(platform="douyin", mode="author")

    @classmethod
    def videos_cnt(cls, rule_dict):
        videos_cnt = rule_dict.get("videos_cnt", {}).get("min", 0)
        if videos_cnt == 0:
            videos_cnt = 1000
        return videos_cnt

    @classmethod
    def get_cookie(cls, log_type, crawler, env):
        select_sql = f""" select * from crawler_config where source="{crawler}" """
        configs = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
        for config in configs:
            if "cookie" in config["config"]:
                cookie_dict = {
                    "cookie_id": config["id"],
                    "title": config["title"].strip(),
                    # config["config"] stores a dict literal; evaluate it and pull out the cookie
                    "cookie": dict(eval(config["config"]))["cookie"].strip(),
                    "update_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(config["update_time"] / 1000))),
                    "operator": config["operator"].strip()
                }
                return cookie_dict

    @classmethod
    def get_videoList(cls, log_type, crawler, user_dict, rule_dict, env):
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        next_cursor = 0
        while True:
            # the account link looks like "V1_xxx"; the prefix selects the rule tier below
            flag = user_dict["link"].split("_")[0]
            if flag == "V1":
                rule_dict = {
                    "like_cnt": {"min": 10000, "max": 0},
                    'period': {"min": 90, "max": 90},
                    'special': 0.01
                }
            elif flag == "V2":
                rule_dict = {
                    "like_cnt": {"min": 2000, "max": 0},
                    'period': {"min": 90, "max": 90},
                    'special': 0.01
                }
            elif flag == "V3":
                rule_dict = {
                    "like_cnt": {"min": 100, "max": 0},
                    'period': {"min": 90, "max": 90},
                    'special': 0.01
                }
            cookie = cls.get_cookie(log_type, crawler, env)["cookie"]
            if user_dict['link'][0] == "V":
                link = user_dict["link"][3:]
            else:
                link = user_dict["link"]
            time.sleep(random.randint(5, 10))
            url = 'https://www.douyin.com/aweme/v1/web/aweme/post/'
            account_id = link
            headers = {
                'Accept': 'application/json, text/plain, */*',
                'Accept-Language': 'zh-CN,zh;q=0.9',
                'Cache-Control': 'no-cache',
                'Cookie': f"ttwid={cookie}",
                'Pragma': 'no-cache',
                'Referer': f'https://www.douyin.com/user/{account_id}',
                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '
                              'Chrome/118.0.0.0 Safari/537.36',
            }
            query = DouYinHelper.get_full_query(ua=headers['User-Agent'], extra_data={
                'sec_user_id': account_id,
                'max_cursor': next_cursor,
                'locate_query': 'false',
                'show_live_replay_strategy': '1',
                'need_time_list': '1',
                'time_list_query': '0',
                'whale_cut_token': '',
                'cut_version': '1',
                'count': '18',
                'publish_video_strategy_type': '2',
            })
            urllib3.disable_warnings()
            s = requests.session()
            # retry each request up to 3 times
            s.mount('http://', HTTPAdapter(max_retries=3))
            s.mount('https://', HTTPAdapter(max_retries=3))
            # send the request through the session so the retry adapters actually apply
            response = s.request(method='GET', url=url, headers=headers, params=query)
            body = response.content.decode()
            obj = json.loads(body)
            has_more = obj.get('has_more', 0) == 1
            next_cursor = str(obj.get('max_cursor')) if has_more else None
            data = obj.get('aweme_list', [])
            response.close()
            if response.status_code != 200:
                Common.logger(log_type, crawler).warning(f"data:{data}\n")
                AliyunLogger.logging(
                    code="2000",
                    platform=crawler,
                    mode=log_type,
                    env=env,
                    message=f"data:{data}\n"
                )
                return
            elif len(data) == 0:
                Common.logger(log_type, crawler).warning(f"没有更多视频啦 ~\n")
                AliyunLogger.logging(
                    code="2001",
                    platform=crawler,
                    mode=log_type,
                    env=env,
                    message=f"没有更多视频啦 ~\n"
                )
                return
            for i in range(len(data)):
                try:
                    entity_type = data[i].get('search_impr').get('entity_type')
                    if entity_type == 'GENERAL':
                        Common.logger(log_type, crawler).info('扫描到一条视频\n')
                        AliyunLogger.logging(
                            code="1001",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message='扫描到一条视频\n'
                        )
                        video_id = data[i].get('aweme_id')  # post id (aweme_id)
                        # strip characters that would break file names or downstream parsing
                        video_title = data[i].get('desc', "").strip().replace("\n", "") \
                            .replace("/", "").replace("\\", "").replace("\r", "") \
                            .replace(":", "").replace("*", "").replace("?", "") \
                            .replace("?", "").replace('"', "").replace("<", "") \
                            .replace(">", "").replace("|", "").replace(" ", "") \
                            .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
                            .replace("'", "").replace("#", "").replace("Merge", "")
                        publish_time_stamp = data[i].get('create_time')  # publish time
                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                        video_url = data[i].get('video').get('play_addr').get('url_list')[0]  # video URL
                        cover_url = data[i].get('video').get('cover').get('url_list')[0]  # video cover
                        digg_count = int(data[i].get('statistics').get('digg_count'))  # like count
                        comment_count = int(data[i].get('statistics').get('comment_count'))  # comment count
                        # collect_count = data[i].get('statistics').get('collect_count')  # favorite count
                        share_count = int(data[i].get('statistics').get('share_count'))  # share count
                        # share/like ratio; treat zero likes as 0 to avoid a ZeroDivisionError
                        video_percent = '%.2f' % (share_count / digg_count) if digg_count else '0.00'
                        special = float(rule_dict.get("special"))
                        if float(video_percent) < special:
                            Common.logger(log_type, crawler).info(f"不符合条件:分享/点赞-{video_percent}\n")
                            AliyunLogger.logging(
                                code="2004",
                                platform=crawler,
                                mode=log_type,
                                env=env,
                                message=f"不符合条件:分享/点赞-{video_percent},点赞量-{digg_count}\n"
                            )
                            continue
                        video_dict = {'video_title': video_title,
                                      'video_id': video_id,
                                      'play_cnt': 0,
                                      'like_cnt': digg_count,
                                      'comment_cnt': comment_count,
                                      'share_cnt': share_count,
                                      'video_width': 0,
                                      'video_height': 0,
                                      'duration': 0,
                                      'publish_time_stamp': publish_time_stamp,
                                      'publish_time_str': publish_time_str,
                                      'user_name': "douyin",
                                      'user_id': video_id,
                                      'avatar_url': '',
                                      'cover_url': cover_url,
                                      'video_url': video_url,
                                      'session': f"douyin-{int(time.time())}"}
                        for k, v in video_dict.items():
                            Common.logger(log_type, crawler).info(f"{k}:{v}")
                        AliyunLogger.logging(
                            code="1000",
                            platform=crawler,
                            mode=log_type,
                            env=env,
                            message=f"{video_dict}\n"
                        )
                        if int((int(time.time()) - int(publish_time_stamp)) / (3600 * 24)) > int(rule_dict.get("period", {}).get("max", 1000)):
                            Common.logger(log_type, crawler).info(f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n')
                            AliyunLogger.logging(
                                code="2004",
                                platform=crawler,
                                mode=log_type,
                                env=env,
                                message=f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n'
                            )
                            return
                        if video_dict["video_id"] == '' or video_dict["cover_url"] == '' or video_dict["video_url"] == '':
                            Common.logger(log_type, crawler).info('无效视频\n')
                            AliyunLogger.logging(
                                code="2004",
                                platform=crawler,
                                mode=log_type,
                                env=env,
                                message='无效视频\n'
                            )
                        elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                            Common.logger(log_type, crawler).info("不满足抓取规则\n")
                            AliyunLogger.logging(
                                code="2004",
                                platform=crawler,
                                mode=log_type,
                                env=env,
                                message='不满足抓取规则\n'
                            )
                        elif any(str(word) in video_dict["video_title"]
                                 for word in get_config_from_mysql(log_type=log_type,
                                                                   source=crawler,
                                                                   env=env,
                                                                   text="filter",
                                                                   action="")):
                            Common.logger(log_type, crawler).info('已中过滤词\n')
                            AliyunLogger.logging(
                                code="2004",
                                platform=crawler,
                                mode=log_type,
                                env=env,
                                message='已中过滤词\n'
                            )
                        elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                            Common.logger(log_type, crawler).info('视频已下载\n')
                            AliyunLogger.logging(
                                code="2002",
                                platform=crawler,
                                mode=log_type,
                                env=env,
                                message='视频已下载\n'
                            )
                        else:
                            video_dict["out_user_id"] = video_dict["user_id"]
                            video_dict["platform"] = crawler
                            video_dict["strategy"] = log_type
                            video_dict["out_video_id"] = video_dict["video_id"]
                            video_dict["width"] = video_dict["video_width"]
                            video_dict["height"] = video_dict["video_height"]
                            video_dict["crawler_rule"] = json.dumps(rule_dict)
                            video_dict["user_id"] = user_dict["uid"]
                            video_dict["publish_time"] = video_dict["publish_time_str"]
                            video_dict["strategy_type"] = log_type
                            limit_flag = cls.limiter.author_limitation(user_id=video_dict['user_id'])
                            if limit_flag:
                                mq.send_msg(video_dict)
                                cls.download_cnt += 1
                except Exception as e:
                    Common.logger(log_type, crawler).warning(f"抓取单条视频异常:{e}\n")
                    AliyunLogger.logging(
                        code="3000",
                        platform=crawler,
                        mode=log_type,
                        env=env,
                        message=f"抓取单条视频异常:{e}\n"
                    )
            # stop paging once the API reports no more results; otherwise the loop
            # would repeat the request with an empty cursor
            if not has_more:
                return

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    @classmethod
    def get_author_videos(cls, log_type, crawler, user_list, rule_dict, env):
        for user_dict in user_list:
            try:
                Common.logger(log_type, crawler).info(f"开始抓取 {user_dict['nick_name']} 主页视频")
                AliyunLogger.logging(
                    code="2000",
                    platform=crawler,
                    mode=log_type,
                    env=env,
                    message=f"开始抓取 {user_dict['nick_name']} 主页视频"
                )
                cls.download_cnt = 0
                cls.get_videoList(log_type=log_type,
                                  crawler=crawler,
                                  user_dict=user_dict,
                                  rule_dict=rule_dict,
                                  env=env)
            except Exception as e:
                Common.logger(log_type, crawler).warning(f"抓取用户{user_dict['nick_name']}主页视频时异常:{e}\n")
                AliyunLogger.logging(
                    code="3000",
                    platform=crawler,
                    mode=log_type,
                    env=env,
                    message=f"抓取用户{user_dict['nick_name']}主页视频时异常:{e}\n"
                )


if __name__ == "__main__":
    print(DouyinauthorScheduling.get_cookie("author", "douyin", "prod")["cookie"])
    pass