# douyin_author_scheduling.py
# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/5/26
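# Scheduled crawler for Douyin author (user homepage) videos: fetches each
# user's post list, cleans the titles, applies the period / download-rule /
# filter-word / dedup checks, and pushes qualifying items to the crawler ETL
# message queue.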
import json
import os
import shutil
import sys
import time
import requests
from hashlib import md5
from common.mq import MQ
sys.path.append(os.getcwd())
from common.common import Common
from common.scheduling_db import MysqlHelper
from douyin.douyin_recommend import get_xb
from common.feishu import Feishu
from common.publish import Publish
from common.public import random_title, get_config_from_mysql, download_rule


class DouyinauthorScheduling:
    platform = "抖音"

    @classmethod
    def video_title(cls, log_type, env, crawler, title):
        # Strip hashtag / @mention segments and forbidden characters from the raw
        # title; fall back to a random title when nothing usable remains.
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]

        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]

        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]

        video_title = title3.strip().split('#')[0].replace("\n", "") \
            .replace("/", "").replace("抖音", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]

        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return random_title(log_type, crawler, env, text='title')
        else:
            return video_title
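
    # Illustrative example (hypothetical input, not taken from the source):
    #   cls.video_title(log_type, env, crawler, "早安 #晨曦 #正能量 @小助手")
    # keeps only the text before the first " #" / "@" segment and strips the
    # characters listed above, so the cleaned title would be "早安".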

    @classmethod
    def get_videoList(cls, log_type, crawler, user_dict, rule_dict, env):
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        max_cursor = ""
        # while True:
        url = "https://www.douyin.com/aweme/v1/web/aweme/post/?device_platform=webapp&aid=6383&channel=channel_pc_web&sec_user_id={sec_user_id}&max_cursor={max_cursor}&show_live_replay_strategy=1&count=10&publish_video_strategy_type=2&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1440&screen_height=900&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=112.0.0.0&browser_online=true&engine_name=Blink&engine_version=112.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=50".format(
            sec_user_id=user_dict["link"].replace("https://www.douyin.com/user/", ""), max_cursor=max_cursor)
        Common.logger(log_type, crawler).info(f"url:{url}")
        Common.logging(log_type, crawler, env, f"url:{url}")
        headers = {
            'authority': 'www.douyin.com',
            'accept': 'application/json, text/plain, */*',
            'accept-language': 'zh-CN,zh;q=0.9',
            # 'cookie': '__ac_nonce=06437a18000f23ad954f0; __ac_signature=_02B4Z6wo00f01Sb71TAAAIDCsi2OPpjonN0m29GAAC2M85; s_v_web_id=verify_lgeqr3uq_3aDaqQXf_juHS_40Yi_BE8b_tI8FCILZQXPK; _tea_utm_cache_2018=undefined; ttwid=1%7Cq_IBs6hbBUOIEcRR1gxtgY6GiTbTE3U1XhJNLL_9BZA%7C1681367431%7Cf77b36ae4721884fec1c3fa9d6a08c29e308236ae13df58d1be3b0d1f82f8668; strategyABtestKey=%221681367433.454%22; passport_csrf_token=bff0289a5846e58b4b9db6e1f64665f4; passport_csrf_token_default=bff0289a5846e58b4b9db6e1f64665f4; bd_ticket_guard_client_data=eyJiZC10aWNrZXQtZ3VhcmQtdmVyc2lvbiI6MiwiYmQtdGlja2V0LWd1YXJkLWl0ZXJhdGlvbi12ZXJzaW9uIjoxLCJiZC10aWNrZXQtZ3VhcmQtY2xpZW50LWNzciI6Ii0tLS0tQkVHSU4gQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbk1JSUJEVENCdFFJQkFEQW5NUXN3Q1FZRFZRUUdFd0pEVGpFWU1CWUdBMVVFQXd3UFltUmZkR2xqYTJWMFgyZDFcclxuWVhKa01Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRVhzcHJ5TElFT3E4Z2tPc2l5MTdSS1dEcVxyXG5xTXZkWEt5Y1V5NStiL3JpSmJ6VkMwMlYrU1dNaWtZTlNOK29IU2g2WVVTTmdUUjJrZEhvRUxISmxGdU9scUFzXHJcbk1Db0dDU3FHU0liM0RRRUpEakVkTUJzd0dRWURWUjBSQkJJd0VJSU9kM2QzTG1SdmRYbHBiaTVqYjIwd0NnWUlcclxuS29aSXpqMEVBd0lEUndBd1JBSWdIeW9SblFNK0h0Z3ZOU2dFMVpHdGpmYWQxT3BuWHJRdVFSNXNSaDkwakRJQ1xyXG5JRG1tVkthRkN5djBLemtpZ0J0RExaTVJSNndURzRBWUVoNUlWUmlZUU9UVVxyXG4tLS0tLUVORCBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS1cclxuIn0=; msToken=ZPkeAqCnLbjDWCkWcWf36ZZIoZTl07X33ca0xcNGk3tZPoMvgx-lo28oNb5JhbCKsXLgLNIoojAbocBrjdAv54Hwf-Tk3_yAjLW7WJxxSa0=; ttcid=54b98e03a03e43e09211ee28db90fdd126; home_can_add_dy_2_desktop=%221%22; msToken=nqMSwn8xJAXLZk2AikdOeJ6P3JvoCsbYjHROoR55KXMDvWs8auYCMpZFGQSClHhitgB0l_vwg8m0-lE-aWQovfN7Ga1QupM3SpdNktiamFRiHMdJExREc9_uxS1ln8E=; tt_scid=DnO5GFg1oLONpPMFuFeL-OveKBn43mRynRVTvHsh1KUQm283ocN6JX6qPKDDrFHbfdf1; download_guide=%222%2F20230413%22; msToken=b9haS5RjLaFgqVDoGp5xSqc8B4kl-miQB5Nku0BSIvHVutKT81Nzk_pPb0wm7xYlAp_nz1gytQng5gYeIRNxcMgZJ_MB7lhejt_093miXlHtvqAaxL0FNg==',
            'referer': 'https://www.douyin.com/user/{}?showTab=post'.format(user_dict["link"].replace("https://www.douyin.com/user/", "")),
            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
        }
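        # X-Bogus is the signature parameter Douyin expects on web API requests;
        # get_xb() (from douyin.douyin_recommend) derives it from the request URL
        # and the user-agent above, and it is appended to the query string below.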
        x_bogus = get_xb(url, headers['user-agent'])
        url = url + '&X-Bogus={}'.format(x_bogus)
        if not x_bogus:
            return
        res = requests.get(url=url, headers=headers, data={}, proxies=Common.tunnel_proxies(), timeout=10)
        # Common.logger(log_type, crawler).info(f"res:{res.text}\n")
        aweme_list = res.json().get('aweme_list', [])
        # max_cursor = res.json().get("max_cursor", "")
        if not aweme_list:
            Common.logger(log_type, crawler).info(f"没有更多数据啦~:{res.text}\n")
            Common.logging(log_type, crawler, env, f"没有更多数据啦~:{res.text}\n")
            return
        for info in aweme_list:
            try:
                if info.get('is_ads'):
                    continue
                publish_time = info.get('create_time')
                if not publish_time:
                    continue
                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time))
                publish_day = int((int(time.time()) - publish_time) / (3600 * 24))
                video_title = cls.video_title(log_type, env, crawler, info['desc'])
                if not video_title:
                    video_title = random_title(log_type, crawler, env, text='title')
                video_dict = {'video_title': video_title,
                              'video_id': info['aweme_id'],
                              'play_cnt': info['statistics']['play_count'],
                              'comment_cnt': info['statistics']['comment_count'],
                              'like_cnt': info['statistics']['digg_count'],
                              'share_cnt': info['statistics']['share_count'],
                              'video_width': info['video']['width'],
                              'video_height': info['video']['height'],
                              'duration': round(info['video']['duration'] / 1000),
                              'publish_time': publish_day,
                              'publish_time_stamp': publish_time,
                              'publish_time_str': publish_time_str,
                              'user_name': info['author']['nickname'],
                              'user_id': info['author_user_id'],
                              'user_sec_id': info['author']['sec_uid'],
                              'avatar_url': info['author']['avatar_thumb']['url_list'][0],
                              'cover_url': info['video']['origin_cover']['url_list'][0].replace('\u0026', '&'),
                              'video_url': info['video']['play_addr']['url_list'][0],
                              'session': f"douyin{int(time.time())}"}
                for k, v in video_dict.items():
                    Common.logger(log_type, crawler).info(f"{k}:{v}")
                Common.logging(log_type, crawler, env, f"{video_dict}")

                if int((int(time.time()) - int(video_dict["publish_time_stamp"])) / (3600 * 24)) > int(rule_dict.get("period", {}).get("max", 1000)):
                    Common.logger(log_type, crawler).info(f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n')
                    Common.logging(log_type, crawler, env, f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n')
                    return
                if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict,
                                 rule_dict=rule_dict) is False:
                    Common.logger(log_type, crawler).info("不满足抓取规则\n")
                    Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                elif any(str(word) if str(word) in video_dict["video_title"] else False
                         for word in get_config_from_mysql(log_type=log_type,
                                                           source=crawler,
                                                           env=env,
                                                           text="filter",
                                                           action="")) is True:
                    Common.logger(log_type, crawler).info('已中过滤词\n')
                    Common.logging(log_type, crawler, env, '已中过滤词\n')
                elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                    Common.logger(log_type, crawler).info('视频已下载\n')
                    Common.logging(log_type, crawler, env, '视频已下载\n')
                else:
                    # cls.download_publish(log_type=log_type,
                    #                      crawler=crawler,
                    #                      user_dict=user_dict,
                    #                      video_dict=video_dict,
                    #                      rule_dict=rule_dict,
                    #                      env=env)
                    # Remap fields before pushing the item to the crawler ETL MQ topic.
                    video_dict["out_user_id"] = video_dict["user_id"]
                    video_dict["platform"] = crawler
                    video_dict["strategy"] = log_type
                    video_dict["out_video_id"] = video_dict["video_id"]
                    video_dict["width"] = video_dict["video_width"]
                    video_dict["height"] = video_dict["video_height"]
                    video_dict["crawler_rule"] = json.dumps(rule_dict)
                    video_dict["user_id"] = user_dict["uid"]
                    video_dict["publish_time"] = video_dict["publish_time_str"]
                    video_dict["strategy_type"] = log_type
                    mq.send_msg(video_dict)
            except Exception as e:
                Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        # sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
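
    # repeat_video() returns the number of crawler_video rows already holding this
    # out_video_id (matched against both the crawler name and the Chinese platform
    # name); get_videoList() treats any non-zero count as "already downloaded".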

    # Download / upload
    @classmethod
    def download_publish(cls, log_type, crawler, user_dict, video_dict, rule_dict, env):
        # Download the video
        Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
        try:
            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                # Delete the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                Common.logging(log_type, crawler, env, "视频size=0,删除成功\n")
                return
        except FileNotFoundError:
            # Delete the video folder
            shutil.rmtree(f"./{crawler}/videos/{md_title}")
            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
            Common.logging(log_type, crawler, env, "视频文件不存在,删除文件夹成功\n")
            return
        # Download the cover
        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
        # Save video info to txt
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
        # Upload the video
        Common.logger(log_type, crawler).info("开始上传视频...")
        Common.logging(log_type, crawler, env, "开始上传视频...")
        if env == "dev":
            oss_endpoint = "out"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy="定向抓取策略",
                                                      our_uid=user_dict["uid"],
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            oss_endpoint = "inner"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy="定向抓取策略",
                                                      our_uid=user_dict["uid"],
                                                      env=env,
                                                      oss_endpoint=oss_endpoint)
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        if our_video_id is None:
            try:
                # Delete the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                return
            except FileNotFoundError:
                return

        # Save video info to the database
        insert_sql = f""" insert into crawler_video(video_id,
                                                    user_id,
                                                    out_user_id,
                                                    platform,
                                                    strategy,
                                                    out_video_id,
                                                    video_title,
                                                    cover_url,
                                                    video_url,
                                                    duration,
                                                    publish_time,
                                                    play_cnt,
                                                    comment_cnt,
                                                    like_cnt,
                                                    share_cnt,
                                                    crawler_rule,
                                                    width,
                                                    height)
                                                    values({our_video_id},
                                                    {user_dict["uid"]},
                                                    "{video_dict['user_id']}",
                                                    "{cls.platform}",
                                                    "定向抓取策略",
                                                    "{video_dict['video_id']}",
                                                    "{video_dict['video_title']}",
                                                    "{video_dict['cover_url']}",
                                                    "{video_dict['video_url']}",
                                                    {int(video_dict['duration'])},
                                                    "{video_dict['publish_time_str']}",
                                                    {int(video_dict['play_cnt'])},
                                                    {int(video_dict['comment_cnt'])},
                                                    {int(video_dict['like_cnt'])},
                                                    {int(video_dict['share_cnt'])},
                                                    '{json.dumps(rule_dict)}',
                                                    {int(video_dict['video_width'])},
                                                    {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
        Common.logging(log_type, crawler, env, '视频信息写入数据库成功')

        # Write the video record to the Feishu sheet
        upload_time = int(time.time())
        values = [[
            our_video_id,
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
            "定向抓取策略",
            str(video_dict['video_id']),
            video_dict['video_title'],
            our_video_link,
            # video_dict['gid'],
            video_dict['play_cnt'],
            video_dict['comment_cnt'],
            video_dict['like_cnt'],
            video_dict['share_cnt'],
            video_dict['duration'],
            str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
            video_dict['publish_time_str'],
            video_dict['user_name'],  # author nickname; video_dict has no 'nick_name' key
            video_dict['user_id'],
            video_dict['avatar_url'],
            video_dict['cover_url'],
            video_dict['video_url']
        ]]
        Feishu.insert_columns(log_type, 'douyin', "qV9VC0", "ROWS", 1, 2)
        time.sleep(0.5)
        Feishu.update_values(log_type, 'douyin', "qV9VC0", "A2:Z2", values)
        Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
        Common.logging(log_type, crawler, env, f"视频已保存至云文档\n")
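
    # Note: the download_publish() call inside get_videoList() is commented out
    # above; in this version qualifying items are sent to the MQ topic instead,
    # and the download/upload path is kept here for reference.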

    @classmethod
    def get_author_videos(cls, log_type, crawler, rule_dict, user_list, env):
        for user_dict in user_list:
            try:
                Common.logger(log_type, crawler).info(f"开始抓取 {user_dict['nick_name']} 用户主页视频\n")
                Common.logging(log_type, crawler, env, f"开始抓取 {user_dict['nick_name']} 用户主页视频\n")
                cls.get_videoList(log_type=log_type,
                                  crawler=crawler,
                                  rule_dict=rule_dict,
                                  user_dict=user_dict,
                                  env=env)
            except Exception as e:
                Common.logger(log_type, crawler).error(f"抓取用户{user_dict['nick_name']}主页视频时异常:{e}\n")
                Common.logging(log_type, crawler, env, f"抓取用户{user_dict['nick_name']}主页视频时异常:{e}\n")


if __name__ == '__main__':
    pass
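    # A minimal usage sketch (illustrative only: the user_list / rule_dict shapes
    # are inferred from how get_videoList() and get_author_videos() read them,
    # and the values below are placeholders, not real accounts or rules):
    #
    # DouyinauthorScheduling.get_author_videos(
    #     log_type="author",
    #     crawler="douyin",
    #     rule_dict={"period": {"min": 0, "max": 30}},
    #     user_list=[{
    #         "uid": 123456,                                        # internal uid used when publishing
    #         "nick_name": "示例账号",                               # used only for logging
    #         "link": "https://www.douyin.com/user/MS4wLjABxxxx",   # author homepage link
    #     }],
    #     env="dev",
    # )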