# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/5/26
# @File: douyin_author_scheduling.py
import json
import os
import shutil
import sys
import time
import requests
from hashlib import md5

sys.path.append(os.getcwd())
from common.common import Common
from common.scheduling_db import MysqlHelper
from douyin.douyin_recommend import get_xb
from common.feishu import Feishu
from common.publish import Publish
from common.public import random_title, get_config_from_mysql, download_rule


class DouyinauthorScheduling:
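    """Scheduled "定向抓取" (targeted) crawler: fetches videos from Douyin author
    homepages, filters them against the scheduling rules, then downloads and
    publishes the ones that pass."""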
    platform = "抖音"

    @classmethod
    def video_title(cls, log_type, env, crawler, title):
        # Cut the caption at " #" and "@" separators, preferring the leading
        # segment, then strip illegal characters and cap at 40 characters.
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]

        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]

        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]

        video_title = title3.strip().split('#')[0].replace("\n", "") \
            .replace("/", "").replace("抖音", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]

        # Fall back to a random title when cleaning leaves nothing usable
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return random_title(log_type, crawler, env, text='title')
        else:
            return video_title
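
    # A worked illustration (hypothetical input, traced through video_title):
    # a caption such as "每日一菜 #家常菜 @小李" keeps only the leading segment
    # "每日一菜" -- the hashtag and @-mention parts are dropped, illegal
    # characters removed, and the result truncated to 40 characters.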

    @classmethod
    def get_videoList(cls, log_type, crawler, user_dict, rule_dict, env):
        max_cursor = ""
        # while True:
        url = "https://www.douyin.com/aweme/v1/web/aweme/post/?device_platform=webapp&aid=6383&channel=channel_pc_web&sec_user_id={sec_user_id}&max_cursor={max_cursor}&show_live_replay_strategy=1&count=10&publish_video_strategy_type=2&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1440&screen_height=900&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=112.0.0.0&browser_online=true&engine_name=Blink&engine_version=112.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=50".format(
            sec_user_id=user_dict["link"].replace("https://www.douyin.com/user/", ""), max_cursor=max_cursor)
        Common.logger(log_type, crawler).info(f"url:{url}")
        Common.logging(log_type, crawler, env, f"url:{url}")
        headers = {
            'authority': 'www.douyin.com',
            'accept': 'application/json, text/plain, */*',
            'accept-language': 'zh-CN,zh;q=0.9',
            # 'cookie': '__ac_nonce=06437a18000f23ad954f0; __ac_signature=_02B4Z6wo00f01Sb71TAAAIDCsi2OPpjonN0m29GAAC2M85; s_v_web_id=verify_lgeqr3uq_3aDaqQXf_juHS_40Yi_BE8b_tI8FCILZQXPK; _tea_utm_cache_2018=undefined; ttwid=1%7Cq_IBs6hbBUOIEcRR1gxtgY6GiTbTE3U1XhJNLL_9BZA%7C1681367431%7Cf77b36ae4721884fec1c3fa9d6a08c29e308236ae13df58d1be3b0d1f82f8668; strategyABtestKey=%221681367433.454%22; passport_csrf_token=bff0289a5846e58b4b9db6e1f64665f4; passport_csrf_token_default=bff0289a5846e58b4b9db6e1f64665f4; bd_ticket_guard_client_data=eyJiZC10aWNrZXQtZ3VhcmQtdmVyc2lvbiI6MiwiYmQtdGlja2V0LWd1YXJkLWl0ZXJhdGlvbi12ZXJzaW9uIjoxLCJiZC10aWNrZXQtZ3VhcmQtY2xpZW50LWNzciI6Ii0tLS0tQkVHSU4gQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbk1JSUJEVENCdFFJQkFEQW5NUXN3Q1FZRFZRUUdFd0pEVGpFWU1CWUdBMVVFQXd3UFltUmZkR2xqYTJWMFgyZDFcclxuWVhKa01Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRVhzcHJ5TElFT3E4Z2tPc2l5MTdSS1dEcVxyXG5xTXZkWEt5Y1V5NStiL3JpSmJ6VkMwMlYrU1dNaWtZTlNOK29IU2g2WVVTTmdUUjJrZEhvRUxISmxGdU9scUFzXHJcbk1Db0dDU3FHU0liM0RRRUpEakVkTUJzd0dRWURWUjBSQkJJd0VJSU9kM2QzTG1SdmRYbHBiaTVqYjIwd0NnWUlcclxuS29aSXpqMEVBd0lEUndBd1JBSWdIeW9SblFNK0h0Z3ZOU2dFMVpHdGpmYWQxT3BuWHJRdVFSNXNSaDkwakRJQ1xyXG5JRG1tVkthRkN5djBLemtpZ0J0RExaTVJSNndURzRBWUVoNUlWUmlZUU9UVVxyXG4tLS0tLUVORCBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS1cclxuIn0=; msToken=ZPkeAqCnLbjDWCkWcWf36ZZIoZTl07X33ca0xcNGk3tZPoMvgx-lo28oNb5JhbCKsXLgLNIoojAbocBrjdAv54Hwf-Tk3_yAjLW7WJxxSa0=; ttcid=54b98e03a03e43e09211ee28db90fdd126; home_can_add_dy_2_desktop=%221%22; msToken=nqMSwn8xJAXLZk2AikdOeJ6P3JvoCsbYjHROoR55KXMDvWs8auYCMpZFGQSClHhitgB0l_vwg8m0-lE-aWQovfN7Ga1QupM3SpdNktiamFRiHMdJExREc9_uxS1ln8E=; tt_scid=DnO5GFg1oLONpPMFuFeL-OveKBn43mRynRVTvHsh1KUQm283ocN6JX6qPKDDrFHbfdf1; download_guide=%222%2F20230413%22; msToken=b9haS5RjLaFgqVDoGp5xSqc8B4kl-miQB5Nku0BSIvHVutKT81Nzk_pPb0wm7xYlAp_nz1gytQng5gYeIRNxcMgZJ_MB7lhejt_093miXlHtvqAaxL0FNg==',
            'referer': 'https://www.douyin.com/user/{}?showTab=post'.format(user_dict["link"].replace("https://www.douyin.com/user/", "")),
            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
        }
        # Sign the request; bail out before appending X-Bogus if signing failed
        x_bogus = get_xb(url, headers['user-agent'])
        if not x_bogus:
            return
        url = url + '&X-Bogus={}'.format(x_bogus)
        res = requests.get(url=url, headers=headers, proxies=Common.tunnel_proxies(), timeout=10)
        # Common.logger(log_type, crawler).info(f"res:{res.text}\n")
        aweme_list = res.json().get('aweme_list', [])
        # max_cursor = res.json().get("max_cursor", "")
        if not aweme_list:
            Common.logger(log_type, crawler).info(f"没有更多数据啦~:{res.text}\n")
            Common.logging(log_type, crawler, env, f"没有更多数据啦~:{res.text}\n")
            return
        for info in aweme_list:
            try:
                # Skip ads and entries without a publish time
                if info.get('is_ads'):
                    continue
                publish_time = info.get('create_time')
                if not publish_time:
                    continue
                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time))
                publish_day = int((int(time.time()) - publish_time) / (3600 * 24))

                video_title = cls.video_title(log_type, env, crawler, info['desc'])
                if not video_title:
                    video_title = random_title(log_type, crawler, env, text='title')

                video_dict = {'video_title': video_title,
                              'video_id': info['aweme_id'],
                              'play_cnt': info['statistics']['play_count'],
                              'comment_cnt': info['statistics']['comment_count'],
                              'like_cnt': info['statistics']['digg_count'],
                              'share_cnt': info['statistics']['share_count'],
                              'video_width': info['video']['width'],
                              'video_height': info['video']['height'],
                              'duration': round(info['video']['duration'] / 1000),
                              'publish_time': publish_day,
                              'publish_time_stamp': publish_time,
                              'publish_time_str': publish_time_str,
                              'user_name': info['author']['nickname'],
                              'user_id': info['author_user_id'],
                              'user_sec_id': info['author']['sec_uid'],
                              'avatar_url': info['author']['avatar_thumb']['url_list'][0],
                              'cover_url': info['video']['origin_cover']['url_list'][0].replace('\u0026', '&'),
                              'video_url': info['video']['play_addr']['url_list'][0],
                              'session': f"douyin{int(time.time())}"}
                for k, v in video_dict.items():
                    Common.logger(log_type, crawler).info(f"{k}:{v}")
                Common.logging(log_type, crawler, env, f"{video_dict}")

                # Posts come back newest-first, so once one falls outside the
                # period window the remaining ones do too
                if int((int(time.time()) - int(video_dict["publish_time_stamp"])) / (3600 * 24)) > int(rule_dict.get("period", {}).get("max", 1000)):
                    Common.logger(log_type, crawler).info(f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n')
                    Common.logging(log_type, crawler, env, f'发布时间超过{int(rule_dict.get("period", {}).get("max", 1000))}天\n')
                    return

                if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict,
                                 rule_dict=rule_dict) is False:
                    Common.logger(log_type, crawler).info("不满足抓取规则\n")
                    Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                elif any(str(word) in video_dict["video_title"]
                         for word in get_config_from_mysql(log_type=log_type,
                                                           source=crawler,
                                                           env=env,
                                                           text="filter",
                                                           action="")):
                    Common.logger(log_type, crawler).info('已中过滤词\n')
                    Common.logging(log_type, crawler, env, '已中过滤词\n')
                elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                    Common.logger(log_type, crawler).info('视频已下载\n')
                    Common.logging(log_type, crawler, env, '视频已下载\n')
                else:
                    cls.download_publish(log_type=log_type,
                                         crawler=crawler,
                                         user_dict=user_dict,
                                         video_dict=video_dict,
                                         rule_dict=rule_dict,
                                         env=env)
            except Exception as e:
                Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
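
    # Note: pagination is currently disabled -- the `while True:` loop and the
    # `max_cursor` update in get_videoList are commented out, so only the first
    # page (up to 10 posts) is fetched per user. Re-enabling it would mean
    # looping the request, feeding `res.json().get("max_cursor", "")` back into
    # the next URL, and stopping when the response reports no more data (a
    # sketch only; the field semantics are not re-verified against the current
    # Douyin web API here).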

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
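
    # repeat_video returns the number of crawler_video rows already holding
    # this (platform, out_video_id) pair; a non-zero count means "already
    # downloaded". video_id is interpolated straight into the SQL string --
    # acceptable for numeric Douyin aweme_ids, but a parameterized query would
    # be safer if MysqlHelper exposed one (not verified here).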

    # Download / upload
    @classmethod
    def download_publish(cls, log_type, crawler, user_dict, video_dict, rule_dict, env):
        # Download the video
        Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
        try:
            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                # Empty download: delete the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                Common.logging(log_type, crawler, env, "视频size=0,删除成功\n")
                return
        except FileNotFoundError:
            # File missing: delete the video folder
            shutil.rmtree(f"./{crawler}/videos/{md_title}")
            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
            Common.logging(log_type, crawler, env, "视频文件不存在,删除文件夹成功\n")
            return
        # Download the cover image
        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
        # Save video info to txt
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
        # Upload the video
        Common.logger(log_type, crawler).info("开始上传视频...")
        Common.logging(log_type, crawler, env, "开始上传视频...")
        # dev uploads through the external OSS endpoint, prod through the inner one
        oss_endpoint = "out" if env == "dev" else "inner"
        our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                  crawler=crawler,
                                                  strategy="定向抓取策略",
                                                  our_uid=user_dict["uid"],
                                                  env=env,
                                                  oss_endpoint=oss_endpoint)
        if env == "dev":
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        if our_video_id is None:
            try:
                # Upload failed: delete the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                return
            except FileNotFoundError:
                return
        # Save video info to the database
        insert_sql = f""" insert into crawler_video(video_id,
                                                    user_id,
                                                    out_user_id,
                                                    platform,
                                                    strategy,
                                                    out_video_id,
                                                    video_title,
                                                    cover_url,
                                                    video_url,
                                                    duration,
                                                    publish_time,
                                                    play_cnt,
                                                    comment_cnt,
                                                    like_cnt,
                                                    share_cnt,
                                                    crawler_rule,
                                                    width,
                                                    height)
                          values({our_video_id},
                                 {user_dict["uid"]},
                                 "{video_dict['user_id']}",
                                 "{cls.platform}",
                                 "定向抓取策略",
                                 "{video_dict['video_id']}",
                                 "{video_dict['video_title']}",
                                 "{video_dict['cover_url']}",
                                 "{video_dict['video_url']}",
                                 {int(video_dict['duration'])},
                                 "{video_dict['publish_time_str']}",
                                 {int(video_dict['play_cnt'])},
                                 {int(video_dict['comment_cnt'])},
                                 {int(video_dict['like_cnt'])},
                                 {int(video_dict['share_cnt'])},
                                 '{json.dumps(rule_dict)}',
                                 {int(video_dict['video_width'])},
                                 {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
        Common.logging(log_type, crawler, env, '视频信息写入数据库成功')
        # Write video info to Feishu
        upload_time = int(time.time())
        values = [[
            our_video_id,
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
            "定向抓取策略",
            str(video_dict['video_id']),
            video_dict['video_title'],
            our_video_link,
            # video_dict['gid'],
            video_dict['play_cnt'],
            video_dict['comment_cnt'],
            video_dict['like_cnt'],
            video_dict['share_cnt'],
            video_dict['duration'],
            str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
            video_dict['publish_time_str'],
            video_dict['user_name'],  # video_dict stores the nickname under 'user_name', not 'nick_name'
            video_dict['user_id'],
            video_dict['avatar_url'],
            video_dict['cover_url'],
            video_dict['video_url']
        ]]
        Feishu.insert_columns(log_type, 'douyin', "qV9VC0", "ROWS", 1, 2)
        time.sleep(0.5)
        Feishu.update_values(log_type, 'douyin', "qV9VC0", "A2:Z2", values)
        Common.logger(log_type, crawler).info("视频已保存至云文档\n")
        Common.logging(log_type, crawler, env, "视频已保存至云文档\n")

    @classmethod
    def get_author_videos(cls, log_type, crawler, rule_dict, user_list, env):
        for user_dict in user_list:
            try:
                Common.logger(log_type, crawler).info(f"开始抓取 {user_dict['nick_name']} 用户主页视频\n")
                Common.logging(log_type, crawler, env, f"开始抓取 {user_dict['nick_name']} 用户主页视频\n")
                cls.get_videoList(log_type=log_type,
                                  crawler=crawler,
                                  rule_dict=rule_dict,
                                  user_dict=user_dict,
                                  env=env)
            except Exception as e:
                Common.logger(log_type, crawler).error(f"抓取用户{user_dict['nick_name']}主页视频时异常:{e}\n")
                Common.logging(log_type, crawler, env, f"抓取用户{user_dict['nick_name']}主页视频时异常:{e}\n")


if __name__ == '__main__':
    pass
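    # A minimal invocation sketch (hypothetical values -- the real rule_dict
    # and user_list come from the scheduling system, and the sec_user_id in
    # "link" is a placeholder):
    # DouyinauthorScheduling.get_author_videos(
    #     log_type="author",
    #     crawler="douyin",
    #     rule_dict={"period": {"min": 1, "max": 7}},
    #     user_list=[{"nick_name": "测试账号",
    #                 "uid": 123456,
    #                 "link": "https://www.douyin.com/user/MS4wLjABAAAA..."}],
    #     env="dev")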