# douyin_author_scheduling.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/5/26
import json
import os
import shutil
import sys
import time
import requests
from hashlib import md5
from douyin.douyin_recommend import get_xb

# Make the project root importable before pulling in the common package.
sys.path.append(os.getcwd())
from common.common import Common
from common.scheduling_db import MysqlHelper
from common.feishu import Feishu
from common.publish import Publish
from common.public import random_title, get_user_from_mysql, get_config_from_mysql


class DyAuthorScheduling(object):
    platform = "抖音"
    tag = "抖音定向爬虫策略"

    @classmethod
    def download_rule(cls, video_info_dict, rule_dict):
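        """Return True when the video meets every minimum in rule_dict.

        A minimal sketch of the expected rule_dict shape (threshold values
        are illustrative, not production config):
            {'like_cnt': {'min': 100}, 'duration': {'min': 30},
             'width': {'min': 540}, 'height': {'min': 0}}
        """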
        # Likes and duration must both clear their minimums; resolution only
        # needs to clear one of the width/height minimums.
        return (video_info_dict['like_cnt'] >= rule_dict['like_cnt']['min']
                and video_info_dict['duration'] >= rule_dict['duration']['min']
                and (video_info_dict['video_width'] >= rule_dict['width']['min']
                     or video_info_dict['video_height'] >= rule_dict['height']['min']))

    # Filter-word lexicon
    @classmethod
    def filter_words(cls, log_type, crawler):
        try:
            while True:
                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, '6BS2RR')
                if filter_words_sheet is None:
                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
                    time.sleep(10)  # the warning promises a 10-second retry; sleep before looping
                    continue
                filter_words_list = []
                for row in filter_words_sheet:
                    for word in row:
                        if word is not None:
                            filter_words_list.append(word)
                return filter_words_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')

    @classmethod
    def video_title(cls, log_type, env, crawler, title):
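        """Clean a raw Douyin description into a usable title.

        Worked example (illustrative input):
            "风景真美 #旅行 @小明" -> "风景真美"
        Text after " #" and "@" is dropped, forbidden characters are stripped,
        the result is capped at 40 characters, and empty results fall back to
        random_title().
        """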
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]
        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]
        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]
        video_title = title3.strip().split('#')[0].replace("\n", "") \
            .replace("/", "").replace("抖音", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return random_title(log_type, crawler, env, text='title')
        else:
            return video_title

    @classmethod
    def get_videoList(cls, log_type, crawler, strategy, task, our_uid, out_uid, oss_endpoint, env):
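        """Fetch one page (count=10) of the author's posted videos and feed each item to download_publish."""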
        try:
            max_cursor = ''
            url = "https://www.douyin.com/aweme/v1/web/aweme/post/?device_platform=webapp&aid=6383&channel=channel_pc_web&sec_user_id={sec_user_id}&max_cursor={max_cursor}&show_live_replay_strategy=1&count=10&publish_video_strategy_type=2&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1440&screen_height=900&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=112.0.0.0&browser_online=true&engine_name=Blink&engine_version=112.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=50".format(
                sec_user_id=out_uid, max_cursor=max_cursor)
            headers = {
                'authority': 'www.douyin.com',
                'accept': 'application/json, text/plain, */*',
                'accept-language': 'zh-CN,zh;q=0.9',
                'referer': 'https://www.douyin.com/user/{}?showTab=post'.format(out_uid),
                'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"macOS"',
                'sec-fetch-dest': 'empty',
                'sec-fetch-mode': 'cors',
                'sec-fetch-site': 'same-origin',
                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
            }
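            # X-Bogus is the anti-crawl signature Douyin's web API expects;
            # get_xb (douyin.douyin_recommend) derives it from the URL and User-Agent.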
            x_bogus = get_xb(url, headers['user-agent'])
            if not x_bogus:
                return
            url = url + '&X-Bogus={}'.format(x_bogus)
            res = requests.get(url=url, headers=headers, proxies=Common.tunnel_proxies(), timeout=10).json()
            aweme_list = res.get('aweme_list', [])
        except Exception as e:
            Common.logger(log_type, crawler).error(f"获取抖音作者:{out_uid},视频列表失败:{e}")
            return
        if not aweme_list:
            Common.logger(log_type, crawler).warning("抖音作者没有获取到更多数据")
            return
        for info in aweme_list:
            # Skip ads and items without a publish time.
            if info.get('is_ads'):
                continue
            publish_time = info.get('create_time')
            if not publish_time:
                continue
            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time))
            publish_day = int((int(time.time()) - publish_time) / (3600 * 24))
            video_title = cls.video_title(log_type, env, crawler, info['desc'])
            if not video_title:
                video_title = random_title(log_type, crawler, env, text='title')
            video_dict = {'video_title': video_title,
                          'video_id': info['aweme_id'],
                          'play_cnt': info['statistics']['play_count'],
                          'comment_cnt': info['statistics']['comment_count'],
                          'like_cnt': info['statistics']['digg_count'],
                          'share_cnt': info['statistics']['share_count'],
                          'video_width': info['video']['width'],
                          'video_height': info['video']['height'],
                          # the API reports duration in milliseconds; store seconds
                          'duration': round(info['video']['duration'] / 1000),
                          'publish_time': publish_day,
                          'publish_time_stamp': publish_time * 1000,
                          'publish_time_str': publish_time_str,
                          'user_name': info['author']['nickname'],
                          'user_id': info['author_user_id'],
                          'user_sec_id': info['author']['sec_uid'],
                          'avatar_url': info['author']['avatar_thumb']['url_list'][0],
                          'cover_url': info['video']['origin_cover']['url_list'][0].replace('\u0026', '&'),
                          'video_url': info['video']['play_addr']['url_list'][0],
                          'session': f"douyin{int(time.time())}"
                          }
            for k, v in video_dict.items():
                Common.logger(log_type, crawler).info(f"{k}:{v}")
            cls.download_publish(log_type=log_type,
                                 crawler=crawler,
                                 strategy=strategy,
                                 task=task,
                                 video_dict=video_dict,
                                 our_uid=our_uid,
                                 oss_endpoint=oss_endpoint,
                                 env=env)

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
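        """Count existing crawler_video rows for this out_video_id; non-zero means already crawled."""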
        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    # Download / upload
    @classmethod
    def download_publish(cls, log_type, crawler, strategy, task, video_dict, our_uid, oss_endpoint, env):
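        """Apply title filters and crawl rules, download video and cover, publish, then record to Feishu and MySQL."""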
        try:
            filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
            for filter_word in filter_words:
                if filter_word in video_dict['video_title']:
                    Common.logger(log_type, crawler).info(f"标题已中过滤词:{video_dict['video_title']}\n")
                    return
            if cls.download_rule(video_dict, task['rule_dict']) is False:
                Common.logger(log_type, crawler).info('不满足抓取规则\n')
            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
                Common.logger(log_type, crawler).info('视频已下载\n')
            else:
                # Download the video
                Common.download_method(log_type=log_type, crawler=crawler, text='video',
                                       title=video_dict['video_title'], url=video_dict['video_url'])
                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                    # Delete the video folder when the download is zero bytes
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                    return
                # ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
                # if ffmpeg_dict is None or ffmpeg_dict['size'] == 0:
                #     Common.logger(log_type, crawler).warning(f"下载的视频无效,已删除\n")
                #     shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
                #     return
                # Download the cover
                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
                                       title=video_dict['video_title'], url=video_dict['cover_url'])
                # Save the video info to a txt file
                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
                # Upload the video
                Common.logger(log_type, crawler).info("开始上传视频...")
                our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                          crawler=crawler,
                                                          strategy=strategy,
                                                          our_uid=our_uid,
                                                          env=env,
                                                          oss_endpoint=oss_endpoint)
                if env == 'dev':
                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                else:
                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                Common.logger(log_type, crawler).info("视频上传完成")
                if our_video_id is None:
                    # Upload failed: delete the local folder (files live under the md5 of the title)
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    return
                # Write the video row to Feishu
                upload_time = int(time.time())
                values = [[
                    our_video_id,
                    time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                    strategy,
                    str(video_dict['video_id']),
                    video_dict['video_title'],
                    our_video_link,
                    # video_dict['gid'],
                    video_dict['play_cnt'],
                    video_dict['comment_cnt'],
                    video_dict['like_cnt'],
                    video_dict['share_cnt'],
                    video_dict['duration'],
                    str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
                    video_dict['publish_time_str'],
                    video_dict['user_name'],
                    video_dict['user_id'],
                    video_dict['avatar_url'],
                    video_dict['cover_url'],
                    video_dict['video_url']
                ]]
                # time.sleep(1)
                Feishu.insert_columns(log_type, 'douyin', "qV9VC0", "ROWS", 1, 2)
                Feishu.update_values(log_type, 'douyin', "qV9VC0", "A2:Z2", values)
                Common.logger(log_type, crawler).info("视频已保存至云文档\n")
                # Save the video info to the database
                insert_sql = f""" insert into crawler_video(video_id,
                                                            user_id,
                                                            out_user_id,
                                                            platform,
                                                            strategy,
                                                            out_video_id,
                                                            video_title,
                                                            cover_url,
                                                            video_url,
                                                            duration,
                                                            publish_time,
                                                            play_cnt,
                                                            comment_cnt,
                                                            like_cnt,
                                                            share_cnt,
                                                            crawler_rule,
                                                            width,
                                                            height)
                                                            values({our_video_id},
                                                            {our_uid},
                                                            "{video_dict['user_id']}",
                                                            "{cls.platform}",
                                                            "{strategy}",
                                                            "{video_dict['video_id']}",
                                                            "{video_dict['video_title']}",
                                                            "{video_dict['cover_url']}",
                                                            "{video_dict['video_url']}",
                                                            {int(video_dict['duration'])},
                                                            "{video_dict['publish_time_str']}",
                                                            {int(video_dict['play_cnt'])},
                                                            {int(video_dict['comment_cnt'])},
                                                            {int(video_dict['like_cnt'])},
                                                            {int(video_dict['share_cnt'])},
                                                            '{json.dumps(task["rule_dict"])}',
                                                            {int(video_dict['video_width'])},
                                                            {int(video_dict['video_height'])}) """
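                # Values are interpolated straight into the SQL text; this holds up
                # only because video_title() strips single and double quotes upstream.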
                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
                MysqlHelper.update_values(log_type, crawler, insert_sql, env)
                Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
        except Exception as e:
            Common.logger(log_type, crawler).error(f'download_publish异常:{e}\n')

    @classmethod
    def get_follow_videos(cls, log_type, crawler, task, oss_endpoint, env):
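        """Walk the scheduled user list from MySQL and crawl each author's homepage."""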
        user_list = get_user_from_mysql(log_type, crawler, crawler, env)
        strategy = '定向抓取策略'
        for user in user_list:
            spider_link = user["link"]
            out_uid = spider_link
            user_name = user["nick_name"]
            our_uid = user["uid"]
            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
            cls.get_videoList(log_type=log_type,
                              crawler=crawler,
                              strategy=strategy,
                              task=task,
                              our_uid=our_uid,
                              out_uid=out_uid,
                              oss_endpoint=oss_endpoint,
                              env=env)


if __name__ == '__main__':
    DyAuthorScheduling.get_follow_videos('author', 'douyin', '定向抓取策略', 'outer', 'prod')
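    # Note: in scheduled runs `task` is expected to be a dict with a 'rule_dict'
    # key (see the sketch in download_rule's docstring); the bare string passed
    # above would raise a TypeError at task['rule_dict'].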