follow_dy.py

# -*- coding: utf-8 -*-
# @Author: lierqiang
# @Time: 2023/4/12
import json
import os
import random
import shutil
import sys
import time
from hashlib import md5

import requests

sys.path.append(os.getcwd())
from common.common import Common
from common.db import MysqlHelper
from common.feishu import Feishu
from common.public import get_user_from_mysql
from common.publish import Publish
from douyin.douyin_recommend import get_xb


class DyFollow(object):
    platform = "抖音"  # "Douyin"; kept verbatim because it is matched against rows in crawler_video
    tag = "抖音定向爬虫策略"  # "Douyin targeted crawler strategy"

    # Fetch the crawl rules from the Feishu sheet (tab "fn2hEO"); retry until the sheet is readable
    @classmethod
    def get_rule(cls, log_type, crawler):
        try:
            while True:
                rule_sheet = Feishu.get_values_batch(log_type, crawler, "fn2hEO")
                if rule_sheet is None:
                    Common.logger(log_type, crawler).warning("rule_sheet is None! Fetching again in 10s")
                    time.sleep(10)
                    continue
                rule_dict = {
                    "video_width": int(rule_sheet[0][2]),
                    "video_height": int(rule_sheet[1][2]),
                    "like_cnt": int(rule_sheet[2][2]),
                    "duration": int(rule_sheet[3][2]),
                    "publish_time": int(rule_sheet[4][2]),
                }
                return rule_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
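
    # Sheet layout assumed by get_rule above (values hypothetical): each
    # threshold sits in column C (index 2) of its own row, in this fixed order:
    #   row 0: video_width   e.g. 720
    #   row 1: video_height  e.g. 1280
    #   row 2: like_cnt      e.g. 1000
    #   row 3: duration      e.g. 30  (seconds)
    #   row 4: publish_time  e.g. 30  (days)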

    # Download rules: a video qualifies only when it clears the like-count,
    # duration, and width-or-height thresholds from rule_dict
    @classmethod
    def download_rule(cls, video_info_dict, rule_dict):
        # if video_info_dict['play_cnt'] >= rule_dict['play_cnt']:
        # if video_info_dict['comment_cnt'] >= rule_dict['comment_cnt']:
        if video_info_dict['like_cnt'] >= rule_dict['like_cnt']:
            if video_info_dict['duration'] >= rule_dict['duration']:
                if video_info_dict['video_width'] >= rule_dict['video_width'] \
                        or video_info_dict['video_height'] >= rule_dict['video_height']:
                    return True
                else:
                    return False
            else:
                return False
        else:
            return False
        # else:
        #     return False
        # else:
        #     return False
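
    # A minimal usage sketch (all numbers hypothetical): with thresholds
    # like_cnt=1000, duration=30, width=720, height=1280, a 60s video with
    # 5000 likes at 720x1280 passes the rule:
    #   DyFollow.download_rule(
    #       {'like_cnt': 5000, 'duration': 60, 'video_width': 720, 'video_height': 1280},
    #       {'like_cnt': 1000, 'duration': 30, 'video_width': 720, 'video_height': 1280},
    #   )  # -> True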

    # Filter-word list: titles containing any of these words are skipped
    @classmethod
    def filter_words(cls, log_type, crawler):
        try:
            while True:
                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, '6BS2RR')
                if filter_words_sheet is None:
                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} retrying in 10s")
                    time.sleep(10)
                    continue
                filter_words_list = []
                for x in filter_words_sheet:
                    for y in x:
                        if y is not None:
                            filter_words_list.append(y)
                return filter_words_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'filter_words exception: {e}\n')

    # Clean a raw title: cut at hashtags/@mentions, strip characters that are
    # illegal in filenames, truncate to 40 chars; fall back to a random title
    @classmethod
    def video_title(cls, log_type, crawler, title):
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]
        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]
        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]
        video_title = title3.strip().split('#')[0].replace("\n", "") \
            .replace("/", "").replace("抖音", "").replace(" ", "") \
            .replace("&NBSP", "").replace("\r", "") \
            .replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "").replace("'", '')[:40]
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return cls.random_title(log_type, crawler)
        else:
            return video_title
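
    # Illustration (hypothetical input): a title such as
    #   "红烧肉的做法 #美食 #家常菜 @厨房小张"
    # is cut at the first " #", cleared of any "@..." tail and illegal
    # characters, and truncated to 40 chars, leaving "红烧肉的做法".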

    @classmethod
    def random_title(cls, log_type, crawler):
        try:
            while True:
                random_title_sheet = Feishu.get_values_batch(log_type, crawler, 'sPK2oY')
                if random_title_sheet is None:
                    Common.logger(log_type, crawler).warning(f"random_title_sheet:{random_title_sheet} retrying in 10s")
                    time.sleep(10)
                    continue
                random_title_list = []
                for x in random_title_sheet:
                    for y in x:
                        if y is not None:
                            random_title_list.append(y)
                return random.choice(random_title_list)
        except Exception as e:
            Common.logger(log_type, crawler).error(f'random_title:{e}\n')

    @classmethod
    def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine, rule_dict):
        try:
            max_cursor = ''
            url = "https://www.douyin.com/aweme/v1/web/aweme/post/?device_platform=webapp&aid=6383&channel=channel_pc_web&sec_user_id={sec_user_id}&max_cursor={max_cursor}&show_live_replay_strategy=1&count=10&publish_video_strategy_type=2&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1440&screen_height=900&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=112.0.0.0&browser_online=true&engine_name=Blink&engine_version=112.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=50".format(
                sec_user_id=out_uid, max_cursor=max_cursor)
            headers = {
                'authority': 'www.douyin.com',
                'accept': 'application/json, text/plain, */*',
                'accept-language': 'zh-CN,zh;q=0.9',
                # 'cookie': '__ac_nonce=06437a18000f23ad954f0; __ac_signature=_02B4Z6wo00f01Sb71TAAAIDCsi2OPpjonN0m29GAAC2M85; s_v_web_id=verify_lgeqr3uq_3aDaqQXf_juHS_40Yi_BE8b_tI8FCILZQXPK; _tea_utm_cache_2018=undefined; ttwid=1%7Cq_IBs6hbBUOIEcRR1gxtgY6GiTbTE3U1XhJNLL_9BZA%7C1681367431%7Cf77b36ae4721884fec1c3fa9d6a08c29e308236ae13df58d1be3b0d1f82f8668; strategyABtestKey=%221681367433.454%22; passport_csrf_token=bff0289a5846e58b4b9db6e1f64665f4; passport_csrf_token_default=bff0289a5846e58b4b9db6e1f64665f4; bd_ticket_guard_client_data=eyJiZC10aWNrZXQtZ3VhcmQtdmVyc2lvbiI6MiwiYmQtdGlja2V0LWd1YXJkLWl0ZXJhdGlvbi12ZXJzaW9uIjoxLCJiZC10aWNrZXQtZ3VhcmQtY2xpZW50LWNzciI6Ii0tLS0tQkVHSU4gQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbk1JSUJEVENCdFFJQkFEQW5NUXN3Q1FZRFZRUUdFd0pEVGpFWU1CWUdBMVVFQXd3UFltUmZkR2xqYTJWMFgyZDFcclxuWVhKa01Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRVhzcHJ5TElFT3E4Z2tPc2l5MTdSS1dEcVxyXG5xTXZkWEt5Y1V5NStiL3JpSmJ6VkMwMlYrU1dNaWtZTlNOK29IU2g2WVVTTmdUUjJrZEhvRUxISmxGdU9scUFzXHJcbk1Db0dDU3FHU0liM0RRRUpEakVkTUJzd0dRWURWUjBSQkJJd0VJSU9kM2QzTG1SdmRYbHBiaTVqYjIwd0NnWUlcclxuS29aSXpqMEVBd0lEUndBd1JBSWdIeW9SblFNK0h0Z3ZOU2dFMVpHdGpmYWQxT3BuWHJRdVFSNXNSaDkwakRJQ1xyXG5JRG1tVkthRkN5djBLemtpZ0J0RExaTVJSNndURzRBWUVoNUlWUmlZUU9UVVxyXG4tLS0tLUVORCBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS1cclxuIn0=; msToken=ZPkeAqCnLbjDWCkWcWf36ZZIoZTl07X33ca0xcNGk3tZPoMvgx-lo28oNb5JhbCKsXLgLNIoojAbocBrjdAv54Hwf-Tk3_yAjLW7WJxxSa0=; ttcid=54b98e03a03e43e09211ee28db90fdd126; home_can_add_dy_2_desktop=%221%22; msToken=nqMSwn8xJAXLZk2AikdOeJ6P3JvoCsbYjHROoR55KXMDvWs8auYCMpZFGQSClHhitgB0l_vwg8m0-lE-aWQovfN7Ga1QupM3SpdNktiamFRiHMdJExREc9_uxS1ln8E=; tt_scid=DnO5GFg1oLONpPMFuFeL-OveKBn43mRynRVTvHsh1KUQm283ocN6JX6qPKDDrFHbfdf1; download_guide=%222%2F20230413%22; msToken=b9haS5RjLaFgqVDoGp5xSqc8B4kl-miQB5Nku0BSIvHVutKT81Nzk_pPb0wm7xYlAp_nz1gytQng5gYeIRNxcMgZJ_MB7lhejt_093miXlHtvqAaxL0FNg==',
                'referer': 'https://www.douyin.com/user/{}?showTab=post'.format(out_uid),
                'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"macOS"',
                'sec-fetch-dest': 'empty',
                'sec-fetch-mode': 'cors',
                'sec-fetch-site': 'same-origin',
                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
            }
            # Sign the request; bail out if no X-Bogus signature could be generated
            x_bogus = get_xb(url, headers['user-agent'])
            if not x_bogus:
                return
            url = url + '&X-Bogus={}'.format(x_bogus)
            res = requests.get(url=url, headers=headers, proxies=Common.tunnel_proxies(), timeout=10).json()
            aweme_list = res.get('aweme_list', [])
        except Exception as e:
            Common.logger(log_type, crawler).error(f"Failed to fetch the video list of Douyin author {out_uid}: {e}")
            return
        if not aweme_list:
            Common.logger(log_type, crawler).warning("No more data for this Douyin author")
            return
        for info in aweme_list:
            if info.get('is_ads'):
                continue
            publish_time = info['create_time']
            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time))
            publish_day = int((int(time.time()) - publish_time) / (3600 * 24))
            video_title = cls.video_title(log_type, crawler, info['desc'])
            if not video_title:
                video_title = cls.random_title(log_type, crawler)
            video_dict = {'video_title': video_title,
                          'video_id': info['aweme_id'],
                          'play_cnt': info['statistics']['play_count'],
                          'comment_cnt': info['statistics']['comment_count'],
                          'like_cnt': info['statistics']['digg_count'],
                          'share_cnt': info['statistics']['share_count'],
                          'video_width': info['video']['width'],
                          'video_height': info['video']['height'],
                          'duration': round(info['video']['duration'] / 1000),
                          'publish_time': publish_day,
                          'publish_time_stamp': publish_time * 1000,
                          'publish_time_str': publish_time_str,
                          'user_name': info['author']['nickname'],
                          'user_id': info['author_user_id'],
                          'user_sec_id': info['author']['sec_uid'],
                          'avatar_url': info['author']['avatar_thumb']['url_list'][0],
                          'cover_url': info['video']['origin_cover']['url_list'][0].replace('\u0026', '&'),
                          'video_url': info['video']['play_addr']['url_list'][0],
                          'session': f"douyin{int(time.time())}"
                          }
            for k, v in video_dict.items():
                Common.logger(log_type, crawler).info(f"{k}:{v}")
            cls.download_publish(log_type=log_type,
                                 crawler=crawler,
                                 video_dict=video_dict,
                                 rule_dict=rule_dict,
                                 strategy=strategy,
                                 our_uid=our_uid,
                                 oss_endpoint=oss_endpoint,
                                 env=env,
                                 machine=machine)
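
    # NOTE: max_cursor stays '' above, so only the first page (count=10) of the
    # author's posts is fetched. A hedged pagination sketch, assuming the
    # response carries the usual 'has_more'/'max_cursor' fields (an assumption
    # about this endpoint — verify against a live response):
    #   while True:
    #       res = requests.get(...).json()
    #       ...process res.get('aweme_list', [])...
    #       if not res.get('has_more'):
    #           break
    #       max_cursor = res.get('max_cursor')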

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env, machine):
        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
        return len(repeat_video)
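
    # The f-string SQL above (and insert_sql below) interpolates crawl-derived
    # values directly, so a stray quote in a value would break the statement.
    # If MysqlHelper accepts placeholder parameters (an assumption — check the
    # helper's API), a safer sketch would look like:
    #   sql = "select * from crawler_video where platform=%s and out_video_id=%s;"
    #   MysqlHelper.get_values(log_type, crawler, sql, env, machine, params=(cls.platform, video_id))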

    # Download / upload
    @classmethod
    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
        try:
            if cls.download_rule(video_dict, rule_dict) is False:
                Common.logger(log_type, crawler).info('Video does not satisfy the crawl rules\n')
            elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type, crawler)):
                Common.logger(log_type, crawler).info(f"Title hit a filter word: {video_dict['video_title']}\n")
            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
                Common.logger(log_type, crawler).info('Video already downloaded\n')
            else:
                # Download the video
                Common.download_method(log_type=log_type, crawler=crawler, text='video',
                                       title=video_dict['video_title'], url=video_dict['video_url'])
                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                    # Delete the video folder
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    Common.logger(log_type, crawler).info("Video size is 0; folder deleted\n")
                    return
                # ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
                # if ffmpeg_dict is None or ffmpeg_dict['size'] == 0:
                #     Common.logger(log_type, crawler).warning("Downloaded video is invalid; deleted\n")
                #     # Delete the video folder
                #     shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
                #     return
                # Download the cover
                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
                                       title=video_dict['video_title'], url=video_dict['cover_url'])
                # Save the video info to txt
                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
                # Upload the video
                Common.logger(log_type, crawler).info("Uploading video...")
                our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                          crawler=crawler,
                                                          strategy=strategy,
                                                          our_uid=our_uid,
                                                          env=env,
                                                          oss_endpoint=oss_endpoint)
                if env == 'dev':
                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                else:
                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                Common.logger(log_type, crawler).info("Video upload finished")
                if our_video_id is None:
                    # Upload failed: delete the video folder (named by the md5 of the title)
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    return
                # Write the video row to Feishu
                upload_time = int(time.time())
                values = [[our_video_id,
                           time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                           strategy,
                           str(video_dict['video_id']),
                           video_dict['video_title'],
                           our_video_link,
                           # video_dict['gid'],
                           video_dict['play_cnt'],
                           video_dict['comment_cnt'],
                           video_dict['like_cnt'],
                           video_dict['share_cnt'],
                           video_dict['duration'],
                           str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
                           video_dict['publish_time_str'],
                           video_dict['user_name'],
                           video_dict['user_id'],
                           video_dict['avatar_url'],
                           video_dict['cover_url'],
                           video_dict['video_url']
                           ]]
                # time.sleep(1)
                Feishu.insert_columns(log_type, 'douyin', "qV9VC0", "ROWS", 1, 2)
                Feishu.update_values(log_type, 'douyin', "qV9VC0", "A2:Z2", values)
                Common.logger(log_type, crawler).info("Video saved to the cloud doc\n")
                # Save the video info to the database
                insert_sql = f""" insert into crawler_video(video_id,
                                                            user_id,
                                                            out_user_id,
                                                            platform,
                                                            strategy,
                                                            out_video_id,
                                                            video_title,
                                                            cover_url,
                                                            video_url,
                                                            duration,
                                                            publish_time,
                                                            play_cnt,
                                                            comment_cnt,
                                                            like_cnt,
                                                            share_cnt,
                                                            crawler_rule,
                                                            width,
                                                            height)
                                                            values({our_video_id},
                                                            {our_uid},
                                                            "{video_dict['user_id']}",
                                                            "{cls.platform}",
                                                            "{strategy}",
                                                            "{video_dict['video_id']}",
                                                            "{video_dict['video_title']}",
                                                            "{video_dict['cover_url']}",
                                                            "{video_dict['video_url']}",
                                                            {int(video_dict['duration'])},
                                                            "{video_dict['publish_time_str']}",
                                                            {int(video_dict['play_cnt'])},
                                                            {int(video_dict['comment_cnt'])},
                                                            {int(video_dict['like_cnt'])},
                                                            {int(video_dict['share_cnt'])},
                                                            '{json.dumps(rule_dict)}',
                                                            {int(video_dict['video_width'])},
                                                            {int(video_dict['video_height'])}) """
                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
                Common.logger(log_type, crawler).info('Video info inserted into the database\n')
        except Exception as e:
            Common.logger(log_type, crawler).error(f'download_publish exception: {e}\n')
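
    # For reference, crawler_rule stores json.dumps(rule_dict), e.g. with
    # hypothetical thresholds:
    #   {"video_width": 720, "video_height": 1280, "like_cnt": 1000, "duration": 30, "publish_time": 30}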

    @classmethod
    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
        user_list = get_user_from_mysql(log_type, crawler, crawler, env)
        rule_dict = cls.get_rule(log_type, crawler)
        for user in user_list:
            spider_link = user["spider_link"]
            out_uid = spider_link
            user_name = user["nick_name"]
            our_uid = user["media_id"]
            Common.logger(log_type, crawler).info(f"Crawling the profile videos of user {user_name}\n")
            cls.get_videoList(log_type=log_type,
                              crawler=crawler,
                              strategy=strategy,
                              our_uid=our_uid,
                              out_uid=out_uid,
                              oss_endpoint=oss_endpoint,
                              env=env,
                              machine=machine,
                              rule_dict=rule_dict,
                              )


if __name__ == '__main__':
    # args: log_type, crawler, strategy ("定向抓取策略" = "targeted crawl strategy",
    # kept verbatim because it is written to the database), oss_endpoint, env, machine
    DyFollow.get_follow_videos('author', 'douyin', '定向抓取策略', 'outer', 'prod', 'aliyun')