recommend_dy.py

# -*- coding: utf-8 -*-
# @Author: lierqiang
# @Time: 2023/4/06
import json
import os
import random
import shutil
import sys
import time
import requests
from hashlib import md5

from douyin.douyin_recommend import get_xb

sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.db import MysqlHelper
from common.publish import Publish
from common.public import get_config_from_mysql
from common.public import random_title
from common.userAgent import get_random_user_agent
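

# DyRecommend walks Douyin's PC web recommend feed page by page, scores each
# video against thresholds pulled from a Feishu sheet, skips ads, duplicates,
# and filtered titles, then downloads and republishes whatever qualifies.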
class DyRecommend(object):
    # Pagination offset for user profile videos
    # offset = 0
    platform = "抖音"
    # tag = "西瓜视频爬虫,定向爬虫策略"

    @classmethod
    def get_rule(cls, log_type, crawler):
        try:
            while True:
                rule_sheet = Feishu.get_values_batch(log_type, crawler, "a6L9Kb")
                if rule_sheet is None:
                    Common.logger(log_type, crawler).warning("rule_sheet is None! Retrying in 10 seconds")
                    time.sleep(10)
                    continue
                rule_dict = {
                    # "play_cnt": int(rule_sheet[0][2]),
                    # "comment_cnt": int(rule_sheet[1][2]),
                    "video_width": int(rule_sheet[0][2]),
                    "video_height": int(rule_sheet[1][2]),
                    "like_cnt": int(rule_sheet[2][2]),
                    "duration": int(rule_sheet[3][2]),
                    "publish_time": int(rule_sheet[4][2]),
                }
                return rule_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
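
    # A fetched rule_dict reads the thresholds from column C of the sheet,
    # e.g. (hypothetical values): {"video_width": 0, "video_height": 0,
    # "like_cnt": 100, "duration": 40, "publish_time": 30}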

    # Download rule: a video qualifies only if it clears every threshold.
    @classmethod
    def download_rule(cls, video_info_dict, rule_dict):
        # if video_info_dict['play_cnt'] >= rule_dict['play_cnt']:
        # if video_info_dict['comment_cnt'] >= rule_dict['comment_cnt']:
        return (video_info_dict['like_cnt'] >= rule_dict['like_cnt']
                and video_info_dict['duration'] >= rule_dict['duration']
                and (video_info_dict['video_width'] >= rule_dict['video_width']
                     or video_info_dict['video_height'] >= rule_dict['video_height']))
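
    # Example (hypothetical values): a 720x1280 video with 5000 likes and a
    # 60s duration passes {"like_cnt": 100, "duration": 40, "video_width": 720,
    # "video_height": 1280}, since the width check alone satisfies the
    # width-or-height clause.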

    # Filter-word list
    @classmethod
    def filter_words(cls, log_type, crawler):
        try:
            while True:
                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, '6BS2RR')
                if filter_words_sheet is None:
                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet}, retrying in 10 seconds")
                    time.sleep(10)
                    continue
                filter_words_list = []
                for x in filter_words_sheet:
                    for y in x:
                        if y is not None:
                            filter_words_list.append(y)
                return filter_words_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'filter_words exception: {e}\n')
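
    # A sheet of [["违规"], ["低俗", None]] flattens to ["违规", "低俗"]
    # (hypothetical cell values); None cells from ragged rows are dropped.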

    @classmethod
    def video_title(cls, log_type, crawler, env, title):
        # Strip trailing hashtag blocks and @mentions, keeping the leading text.
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]
        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]
        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]
        video_title = title3.strip().replace("\n", "") \
            .replace("/", "").replace("抖音", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace("#", "").replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
        # Fall back to a random title when sanitizing leaves nothing usable.
        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return random_title(log_type, crawler, env, text='title')
        else:
            return video_title
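
    # Example (hypothetical input): "晚霞真美 #风景 #日落 @小明" keeps only
    # "晚霞真美"; a title that sanitizes down to "..." is replaced by
    # random_title().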

    @classmethod
    def get_videolist(cls, log_type, crawler, strategy, our_id, oss_endpoint, env, machine):
        rule_dict = cls.get_rule(log_type, crawler)
        for page in range(1, 101):
            aweme_pc_rec_raw_data = '%7B%22videoPrefer%22:%7B%22fsn%22:%5B%5D,%22like%22:%5B%5D,%22halfMin%22:%5B%227188684310696742200%22,%224380080926896941%22%5D,%22min%22:%5B%5D%7D,%22seo_info%22:%22https:%2F%2Fwww.douyin.com%2F%22,%22is_client%22:false,%22ff_danmaku_status%22:1,%22danmaku_switch_status%22:1%7D'
            f_url = 'https://www.douyin.com/aweme/v1/web/tab/feed/?device_platform=webapp&aid=6383&channel=channel_pc_web&tag_id=&share_aweme_id=&count=10&refresh_index={page}&video_type_select=1&aweme_pc_rec_raw_data={aweme_pc_rec_raw_data}&globalwid=&pull_type=2&min_window=0&ug_source=&creative_id=&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1440&screen_height=900&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=109.0.0.0&browser_online=true&engine_name=Blink&engine_version=109.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=50&webid=7219223873342260736&msToken=Sh6bVLWZUEZ3ruIHq1L3iUXnr1GT5yklyo-XZRO7lNgsFvpYq0C7tcu5Z4Jv9DrMESZ9kGVhwKT4ftFDkBL11ZGPUxth2ToA4M4q-qs2MK9ctR7GhwFkGw=='.format(
                page=page, aweme_pc_rec_raw_data=aweme_pc_rec_raw_data)
            headers = {
                # 'cookie': 'ttwid=1%7CzpiG_VTvd1xpRFvfHUjEHiaq3qkfUqPElZUu0wbTSr8%7C1680507728%7Cc61697d37b4d4d49d42b466a3bbe8ecd5c06ae6e9a751d9e410102d2c52a185d; douyin.com; passport_csrf_token=208a829b0156a2feaa0fa24ad026ea91; passport_csrf_token_default=208a829b0156a2feaa0fa24ad026ea91; s_v_web_id=verify_lg0iwv1g_BwfztkmU_azbL_4Gua_9Fb9_KWfGPVXCyWua; bd_ticket_guard_client_data=eyJiZC10aWNrZXQtZ3VhcmQtdmVyc2lvbiI6MiwiYmQtdGlja2V0LWd1YXJkLWl0ZXJhdGlvbi12ZXJzaW9uIjoxLCJiZC10aWNrZXQtZ3VhcmQtY2xpZW50LWNzciI6Ii0tLS0tQkVHSU4gQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbk1JSUJEekNCdFFJQkFEQW5NUXN3Q1FZRFZRUUdFd0pEVGpFWU1CWUdBMVVFQXd3UFltUmZkR2xqYTJWMFgyZDFcclxuWVhKa01Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRU9zWGhGbG5ZWjVNeG5ZRGFFOCtCYmRGdFxyXG5VZTh6SG0ycTRXeWxvdkxXVXVOcy9oV2tlZlBRK3BsNkg2OGQwdGtOVVB5UStmUnpyWlRFL1ZXMTR5UlRkS0FzXHJcbk1Db0dDU3FHU0liM0RRRUpEakVkTUJzd0dRWURWUjBSQkJJd0VJSU9kM2QzTG1SdmRYbHBiaTVqYjIwd0NnWUlcclxuS29aSXpqMEVBd0lEU1FBd1JnSWhBTmdPS3Jkb3V4SHBzcHNiY0dmUHJYQ0lVNnVwcmZkd2ZFY2g5TXZndW5Ea1xyXG5BaUVBM2xVeDQ2bzd0UWJUT0dXdzgzQm45RnFyQkRVVHNOVjkyZUEyR1hPR3BkVT1cclxuLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbiJ9; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; ttcid=60304e8eb309434784f02372ef36387b41; xgplayer_user_id=446878319428; __ac_nonce=0642bcb0a001596f3fe5b; __ac_signature=_02B4Z6wo00f012HU-wAAAIDA9QKgDGYSlVNh9P-AALxqUQdGOEO.l3IAhdmUh4D-Y9rXLut3p7moXUuAUmo7rUOUJzpnB9nLx0YdZcvdMNeUgQOjsGIHh9LTN38BOVtrElZBeXDLjuVVC5Hh81; strategyABtestKey=%221680591628.189%22; download_guide=%223%2F20230404%22; VIDEO_FILTER_MEMO_SELECT=%7B%22expireTime%22%3A1681196521206%2C%22type%22%3A0%7D; home_can_add_dy_2_desktop=%221%22; msToken=v_pzGLfpXwl4PugynDwIb5DeepUms68tZLZFNLHl8WQnEeNQZtaawWYVu4Y3TLWxpqbvgqkOFULGmld2BLBZydbzrMJgkx5q5GjqetkVI4GoxLX1QdJQ0CP607uEVw==; tt_scid=NZGTg99heu5lHFfAvBht7p3Qxl0TGP.TyfxOQ7cWIvZjEnOcZERFaJxQ.HnKY-UT18cb; msToken=JFY-VD2YHS-6IO6lNhTj4AcLVtHqjein_5giYzIUh_VRsXMPFXy9QOg-RKDDwQgW5TTbgQB_BLzpfQhNdNEQCv5sGXatzGei9yppG1eSLLkbI9fjhTdBWtdkAJpLIg==',
                'cookie': 'ttwid=1%7CI2Xp275XabSiVJ9GAmfLtqbtqturVSIS2yLbXVkAHnQ%7C1680856567%7Cfd36579475157e2303e36e5fd75cdea4ebad78c20da989be0590305f169242ca; douyin.com; strategyABtestKey=%221680856567.817%22; passport_csrf_token=6a28a7b1e1ad38570cc5ee39deaf587a; passport_csrf_token_default=6a28a7b1e1ad38570cc5ee39deaf587a; s_v_web_id=verify_lg6aloex_WG2LmlFk_kbA6_4tMb_BZlD_iuKxRvsMvrg7; bd_ticket_guard_client_data=eyJiZC10aWNrZXQtZ3VhcmQtdmVyc2lvbiI6MiwiYmQtdGlja2V0LWd1YXJkLWl0ZXJhdGlvbi12ZXJzaW9uIjoxLCJiZC10aWNrZXQtZ3VhcmQtY2xpZW50LWNzciI6Ii0tLS0tQkVHSU4gQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbk1JSUJEakNCdFFJQkFEQW5NUXN3Q1FZRFZRUUdFd0pEVGpFWU1CWUdBMVVFQXd3UFltUmZkR2xqYTJWMFgyZDFcclxuWVhKa01Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRWNFZitKV2tDb3l4OHhLekNnY0hReEVaRFxyXG5hN29maHJhUG9rMkQ5b1RaRGRvbDJuTVhaTis5dGJFclV0cVdUcm81ck4zekFyWTFLaXIzRlRUR2ZQUXRmS0FzXHJcbk1Db0dDU3FHU0liM0RRRUpEakVkTUJzd0dRWURWUjBSQkJJd0VJSU9kM2QzTG1SdmRYbHBiaTVqYjIwd0NnWUlcclxuS29aSXpqMEVBd0lEU0FBd1JRSWhBUFZaSGNFdW5HeGtBZFNmQXJ1MmdWb1RHbFhINkhsa1prRzZNc1pyR2hBL1xyXG5BaUJsV2NpM3h5SDk2UnJlTXpPSy8xVmFJQUNuTWUyU0RodUJIY2ZZaE80OWtRPT1cclxuLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbiJ9; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; ttcid=585ca9110e2345a09b4499dc543ec39959; odin_tt=d5fbc530c34bf8d4ea8cf8030c85a3f70202ee34dad7fb5b9ef7ff299d20e38ed01c852f02f2f3f864f2a45060480717f72083b58cdbc204c988edbf997fda7c; xgplayer_user_id=64984931555; SEARCH_RESULT_LIST_TYPE=%22single%22; pwa2=%222%7C1%22; download_guide=%223%2F20230407%22; __ac_nonce=0642fff310028297fec16; __ac_signature=_02B4Z6wo00f01M3AeJAAAIDDWRYjnPVAtVzN4HwAAFdlurQp2aR1Npvb7RYeaCY4fZs3DkMFlu7-Obn7zsvc34whBQesvTIc2p8nV-1crQtTacaxSYqP8nwNk3WqH.tkNdKQaUMw6sCC2.xZ15; VIDEO_FILTER_MEMO_SELECT=%7B%22expireTime%22%3A1681472712797%2C%22type%22%3A1%7D; home_can_add_dy_2_desktop=%221%22; tt_scid=PNmY2BL-Q9E9lAfdecenRGOzq64XgFOc0CJGFMN.JIE-QJO51S3Zvw56-Z6O12QVf4d9; msToken=uFCSX87jL9sTq6ScVYJucYgv9Hd5gCbTPvKIGBRMgVLuo7Pp9zLRrutBYzq4BmnCr83WnJAwZb8H78lBr3s3eyJLnySxYO5FgClQRXW1i_mAu7fLfBj3gA==; msToken=Sh6bVLWZUEZ3ruIHq1L3iUXnr1GT5yklyo-XZRO7lNgsFvpYq0C7tcu5Z4Jv9DrMESZ9kGVhwKT4ftFDkBL11ZGPUxth2ToA4M4q-qs2MK9ctR7GhwFkGw==',
                'referer': 'https://www.douyin.com/',
                'user-agent': get_random_user_agent('pc')
            }
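            # Requests are only sent once get_xb has produced an X-Bogus
            # signature for this query string and user-agent; the page is
            # skipped when signing fails.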
            try:
                x_bogus = get_xb(f_url, headers['user-agent'])
                if not x_bogus:
                    continue
                url = f_url + '&X-Bogus={}'.format(x_bogus)
                res = requests.get(url=url, headers=headers, proxies=Common.tunnel_proxies()).json()
                aweme_list = res.get('aweme_list', [])
            except Exception as e:
                Common.logger(log_type, crawler).error(f"Failed to fetch the Douyin recommend feed: {e}")
                continue
            if not aweme_list:
                Common.logger(log_type, crawler).warning(f"No more data from the Douyin recommend feed, page: {page}")
                continue
            for info in aweme_list:
                # Skip ads and entries without a publish timestamp.
                if info.get('is_ads'):
                    continue
                publish_time = info.get('create_time')
                if not publish_time:
                    continue
                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time))
                publish_day = int((int(time.time()) - publish_time) / (3600 * 24))
                if not info['desc']:
                    video_title = random_title(log_type, crawler, env, text='title')
                else:
                    video_title = cls.video_title(log_type, crawler, env, info['desc'])
                video_dict = {'video_title': video_title,
                              'video_id': info['aweme_id'],
                              'play_cnt': info['statistics']['play_count'],
                              'comment_cnt': info['statistics']['comment_count'],
                              'like_cnt': info['statistics']['digg_count'],
                              'share_cnt': info['statistics']['share_count'],
                              'video_width': info['video']['width'],
                              'video_height': info['video']['height'],
                              'duration': round(info['video']['duration'] / 1000),
                              'publish_time': publish_day,
                              'publish_time_stamp': publish_time * 1000,
                              'publish_time_str': publish_time_str,
                              'user_name': info['author']['nickname'],
                              'user_id': info['author_user_id'],
                              'user_sec_id': info['author']['sec_uid'],
                              'avatar_url': info['author']['avatar_thumb']['url_list'][0],
                              'cover_url': info['video']['origin_cover']['url_list'][0].replace('\u0026', '&'),
                              'video_url': info['video']['play_addr']['url_list'][0],
                              'session': f"douyin{int(time.time())}"
                              }
                for k, v in video_dict.items():
                    Common.logger(log_type, crawler).info(f"{k}:{v}")
                cls.download_publish(log_type=log_type,
                                     crawler=crawler,
                                     video_dict=video_dict,
                                     rule_dict=rule_dict,
                                     strategy=strategy,
                                     our_uid=our_id,
                                     oss_endpoint=oss_endpoint,
                                     env=env,
                                     machine=machine)
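
    # Each feed page requests count=10 items, and qualifying videos go straight
    # through download_publish, so a single run can touch up to 100 pages.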

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env, machine):
        # Count previously crawled rows for this platform/video pair.
        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
        return len(repeat_video)
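
    # A non-zero return means the video already exists in crawler_video, so
    # download_publish drops it before fetching any media.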

    # Download / upload
    @classmethod
    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
        try:
            filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
            for filter_word in filter_words:
                if filter_word in video_dict['video_title']:
                    Common.logger(log_type, crawler).info(f"Title hit a filter word: {video_dict['video_title']}\n")
                    return
            if cls.download_rule(video_dict, rule_dict) is False:
                Common.logger(log_type, crawler).info('Video does not satisfy the crawl rules\n')
            elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type, crawler)):
                Common.logger(log_type, crawler).info(f"Title hit a filter word: {video_dict['video_title']}\n")
            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
                Common.logger(log_type, crawler).info('Video already downloaded\n')
            else:
                # Download the video
                Common.download_method(log_type=log_type, crawler=crawler, text='video',
                                       title=video_dict['video_title'], url=video_dict['video_url'])
                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                    # Delete the video folder
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    Common.logger(log_type, crawler).info("Video size is 0, folder deleted\n")
                    return
                # ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
                # if ffmpeg_dict is None or ffmpeg_dict['size'] == 0:
                #     Common.logger(log_type, crawler).warning(f"Downloaded video is invalid, deleted\n")
                #     # Delete the video folder
                #     shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
                #     return
                # Download the cover
                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
                                       title=video_dict['video_title'], url=video_dict['cover_url'])
                # Save the video info to a txt file
                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
                # Upload the video
                Common.logger(log_type, crawler).info("Starting video upload...")
                our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                          crawler=crawler,
                                                          strategy=strategy,
                                                          our_uid=our_uid,
                                                          env=env,
                                                          oss_endpoint=oss_endpoint)
                if env == 'dev':
                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                else:
                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                Common.logger(log_type, crawler).info("Video upload finished")
                if our_video_id is None:
                    # Delete the video folder (the download dir is keyed by md_title)
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    return
                # Write the video to the Feishu sheet
                Feishu.insert_columns(log_type, 'douyin', "82c8d9", "ROWS", 1, 2)
                upload_time = int(time.time())
                values = [[
                    our_video_id,
                    time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                    "推荐",
                    str(video_dict['video_id']),
                    video_dict['video_title'],
                    our_video_link,
                    # video_dict['gid'],
                    video_dict['play_cnt'],
                    video_dict['comment_cnt'],
                    video_dict['like_cnt'],
                    video_dict['share_cnt'],
                    video_dict['duration'],
                    str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
                    video_dict['publish_time_str'],
                    video_dict['user_name'],
                    video_dict['user_id'],
                    video_dict['avatar_url'],
                    video_dict['cover_url'],
                    video_dict['video_url']
                ]]
                # time.sleep(1)
                Feishu.update_values(log_type, 'douyin', "82c8d9", "A2:Z2", values)
                Common.logger(log_type, crawler).info("Video saved to the cloud doc\n")
                # Save the video info to the database
                insert_sql = f""" insert into crawler_video(video_id,
                                user_id,
                                out_user_id,
                                platform,
                                strategy,
                                out_video_id,
                                video_title,
                                cover_url,
                                video_url,
                                duration,
                                publish_time,
                                play_cnt,
                                comment_cnt,
                                like_cnt,
                                share_cnt,
                                crawler_rule,
                                width,
                                height)
                                values({our_video_id},
                                {our_uid},
                                "{video_dict['user_id']}",
                                "{cls.platform}",
                                "推荐爬虫策略",
                                "{video_dict['video_id']}",
                                "{video_dict['video_title']}",
                                "{video_dict['cover_url']}",
                                "{video_dict['video_url']}",
                                {int(video_dict['duration'])},
                                "{video_dict['publish_time_str']}",
                                {int(video_dict['play_cnt'])},
                                {int(video_dict['comment_cnt'])},
                                {int(video_dict['like_cnt'])},
                                {int(video_dict['share_cnt'])},
                                '{json.dumps(rule_dict)}',
                                {int(video_dict['video_width'])},
                                {int(video_dict['video_height'])}) """
                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
                Common.logger(log_type, crawler).info('Video info inserted into the database!\n')
        except Exception as e:
            Common.logger(log_type, crawler).error(f'download_publish exception: {e}\n')
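
    # The crawler_video row written above (platform + out_video_id) is exactly
    # what repeat_video queries, so dedup only takes effect once the insert
    # succeeds.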

    # @classmethod
    # def get_recommend(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
    #     try:
    #         user_list = get_user_from_mysql(log_type, crawler, crawler, env)
    #         for user in user_list:
    #             spider_link = user["spider_link"]
    #             out_uid = spider_link.split('/')[-1]
    #             user_name = user["nick_name"]
    #             our_uid = user["media_id"]
    #
    #             Common.logger(log_type, crawler).info(f"Start crawling profile videos of user {user_name}\n")
    #             cls.get_videolist(log_type=log_type,
    #                               crawler=crawler,
    #                               strategy=strategy,
    #                               our_uid=our_uid,
    #                               oss_endpoint=oss_endpoint,
    #                               env=env,
    #                               machine=machine)
    #             time.sleep(1)
    #     except Exception as e:
    #         Common.logger(log_type, crawler).error(f"get_follow_videos:{e}\n")


if __name__ == '__main__':
    # DyRecommend.get_recommend('recommend', 'douyin', '推荐抓取策略', 'inner', 'prod', 'aliyun')
    DyRecommend.get_videolist('recommend', 'douyin', '推荐抓取策略', 6282709, 'outer', 'dev', 'aliyun')