recommend_dy.py

# -*- coding: utf-8 -*-
# @Author: lierqiang
# @Time: 2023/4/06
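"""
Douyin (抖音) recommend-feed crawler.

Fetches the web recommend feed page by page, signs each request with an
X-Bogus parameter generated from xb.js, filters the returned videos against
rules and filter words configured in Feishu sheets, downloads the video and
cover, publishes via Publish, and records results to Feishu and MySQL.
"""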
import json
import os
import shutil
import sys
import time

import requests
import execjs
from hashlib import md5

sys.path.append(os.getcwd())
from common.db import MysqlHelper
from common.feishu import Feishu
from common.publish import Publish
from common.public import get_user_from_mysql
from common.userAgent import get_random_user_agent
from common.common import Common


def get_xb(f_url, ua):
    # Compute the X-Bogus signature: load the obfuscated xb.js and call its
    # signing function with the request's query string and the User-Agent.
    with open('../xb.js', 'r', encoding='utf-8') as f:
        douyin_js = f.read()
    params = f_url.split('/?')[1]
    # params = urllib.parse.unquote(params)
    # params = urllib.parse.unquote(urllib.parse.urlencode(params, safe='='))
    ctx = execjs.compile(douyin_js)
    xb = ctx.call('_0x11bbd8', params, ua)
    return xb
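

# Crawl pipeline: get_rule loads thresholds from a Feishu sheet, get_videolist
# walks the web recommend feed and builds a video_dict per item, and
# download_publish filters, downloads, uploads, and records each video.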
class DyRecommend(object):
    # Paging parameter for personal-homepage video lists
    # offset = 0
    platform = "抖音"
    # tag = "西瓜视频爬虫,定向爬虫策略"

    @classmethod
    def get_rule(cls, log_type, crawler):
        try:
            while True:
                rule_sheet = Feishu.get_values_batch(log_type, crawler, "a6L9Kb")
                if rule_sheet is None:
                    Common.logger(log_type, crawler).warning("rule_sheet is None! 10秒后重新获取")
                    time.sleep(10)
                    continue
                rule_dict = {
                    # "play_cnt": int(rule_sheet[0][2]),
                    # "comment_cnt": int(rule_sheet[1][2]),
                    "video_width": int(rule_sheet[0][2]),
                    "video_height": int(rule_sheet[1][2]),
                    "like_cnt": int(rule_sheet[2][2]),
                    "duration": int(rule_sheet[3][2]),
                    "publish_time": int(rule_sheet[4][2]),
                }
                return rule_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")

    # Download rule
    @classmethod
    def download_rule(cls, video_info_dict, rule_dict):
        # if video_info_dict['play_cnt'] >= rule_dict['play_cnt']:
        # if video_info_dict['comment_cnt'] >= rule_dict['comment_cnt']:
        if video_info_dict['like_cnt'] >= rule_dict['like_cnt']:
            if video_info_dict['duration'] >= rule_dict['duration']:
                if video_info_dict['video_width'] >= rule_dict['video_width'] \
                        or video_info_dict['video_height'] >= rule_dict['video_height']:
                    return True
                else:
                    return False
            else:
                return False
        else:
            return False
        # else:
        #     return False
        # else:
        #     return False

    # Filter-word list
    @classmethod
    def filter_words(cls, log_type, crawler):
        try:
            while True:
                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, '6BS2RR')
                if filter_words_sheet is None:
                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
                    # Wait before retrying, as the log message states
                    time.sleep(10)
                    continue
                filter_words_list = []
                for x in filter_words_sheet:
                    for y in x:
                        if y is not None:
                            filter_words_list.append(y)
                return filter_words_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')
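
    # Recommend feed: fetch pages of the feed, build a video_dict per item,
    # then hand each item to download_publish.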
    @classmethod
    def get_videolist(cls, log_type, crawler, strategy, our_id, oss_endpoint, env, machine):
        rule_dict = cls.get_rule(log_type, crawler)
        for page in range(1, 101):
            try:
                aweme_pc_rec_raw_data = '%7B%22videoPrefer%22%3A%7B%22fsn%22%3A%5B%5D%2C%22like%22%3A%5B%5D%2C%22halfMin%22%3A%5B%5D%2C%22min%22%3A%5B%5D%7D%2C%22seo_info%22%3A%22https%3A%2F%2Fwww.douyin.com%2F%22%2C%22is_client%22%3Afalse%2C%22ff_danmaku_status%22%3A1%2C%22danmaku_switch_status%22%3A0%7D'
                f_url = 'https://www.douyin.com/aweme/v1/web/tab/feed/?device_platform=webapp&aid=6383&channel=channel_pc_web&tag_id=&share_aweme_id=&count=10&refresh_index={page}&video_type_select=0&aweme_pc_rec_raw_data={aweme_pc_rec_raw_data}&globalwid=&pull_type=2&min_window=0&ug_source=&creative_id=&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1920&screen_height=1080&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=111.0.0.0&browser_online=true&engine_name=Blink&engine_version=111.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=200&webid=7217725630355097128&msToken=JFY-VD2YHS-6IO6lNhTj4AcLVtHqjein_5giYzIUh_VRsXMPFXy9QOg-RKDDwQgW5TTbgQB_BLzpfQhNdNEQCv5sGXatzGei9yppG1eSLLkbI9fjhTdBWtdkAJpLIg=='.format(
                    page=page, aweme_pc_rec_raw_data=aweme_pc_rec_raw_data)
                headers = {
                    'cookie': 'ttwid=1%7CzpiG_VTvd1xpRFvfHUjEHiaq3qkfUqPElZUu0wbTSr8%7C1680507728%7Cc61697d37b4d4d49d42b466a3bbe8ecd5c06ae6e9a751d9e410102d2c52a185d; douyin.com; passport_csrf_token=208a829b0156a2feaa0fa24ad026ea91; passport_csrf_token_default=208a829b0156a2feaa0fa24ad026ea91; s_v_web_id=verify_lg0iwv1g_BwfztkmU_azbL_4Gua_9Fb9_KWfGPVXCyWua; bd_ticket_guard_client_data=eyJiZC10aWNrZXQtZ3VhcmQtdmVyc2lvbiI6MiwiYmQtdGlja2V0LWd1YXJkLWl0ZXJhdGlvbi12ZXJzaW9uIjoxLCJiZC10aWNrZXQtZ3VhcmQtY2xpZW50LWNzciI6Ii0tLS0tQkVHSU4gQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbk1JSUJEekNCdFFJQkFEQW5NUXN3Q1FZRFZRUUdFd0pEVGpFWU1CWUdBMVVFQXd3UFltUmZkR2xqYTJWMFgyZDFcclxuWVhKa01Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRU9zWGhGbG5ZWjVNeG5ZRGFFOCtCYmRGdFxyXG5VZTh6SG0ycTRXeWxvdkxXVXVOcy9oV2tlZlBRK3BsNkg2OGQwdGtOVVB5UStmUnpyWlRFL1ZXMTR5UlRkS0FzXHJcbk1Db0dDU3FHU0liM0RRRUpEakVkTUJzd0dRWURWUjBSQkJJd0VJSU9kM2QzTG1SdmRYbHBiaTVqYjIwd0NnWUlcclxuS29aSXpqMEVBd0lEU1FBd1JnSWhBTmdPS3Jkb3V4SHBzcHNiY0dmUHJYQ0lVNnVwcmZkd2ZFY2g5TXZndW5Ea1xyXG5BaUVBM2xVeDQ2bzd0UWJUT0dXdzgzQm45RnFyQkRVVHNOVjkyZUEyR1hPR3BkVT1cclxuLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbiJ9; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; ttcid=60304e8eb309434784f02372ef36387b41; xgplayer_user_id=446878319428; __ac_nonce=0642bcb0a001596f3fe5b; __ac_signature=_02B4Z6wo00f012HU-wAAAIDA9QKgDGYSlVNh9P-AALxqUQdGOEO.l3IAhdmUh4D-Y9rXLut3p7moXUuAUmo7rUOUJzpnB9nLx0YdZcvdMNeUgQOjsGIHh9LTN38BOVtrElZBeXDLjuVVC5Hh81; strategyABtestKey=%221680591628.189%22; download_guide=%223%2F20230404%22; VIDEO_FILTER_MEMO_SELECT=%7B%22expireTime%22%3A1681196521206%2C%22type%22%3A0%7D; home_can_add_dy_2_desktop=%221%22; msToken=v_pzGLfpXwl4PugynDwIb5DeepUms68tZLZFNLHl8WQnEeNQZtaawWYVu4Y3TLWxpqbvgqkOFULGmld2BLBZydbzrMJgkx5q5GjqetkVI4GoxLX1QdJQ0CP607uEVw==; tt_scid=NZGTg99heu5lHFfAvBht7p3Qxl0TGP.TyfxOQ7cWIvZjEnOcZERFaJxQ.HnKY-UT18cb; msToken=JFY-VD2YHS-6IO6lNhTj4AcLVtHqjein_5giYzIUh_VRsXMPFXy9QOg-RKDDwQgW5TTbgQB_BLzpfQhNdNEQCv5sGXatzGei9yppG1eSLLkbI9fjhTdBWtdkAJpLIg==',
                    'referer': 'https://www.douyin.com/',
                    'user-agent': get_random_user_agent('pc')
                }
                x_bogus = get_xb(f_url, headers['user-agent'])
                url = f_url + '&X-Bogus={}'.format(x_bogus)
                res = requests.get(url=url, headers=headers, proxies=Common.tunnel_proxies()).json()
                aweme_list = res.get('aweme_list', [])
                if not aweme_list:
                    Common.logger(log_type, crawler).warning(f"抖音推荐没有获取到更多数据,页数:{page}")
                    continue
                for info in aweme_list:
                    if info['is_ads']:
                        continue
                    publish_time = info['create_time']
                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time))
                    publish_day = int((int(time.time()) - publish_time) / (3600 * 24))
                    video_dict = {'video_title': info['desc'],
                                  'video_id': info['aweme_id'],
                                  'play_cnt': info['statistics']['play_count'],
                                  'comment_cnt': info['statistics']['comment_count'],
                                  'like_cnt': info['statistics']['digg_count'],
                                  'share_cnt': info['statistics']['share_count'],
                                  'video_width': info['video']['width'],
                                  'video_height': info['video']['height'],
                                  'duration': round(info['video']['duration'] / 1000),
                                  'publish_time': publish_day,
                                  'publish_time_stamp': publish_time * 1000,
                                  'publish_time_str': publish_time_str,
                                  'user_name': info['author']['nickname'],
                                  'user_id': info['author_user_id'],
                                  'user_sec_id': info['author']['sec_uid'],
                                  'avatar_url': info['author']['avatar_thumb']['url_list'][0],
                                  'cover_url': info['video']['origin_cover']['url_list'][0].replace('\u0026', '&'),
                                  'video_url': info['video']['play_addr']['url_list'][0],
                                  'session': f"douyin{int(time.time())}"
                                  }
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    cls.download_publish(log_type=log_type,
                                         crawler=crawler,
                                         video_dict=video_dict,
                                         rule_dict=rule_dict,
                                         strategy=strategy,
                                         our_uid=our_id,
                                         oss_endpoint=oss_endpoint,
                                         env=env,
                                         machine=machine)
            except Exception as e:
                Common.logger(log_type, crawler).error(f"获取抖音推荐失败:{e}")
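
    # Deduplication: count existing crawler_video rows with the same platform and out_video_id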
    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env, machine):
        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
        return len(repeat_video)

    # Download / upload
    @classmethod
    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
        try:
            if cls.download_rule(video_dict, rule_dict) is False:
                Common.logger(log_type, crawler).info('不满足抓取规则\n')
            elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type, crawler)):
                Common.logger(log_type, crawler).info(f"标题已中过滤词:{video_dict['video_title']}\n")
            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
                Common.logger(log_type, crawler).info('视频已下载\n')
            else:
                # Download the video
                Common.download_method(log_type=log_type, crawler=crawler, text='video',
                                       title=video_dict['video_title'], url=video_dict['video_url'])
                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                    # Delete the video folder
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                    return
                # ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
                # if ffmpeg_dict is None or ffmpeg_dict['size'] == 0:
                #     Common.logger(log_type, crawler).warning(f"下载的视频无效,已删除\n")
                #     # Delete the video folder
                #     shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
                #     return
                # Download the cover image
                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
                                       title=video_dict['video_title'], url=video_dict['cover_url'])
                # Save the video info to txt
                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
                # Upload the video
                Common.logger(log_type, crawler).info("开始上传视频...")
                our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                          crawler=crawler,
                                                          strategy=strategy,
                                                          our_uid=our_uid,
                                                          env=env,
                                                          oss_endpoint=oss_endpoint)
                if env == 'dev':
                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                else:
                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                Common.logger(log_type, crawler).info("视频上传完成")
                if our_video_id is None:
                    # Delete the video folder (named by the md5 of the title, as above)
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    return
                # Write the video record to Feishu
                # Feishu.insert_columns(log_type, 'douyin', "82c8d9", "ROWS", 1, 2)
                upload_time = int(time.time())
                values = [[
                    our_video_id,
                    time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                    "推荐",
                    str(video_dict['video_id']),
                    video_dict['video_title'],
                    our_video_link,
                    # video_dict['gid'],
                    video_dict['play_cnt'],
                    video_dict['comment_cnt'],
                    video_dict['like_cnt'],
                    video_dict['share_cnt'],
                    video_dict['duration'],
                    str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
                    video_dict['publish_time_str'],
                    video_dict['user_name'],
                    video_dict['user_id'],
                    video_dict['avatar_url'],
                    video_dict['cover_url'],
                    video_dict['video_url']
                ]]
                # time.sleep(1)
                Feishu.update_values(log_type, 'douyin', "82c8d9", "A2:Z2", values)
                Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
                # Save the video record to the database
                insert_sql = f""" insert into crawler_video(video_id,
                                                            user_id,
                                                            out_user_id,
                                                            platform,
                                                            strategy,
                                                            out_video_id,
                                                            video_title,
                                                            cover_url,
                                                            video_url,
                                                            duration,
                                                            publish_time,
                                                            play_cnt,
                                                            comment_cnt,
                                                            like_cnt,
                                                            share_cnt,
                                                            crawler_rule,
                                                            width,
                                                            height)
                                                            values({our_video_id},
                                                            {our_uid},
                                                            "{video_dict['user_id']}",
                                                            "{cls.platform}",
                                                            "推荐爬虫策略",
                                                            "{video_dict['video_id']}",
                                                            "{video_dict['video_title']}",
                                                            "{video_dict['cover_url']}",
                                                            "{video_dict['video_url']}",
                                                            {int(video_dict['duration'])},
                                                            "{video_dict['publish_time_str']}",
                                                            {int(video_dict['play_cnt'])},
                                                            {int(video_dict['comment_cnt'])},
                                                            {int(video_dict['like_cnt'])},
                                                            {int(video_dict['share_cnt'])},
                                                            '{json.dumps(rule_dict)}',
                                                            {int(video_dict['video_width'])},
                                                            {int(video_dict['video_height'])}) """
                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
                Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
        except Exception as e:
            Common.logger(log_type, crawler).error(f'download_publish异常:{e}\n')

    # @classmethod
    # def get_recommend(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
    #     try:
    #
    #         user_list = get_user_from_mysql(log_type, crawler, crawler, env)
    #         for user in user_list:
    #             spider_link = user["spider_link"]
    #             out_uid = spider_link.split('/')[-1]
    #             user_name = user["nick_name"]
    #             our_uid = user["media_id"]
    #
    #             Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
    #             cls.get_videolist(log_type=log_type,
    #                               crawler=crawler,
    #                               strategy=strategy,
    #                               our_uid=our_uid,
    #                               oss_endpoint=oss_endpoint,
    #                               env=env,
    #                               machine=machine)
    #             time.sleep(1)
    #     except Exception as e:
    #         Common.logger(log_type, crawler).error(f"get_follow_videos:{e}\n")


if __name__ == '__main__':
    # DyRecommend.get_recommend('recommend', 'douyin', '推荐抓取策略', 'inner', 'prod', 'aliyun')
    DyRecommend.get_videolist('recommend', 'douyin', '推荐抓取策略', 6282709, 'outer', 'dev', 'aliyun')