recommend_dy.py

# -*- coding: utf-8 -*-
# @Author: lierqiang
# @Time: 2023/4/06
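"""Douyin (抖音) recommendation-feed crawler.

Pulls videos page by page from the web feed endpoint, checks each one against
download rules read from a Feishu sheet (resolution, like count, duration,
publish time), skips titles that hit the filter-word list and videos already
recorded in the crawler_video table, then downloads, uploads, and logs the rest.
"""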
import json
import os
import random
import shutil
import sys
import time
from hashlib import md5

import requests

from douyin.douyin_recommend import get_xb

# Make the project root importable before pulling in the common helpers
sys.path.append(os.getcwd())
from common.db import MysqlHelper
from common.feishu import Feishu
from common.publish import Publish
from common.userAgent import get_random_user_agent
from common.common import Common


class DyRecommend(object):
    # Paging parameter for a user's profile videos (unused in this crawler)
    # offset = 0
    platform = "抖音"
    # tag = "西瓜视频爬虫,定向爬虫策略"

    @classmethod
    def get_rule(cls, log_type, crawler):
        try:
            while True:
                rule_sheet = Feishu.get_values_batch(log_type, crawler, "a6L9Kb")
                if rule_sheet is None:
                    Common.logger(log_type, crawler).warning("rule_sheet is None! Fetching again in 10s")
                    time.sleep(10)
                    continue
                # Each rule value sits in column index 2 of its row on the sheet
                rule_dict = {
                    # "play_cnt": int(rule_sheet[0][2]),
                    # "comment_cnt": int(rule_sheet[1][2]),
                    "video_width": int(rule_sheet[0][2]),
                    "video_height": int(rule_sheet[1][2]),
                    "like_cnt": int(rule_sheet[2][2]),
                    "duration": int(rule_sheet[3][2]),
                    "publish_time": int(rule_sheet[4][2]),
                }
                return rule_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")

    # Download rule: a video qualifies only if it passes every active check
    @classmethod
    def download_rule(cls, video_info_dict, rule_dict):
        # if video_info_dict['play_cnt'] >= rule_dict['play_cnt']:
        # if video_info_dict['comment_cnt'] >= rule_dict['comment_cnt']:
        return (video_info_dict['like_cnt'] >= rule_dict['like_cnt']
                and video_info_dict['duration'] >= rule_dict['duration']
                and (video_info_dict['video_width'] >= rule_dict['video_width']
                     or video_info_dict['video_height'] >= rule_dict['video_height']))
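
    # Illustration (values assumed, not read from the sheet): with
    # rule_dict = {"video_width": 0, "video_height": 0, "like_cnt": 100, "duration": 40, "publish_time": 10},
    # a 35-second clip fails the duration check and is rejected no matter how many likes it has.
    # Note that publish_time is fetched with the other rules but never checked here.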

    # Filter-word list
    @classmethod
    def filter_words(cls, log_type, crawler):
        try:
            while True:
                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, '6BS2RR')
                if filter_words_sheet is None:
                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} retrying in 10s")
                    time.sleep(10)
                    continue
                filter_words_list = []
                for x in filter_words_sheet:
                    for y in x:
                        if y is not None:
                            filter_words_list.append(y)
                return filter_words_list
        except Exception as e:
            Common.logger(log_type, crawler).error(f'filter_words exception:{e}\n')

    @classmethod
    def random_title(cls, log_type, crawler):
        try:
            while True:
                random_title_sheet = Feishu.get_values_batch(log_type, crawler, 'sPK2oY')
                if random_title_sheet is None:
                    Common.logger(log_type, crawler).warning(f"random_title_sheet:{random_title_sheet} retrying in 10s")
                    time.sleep(10)
                    continue
                random_title_list = []
                for x in random_title_sheet:
                    for y in x:
                        if y is not None:
                            random_title_list.append(y)
                return random.choice(random_title_list)
        except Exception as e:
            Common.logger(log_type, crawler).error(f'random_title:{e}\n')
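
    # random_title draws a replacement title at random from the 'sPK2oY' sheet;
    # get_videolist falls back to it whenever a feed item arrives with an empty desc.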

    @classmethod
    def get_videolist(cls, log_type, crawler, strategy, our_id, oss_endpoint, env, machine):
        rule_dict = cls.get_rule(log_type, crawler)
        for page in range(1, 101):
            try:
                aweme_pc_rec_raw_data = '%7B%22videoPrefer%22%3A%7B%22fsn%22%3A%5B%5D%2C%22like%22%3A%5B%5D%2C%22halfMin%22%3A%5B%5D%2C%22min%22%3A%5B%5D%7D%2C%22seo_info%22%3A%22https%3A%2F%2Fwww.douyin.com%2F%22%2C%22is_client%22%3Afalse%2C%22ff_danmaku_status%22%3A1%2C%22danmaku_switch_status%22%3A0%7D'
                f_url = 'https://www.douyin.com/aweme/v1/web/tab/feed/?device_platform=webapp&aid=6383&channel=channel_pc_web&tag_id=&share_aweme_id=&count=10&refresh_index={page}&video_type_select=0&aweme_pc_rec_raw_data={aweme_pc_rec_raw_data}&globalwid=&pull_type=2&min_window=0&ug_source=&creative_id=&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1920&screen_height=1080&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=111.0.0.0&browser_online=true&engine_name=Blink&engine_version=111.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=200&webid=7217725630355097128&msToken=JFY-VD2YHS-6IO6lNhTj4AcLVtHqjein_5giYzIUh_VRsXMPFXy9QOg-RKDDwQgW5TTbgQB_BLzpfQhNdNEQCv5sGXatzGei9yppG1eSLLkbI9fjhTdBWtdkAJpLIg=='.format(
                    page=page, aweme_pc_rec_raw_data=aweme_pc_rec_raw_data)
                headers = {
                    'cookie': 'ttwid=1%7CzpiG_VTvd1xpRFvfHUjEHiaq3qkfUqPElZUu0wbTSr8%7C1680507728%7Cc61697d37b4d4d49d42b466a3bbe8ecd5c06ae6e9a751d9e410102d2c52a185d; douyin.com; passport_csrf_token=208a829b0156a2feaa0fa24ad026ea91; passport_csrf_token_default=208a829b0156a2feaa0fa24ad026ea91; s_v_web_id=verify_lg0iwv1g_BwfztkmU_azbL_4Gua_9Fb9_KWfGPVXCyWua; bd_ticket_guard_client_data=eyJiZC10aWNrZXQtZ3VhcmQtdmVyc2lvbiI6MiwiYmQtdGlja2V0LWd1YXJkLWl0ZXJhdGlvbi12ZXJzaW9uIjoxLCJiZC10aWNrZXQtZ3VhcmQtY2xpZW50LWNzciI6Ii0tLS0tQkVHSU4gQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbk1JSUJEekNCdFFJQkFEQW5NUXN3Q1FZRFZRUUdFd0pEVGpFWU1CWUdBMVVFQXd3UFltUmZkR2xqYTJWMFgyZDFcclxuWVhKa01Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRU9zWGhGbG5ZWjVNeG5ZRGFFOCtCYmRGdFxyXG5VZTh6SG0ycTRXeWxvdkxXVXVOcy9oV2tlZlBRK3BsNkg2OGQwdGtOVVB5UStmUnpyWlRFL1ZXMTR5UlRkS0FzXHJcbk1Db0dDU3FHU0liM0RRRUpEakVkTUJzd0dRWURWUjBSQkJJd0VJSU9kM2QzTG1SdmRYbHBiaTVqYjIwd0NnWUlcclxuS29aSXpqMEVBd0lEU1FBd1JnSWhBTmdPS3Jkb3V4SHBzcHNiY0dmUHJYQ0lVNnVwcmZkd2ZFY2g5TXZndW5Ea1xyXG5BaUVBM2xVeDQ2bzd0UWJUT0dXdzgzQm45RnFyQkRVVHNOVjkyZUEyR1hPR3BkVT1cclxuLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbiJ9; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; ttcid=60304e8eb309434784f02372ef36387b41; xgplayer_user_id=446878319428; __ac_nonce=0642bcb0a001596f3fe5b; __ac_signature=_02B4Z6wo00f012HU-wAAAIDA9QKgDGYSlVNh9P-AALxqUQdGOEO.l3IAhdmUh4D-Y9rXLut3p7moXUuAUmo7rUOUJzpnB9nLx0YdZcvdMNeUgQOjsGIHh9LTN38BOVtrElZBeXDLjuVVC5Hh81; strategyABtestKey=%221680591628.189%22; download_guide=%223%2F20230404%22; VIDEO_FILTER_MEMO_SELECT=%7B%22expireTime%22%3A1681196521206%2C%22type%22%3A0%7D; home_can_add_dy_2_desktop=%221%22; msToken=v_pzGLfpXwl4PugynDwIb5DeepUms68tZLZFNLHl8WQnEeNQZtaawWYVu4Y3TLWxpqbvgqkOFULGmld2BLBZydbzrMJgkx5q5GjqetkVI4GoxLX1QdJQ0CP607uEVw==; tt_scid=NZGTg99heu5lHFfAvBht7p3Qxl0TGP.TyfxOQ7cWIvZjEnOcZERFaJxQ.HnKY-UT18cb; msToken=JFY-VD2YHS-6IO6lNhTj4AcLVtHqjein_5giYzIUh_VRsXMPFXy9QOg-RKDDwQgW5TTbgQB_BLzpfQhNdNEQCv5sGXatzGei9yppG1eSLLkbI9fjhTdBWtdkAJpLIg==',
                    'referer': 'https://www.douyin.com/',
                    'user-agent': get_random_user_agent('pc')
                }
                # Sign the request: X-Bogus is computed from the full URL and the UA string
                x_bogus = get_xb(f_url, headers['user-agent'])
                url = f_url + '&X-Bogus={}'.format(x_bogus)
                res = requests.get(url=url, headers=headers, proxies=Common.tunnel_proxies()).json()
                aweme_list = res.get('aweme_list', [])
                if not aweme_list:
                    Common.logger(log_type, crawler).warning(f"Douyin recommend returned no more data, page: {page}")
                    continue
                for info in aweme_list:
                    if info['is_ads']:
                        continue
                    publish_time = info['create_time']
                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time))
                    publish_day = int((int(time.time()) - publish_time) / (3600 * 24))
                    # Feed items can arrive without a description; fall back to a random title
                    if not info['desc']:
                        video_title = cls.random_title(log_type, crawler)
                    else:
                        video_title = info['desc']
                    video_dict = {'video_title': video_title,
                                  'video_id': info['aweme_id'],
                                  'play_cnt': info['statistics']['play_count'],
                                  'comment_cnt': info['statistics']['comment_count'],
                                  'like_cnt': info['statistics']['digg_count'],
                                  'share_cnt': info['statistics']['share_count'],
                                  'video_width': info['video']['width'],
                                  'video_height': info['video']['height'],
                                  # duration arrives in milliseconds; the rules are in seconds
                                  'duration': round(info['video']['duration'] / 1000),
                                  'publish_time': publish_day,
                                  'publish_time_stamp': publish_time * 1000,
                                  'publish_time_str': publish_time_str,
                                  'user_name': info['author']['nickname'],
                                  'user_id': info['author_user_id'],
                                  'user_sec_id': info['author']['sec_uid'],
                                  'avatar_url': info['author']['avatar_thumb']['url_list'][0],
                                  'cover_url': info['video']['origin_cover']['url_list'][0].replace('\u0026', '&'),
                                  'video_url': info['video']['play_addr']['url_list'][0],
                                  'session': f"douyin{int(time.time())}"
                                  }
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    cls.download_publish(log_type=log_type,
                                         crawler=crawler,
                                         video_dict=video_dict,
                                         rule_dict=rule_dict,
                                         strategy=strategy,
                                         our_uid=our_id,
                                         oss_endpoint=oss_endpoint,
                                         env=env,
                                         machine=machine)
            except Exception as e:
                Common.logger(log_type, crawler).error(f"Failed to fetch Douyin recommend feed: {e}")

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env, machine):
        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
        return len(repeat_video)
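
    # repeat_video returns the number of crawler_video rows already stored for
    # this Douyin video id; any non-zero count means it was crawled before.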

    # Download / upload
    @classmethod
    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
        try:
            if cls.download_rule(video_dict, rule_dict) is False:
                Common.logger(log_type, crawler).info('Does not satisfy the download rules\n')
            elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type, crawler)):
                Common.logger(log_type, crawler).info(f"Title hit a filter word: {video_dict['video_title']}\n")
            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
                Common.logger(log_type, crawler).info('Video already downloaded\n')
            else:
                # Download the video
                Common.download_method(log_type=log_type, crawler=crawler, text='video',
                                       title=video_dict['video_title'], url=video_dict['video_url'])
                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                    # Delete the video folder
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    Common.logger(log_type, crawler).info("Video size is 0, folder deleted\n")
                    return
                # ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
                # if ffmpeg_dict is None or ffmpeg_dict['size'] == 0:
                #     Common.logger(log_type, crawler).warning(f"下载的视频无效,已删除\n")
                #     # Delete the video folder
                #     shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
                #     return
                # Download the cover
                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
                                       title=video_dict['video_title'], url=video_dict['cover_url'])
                # Save the video info to a txt file
                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
                # Upload the video
                Common.logger(log_type, crawler).info("Starting video upload...")
                our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                          crawler=crawler,
                                                          strategy=strategy,
                                                          our_uid=our_uid,
                                                          env=env,
                                                          oss_endpoint=oss_endpoint)
                if env == 'dev':
                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                else:
                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
                Common.logger(log_type, crawler).info("Video upload finished")
                if our_video_id is None:
                    # Delete the video folder (download_method keys it by the md5 of the title)
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    return
                # Write the video to Feishu
                Feishu.insert_columns(log_type, 'douyin', "82c8d9", "ROWS", 1, 2)
                upload_time = int(time.time())
                values = [[
                    our_video_id,
                    time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                    "推荐",
                    str(video_dict['video_id']),
                    video_dict['video_title'],
                    our_video_link,
                    # video_dict['gid'],
                    video_dict['play_cnt'],
                    video_dict['comment_cnt'],
                    video_dict['like_cnt'],
                    video_dict['share_cnt'],
                    video_dict['duration'],
                    str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
                    video_dict['publish_time_str'],
                    video_dict['user_name'],
                    video_dict['user_id'],
                    video_dict['avatar_url'],
                    video_dict['cover_url'],
                    video_dict['video_url']
                ]]
                # time.sleep(1)
                Feishu.update_values(log_type, 'douyin', "82c8d9", "A2:Z2", values)
                Common.logger(log_type, crawler).info("Video saved to the cloud doc\n")
                # Save the video info to the database
                insert_sql = f""" insert into crawler_video(video_id,
                                                            user_id,
                                                            out_user_id,
                                                            platform,
                                                            strategy,
                                                            out_video_id,
                                                            video_title,
                                                            cover_url,
                                                            video_url,
                                                            duration,
                                                            publish_time,
                                                            play_cnt,
                                                            comment_cnt,
                                                            like_cnt,
                                                            share_cnt,
                                                            crawler_rule,
                                                            width,
                                                            height)
                                                            values({our_video_id},
                                                            {our_uid},
                                                            "{video_dict['user_id']}",
                                                            "{cls.platform}",
                                                            "推荐爬虫策略",
                                                            "{video_dict['video_id']}",
                                                            "{video_dict['video_title']}",
                                                            "{video_dict['cover_url']}",
                                                            "{video_dict['video_url']}",
                                                            {int(video_dict['duration'])},
                                                            "{video_dict['publish_time_str']}",
                                                            {int(video_dict['play_cnt'])},
                                                            {int(video_dict['comment_cnt'])},
                                                            {int(video_dict['like_cnt'])},
                                                            {int(video_dict['share_cnt'])},
                                                            '{json.dumps(rule_dict)}',
                                                            {int(video_dict['video_width'])},
                                                            {int(video_dict['video_height'])}) """
                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
                MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
                Common.logger(log_type, crawler).info('Video info inserted into the database\n')
        except Exception as e:
            Common.logger(log_type, crawler).error(f'download_publish exception:{e}\n')

    # @classmethod
    # def get_recommend(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
    #     try:
    #         user_list = get_user_from_mysql(log_type, crawler, crawler, env)
    #         for user in user_list:
    #             spider_link = user["spider_link"]
    #             out_uid = spider_link.split('/')[-1]
    #             user_name = user["nick_name"]
    #             our_uid = user["media_id"]
    #
    #             Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
    #             cls.get_videolist(log_type=log_type,
    #                               crawler=crawler,
    #                               strategy=strategy,
    #                               our_uid=our_uid,
    #                               oss_endpoint=oss_endpoint,
    #                               env=env,
    #                               machine=machine)
    #             time.sleep(1)
    #     except Exception as e:
    #         Common.logger(log_type, crawler).error(f"get_follow_videos:{e}\n")


if __name__ == '__main__':
    # DyRecommend.get_recommend('recommend', 'douyin', '推荐抓取策略', 'inner', 'prod', 'aliyun')
    DyRecommend.get_videolist('recommend', 'douyin', '推荐抓取策略', 6282709, 'outer', 'dev', 'aliyun')
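    # A production run would presumably mirror the endpoint/env flags of the
    # commented-out call above (values assumed, not confirmed):
    # DyRecommend.get_videolist('recommend', 'douyin', '推荐抓取策略', 6282709, 'inner', 'prod', 'aliyun')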