douyin_recommend_scheduling.py

# -*- coding: utf-8 -*-
# @Author: lierqiang
# @Time: 2023/4/06
import json
import os
import shutil
import sys
import time
import requests
from hashlib import md5

from common.mq import MQ

sys.path.append(os.getcwd())
from douyin.douyin_recommend import get_xb
from common.common import Common
from common.feishu import Feishu
from common.scheduling_db import MysqlHelper
from common.publish import Publish
from common.userAgent import get_random_user_agent
from common.public import random_title, get_config_from_mysql, download_rule


class DouyinrecommendScheduling:
    platform = "抖音"

    @classmethod
    def video_title(cls, log_type, crawler, env, title):
        """Clean a raw caption into a usable title; fall back to a random title when nothing usable remains."""
        # Keep the text before the first " #" (hashtag block) when it is non-empty.
        title_split1 = title.split(" #")
        if title_split1[0] != "":
            title1 = title_split1[0]
        else:
            title1 = title_split1[-1]

        title_split2 = title1.split(" #")
        if title_split2[0] != "":
            title2 = title_split2[0]
        else:
            title2 = title_split2[-1]

        # Drop @mentions the same way.
        title_split3 = title2.split("@")
        if title_split3[0] != "":
            title3 = title_split3[0]
        else:
            title3 = title_split3[-1]

        # Strip whitespace and forbidden characters, then cap the title at 40 characters.
        video_title = title3.strip().replace("\n", "") \
            .replace("/", "").replace("抖音", "").replace(" ", "") \
            .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
            .replace("#", "").replace(".", "。").replace("\\", "") \
            .replace(":", "").replace("*", "").replace("?", "") \
            .replace("?", "").replace('"', "").replace("<", "") \
            .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]

        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
            return random_title(log_type, crawler, env, text='title')
        else:
            return video_title
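
    # A minimal sketch of how the cleaning above behaves (the caption is an assumed example):
    #   DouyinrecommendScheduling.video_title("recommend", "douyin", "prod", "萌宠日常 #cute @someone")
    #   -> "萌宠日常"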

    @classmethod
    def get_videoList(cls, log_type, crawler, rule_dict, our_uid, env):
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        for page in range(1, 101):
            Common.logger(log_type, crawler).info(f"Crawling page {page}\n")
            Common.logging(log_type, crawler, env, f"Crawling page {page}\n")
            try:
                # URL-encoded recommendation payload captured from the web client.
                aweme_pc_rec_raw_data = '%7B%22videoPrefer%22:%7B%22fsn%22:%5B%5D,%22like%22:%5B%5D,%22halfMin%22:%5B%227188684310696742200%22,%224380080926896941%22%5D,%22min%22:%5B%5D%7D,%22seo_info%22:%22https:%2F%2Fwww.douyin.com%2F%22,%22is_client%22:false,%22ff_danmaku_status%22:1,%22danmaku_switch_status%22:1%7D'
                f_url = 'https://www.douyin.com/aweme/v1/web/tab/feed/?device_platform=webapp&aid=6383&channel=channel_pc_web&tag_id=&share_aweme_id=&count=10&refresh_index={page}&video_type_select=1&aweme_pc_rec_raw_data={aweme_pc_rec_raw_data}&globalwid=&pull_type=2&min_window=0&ug_source=&creative_id=&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1440&screen_height=900&browser_language=zh-CN&browser_platform=MacIntel&browser_name=Chrome&browser_version=109.0.0.0&browser_online=true&engine_name=Blink&engine_version=109.0.0.0&os_name=Mac+OS&os_version=10.15.7&cpu_core_num=8&device_memory=8&platform=PC&downlink=10&effective_type=4g&round_trip_time=50&webid=7219223873342260736&msToken=Sh6bVLWZUEZ3ruIHq1L3iUXnr1GT5yklyo-XZRO7lNgsFvpYq0C7tcu5Z4Jv9DrMESZ9kGVhwKT4ftFDkBL11ZGPUxth2ToA4M4q-qs2MK9ctR7GhwFkGw=='.format(
                    page=page, aweme_pc_rec_raw_data=aweme_pc_rec_raw_data)
                headers = {
                    # Hard-coded session cookie captured from a browser; it likely expires and needs refreshing.
                    'cookie': 'ttwid=1%7CI2Xp275XabSiVJ9GAmfLtqbtqturVSIS2yLbXVkAHnQ%7C1680856567%7Cfd36579475157e2303e36e5fd75cdea4ebad78c20da989be0590305f169242ca; douyin.com; strategyABtestKey=%221680856567.817%22; passport_csrf_token=6a28a7b1e1ad38570cc5ee39deaf587a; passport_csrf_token_default=6a28a7b1e1ad38570cc5ee39deaf587a; s_v_web_id=verify_lg6aloex_WG2LmlFk_kbA6_4tMb_BZlD_iuKxRvsMvrg7; bd_ticket_guard_client_data=eyJiZC10aWNrZXQtZ3VhcmQtdmVyc2lvbiI6MiwiYmQtdGlja2V0LWd1YXJkLWl0ZXJhdGlvbi12ZXJzaW9uIjoxLCJiZC10aWNrZXQtZ3VhcmQtY2xpZW50LWNzciI6Ii0tLS0tQkVHSU4gQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbk1JSUJEakNCdFFJQkFEQW5NUXN3Q1FZRFZRUUdFd0pEVGpFWU1CWUdBMVVFQXd3UFltUmZkR2xqYTJWMFgyZDFcclxuWVhKa01Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRWNFZitKV2tDb3l4OHhLekNnY0hReEVaRFxyXG5hN29maHJhUG9rMkQ5b1RaRGRvbDJuTVhaTis5dGJFclV0cVdUcm81ck4zekFyWTFLaXIzRlRUR2ZQUXRmS0FzXHJcbk1Db0dDU3FHU0liM0RRRUpEakVkTUJzd0dRWURWUjBSQkJJd0VJSU9kM2QzTG1SdmRYbHBiaTVqYjIwd0NnWUlcclxuS29aSXpqMEVBd0lEU0FBd1JRSWhBUFZaSGNFdW5HeGtBZFNmQXJ1MmdWb1RHbFhINkhsa1prRzZNc1pyR2hBL1xyXG5BaUJsV2NpM3h5SDk2UnJlTXpPSy8xVmFJQUNuTWUyU0RodUJIY2ZZaE80OWtRPT1cclxuLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tXHJcbiJ9; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; ttcid=585ca9110e2345a09b4499dc543ec39959; odin_tt=d5fbc530c34bf8d4ea8cf8030c85a3f70202ee34dad7fb5b9ef7ff299d20e38ed01c852f02f2f3f864f2a45060480717f72083b58cdbc204c988edbf997fda7c; xgplayer_user_id=64984931555; SEARCH_RESULT_LIST_TYPE=%22single%22; pwa2=%222%7C1%22; download_guide=%223%2F20230407%22; __ac_nonce=0642fff310028297fec16; __ac_signature=_02B4Z6wo00f01M3AeJAAAIDDWRYjnPVAtVzN4HwAAFdlurQp2aR1Npvb7RYeaCY4fZs3DkMFlu7-Obn7zsvc34whBQesvTIc2p8nV-1crQtTacaxSYqP8nwNk3WqH.tkNdKQaUMw6sCC2.xZ15; VIDEO_FILTER_MEMO_SELECT=%7B%22expireTime%22%3A1681472712797%2C%22type%22%3A1%7D; home_can_add_dy_2_desktop=%221%22; tt_scid=PNmY2BL-Q9E9lAfdecenRGOzq64XgFOc0CJGFMN.JIE-QJO51S3Zvw56-Z6O12QVf4d9; msToken=uFCSX87jL9sTq6ScVYJucYgv9Hd5gCbTPvKIGBRMgVLuo7Pp9zLRrutBYzq4BmnCr83WnJAwZb8H78lBr3s3eyJLnySxYO5FgClQRXW1i_mAu7fLfBj3gA==; msToken=Sh6bVLWZUEZ3ruIHq1L3iUXnr1GT5yklyo-XZRO7lNgsFvpYq0C7tcu5Z4Jv9DrMESZ9kGVhwKT4ftFDkBL11ZGPUxth2ToA4M4q-qs2MK9ctR7GhwFkGw==',
                    'referer': 'https://www.douyin.com/',
                    'user-agent': get_random_user_agent('pc')
                }
                try:
                    # Sign the request; the feed endpoint rejects calls without a valid X-Bogus parameter.
                    x_bogus = get_xb(f_url, headers['user-agent'])
                    if not x_bogus:
                        continue
                    url = f_url + '&X-Bogus={}'.format(x_bogus)
                    res = requests.get(url=url, headers=headers, proxies=Common.tunnel_proxies()).json()
                    aweme_list = res.get('aweme_list', [])
                except Exception as e:
                    Common.logger(log_type, crawler).error(f"Failed to fetch Douyin recommendations: {e}")
                    Common.logging(log_type, crawler, env, f"Failed to fetch Douyin recommendations: {e}")
                    continue
                if not aweme_list:
                    Common.logger(log_type, crawler).warning(f"No more recommendation data, page: {page}")
                    Common.logging(log_type, crawler, env, f"No more recommendation data, page: {page}")
                    continue
                for info in aweme_list:
                    try:
                        # Skip ads and items without a publish time.
                        if info.get('is_ads'):
                            continue
                        publish_time = info.get('create_time')
                        if not publish_time:
                            continue
                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time))
                        publish_day = int((int(time.time()) - publish_time) / (3600 * 24))

                        if not info['desc']:
                            video_title = random_title(log_type, crawler, env, text='title')
                        else:
                            video_title = cls.video_title(log_type, crawler, env, info['desc'])

                        video_dict = {'video_title': video_title,
                                      'video_id': info['aweme_id'],
                                      'play_cnt': info['statistics']['play_count'],
                                      'comment_cnt': info['statistics']['comment_count'],
                                      'like_cnt': info['statistics']['digg_count'],
                                      'share_cnt': info['statistics']['share_count'],
                                      'video_width': info['video']['width'],
                                      'video_height': info['video']['height'],
                                      'duration': round(info['video']['duration'] / 1000),  # ms -> s
                                      'publish_time': publish_day,
                                      'publish_time_stamp': publish_time,
                                      'publish_time_str': publish_time_str,
                                      'user_name': info['author']['nickname'],
                                      'user_id': info['author_user_id'],
                                      'user_sec_id': info['author']['sec_uid'],
                                      'avatar_url': info['author']['avatar_thumb']['url_list'][0],
                                      'cover_url': info['video']['origin_cover']['url_list'][0].replace('\u0026', '&'),
                                      'video_url': info['video']['play_addr']['url_list'][0],
                                      'session': f"douyin{int(time.time())}"
                                      }
                        for k, v in video_dict.items():
                            Common.logger(log_type, crawler).info(f"{k}:{v}")
                        Common.logging(log_type, crawler, env, f"{video_dict}")

                        if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict,
                                         rule_dict=rule_dict) is False:
                            Common.logger(log_type, crawler).info("Does not satisfy the crawl rules\n")
                            Common.logging(log_type, crawler, env, "Does not satisfy the crawl rules\n")
                        elif any(str(word) in video_dict["video_title"]
                                 for word in get_config_from_mysql(log_type=log_type,
                                                                   source=crawler,
                                                                   env=env,
                                                                   text="filter",
                                                                   action="")
                                 if str(word)):
                            Common.logger(log_type, crawler).info('Title hit a filter word\n')
                            Common.logging(log_type, crawler, env, 'Title hit a filter word\n')
                        elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
                            Common.logger(log_type, crawler).info('Video already downloaded\n')
                            Common.logging(log_type, crawler, env, 'Video already downloaded\n')
                        else:
                            # Formerly downloaded and published directly:
                            # cls.download_publish(log_type=log_type,
                            #                      crawler=crawler,
                            #                      our_uid=our_uid,
                            #                      video_dict=video_dict,
                            #                      rule_dict=rule_dict,
                            #                      env=env)
                            # Now the video is handed to the ETL pipeline via MQ instead.
                            video_dict["out_user_id"] = video_dict["user_id"]
                            video_dict["platform"] = crawler
                            video_dict["strategy"] = log_type
                            video_dict["out_video_id"] = video_dict["video_id"]
                            video_dict["width"] = video_dict["video_width"]
                            video_dict["height"] = video_dict["video_height"]
                            video_dict["crawler_rule"] = json.dumps(rule_dict)
                            video_dict["user_id"] = our_uid
                            video_dict["publish_time"] = video_dict["publish_time_str"]
                            video_dict["strategy_type"] = log_type
                            mq.send_msg(video_dict)
                    except Exception as e:
                        Common.logger(log_type, crawler).error(f"Exception while processing a single video: {e}\n")
                        Common.logging(log_type, crawler, env, f"Exception while processing a single video: {e}\n")
            except Exception as e:
                Common.logger(log_type, crawler).error(f"Exception while crawling page {page}: {e}\n")
                Common.logging(log_type, crawler, env, f"Exception while crawling page {page}: {e}\n")

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        """Return the number of crawler_video rows already holding this video (0 = not yet crawled)."""
        # sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
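
    # Usage sketch (the id below is one of the sample ids from aweme_pc_rec_raw_data above):
    #   DouyinrecommendScheduling.repeat_video("recommend", "douyin", "7188684310696742200", "prod")
    #   -> number of matching rows; any value != 0 means the video was already downloaded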

    # Download / upload
    @classmethod
    def download_publish(cls, log_type, crawler, video_dict, rule_dict, our_uid, env):
        # Download the video
        Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
        try:
            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                # Remove the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).info("Video size is 0, folder removed\n")
                Common.logging(log_type, crawler, env, "Video size is 0, folder removed\n")
                return
        except FileNotFoundError:
            # Remove the video folder
            shutil.rmtree(f"./{crawler}/videos/{md_title}")
            Common.logger(log_type, crawler).info("Video file missing, folder removed\n")
            Common.logging(log_type, crawler, env, "Video file missing, folder removed\n")
            return
        # Download the cover
        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
        # Save the video info to a txt file
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)

        # Upload the video
        Common.logger(log_type, crawler).info("Start uploading the video...")
        Common.logging(log_type, crawler, env, "Start uploading the video...")
        if env == "dev":
            oss_endpoint = "out"
            admin_host = "https://testadmin.piaoquantv.com"
        else:
            oss_endpoint = "inner"
            admin_host = "https://admin.piaoquantv.com"
        our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                  crawler=crawler,
                                                  strategy="推荐抓取策略",
                                                  our_uid=our_uid,
                                                  env=env,
                                                  oss_endpoint=oss_endpoint)
        our_video_link = f"{admin_host}/cms/post-detail/{our_video_id}/info"
        if our_video_id is None:
            try:
                # Upload failed: remove the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                return
            except FileNotFoundError:
                return
        # Save the video info to the database
        insert_sql = f""" insert into crawler_video(video_id,
                                                    user_id,
                                                    out_user_id,
                                                    platform,
                                                    strategy,
                                                    out_video_id,
                                                    video_title,
                                                    cover_url,
                                                    video_url,
                                                    duration,
                                                    publish_time,
                                                    play_cnt,
                                                    comment_cnt,
                                                    like_cnt,
                                                    share_cnt,
                                                    crawler_rule,
                                                    width,
                                                    height)
                          values({our_video_id},
                                 {our_uid},
                                 "{video_dict['user_id']}",
                                 "{cls.platform}",
                                 "推荐爬虫策略",
                                 "{video_dict['video_id']}",
                                 "{video_dict['video_title']}",
                                 "{video_dict['cover_url']}",
                                 "{video_dict['video_url']}",
                                 {int(video_dict['duration'])},
                                 "{video_dict['publish_time_str']}",
                                 {int(video_dict['play_cnt'])},
                                 {int(video_dict['comment_cnt'])},
                                 {int(video_dict['like_cnt'])},
                                 {int(video_dict['share_cnt'])},
                                 '{json.dumps(rule_dict)}',
                                 {int(video_dict['video_width'])},
                                 {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
        Common.logger(log_type, crawler).info('Video info written to the database')
        Common.logging(log_type, crawler, env, 'Video info written to the database')
        # Write the video info to a Feishu sheet
        Feishu.insert_columns(log_type, crawler, "82c8d9", "ROWS", 1, 2)
        upload_time = int(time.time())
        values = [[
            our_video_id,
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
            "推荐",
            str(video_dict['video_id']),
            video_dict['video_title'],
            our_video_link,
            video_dict['play_cnt'],
            video_dict['comment_cnt'],
            video_dict['like_cnt'],
            video_dict['share_cnt'],
            video_dict['duration'],
            str(video_dict['video_width']) + '*' + str(video_dict['video_height']),
            video_dict['publish_time_str'],
            video_dict['user_name'],
            video_dict['user_id'],
            video_dict['avatar_url'],
            video_dict['cover_url'],
            video_dict['video_url']
        ]]
        time.sleep(0.5)
        Feishu.update_values(log_type, crawler, "82c8d9", "A2:Z2", values)
        Common.logger(log_type, crawler).info("Video saved to the cloud doc\n")
        Common.logging(log_type, crawler, env, "Video saved to the cloud doc\n")


if __name__ == '__main__':
    pass
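    # Hedged, commented-out invocation sketch; the rule_dict shape and our_uid are
    # illustrative assumptions, not values taken from this repo:
    # DouyinrecommendScheduling.get_videoList(log_type="recommend",
    #                                         crawler="douyin",
    #                                         rule_dict={},  # hypothetical; real keys depend on common.public.download_rule
    #                                         our_uid=0,  # hypothetical piaoquan uid placeholder
    #                                         env="dev")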