gongzhonghao_follow.py
# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/3/28
import datetime
import difflib
import json
import os
import shutil
import sys
import time
from hashlib import md5
import requests
import urllib3
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium import webdriver
sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.public import filter_word
from common.publish import Publish
from common.scheduling_db import MysqlHelper


class GongzhonghaoFollow:
    # Pagination offset
    begin = 0
    platform = "公众号"

    # Baseline threshold rules
    @staticmethod
    def download_rule(video_dict):
        """
        Basic rules a video must satisfy before download.
        :param video_dict: video info, dict
        :return: True if the rules pass, otherwise False
        """
        # Duration: 20 seconds to 45 minutes
        if 60 * 45 >= int(float(video_dict['duration'])) >= 20:
            # Width or height must be non-negative
            if int(video_dict['video_width']) >= 0 or int(video_dict['video_height']) >= 0:
                return True
            else:
                return False
        else:
            return False
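
    # Quick sanity check of the rule above (sketch; the dict values are
    # illustrative, not taken from a real response):
    #   GongzhonghaoFollow.download_rule({'duration': 60, 'video_width': 720, 'video_height': 1280})  # True
    #   GongzhonghaoFollow.download_rule({'duration': 10, 'video_width': 720, 'video_height': 1280})  # False, under 20s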

    @classmethod
    def title_like(cls, log_type, crawler, title, env):
        select_sql = f""" select * from crawler_video where platform="公众号" """
        video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
        if len(video_list) == 0:
            return None
        for video_dict in video_list:
            video_title = video_dict["video_title"]
            if difflib.SequenceMatcher(None, title, video_title).quick_ratio() >= 0.8:
                return True
        return False
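
    # For reference: quick_ratio() is difflib's cheap upper bound on the true
    # similarity ratio, 2 * (matching chars) / (total chars). Two 5-character
    # titles sharing 4 characters score exactly 2 * 4 / 10 = 0.8 and would be
    # treated as duplicates here:
    #   difflib.SequenceMatcher(None, "养身小妙招", "养生小妙招").quick_ratio()  # 0.8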

    # Fetch token/cookie
    @classmethod
    def get_token(cls, log_type, crawler):
        while True:
            try:
                sheet = Feishu.get_values_batch(log_type, "gongzhonghao", "OjyJqs")
                if sheet is None:
                    time.sleep(3)
                    continue
                token = sheet[0][1]
                cookie = sheet[1][1]
                token_dict = {'token': token, 'cookie': cookie}
                return token_dict
            except Exception as e:
                Common.logger(log_type, crawler).error(f"get_cookie_token异常:{e}\n")

    # Fetch the user's fakeid
    @classmethod
    def get_fakeid(cls, log_type, crawler, user, index):
        try:
            token_dict = cls.get_token(log_type, crawler)
            url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
            headers = {
                "accept": "*/*",
                "accept-encoding": "gzip, deflate, br",
                "accept-language": "zh-CN,zh;q=0.9",
                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
                           "&type=77&createType=5&token=1011071554&lang=zh_CN",
                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
                "sec-ch-ua-mobile": "?0",
                "sec-ch-ua-platform": '"Windows"',
                "sec-fetch-dest": "empty",
                "sec-fetch-mode": "cors",
                "sec-fetch-site": "same-origin",
                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
                "x-requested-with": "XMLHttpRequest",
                'cookie': token_dict['cookie'],
            }
            params = {
                "action": "search_biz",
                "begin": "0",
                "count": "5",
                "query": str(user),
                "token": token_dict['token'],
                "lang": "zh_CN",
                "f": "json",
                "ajax": "1",
            }
            urllib3.disable_warnings()
            while True:
                # Re-issue the request on each pass; the original sent it once before
                # the loop, so the token/freq-control branches kept re-reading the
                # same stale response after every 10-minute sleep.
                r = requests.get(url=url, headers=headers, params=params, verify=False)
                if r.json()["base_resp"]["err_msg"] == "invalid session" and 21 >= datetime.datetime.now().hour >= 10:
                    Common.logger(log_type, crawler).info(f"status_code:{r.status_code}")
                    Common.logger(log_type, crawler).warning(f"get_gzh_url:{r.text}\n")
                    Feishu.bot(log_type, crawler, "token过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
                    time.sleep(60 * 10)
                elif r.json()["base_resp"]["err_msg"] == "freq control" and 21 >= datetime.datetime.now().hour >= 10:
                    Common.logger(log_type, crawler).info(f"status_code:{r.status_code}")
                    Common.logger(log_type, crawler).warning(f"get_gzh_url:{r.text}\n")
                    Feishu.bot(log_type, crawler, "公众号频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                    time.sleep(60 * 10)
                else:
                    break
            if "list" not in r.json() or len(r.json()["list"]) == 0:
                Common.logger(log_type, crawler).info(f"status_code:{r.status_code}")
                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text},休眠 1 秒\n")
                time.sleep(1)
            else:
                fakeid = r.json()["list"][int(index) - 1]["fakeid"]
                head_url = r.json()["list"][int(index) - 1]["round_head_img"]
                fakeid_dict = {'fakeid': fakeid, 'head_url': head_url}
                return fakeid_dict
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_fakeid异常:{e}\n")

    # Resolve the Tencent Video download URL
    @classmethod
    def get_tencent_video_url(cls, log_type, crawler, video_id):
        try:
            url = 'https://vv.video.qq.com/getinfo?vids=' + str(video_id) + '&platform=101001&charge=0&otype=json'
            response = requests.get(url=url).text.replace('QZOutputJson=', '').replace('"};', '"}')
            response = json.loads(response)
            url = response['vl']['vi'][0]['ul']['ui'][0]['url']
            fvkey = response['vl']['vi'][0]['fvkey']
            video_url = url + str(video_id) + '.mp4?vkey=' + fvkey
            return video_url
        except Exception as e:
            Common.logger(log_type, crawler).error(f"get_tencent_video_url异常:{e}\n")
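
    # Note: the getinfo endpoint answers with JSONP ("QZOutputJson={...};"); the
    # two replace() calls above strip that wrapper so json.loads() can parse it.
    # The assembled link has the shape (vid/vkey values illustrative):
    #   <cdn-url-prefix><vid>.mp4?vkey=<fvkey>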

    @classmethod
    def get_video_url(cls, log_type, crawler, article_url, env):
        try:
            # Request-logging capabilities
            ca = DesiredCapabilities.CHROME
            ca["goog:loggingPrefs"] = {"performance": "ALL"}
            # Run without opening a browser window
            chrome_options = webdriver.ChromeOptions()
            chrome_options.add_argument("headless")
            chrome_options.add_argument(
                'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
            chrome_options.add_argument("--no-sandbox")
            # Initialize the driver
            if env == "prod":
                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
            else:
                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
                    '/Users/wangkun/Downloads/chromedriver/chromedriver_v111/chromedriver'))
            driver.implicitly_wait(10)
            # Common.logger(log_type, crawler).info('Opening the article link')
            driver.get(article_url)
            time.sleep(1)
            if len(driver.find_elements(By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]')) != 0:
                video_url = driver.find_element(
                    By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]').get_attribute('src')
            elif len(driver.find_elements(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]')) != 0:
                iframe = driver.find_element(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]').get_attribute(
                    'src')
                video_id = iframe.split('vid=')[-1].split('&')[0]
                video_url = cls.get_tencent_video_url(log_type, crawler, video_id)
            else:
                video_url = 0
            # Quit the browser so headless Chrome processes don't accumulate
            driver.quit()
            return video_url
        except Exception as e:
            Common.logger(log_type, crawler).error(f'get_video_url异常:{e}\n')
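
    # Sketch of what the two branches above handle: the first XPath appears to
    # match a natively embedded player under div.js_video_poster, whose src is a
    # directly playable URL; the second matches a Tencent Video iframe under
    # span.js_tx_video_container, whose vid= query parameter is resolved via
    # get_tencent_video_url(). video_url = 0 marks "no video found".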

    # Fetch the account's article list
    @classmethod
    def get_videoList(cls, log_type, crawler, user, index, oss_endpoint, env):
        fakeid_dict = cls.get_fakeid(log_type, crawler, user, index)
        token_dict = cls.get_token(log_type, crawler)
        while True:
            try:
                url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
                headers = {
                    "accept": "*/*",
                    "accept-encoding": "gzip, deflate, br",
                    "accept-language": "zh-CN,zh;q=0.9",
                    "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
                               "t=media/appmsg_edit_v2&action=edit&isNew=1"
                               "&type=77&createType=5&token=" + str(token_dict['token']) + "&lang=zh_CN",
                    'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
                    "sec-ch-ua-mobile": "?0",
                    "sec-ch-ua-platform": '"Windows"',
                    "sec-fetch-dest": "empty",
                    "sec-fetch-mode": "cors",
                    "sec-fetch-site": "same-origin",
                    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                                  " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
                    "x-requested-with": "XMLHttpRequest",
                    'cookie': token_dict['cookie'],
                }
                params = {
                    "action": "list_ex",
                    "begin": str(cls.begin),
                    "count": "5",
                    "fakeid": fakeid_dict['fakeid'],
                    "type": "9",
                    "query": "",
                    "token": str(token_dict['token']),
                    "lang": "zh_CN",
                    "f": "json",
                    "ajax": "1",
                }
                urllib3.disable_warnings()
                while True:
                    # Re-issue the request on each pass (same stale-response fix
                    # as in get_fakeid)
                    r = requests.get(url=url, headers=headers, params=params, verify=False)
                    if r.json()["base_resp"]["err_msg"] == "invalid session" and 21 >= datetime.datetime.now().hour >= 10:
                        Common.logger(log_type, crawler).info(f"status_code:{r.status_code}")
                        Common.logger(log_type, crawler).info(f"response:{r.text}")
                        Feishu.bot(log_type, crawler, "token过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
                        time.sleep(60 * 10)
                    elif r.json()["base_resp"]["err_msg"] == "freq control" and 21 >= datetime.datetime.now().hour >= 10:
                        Common.logger(log_type, crawler).info(f"status_code:{r.status_code}")
                        Common.logger(log_type, crawler).warning(f"get_gzh_url:{r.text}\n")
                        Feishu.bot(log_type, crawler, "公众号频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                        time.sleep(60 * 10)
                    else:
                        break
                if 'app_msg_list' not in r.json():
                    Common.logger(log_type, crawler).info(f"status_code:{r.status_code}")
                    Common.logger(log_type, crawler).warning(f"get_gzh_url:{r.text}\n")
                    break
                elif len(r.json()['app_msg_list']) == 0:
                    Common.logger(log_type, crawler).info('没有更多视频了\n')
                    # Stop here; the original fell through and re-requested the
                    # same empty page forever.
                    cls.begin = 0
                    return
                else:
                    cls.begin += 5
                    app_msg_list = r.json()['app_msg_list']
                    for article in app_msg_list:
                        # title
                        if 'title' in article:
                            title = article['title'].replace('/', '').replace('\n', '') \
                                .replace('.', '').replace('“', '').replace('”', '').replace(' ', '')
                        else:
                            title = 0
                        # aid
                        if 'aid' in article:
                            aid = article['aid']
                        else:
                            aid = 0
                        # create_time
                        if 'create_time' in article:
                            create_time = article['create_time']
                        else:
                            create_time = 0
                        publish_time_stamp = int(create_time)
                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                        avatar_url = fakeid_dict['head_url']
                        # cover_url
                        if 'cover' in article:
                            cover_url = article['cover']
                        else:
                            cover_url = 0
                        # article_url
                        if 'link' in article:
                            article_url = article['link']
                        else:
                            article_url = 0
                        video_url = cls.get_video_url(log_type, crawler, article_url, env)
                        video_dict = {
                            'video_id': aid,
                            'video_title': title,
                            'publish_time_stamp': publish_time_stamp,
                            'publish_time_str': publish_time_str,
                            'user_name': user,
                            'play_cnt': 0,
                            'comment_cnt': 0,
                            'like_cnt': 0,
                            'share_cnt': 0,
                            'user_id': fakeid_dict['fakeid'],
                            'avatar_url': avatar_url,
                            'cover_url': cover_url,
                            'article_url': article_url,
                            'video_url': video_url,
                            'session': f'gongzhonghao-follow-{int(time.time())}'
                        }
                        for k, v in video_dict.items():
                            Common.logger(log_type, crawler).info(f"{k}:{v}")
                        if int(time.time()) - publish_time_stamp >= 3600 * 24 * 3:
                            Common.logger(log_type, crawler).info(f'发布时间{publish_time_str} > 3 天\n')
                            cls.begin = 0
                            return
                        cls.download_publish(log_type, crawler, video_dict, oss_endpoint, env)
                        Common.logger(log_type, crawler).info('休眠 5 秒\n')
                        time.sleep(5)
            except Exception as e:
                Common.logger(log_type, crawler).error(f"get_videoList异常:{e}\n")
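
    # Pagination note: cls.begin is shared class state. It advances 5 at a time
    # and is reset to 0 when a page is exhausted, when an article older than
    # 3 days is hit, or by get_all_videos() before moving to the next account.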

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        sql = f""" select * from crawler_video where platform="公众号" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    # Download / upload
    @classmethod
    def download_publish(cls, log_type, crawler, video_dict, oss_endpoint, env):
        try:
            if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
                Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
            # Filter sensitive words in the title
            elif any(word in video_dict['video_title']
                     for word in filter_word(log_type, crawler, "公众号", env)):
                Common.logger(log_type, crawler).info("标题已中过滤词\n")
            # Skip if already downloaded
            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
                Common.logger(log_type, crawler).info("视频已下载\n")
            # Title similarity
            elif cls.title_like(log_type, crawler, video_dict['video_title'], env) is True:
                Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
            else:
                # Download the video
                Common.download_method(log_type=log_type, crawler=crawler, text="video",
                                       title=video_dict["video_title"], url=video_dict["video_url"])
                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
                # Probe duration/resolution/size with ffmpeg
                ffmpeg_dict = Common.ffmpeg(log_type, crawler,
                                            f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
                if ffmpeg_dict is None:
                    # Delete the video folder
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                    return
                video_dict["video_width"] = ffmpeg_dict["width"]
                video_dict["video_height"] = ffmpeg_dict["height"]
                video_dict["duration"] = ffmpeg_dict["duration"]
                video_size = ffmpeg_dict["size"]
                Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
                Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
                Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
                Common.logger(log_type, crawler).info(f'video_size:{video_size}')
                # Zero-size video, or the download rule fails: delete immediately
                if int(video_size) == 0 or cls.download_rule(video_dict) is False:
                    # Delete the video folder
                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                    return
                # Download the cover image
                Common.download_method(log_type=log_type, crawler=crawler, text="cover",
                                       title=video_dict["video_title"], url=video_dict["cover_url"])
                # Save video info to "./videos/{video_title}/info.txt"
                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
                # Upload the video
                Common.logger(log_type, crawler).info("开始上传视频...")
                strategy = "定向爬虫策略"
                our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                          crawler=crawler,
                                                          strategy=strategy,
                                                          our_uid="follow",
                                                          oss_endpoint=oss_endpoint,
                                                          env=env)
                if env == 'prod':
                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
                else:
                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
                Common.logger(log_type, crawler).info("视频上传完成")
                if our_video_id is None:
                    # Upload failed: delete the video folder
                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
                    return
                # Persist video info to the database
                rule_dict = {
                    "duration": {"min": 20, "max": 45 * 60},
                    "publish_day": {"min": 3}
                }
                insert_sql = f""" insert into crawler_video(video_id,
                                                            out_user_id,
                                                            platform,
                                                            strategy,
                                                            out_video_id,
                                                            video_title,
                                                            cover_url,
                                                            video_url,
                                                            duration,
                                                            publish_time,
                                                            play_cnt,
                                                            crawler_rule,
                                                            width,
                                                            height)
                                                            values({our_video_id},
                                                            "{video_dict['user_id']}",
                                                            "{cls.platform}",
                                                            "定向爬虫策略",
                                                            "{video_dict['video_id']}",
                                                            "{video_dict['video_title']}",
                                                            "{video_dict['cover_url']}",
                                                            "{video_dict['video_url']}",
                                                            {int(video_dict['duration'])},
                                                            "{video_dict['publish_time_str']}",
                                                            {int(video_dict['play_cnt'])},
                                                            '{json.dumps(rule_dict)}',
                                                            {int(video_dict['video_width'])},
                                                            {int(video_dict['video_height'])}) """
                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
                MysqlHelper.update_values(log_type, crawler, insert_sql, env)
                Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
                # Write the video to the Feishu sheet
                Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
                # Video-ID sheet: write the row at the top
                upload_time = int(time.time())
                values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                           "用户主页",
                           video_dict['video_title'],
                           video_dict['video_id'],
                           our_video_link,
                           int(video_dict['duration']),
                           f"{video_dict['video_width']}*{video_dict['video_height']}",
                           video_dict['publish_time_str'],
                           video_dict['user_name'],
                           video_dict['user_id'],
                           video_dict['avatar_url'],
                           video_dict['cover_url'],
                           video_dict['article_url'],
                           video_dict['video_url']]]
                time.sleep(0.5)
                Feishu.update_values(log_type, crawler, "47e39d", "F2:Z2", values)
                Common.logger(log_type, crawler).info('视频下载/上传成功\n')
        except Exception as e:
            Common.logger(log_type, crawler).error(f"download_publish异常:{e}\n")

    @classmethod
    def get_users(cls):
        # user_sheet = Feishu.get_values_batch("follow", 'gongzhonghao', 'Bzv72P')
        # user_list = []
        # for i in range(1, len(user_sheet)):
        #     user_name = user_sheet[i][0]
        #     index = user_sheet[i][1]
        #     user_dict = {
        #         "user_name": user_name,
        #         "index": index,
        #     }
        #     user_list.append(user_dict)
        # print(len(user_list))
        # print(user_list)
        user_list = [{'user_name': '香音难忘', 'index': 1}, {'user_name': '墨儿心灵驿站', 'index': 1},
                     {'user_name': '荒烟茶生', 'index': 1}, {'user_name': '幸福花朵', 'index': 1},
                     {'user_name': '我的节日祝福', 'index': 1}, {'user_name': '生活创意妙招', 'index': 1},
                     {'user_name': '二大妈有话说', 'index': 1}, {'user_name': '医路健康美食', 'index': 1},
                     {'user_name': '老年相知相伴', 'index': 1}, {'user_name': '一争', 'index': 1},
                     {'user_name': '老年企退群', 'index': 1}, {'user_name': '消逝的哨声', 'index': 1},
                     {'user_name': '一颗打破石头的蛋', 'index': 1}, {'user_name': '叩问苍穹荒烟茶生', 'index': 1},
                     {'user_name': '布衣星火', 'index': 1}, {'user_name': '叩问苍穹', 'index': 1},
                     {'user_name': '微观调查', 'index': 2}, {'user_name': '传统节日祝福', 'index': 1},
                     {'user_name': '因和德尚', 'index': 1}, {'user_name': '飨宴心灵', 'index': 1},
                     {'user_name': '朝闻解局', 'index': 1}, {'user_name': '远见光芒', 'index': 1},
                     {'user_name': '墨儿微刊', 'index': 1}, {'user_name': '博爱论', 'index': 1},
                     {'user_name': '张大春讲堂', 'index': 1}, {'user_name': ' 司马南频道', 'index': 1},
                     {'user_name': '音乐小镇', 'index': 1}, {'user_name': '节日祝福365', 'index': 1},
                     {'user_name': '动画音乐相册', 'index': 1}, {'user_name': '音乐动漫相册', 'index': 1},
                     {'user_name': '早点谈健康', 'index': 1}, {'user_name': '早点谈养生', 'index': 1},
                     {'user_name': '早点谈养身', 'index': 1}, {'user_name': '医道谈养身', 'index': 1},
                     {'user_name': '中老年谈养身', 'index': 1}, {'user_name': '尼古拉斯瞭望', 'index': 1},
                     {'user_name': '奇易时光百姓的福音', 'index': 1}, {'user_name': '寰宇时光', 'index': 1},
                     {'user_name': '红兴文化公苑', 'index': 1}, {'user_name': '早点音乐', 'index': 1},
                     {'user_name': '小分子生物活性肽', 'index': 1}, {'user_name': '张小妹美食', 'index': 1},
                     {'user_name': '万物归息', 'index': 1}, {'user_name': '神州红魂', 'index': 1},
                     {'user_name': '音乐早餐', 'index': 1}, {'user_name': '1条末读消息', 'index': 1},
                     {'user_name': '环球文摘', 'index': 1}, {'user_name': '精彩有余', 'index': 1},
                     {'user_name': '一起训练吧', 'index': 1}, {'user_name': '1条重要消息', 'index': 1},
                     {'user_name': '太上养身', 'index': 1}, {'user_name': '懂点养身秘诀', 'index': 1},
                     {'user_name': '送乐者', 'index': 1}, {'user_name': '蜂业小百科', 'index': 1},
                     {'user_name': '健康与养身秘诀', 'index': 1}, {'user_name': '有心人r', 'index': 1},
                     {'user_name': '古诗词世界', 'index': 1}, {'user_name': '晨间悦读', 'index': 1},
                     {'user_name': '养身有诀窍', 'index': 1}, {'user_name': '退休族信息圈', 'index': 1},
                     {'user_name': '艾公铁粉团', 'index': 1}, {'user_name': '酸甜苦辣麻咸', 'index': 1},
                     {'user_name': '日常生活小帮手', 'index': 1}, {'user_name': '小帅的精彩视频', 'index': 1},
                     {'user_name': '养身常识小窍门', 'index': 1}, {'user_name': '医学养身技巧', 'index': 1},
                     {'user_name': '退休圈', 'index': 1}, {'user_name': '生活小助手', 'index': 1},
                     {'user_name': '经典老歌曲好听的音乐', 'index': 1}, {'user_name': '黑马快讯', 'index': 1},
                     {'user_name': '绝妙经典', 'index': 1}, {'user_name': '深读时策', 'index': 1},
                     {'user_name': '健康与生活大全', 'index': 1}, {'user_name': '李肃论道', 'index': 1},
                     {'user_name': '爱国者吹锋号', 'index': 1}, {'user_name': '兵心可鉴', 'index': 1},
                     {'user_name': '精选动心金曲', 'index': 1}, {'user_name': '爱二胡群', 'index': 1},
                     {'user_name': '数码科技大爆炸', 'index': 1}, {'user_name': '何静同学', 'index': 1},
                     {'user_name': '方敏爱美食', 'index': 1}, {'user_name': '针灸推拿特色技术', 'index': 1},
                     {'user_name': '挺进天山', 'index': 1}, {'user_name': '紫陌捻花', 'index': 1},
                     {'user_name': '巨响养身', 'index': 1}, {'user_name': '荣观世界', 'index': 1},
                     {'user_name': 'Music音乐世界', 'index': 1}, {'user_name': '微观调查组', 'index': 1},
                     {'user_name': '用汉方拥抱世界', 'index': 1}, {'user_name': '医学养身秘诀', 'index': 1},
                     {'user_name': '医学老人养身', 'index': 1}, {'user_name': '热文微观', 'index': 1},
                     {'user_name': '医学养身秘笈', 'index': 1}, {'user_name': '你未读消息', 'index': 2},
                     {'user_name': '6点谈健康', 'index': 1}]
        return user_list

    @classmethod
    def get_all_videos(cls, log_type, crawler, oss_endpoint, env):
        try:
            user_list = cls.get_users()
            for user_dict in user_list:
                user_name = user_dict['user_name']
                index = user_dict['index']
                Common.logger(log_type, crawler).info(f'获取 {user_name} 公众号视频\n')
                cls.get_videoList(log_type, crawler, user_name, index, oss_endpoint, env)
                cls.begin = 0
                Common.logger(log_type, crawler).info('休眠60秒\n')
                time.sleep(60)
        except Exception as e:
            Common.logger(log_type, crawler).error(f'get_all_videos异常:{e}\n')


if __name__ == "__main__":
    GongzhonghaoFollow.get_users()
    # GongzhonghaoFollow.get_videoList(log_type="follow",
    #                                  crawler="gongzhonghao",
    #                                  user="香音难忘",
    #                                  index=1,
    #                                  oss_endpoint="out",
    #                                  env="dev")
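    # Full crawl entry point (sketch; argument values are illustrative):
    # GongzhonghaoFollow.get_all_videos(log_type="follow",
    #                                   crawler="gongzhonghao",
    #                                   oss_endpoint="out",
    #                                   env="dev")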
    pass