# gongzhonghao1_author.py
# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/3/28
import datetime
import json
import os
import shutil
import sys
import time
from hashlib import md5

import requests
import urllib3
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium import webdriver

sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.publish import Publish
from common.getuser import getUser
from common.scheduling_db import MysqlHelper
from common.public import get_config_from_mysql, download_rule, title_like


class GongzhonghaoAuthor1:
    platform = "公众号"

    # Fetch the crawler token + cookie from crawler_config; retry every 60s
    # until a row is configured, since callers do not handle a missing token.
    @classmethod
    def get_token(cls, log_type, crawler, env):
        while True:
            select_sql = f""" select * from crawler_config where source="{crawler}" and title LIKE "%公众号_1%";"""
            configs = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
            if len(configs) == 0:
                Feishu.bot(log_type, crawler, "公众号_1:未配置token")
                time.sleep(60)
                continue
            token_dict = {
                "token_id": configs[0]["id"],
                "title": configs[0]["title"].strip(),
                "token": dict(eval(configs[0]["config"]))["token"].strip(),
                "cookie": dict(eval(configs[0]["config"]))["cookie"].strip(),
                "update_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(configs[0]["update_time"] / 1000))),
                "operator": configs[0]["operator"].strip()
            }
            # for k, v in token_dict.items():
            #     print(f"{k}:{type(v)}, {v}")
            return token_dict
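
    # Note on get_token: the "config" column is expected to hold a Python dict
    # literal such as '{"token": "1011071554", "cookie": "..."}' (values here
    # are illustrative, not real credentials); ast.literal_eval would be a
    # safer parser than eval for that shape.

    # Create or refresh one user: read a row from the Feishu user sheet,
    # resolve the official account via get_user_info, and create a site-side
    # UID (our_uid) through getUser.create_uid when either ID is missing.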
    @classmethod
    def create_user(cls, log_type, crawler, user_sheet, i, env):
        user_name = user_sheet[i][0]
        wechat_name = user_sheet[i][2]
        if wechat_name is None:
            wechat_name = user_name
        Common.logger(log_type, crawler).info(f"before_wechat_name:{type(wechat_name)}, {wechat_name}")
        our_uid = user_sheet[i][5]
        our_user_link = user_sheet[i][6]
        user_info_dict = cls.get_user_info(log_type=log_type, crawler=crawler, wechat_name=wechat_name, env=env)
        out_uid = user_info_dict["user_id"]
        avatar_url = user_info_dict["avatar_url"]
        tag1 = user_sheet[i][7]
        tag2 = user_sheet[i][8]
        tag3 = user_sheet[i][9]
        tag4 = user_sheet[i][10]
        tag5 = user_sheet[i][11]
        tag6 = user_sheet[i][12]
        Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息")
        if out_uid is None or our_uid is None:
            # Payload used to create the site-side UID (our_uid)
            user_dict = {
                'recommendStatus': -6,
                'appRecommendStatus': -6,
                'nickName': user_info_dict["user_name"],
                'avatarUrl': user_info_dict['avatar_url'],
                'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5},{tag6}',
            }
            our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
            Common.logger(log_type, crawler).info(f'新创建的站内UID:{our_uid}')
            if env == 'prod':
                our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
            else:
                our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
            # Write the resolved IDs back to columns D..G of the user sheet
            Feishu.update_values(log_type, crawler, "Bzv72P", f'D{i + 1}:G{i + 1}',
                                 [[user_info_dict["user_id"], user_info_dict["avatar_url"], our_uid, our_user_link]])
            Common.logger(log_type, crawler).info('用户信息创建成功!\n')
        else:
            Common.logger(log_type, crawler).info("用户信息已存在\n")
        our_user_dict = {
            'user_name': user_name,
            'user_id': out_uid,
            'wechat_name': wechat_name,
            'our_uid': our_uid,
            'our_user_link': our_user_link,
            'avatar_url': avatar_url,
        }
        return our_user_dict
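
    # The returned our_user_dict joins the sheet row (user_name, wechat_name,
    # our_uid, our_user_link) with the resolved account info (user_id, i.e.
    # the fakeid, and avatar_url); get_videoList and download_publish consume it.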
    # Resolve the account's fakeid (used as user_id) from its wechat_name
    @classmethod
    def get_user_info(cls, log_type, crawler, wechat_name, env):
        Common.logger(log_type, crawler).info(f"wechat_name:{wechat_name}")
        while True:
            token_dict = cls.get_token(log_type, crawler, env)
            url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
            headers = {
                "accept": "*/*",
                "accept-encoding": "gzip, deflate, br",
                "accept-language": "zh-CN,zh;q=0.9",
                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
                           "&type=77&createType=5&token=1011071554&lang=zh_CN",
                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
                "sec-ch-ua-mobile": "?0",
                "sec-ch-ua-platform": '"Windows"',
                "sec-fetch-dest": "empty",
                "sec-fetch-mode": "cors",
                "sec-fetch-site": "same-origin",
                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
                "x-requested-with": "XMLHttpRequest",
                'cookie': token_dict['cookie'],
            }
            params = {
                "action": "search_biz",
                "begin": "0",
                "count": "5",
                "query": str(wechat_name),
                "token": token_dict['token'],
                "lang": "zh_CN",
                "f": "json",
                "ajax": "1",
            }
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=params, verify=False)
            r.close()
            if r.json()["base_resp"]["err_msg"] == "invalid session":
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
                # Only page the operator during working hours (10:00-20:00)
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 10)
                continue
            if r.json()["base_resp"]["err_msg"] == "freq control":
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 10)
                continue
            if "list" not in r.json() or len(r.json()["list"]) == 0:
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                Common.logger(log_type, crawler).warning(f"get_fakeid:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 10)
                continue
            user_info_dict = {'user_name': r.json()["list"][0]["nickname"],
                              'user_id': r.json()["list"][0]["fakeid"],
                              'avatar_url': r.json()["list"][0]["round_head_img"]}
            return user_info_dict
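
    # Illustrative return value of get_user_info (fields come from the
    # searchbiz response; values here are made up):
    #   {'user_name': '幸福花朵', 'user_id': 'MzA4...', 'avatar_url': 'http://mmbiz.qpic.cn/...'}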
    # Build a Tencent Video download URL from a vid
    @classmethod
    def get_tencent_video_url(cls, video_id):
        url = 'https://vv.video.qq.com/getinfo?vids=' + str(video_id) + '&platform=101001&charge=0&otype=json'
        response = requests.get(url=url).text.replace('QZOutputJson=', '').replace('"};', '"}')
        response = json.loads(response)
        url = response['vl']['vi'][0]['ul']['ui'][0]['url']
        fvkey = response['vl']['vi'][0]['fvkey']
        video_url = url + str(video_id) + '.mp4?vkey=' + fvkey
        return video_url
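
    # The getinfo endpoint answers with JSONP ("QZOutputJson={...};"); the two
    # replace() calls above strip that wrapper so json.loads can parse the
    # body, and the final URL is assembled as {cdn_url}{vid}.mp4?vkey={fvkey}.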
    @classmethod
    def get_video_url(cls, article_url, env):
        # Request configuration: capture performance logs
        ca = DesiredCapabilities.CHROME
        ca["goog:loggingPrefs"] = {"performance": "ALL"}
        # Run Chrome headless (no visible browser window)
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument("headless")
        chrome_options.add_argument(
            'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
        chrome_options.add_argument("--no-sandbox")
        # Initialize the driver
        if env == "prod":
            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
        else:
            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
                '/Users/wangkun/Downloads/chromedriver/chromedriver_v113/chromedriver'))
        driver.implicitly_wait(10)
        driver.get(article_url)
        time.sleep(1)
        if len(driver.find_elements(By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]')) != 0:
            video_url = driver.find_element(
                By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]').get_attribute('src')
        elif len(driver.find_elements(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]')) != 0:
            iframe = driver.find_element(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]').get_attribute('src')
            video_id = iframe.split('vid=')[-1].split('&')[0]
            video_url = cls.get_tencent_video_url(video_id)
        else:
            video_url = 0
        driver.quit()
        return video_url
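
    # get_video_url covers two embed types: a native video node under
    # div.js_video_poster (src read directly) and a Tencent Video iframe under
    # span.js_tx_video_container (vid parsed from the iframe src, then resolved
    # via get_tencent_video_url). It returns 0 when neither is present.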
    # Fetch the account's article list, page by page
    @classmethod
    def get_videoList(cls, log_type, crawler, rule_dict, user_dict, env):
        begin = 0
        while True:
            token_dict = cls.get_token(log_type, crawler, env)
            url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
            headers = {
                "accept": "*/*",
                "accept-encoding": "gzip, deflate, br",
                "accept-language": "zh-CN,zh;q=0.9",
                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
                           "&type=77&createType=5&token=" + str(token_dict['token']) + "&lang=zh_CN",
                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
                "sec-ch-ua-mobile": "?0",
                "sec-ch-ua-platform": '"Windows"',
                "sec-fetch-dest": "empty",
                "sec-fetch-mode": "cors",
                "sec-fetch-site": "same-origin",
                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
                "x-requested-with": "XMLHttpRequest",
                'cookie': token_dict['cookie'],
            }
            params = {
                "action": "list_ex",
                "begin": str(begin),
                "count": "5",
                "fakeid": user_dict['user_id'],
                "type": "9",
                "query": "",
                "token": str(token_dict['token']),
                "lang": "zh_CN",
                "f": "json",
                "ajax": "1",
            }
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=params, verify=False)
            r.close()
            if r.json()["base_resp"]["err_msg"] == "invalid session":
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 10)
                continue
            if r.json()["base_resp"]["err_msg"] == "freq control":
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 10)
                continue
            if 'app_msg_list' not in r.json():
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}")
                Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 10)
                continue
            if len(r.json()['app_msg_list']) == 0:
                Common.logger(log_type, crawler).info('没有更多视频了\n')
                return
            else:
                begin += 5
                app_msg_list = r.json()['app_msg_list']
                for article in app_msg_list:
                    # try:
                    create_time = article.get('create_time', 0)
                    publish_time_stamp = int(create_time)
                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                    article_url = article.get('link', '')
                    video_dict = {
                        'video_id': article.get('aid', ''),
                        'video_title': article.get("title", "").replace(' ', '').replace('"', '').replace("'", ""),
                        'publish_time_stamp': publish_time_stamp,
                        'publish_time_str': publish_time_str,
                        'user_name': user_dict["user_name"],
                        'play_cnt': 0,
                        'comment_cnt': 0,
                        'like_cnt': 0,
                        'share_cnt': 0,
                        'user_id': user_dict['user_id'],
                        'avatar_url': user_dict['avatar_url'],
                        'cover_url': article.get('cover', ''),
                        'article_url': article.get('link', ''),
                        'video_url': cls.get_video_url(article_url, env),
                        'session': f'gongzhonghao-author1-{int(time.time())}'
                    }
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('max', 1000)):
                        Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
                        return
                    if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
                        Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
                    # Title filter-word check
                    elif any(str(word) in video_dict['video_title']
                             for word in get_config_from_mysql(log_type=log_type,
                                                               source=crawler,
                                                               env=env,
                                                               text="filter",
                                                               action="")):
                        Common.logger(log_type, crawler).info("标题已中过滤词\n")
                    # Already-downloaded check
                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
                        Common.logger(log_type, crawler).info("视频已下载\n")
                    # Title similarity check
                    elif title_like(log_type, crawler, video_dict['video_title'], cls.platform, env) is True:
                        Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
                    else:
                        cls.download_publish(log_type=log_type,
                                             crawler=crawler,
                                             video_dict=video_dict,
                                             rule_dict=rule_dict,
                                             user_dict=user_dict,
                                             env=env)
                    # except Exception as e:
                    #     Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                Common.logger(log_type, crawler).info('休眠 60 秒\n')
                time.sleep(60)
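
    # Dedup check: count crawler_video rows that already carry this platform +
    # out_video_id; a non-zero count means the video was downloaded before.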
    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        sql = f""" select * from crawler_video where platform="公众号" and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
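
    # Pipeline below: download video -> validate the file -> probe with
    # ffmpeg -> apply download_rule -> download cover -> save info.txt ->
    # upload via Publish.upload_and_publish -> record in MySQL and Feishu.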
    # Download / upload
    @classmethod
    def download_publish(cls, log_type, crawler, video_dict, rule_dict, user_dict, env):
        # Download the video
        Common.download_method(log_type=log_type, crawler=crawler, text="video",
                               title=video_dict["video_title"], url=video_dict["video_url"])
        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
        try:
            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                # Remove the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                return
        except FileNotFoundError:
            # Remove the video folder
            shutil.rmtree(f"./{crawler}/videos/{md_title}")
            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
            return
        # Probe video dimensions / duration (the folder is keyed by md_title,
        # the md5 of the title, matching the size check above)
        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{md_title}/video.mp4")
        video_dict["video_width"] = ffmpeg_dict["width"]
        video_dict["video_height"] = ffmpeg_dict["height"]
        video_dict["duration"] = ffmpeg_dict["duration"]
        Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
        Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
        Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
        if download_rule(log_type, crawler, video_dict, rule_dict) is False:
            shutil.rmtree(f"./{crawler}/videos/{md_title}")
            Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
            return
        # Download the cover image
        Common.download_method(log_type=log_type, crawler=crawler, text="cover",
                               title=video_dict["video_title"], url=video_dict["cover_url"])
        # Save video info to "./videos/{video_title}/info.txt"
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
        # Upload the video
        Common.logger(log_type, crawler).info("开始上传视频...")
        strategy = "定向榜爬虫策略"
        if env == 'prod':
            oss_endpoint = "inner"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy=strategy,
                                                      our_uid=user_dict["our_uid"],
                                                      oss_endpoint=oss_endpoint,
                                                      env=env)
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
        else:
            oss_endpoint = "out"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy=strategy,
                                                      our_uid=user_dict["our_uid"],
                                                      oss_endpoint=oss_endpoint,
                                                      env=env)
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
        if our_video_id is None:
            try:
                # Upload failed: remove the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                return
            except FileNotFoundError:
                return
        insert_sql = f""" insert into crawler_video(video_id,
                                                    out_user_id,
                                                    platform,
                                                    strategy,
                                                    out_video_id,
                                                    video_title,
                                                    cover_url,
                                                    video_url,
                                                    duration,
                                                    publish_time,
                                                    play_cnt,
                                                    crawler_rule,
                                                    width,
                                                    height)
                          values({our_video_id},
                                 "{video_dict['user_id']}",
                                 "{cls.platform}",
                                 "定向爬虫策略",
                                 "{video_dict['video_id']}",
                                 "{video_dict['video_title']}",
                                 "{video_dict['cover_url']}",
                                 "{video_dict['video_url']}",
                                 {int(video_dict['duration'])},
                                 "{video_dict['publish_time_str']}",
                                 {int(video_dict['play_cnt'])},
                                 '{json.dumps(rule_dict)}',
                                 {int(video_dict['video_width'])},
                                 {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
        # Write the video to the Feishu sheet
        Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
        # Video-ID worksheet: write the data into the first row
        upload_time = int(time.time())
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "用户主页",
                   video_dict['video_title'],
                   video_dict['video_id'],
                   our_video_link,
                   int(video_dict['duration']),
                   f"{video_dict['video_width']}*{video_dict['video_height']}",
                   video_dict['publish_time_str'],
                   video_dict['user_name'],
                   video_dict['user_id'],
                   video_dict['avatar_url'],
                   video_dict['cover_url'],
                   video_dict['article_url'],
                   video_dict['video_url']]]
        time.sleep(0.5)
        Feishu.update_values(log_type, crawler, "47e39d", "F2:Z2", values)
        Common.logger(log_type, crawler).info('视频下载/上传成功\n')
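
    # Entry point for one crawl round: read the user sheet "Bzv72P", iterate
    # rows starting at index 6 (the sheet is capped at 101 rows per pass),
    # refresh each user via create_user, then crawl the account's videos.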
    @classmethod
    def get_all_videos(cls, log_type, crawler, rule_dict, env):
        while True:
            user_sheet = Feishu.get_values_batch(log_type, crawler, "Bzv72P")
            if user_sheet is None:
                Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 2秒后重试")
                time.sleep(2)
                continue
            len_sheet = len(user_sheet)
            if len_sheet >= 101:
                len_sheet = 101
            Common.logger(log_type, crawler).info(f"len_sheet:{len_sheet}")
            for i in range(6, len_sheet):
                user_dict = cls.create_user(log_type=log_type, crawler=crawler, user_sheet=user_sheet, i=i, env=env)
                # try:
                Common.logger(log_type, crawler).info(f'获取 {user_dict["user_name"]} 公众号视频\n')
                cls.get_videoList(log_type=log_type,
                                  crawler=crawler,
                                  rule_dict=rule_dict,
                                  user_dict=user_dict,
                                  env=env)
                Common.logger(log_type, crawler).info('休眠 60 秒\n')
                time.sleep(60)
                # except Exception as e:
                #     Common.logger(log_type, crawler).info(f'抓取{user_dict["user_name"]}公众号时异常:{e}\n')
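
# Illustrative scheduler invocation (rule_dict keys follow
# common.public.download_rule; only the 'period' key is read in this file):
#   GongzhonghaoAuthor1.get_all_videos("author", "gongzhonghao",
#                                      {"period": {"max": 3}}, "prod")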


if __name__ == "__main__":
    # GongzhonghaoAuthor1.get_token("author", "gongzhonghao", "prod")
    # print(GongzhonghaoAuthor1.get_users("author", "gongzhonghao", "Bzv72P", "dev"))
    # print(get_config_from_mysql("author", "gongzhonghao", "dev", "filter", action=""))
    # print(title_like("author", "gongzhonghao", "公众号", "123", "dev"))
    print(GongzhonghaoAuthor1.get_user_info("author", "gongzhonghao", "幸福花朵", "dev"))
    pass