# gongzhonghao1_author.py
# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/3/28
import datetime
import json
import os
import random
import shutil
import sys
import time
from hashlib import md5
import requests
import urllib3
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium import webdriver
from common.mq import MQ
sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.publish import Publish
# from common.getuser import getUser
from common.scheduling_db import MysqlHelper
from common.public import get_config_from_mysql, download_rule, title_like


class GongzhonghaoAuthor1:
    platform = "公众号"

    # Get token
    @classmethod
    def get_token(cls, log_type, crawler, env):
        select_sql = f""" select * from crawler_config where source="{crawler}" and title LIKE "%公众号_1%";"""
        configs = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
        if len(configs) == 0:
            Feishu.bot(log_type, crawler, "公众号_1:未配置token")
            time.sleep(60)
            return None
        token_dict = {
            "token_id": configs[0]["id"],
            "title": configs[0]["title"].strip(),
            "token": dict(eval(configs[0]["config"]))["token"].strip(),
            "cookie": dict(eval(configs[0]["config"]))["cookie"].strip(),
            "update_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(configs[0]["update_time"] / 1000))),
            "operator": configs[0]["operator"].strip()
        }
        # for k, v in token_dict.items():
        #     print(f"{k}:{type(v)}, {v}")
        return token_dict
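
    # Illustrative sketch of the `crawler_config.config` value read above: the column is
    # eval()'d into a dict and only the "token" and "cookie" keys are used. The values
    # below are hypothetical placeholders, not real credentials; the production format
    # may contain additional fields:
    #   '{"token": "123456789", "cookie": "ua_id=xxx; slave_sid=xxx; ..."}'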

    # Read one user row from the Feishu user sheet; look up fakeid / avatar if missing
    @classmethod
    def get_users(cls, log_type, crawler, user_sheet, sheetid, i, env):
        user_name = user_sheet[i][0]
        wechat_name = user_sheet[i][2]
        if wechat_name is None or wechat_name.strip() == "" or wechat_name.replace(" ", "") == "":
            wechat_name = user_name
        out_uid = user_sheet[i][3]
        avatar_url = user_sheet[i][4]
        if out_uid is None or out_uid.strip() == "" or out_uid.replace(" ", "") == "":
            user_info_dict = cls.get_user_info(log_type=log_type, crawler=crawler, wechat_name=wechat_name, env=env)
            out_uid = user_info_dict["user_id"]
            avatar_url = user_info_dict["avatar_url"]
            Feishu.update_values(log_type, crawler, sheetid, f'D{i + 1}:E{i + 1}', [[out_uid, avatar_url]])
        our_user_dict = {
            'user_name': user_name,
            'user_id': out_uid,
            'wechat_name': wechat_name,
            'avatar_url': avatar_url,
        }
        for k, v in our_user_dict.items():
            Common.logger(log_type, crawler).info(f"{k}:{v}")
        Common.logging(log_type, crawler, env, f'our_user_dict:{our_user_dict}')
        return our_user_dict

    # Get the user's fakeid
    @classmethod
    def get_user_info(cls, log_type, crawler, wechat_name, env):
        Common.logger(log_type, crawler).info(f"获取站外用户信息:{wechat_name}")
        Common.logging(log_type, crawler, env, f"获取站外用户信息:{wechat_name}")
        while True:
            token_dict = cls.get_token(log_type, crawler, env)
            url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
            headers = {
                "accept": "*/*",
                "accept-encoding": "gzip, deflate, br",
                "accept-language": "zh-CN,zh;q=0.9",
                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
                           "&type=77&createType=5&token=1011071554&lang=zh_CN",
                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
                "sec-ch-ua-mobile": "?0",
                "sec-ch-ua-platform": '"Windows"',
                "sec-fetch-dest": "empty",
                "sec-fetch-mode": "cors",
                "sec-fetch-site": "same-origin",
                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
                "x-requested-with": "XMLHttpRequest",
                'cookie': token_dict['cookie'],
            }
            params = {
                "action": "search_biz",
                "begin": "0",
                "count": "5",
                "query": str(wechat_name),
                "token": token_dict['token'],
                "lang": "zh_CN",
                "f": "json",
                "ajax": "1",
            }
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=params, verify=False)
            r.close()
            if r.json()["base_resp"]["err_msg"] == "invalid session":
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 15)
                continue
            if r.json()["base_resp"]["err_msg"] == "freq control":
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 15)
                continue
            if "list" not in r.json() or len(r.json()["list"]) == 0:
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 15)
                continue
            user_info_dict = {'user_name': r.json()["list"][0]["nickname"],
                              'user_id': r.json()["list"][0]["fakeid"],
                              'avatar_url': r.json()["list"][0]["round_head_img"]}
            return user_info_dict

    # Get the Tencent Video download URL
    @classmethod
    def get_tencent_video_url(cls, video_id):
        url = 'https://vv.video.qq.com/getinfo?vids=' + str(video_id) + '&platform=101001&charge=0&otype=json'
        response = requests.get(url=url).text.replace('QZOutputJson=', '').replace('"};', '"}')
        response = json.loads(response)
        url = response['vl']['vi'][0]['ul']['ui'][0]['url']
        fvkey = response['vl']['vi'][0]['fvkey']
        video_url = url + str(video_id) + '.mp4?vkey=' + fvkey
        return video_url
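
    # Usage sketch (the vid value below is hypothetical; in practice it is parsed from
    # the article's Tencent Video iframe in get_video_url):
    #   video_url = GongzhonghaoAuthor1.get_tencent_video_url("x1234abcd56")
    #   # -> "<cdn-prefix>/x1234abcd56.mp4?vkey=<fvkey from the getinfo response>"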

    @classmethod
    def get_video_url(cls, article_url, env):
        # Request configuration: capture Chrome performance logs
        ca = DesiredCapabilities.CHROME
        ca["goog:loggingPrefs"] = {"performance": "ALL"}
        # Run headless, without opening a browser window
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument("headless")
        chrome_options.add_argument(
            'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
        chrome_options.add_argument("--no-sandbox")
        # Initialize the driver
        if env == "prod":
            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
        else:
            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
                '/Users/wangkun/Downloads/chromedriver/chromedriver_v113/chromedriver'))
        driver.implicitly_wait(10)
        driver.get(article_url)
        time.sleep(1)
        if len(driver.find_elements(By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]')) != 0:
            video_url = driver.find_element(
                By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]').get_attribute('src')
        elif len(driver.find_elements(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]')) != 0:
            iframe = driver.find_element(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]').get_attribute('src')
            video_id = iframe.split('vid=')[-1].split('&')[0]
            video_url = cls.get_tencent_video_url(video_id)
        else:
            video_url = 0
        driver.quit()
        return video_url
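
    # Note on the return contract above: get_video_url returns 0 when neither a native
    # <video> element nor a Tencent Video iframe is found on the article page;
    # get_videoList below treats such a value as an article with no usable video.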

    # Get the article list
    @classmethod
    def get_videoList(cls, log_type, crawler, rule_dict, user_dict, env):
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        begin = 0
        while True:
            token_dict = cls.get_token(log_type, crawler, env)
            url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
            headers = {
                "accept": "*/*",
                "accept-encoding": "gzip, deflate, br",
                "accept-language": "zh-CN,zh;q=0.9",
                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
                           "&type=77&createType=5&token=" + str(token_dict['token']) + "&lang=zh_CN",
                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
                "sec-ch-ua-mobile": "?0",
                "sec-ch-ua-platform": '"Windows"',
                "sec-fetch-dest": "empty",
                "sec-fetch-mode": "cors",
                "sec-fetch-site": "same-origin",
                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
                "x-requested-with": "XMLHttpRequest",
                'cookie': token_dict['cookie'],
            }
            params = {
                "action": "list_ex",
                "begin": str(begin),
                "count": "5",
                "fakeid": user_dict['user_id'],
                "type": "9",
                "query": "",
                "token": str(token_dict['token']),
                "lang": "zh_CN",
                "f": "json",
                "ajax": "1",
            }
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=params, verify=False)
            r.close()
            if r.json()["base_resp"]["err_msg"] == "invalid session":
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 15)
                continue
            if r.json()["base_resp"]["err_msg"] == "freq control":
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 15)
                continue
            if r.json()["base_resp"]["err_msg"] == "invalid args" and r.json()["base_resp"]["ret"] == 200002:
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"公众号:{user_dict['user_name']}\n抓取异常, 请检查该公众号\n")
                return
            if 'app_msg_list' not in r.json():
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 15)
                continue
            if len(r.json()['app_msg_list']) == 0:
                Common.logger(log_type, crawler).info('没有更多视频了\n')
                Common.logging(log_type, crawler, env, '没有更多视频了\n')
                return
            else:
                begin += 5
            app_msg_list = r.json()['app_msg_list']
            for article in app_msg_list:
                try:
                    create_time = article.get('create_time', 0)
                    publish_time_stamp = int(create_time)
                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                    article_url = article.get('link', '')
                    video_dict = {
                        'video_id': article.get('aid', ''),
                        'video_title': article.get("title", "").replace(' ', '').replace('"', '').replace("'", ""),
                        'publish_time_stamp': publish_time_stamp,
                        'publish_time_str': publish_time_str,
                        'user_name': user_dict["user_name"],
                        'play_cnt': 0,
                        'comment_cnt': 0,
                        'like_cnt': 0,
                        'share_cnt': 0,
                        'user_id': user_dict['user_id'],
                        'avatar_url': user_dict['avatar_url'],
                        'cover_url': article.get('cover', ''),
                        'article_url': article.get('link', ''),
                        'video_url': cls.get_video_url(article_url, env),
                        'session': f'gongzhonghao-author1-{int(time.time())}'
                    }
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    Common.logging(log_type, crawler, env, f'video_dict:{video_dict}')
                    if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('max', 1000)):
                        Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
                        Common.logging(log_type, crawler, env, f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
                        return
                    # No usable article link / video URL (get_video_url returned 0)
                    if not video_dict['article_url'] or not video_dict['video_url']:
                        Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
                        Common.logging(log_type, crawler, env, "文章涉嫌违反相关法律法规和政策\n")
                    # Filter titles containing sensitive words
                    elif any(str(word) and str(word) in video_dict['video_title']
                             for word in get_config_from_mysql(log_type=log_type,
                                                               source=crawler,
                                                               env=env,
                                                               text="filter",
                                                               action="")):
                        Common.logger(log_type, crawler).info("标题已中过滤词\n")
                        Common.logging(log_type, crawler, env, "标题已中过滤词\n")
                    # Skip videos that have already been downloaded
                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
                        Common.logger(log_type, crawler).info("视频已下载\n")
                        Common.logging(log_type, crawler, env, "视频已下载\n")
                    # Title similarity check
                    elif title_like(log_type, crawler, video_dict['video_title'], cls.platform, env) is True:
                        Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
                        Common.logging(log_type, crawler, env, f'标题相似度>=80%:{video_dict["video_title"]}\n')
                    else:
                        # cls.download_publish(log_type=log_type,
                        #                      crawler=crawler,
                        #                      video_dict=video_dict,
                        #                      rule_dict=rule_dict,
                        #                      # user_dict=user_dict,
                        #                      env=env)
                        video_dict["out_user_id"] = video_dict["user_id"]
                        video_dict["platform"] = crawler
                        video_dict["strategy"] = log_type
                        video_dict["out_video_id"] = video_dict["video_id"]
                        video_dict["width"] = 0
                        video_dict["height"] = 0
                        video_dict["crawler_rule"] = json.dumps(rule_dict)
                        # video_dict["user_id"] = user_dict["uid"]
                        video_dict["user_id"] = Publish.uids(crawler, "定向爬虫策略", "", env)
                        video_dict["publish_time"] = video_dict["publish_time_str"]
                        mq.send_msg(video_dict)
                except Exception as e:
                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                    Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
            Common.logger(log_type, crawler).info('休眠 60 秒\n')
            Common.logging(log_type, crawler, env, '休眠 60 秒\n')
            time.sleep(60)

    # Check whether the video has already been crawled
    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        # sql = f""" select * from crawler_video where platform="公众号" and out_video_id="{video_id}"; """
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    # Download / upload
    @classmethod
    def download_publish(cls, log_type, crawler, video_dict, rule_dict, env):
        # Download the video
        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
        try:
            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                # Delete the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                Common.logging(log_type, crawler, env, "视频size=0,删除成功\n")
                return
        except FileNotFoundError:
            # Delete the video folder
            shutil.rmtree(f"./{crawler}/videos/{md_title}")
            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
            Common.logging(log_type, crawler, env, "视频文件不存在,删除文件夹成功\n")
            return
        # Get video duration and dimensions (path kept consistent with the md_title folder used above)
        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{md_title}/video.mp4")
        video_dict["video_width"] = ffmpeg_dict["width"]
        video_dict["video_height"] = ffmpeg_dict["height"]
        video_dict["duration"] = ffmpeg_dict["duration"]
        Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
        Common.logging(log_type, crawler, env, f'video_width:{video_dict["video_width"]}')
        Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
        Common.logging(log_type, crawler, env, f'video_height:{video_dict["video_height"]}')
        Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
        Common.logging(log_type, crawler, env, f'duration:{video_dict["duration"]}')
        if download_rule(log_type, crawler, video_dict, rule_dict) is False:
            shutil.rmtree(f"./{crawler}/videos/{md_title}")
            Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
            Common.logging(log_type, crawler, env, "不满足抓取规则,删除成功\n")
            return
        # Download the cover image
        Common.download_method(log_type=log_type, crawler=crawler, text="cover",
                               title=video_dict["video_title"], url=video_dict["cover_url"])
        # Save video info to "./videos/{video_title}/info.txt"
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
        # Upload the video
        Common.logger(log_type, crawler).info("开始上传视频...")
        Common.logging(log_type, crawler, env, "开始上传视频...")
        strategy = "定向爬虫策略"
        if env == 'prod':
            oss_endpoint = "inner"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy=strategy,
                                                      our_uid="follow",
                                                      oss_endpoint=oss_endpoint,
                                                      env=env)
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
        else:
            oss_endpoint = "out"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy=strategy,
                                                      our_uid="follow",
                                                      oss_endpoint=oss_endpoint,
                                                      env=env)
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
        if our_video_id is None:
            try:
                # Delete the video folder
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                return
            except FileNotFoundError:
                return
        insert_sql = f""" insert into crawler_video(video_id,
                                                    out_user_id,
                                                    platform,
                                                    strategy,
                                                    out_video_id,
                                                    video_title,
                                                    cover_url,
                                                    video_url,
                                                    duration,
                                                    publish_time,
                                                    play_cnt,
                                                    crawler_rule,
                                                    width,
                                                    height)
                                                    values({our_video_id},
                                                    "{video_dict['user_id']}",
                                                    "{cls.platform}",
                                                    "定向爬虫策略",
                                                    "{video_dict['video_id']}",
                                                    "{video_dict['video_title']}",
                                                    "{video_dict['cover_url']}",
                                                    "{video_dict['video_url']}",
                                                    {int(video_dict['duration'])},
                                                    "{video_dict['publish_time_str']}",
                                                    {int(video_dict['play_cnt'])},
                                                    '{json.dumps(rule_dict)}',
                                                    {int(video_dict['video_width'])},
                                                    {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
        Common.logging(log_type, crawler, env, '视频信息写入数据库成功')
        # Write the video info to Feishu
        Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
        # Video ID sheet: write the data into the first row
        upload_time = int(time.time())
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "用户主页",
                   video_dict['video_title'],
                   video_dict['video_id'],
                   our_video_link,
                   int(video_dict['duration']),
                   f"{video_dict['video_width']}*{video_dict['video_height']}",
                   video_dict['publish_time_str'],
                   video_dict['user_name'],
                   video_dict['user_id'],
                   video_dict['avatar_url'],
                   video_dict['cover_url'],
                   video_dict['article_url'],
                   video_dict['video_url']]]
        time.sleep(0.5)
        Feishu.update_values(log_type, crawler, "47e39d", "F2:Z2", values)
        Common.logger(log_type, crawler).info('视频下载/上传成功\n')
        Common.logging(log_type, crawler, env, '视频下载/上传成功\n')

    # Iterate over all users in the Feishu user sheet and crawl each account's videos
    @classmethod
    def get_all_videos(cls, log_type, crawler, rule_dict, env):
        while True:
            sheetid = "Bzv72P"
            # sheetid = "SHRnwl"
            user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
            if user_sheet is None:
                Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 2秒后重试")
                Common.logging(log_type, crawler, env, f"user_sheet:{user_sheet}, 2秒后重试")
                time.sleep(2)
                continue
            len_sheet = len(user_sheet)
            if len_sheet >= 101:
                len_sheet = 101
            for i in range(1, len_sheet):
                user_dict = cls.get_users(log_type=log_type,
                                          crawler=crawler,
                                          user_sheet=user_sheet,
                                          sheetid=sheetid,
                                          i=i,
                                          env=env)
                Common.logger(log_type, crawler).info(f'获取 {user_dict["user_name"]} 公众号视频\n')
                Common.logging(log_type, crawler, env, f'获取 {user_dict["user_name"]} 公众号视频\n')
                try:
                    cls.get_videoList(log_type=log_type,
                                      crawler=crawler,
                                      rule_dict=rule_dict,
                                      user_dict=user_dict,
                                      env=env)
                    Common.logger(log_type, crawler).info('休眠 60 秒\n')
                    Common.logging(log_type, crawler, env, '休眠 60 秒\n')
                    time.sleep(60)
                except Exception as e:
                    Common.logger(log_type, crawler).info(f'抓取{user_dict["user_name"]}公众号时异常:{e}\n')
                    Common.logging(log_type, crawler, env, f'抓取{user_dict["user_name"]}公众号时异常:{e}\n')
            break


if __name__ == "__main__":
    # GongzhonghaoAuthor1.get_token("author", "gongzhonghao", "prod")
    # print(GongzhonghaoAuthor1.get_users("author", "gongzhonghao", "Bzv72P", "dev"))
    # print(get_config_from_mysql("author", "gongzhonghao", "dev", "filter", action=""))
    # print(title_like("author", "gongzhonghao", "公众号", "123", "dev"))
    # print(GongzhonghaoAuthor1.get_user_info("author", "gongzhonghao", "幸福花朵", "dev"))
    GongzhonghaoAuthor1.get_all_videos("author", "gongzhonghao", {}, "dev")
    pass