gongzhonghao2_author.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/3/28
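"""公众号定向爬虫(第 2 套):按飞书配置的公众号列表抓取文章中的视频,过滤后推送到 ETL 消息队列。"""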
import datetime
import json
import os
import shutil
import sys
import time
from hashlib import md5
import requests
import urllib3
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium import webdriver

# 先把项目根目录加入 sys.path,再导入 common 包,否则从其他目录启动时会找不到模块
sys.path.append(os.getcwd())
from common.mq import MQ
# from common.getuser import getUser
from common.common import Common
from common.feishu import Feishu
from common.publish import Publish
from common.scheduling_db import MysqlHelper
from common.public import get_config_from_mysql, title_like, download_rule


class GongzhonghaoAuthor2:
    platform = "公众号"

    # 获取 token
    @classmethod
    def get_token(cls, log_type, crawler, env):
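        """从 crawler_config 表读取"公众号_2"的 token/cookie 配置;未配置时通知飞书,休眠 60 秒后返回 None。"""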
        select_sql = f""" select * from crawler_config where source="{crawler}" and title LIKE "%公众号_2%";"""
        configs = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
        if len(configs) == 0:
            Feishu.bot(log_type, crawler, "公众号_2:未配置token")
            time.sleep(60)
            return None
        token_dict = {
            "token_id": configs[0]["id"],
            "title": configs[0]["title"].strip(),
            "token": dict(eval(configs[0]["config"]))["token"].strip(),
            "cookie": dict(eval(configs[0]["config"]))["cookie"].strip(),
            "update_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(configs[0]["update_time"] / 1000))),
            "operator": configs[0]["operator"].strip()
        }
        # for k, v in token_dict.items():
        #     print(f"{k}:{v}")
        return token_dict

    @classmethod
    def get_users(cls, log_type, crawler, user_sheet, sheetid, i, env):
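        """读取飞书用户表第 i 行;缺少 out_uid/头像时调用 get_user_info 补全并回写表格,返回用户信息字典。"""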
        user_name = user_sheet[i][0]
        wechat_name = user_sheet[i][2]
        if wechat_name is None or wechat_name.strip() == "":
            wechat_name = user_name
        out_uid = user_sheet[i][3]
        avatar_url = user_sheet[i][4]
        if out_uid is None or out_uid.strip() == "":
            user_info_dict = cls.get_user_info(log_type=log_type, crawler=crawler, wechat_name=wechat_name, env=env)
            out_uid = user_info_dict["user_id"]
            avatar_url = user_info_dict["avatar_url"]
            Feishu.update_values(log_type, crawler, sheetid, f'D{i + 1}:E{i + 1}', [[out_uid, avatar_url]])
        our_user_dict = {
            'user_name': user_name,
            'user_id': out_uid,
            'wechat_name': wechat_name,
            'avatar_url': avatar_url,
        }
        for k, v in our_user_dict.items():
            Common.logger(log_type, crawler).info(f"{k}:{v}")
        Common.logging(log_type, crawler, env, f'our_user_dict:{our_user_dict}')
        return our_user_dict

    # 获取用户 fakeid
    @classmethod
    def get_user_info(cls, log_type, crawler, wechat_name, env):
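        """调用公众号后台 searchbiz 接口按名称搜索公众号,返回 fakeid、昵称、头像;token 失效/频控时通知飞书并循环重试。"""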
        while True:
            token_dict = cls.get_token(log_type, crawler, env)
            url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
            headers = {
                "accept": "*/*",
                "accept-encoding": "gzip, deflate, br",
                "accept-language": "zh-CN,zh;q=0.9",
                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
                           "&type=77&createType=5&token=1011071554&lang=zh_CN",
                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
                "sec-ch-ua-mobile": "?0",
                "sec-ch-ua-platform": '"Windows"',
                "sec-fetch-dest": "empty",
                "sec-fetch-mode": "cors",
                "sec-fetch-site": "same-origin",
                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
                "x-requested-with": "XMLHttpRequest",
                'cookie': token_dict['cookie'],
            }
            params = {
                "action": "search_biz",
                "begin": "0",
                "count": "5",
                "query": str(wechat_name),
                "token": token_dict['token'],
                "lang": "zh_CN",
                "f": "json",
                "ajax": "1",
            }
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=params, verify=False)
            r.close()
            if r.json()["base_resp"]["err_msg"] == "invalid session":
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 15)
                continue
            if r.json()["base_resp"]["err_msg"] == "freq control":
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 15)
                continue
            if "list" not in r.json() or len(r.json()["list"]) == 0:
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 15)
                continue
            user_info_dict = {
                'user_name': r.json()["list"][0]["nickname"],
                'user_id': r.json()["list"][0]["fakeid"],
                'avatar_url': r.json()["list"][0]["round_head_img"]
            }
            return user_info_dict

    # 获取腾讯视频下载链接
    @classmethod
    def get_tencent_video_url(cls, video_id):
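        """通过 vv.video.qq.com 的 getinfo 接口,由 vid 拼出可直接下载的 mp4 地址。"""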
        url = 'https://vv.video.qq.com/getinfo?vids=' + str(video_id) + '&platform=101001&charge=0&otype=json'
        response = requests.get(url=url).text.replace('QZOutputJson=', '').replace('"};', '"}')
        response = json.loads(response)
        url = response['vl']['vi'][0]['ul']['ui'][0]['url']
        fvkey = response['vl']['vi'][0]['fvkey']
        video_url = url + str(video_id) + '.mp4?vkey=' + fvkey
        return video_url

    @classmethod
    def get_video_url(cls, article_url, env):
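        """用 headless Chrome 打开文章页并提取视频地址;腾讯视频 iframe 走 get_tencent_video_url,页面无视频时返回 0。"""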
        # 开启 performance 日志,便于捕获页面请求
        ca = DesiredCapabilities.CHROME
        ca["goog:loggingPrefs"] = {"performance": "ALL"}
        # 不打开浏览器运行
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument("headless")
        chrome_options.add_argument(
            'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
        chrome_options.add_argument("--no-sandbox")
        # driver 初始化
        if env == "prod":
            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
        else:
            driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options,
                                      service=Service('/Users/wangkun/Downloads/chromedriver/chromedriver_v113/chromedriver'))
        driver.implicitly_wait(10)
        driver.get(article_url)
        time.sleep(1)
        if len(driver.find_elements(By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]')) != 0:
            video_url = driver.find_element(
                By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]').get_attribute('src')
        elif len(driver.find_elements(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]')) != 0:
            iframe = driver.find_element(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]').get_attribute('src')
            video_id = iframe.split('vid=')[-1].split('&')[0]
            video_url = cls.get_tencent_video_url(video_id)
        else:
            video_url = 0
        driver.quit()
        return video_url

    # 获取文章列表
    @classmethod
    def get_videoList(cls, log_type, crawler, rule_dict, user_dict, env):
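        """分页拉取公众号文章列表,解析出视频信息,经发布时间、敏感词、去重、标题相似度过滤后发送到 ETL 消息队列。"""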
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        begin = 0
        while True:
            token_dict = cls.get_token(log_type, crawler, env)
            url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
            headers = {
                "accept": "*/*",
                "accept-encoding": "gzip, deflate, br",
                "accept-language": "zh-CN,zh;q=0.9",
                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
                           "&type=77&createType=5&token=" + str(token_dict['token']) + "&lang=zh_CN",
                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
                "sec-ch-ua-mobile": "?0",
                "sec-ch-ua-platform": '"Windows"',
                "sec-fetch-dest": "empty",
                "sec-fetch-mode": "cors",
                "sec-fetch-site": "same-origin",
                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
                "x-requested-with": "XMLHttpRequest",
                'cookie': token_dict['cookie'],
            }
            params = {
                "action": "list_ex",
                "begin": str(begin),
                "count": "5",
                "fakeid": user_dict['user_id'],
                "type": "9",
                "query": "",
                "token": str(token_dict['token']),
                "lang": "zh_CN",
                "f": "json",
                "ajax": "1",
            }
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=params, verify=False)
            r.close()
            if r.json()["base_resp"]["err_msg"] == "invalid session":
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 15)
                continue
            if r.json()["base_resp"]["err_msg"] == "freq control":
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 15)
                continue
            if r.json()["base_resp"]["err_msg"] == "invalid args" and r.json()["base_resp"]["ret"] == 200002:
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"公众号:{user_dict['user_name']}\n抓取异常, 请检查该公众号\n")
                return
            if 'app_msg_list' not in r.json():
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 15)
                continue
            if len(r.json()['app_msg_list']) == 0:
                Common.logger(log_type, crawler).info('没有更多视频了\n')
                Common.logging(log_type, crawler, env, '没有更多视频了\n')
                return
            begin += 5
            app_msg_list = r.json()['app_msg_list']
            for article in app_msg_list:
                try:
                    create_time = article.get('create_time', 0)
                    publish_time_stamp = int(create_time)
                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                    article_url = article.get('link', '')
                    video_dict = {
                        'video_id': article.get('aid', ''),
                        'video_title': article.get("title", "").replace(' ', '').replace('"', '').replace("'", ""),
                        'publish_time_stamp': publish_time_stamp,
                        'publish_time_str': publish_time_str,
                        'user_name': user_dict["user_name"],
                        'play_cnt': 0,
                        'comment_cnt': 0,
                        'like_cnt': 0,
                        'share_cnt': 0,
                        'user_id': user_dict['user_id'],
                        'avatar_url': user_dict['avatar_url'],
                        'cover_url': article.get('cover', ''),
                        'article_url': article.get('link', ''),
                        'video_url': cls.get_video_url(article_url, env),
                        'session': f'gongzhonghao-author2-{int(time.time())}'
                    }
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    Common.logging(log_type, crawler, env, f'video_dict:{video_dict}')
                    if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('max', 1000)):
                        Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
                        Common.logging(log_type, crawler, env, f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
                        return
                    if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
                        Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
                        Common.logging(log_type, crawler, env, "文章涉嫌违反相关法律法规和政策\n")
                    # 标题敏感词过滤
                    elif any(str(word) in video_dict['video_title']
                             for word in get_config_from_mysql(log_type=log_type,
                                                               source=crawler,
                                                               env=env,
                                                               text="filter",
                                                               action="")):
                        Common.logger(log_type, crawler).info("标题已中过滤词\n")
                        Common.logging(log_type, crawler, env, "标题已中过滤词\n")
                    # 已下载判断
                    elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
                        Common.logger(log_type, crawler).info("视频已下载\n")
                        Common.logging(log_type, crawler, env, "视频已下载\n")
                    # 标题相似度
                    elif title_like(log_type, crawler, video_dict['video_title'], cls.platform, env) is True:
                        Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
                        Common.logging(log_type, crawler, env, f'标题相似度>=80%:{video_dict["video_title"]}\n')
                    else:
                        # cls.download_publish(log_type=log_type,
                        #                      crawler=crawler,
                        #                      video_dict=video_dict,
                        #                      rule_dict=rule_dict,
                        #                      # user_dict=user_dict,
                        #                      env=env)
                        video_dict["out_user_id"] = video_dict["user_id"]
                        video_dict["platform"] = crawler
                        video_dict["strategy"] = log_type
                        video_dict["out_video_id"] = video_dict["video_id"]
                        video_dict["width"] = 0
                        video_dict["height"] = 0
                        video_dict["crawler_rule"] = json.dumps(rule_dict)
                        # video_dict["user_id"] = user_dict["uid"]
                        video_dict["user_id"] = Publish.uids(crawler, "定向爬虫策略", "", env)
                        video_dict["publish_time"] = video_dict["publish_time_str"]
                        mq.send_msg(video_dict)
                except Exception as e:
                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                    Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
            Common.logger(log_type, crawler).info('休眠 60 秒\n')
            Common.logging(log_type, crawler, env, '休眠 60 秒\n')
            time.sleep(60)

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
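        """查询 crawler_video 表中该 out_video_id 的记录数,大于 0 表示已抓取过。"""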
        # sql = f""" select * from crawler_video where platform="公众号" and out_video_id="{video_id}"; """
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    # 下载/上传
    @classmethod
    def download_publish(cls, log_type, crawler, video_dict, rule_dict, env):
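        """下载视频和封面,校验文件与抓取规则后上传发布,并将记录写入 MySQL 和飞书。(get_videoList 中的调用已注释,改走 MQ,此方法保留备用)"""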
        # 下载视频
        Common.download_method(log_type=log_type, crawler=crawler, text="video",
                               title=video_dict["video_title"], url=video_dict["video_url"])
        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
        try:
            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                # 删除视频文件夹
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                Common.logging(log_type, crawler, env, "视频size=0,删除成功\n")
                return
        except FileNotFoundError:
            # 删除视频文件夹
            shutil.rmtree(f"./{crawler}/videos/{md_title}")
            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
            Common.logging(log_type, crawler, env, "视频文件不存在,删除文件夹成功\n")
            return
        # 获取视频时长(视频保存在以标题 md5 命名的目录下)
        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{md_title}/video.mp4")
        video_dict["video_width"] = ffmpeg_dict["width"]
        video_dict["video_height"] = ffmpeg_dict["height"]
        video_dict["duration"] = ffmpeg_dict["duration"]
        Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
        Common.logging(log_type, crawler, env, f'video_width:{video_dict["video_width"]}')
        Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
        Common.logging(log_type, crawler, env, f'video_height:{video_dict["video_height"]}')
        Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
        Common.logging(log_type, crawler, env, f'duration:{video_dict["duration"]}')
        if download_rule(log_type, crawler, video_dict, rule_dict) is False:
            shutil.rmtree(f"./{crawler}/videos/{md_title}")
            Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
            Common.logging(log_type, crawler, env, "不满足抓取规则,删除成功\n")
            return
        # 下载封面
        Common.download_method(log_type=log_type, crawler=crawler, text="cover",
                               title=video_dict["video_title"], url=video_dict["cover_url"])
        # 保存视频信息至 "./videos/{video_title}/info.txt"
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
        # 上传视频
        Common.logger(log_type, crawler).info("开始上传视频...")
        Common.logging(log_type, crawler, env, "开始上传视频...")
        strategy = "定向爬虫策略"
        if env == 'prod':
            oss_endpoint = "inner"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy=strategy,
                                                      our_uid="follow",
                                                      oss_endpoint=oss_endpoint,
                                                      env=env)
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
        else:
            oss_endpoint = "out"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy=strategy,
                                                      our_uid="follow",
                                                      oss_endpoint=oss_endpoint,
                                                      env=env)
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
        if our_video_id is None:
            try:
                # 删除视频文件夹
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                return
            except FileNotFoundError:
                return
        insert_sql = f""" insert into crawler_video(video_id,
                                                    out_user_id,
                                                    platform,
                                                    strategy,
                                                    out_video_id,
                                                    video_title,
                                                    cover_url,
                                                    video_url,
                                                    duration,
                                                    publish_time,
                                                    play_cnt,
                                                    crawler_rule,
                                                    width,
                                                    height)
                          values({our_video_id},
                                 "{video_dict['user_id']}",
                                 "{cls.platform}",
                                 "定向爬虫策略",
                                 "{video_dict['video_id']}",
                                 "{video_dict['video_title']}",
                                 "{video_dict['cover_url']}",
                                 "{video_dict['video_url']}",
                                 {int(video_dict['duration'])},
                                 "{video_dict['publish_time_str']}",
                                 {int(video_dict['play_cnt'])},
                                 '{json.dumps(rule_dict)}',
                                 {int(video_dict['video_width'])},
                                 {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
        Common.logging(log_type, crawler, env, '视频信息插入数据库成功!')
        # 视频写入飞书:视频ID工作表,首行插入一行并写入数据
        Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
        upload_time = int(time.time())
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "用户主页",
                   video_dict['video_title'],
                   video_dict['video_id'],
                   our_video_link,
                   int(video_dict['duration']),
                   f"{video_dict['video_width']}*{video_dict['video_height']}",
                   video_dict['publish_time_str'],
                   video_dict['user_name'],
                   video_dict['user_id'],
                   video_dict['avatar_url'],
                   video_dict['cover_url'],
                   video_dict['article_url'],
                   video_dict['video_url']]]
        time.sleep(0.5)
        Feishu.update_values(log_type, crawler, "47e39d", "F2:Z2", values)
        Common.logger(log_type, crawler).info('视频下载/上传成功\n')
        Common.logging(log_type, crawler, env, '视频下载/上传成功\n')

    @classmethod
    def get_all_videos(cls, log_type, crawler, rule_dict, env):
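        """第二套脚本入口:从飞书用户表读取第 101~200 行的用户,逐个抓取其公众号视频。"""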
        while True:
            sheetid = "Bzv72P"
            user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
            if user_sheet is None:
                Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 2秒后重试")
                Common.logging(log_type, crawler, env, f"user_sheet:{user_sheet}, 2秒后重试")
                time.sleep(2)
                continue
            len_sheet = len(user_sheet)
            if len_sheet <= 101:
                Common.logger(log_type, crawler).info("抓取用户数<=100,无需启动第二套抓取脚本\n")
                Common.logging(log_type, crawler, env, "抓取用户数<=100,无需启动第二套抓取脚本\n")
                return
            if len_sheet >= 201:
                len_sheet = 201
            for i in range(101, len_sheet):
                user_dict = cls.get_users(log_type=log_type,
                                          crawler=crawler,
                                          user_sheet=user_sheet,
                                          sheetid=sheetid,
                                          i=i,
                                          env=env)
                Common.logger(log_type, crawler).info(f'获取 {user_dict["user_name"]} 公众号视频\n')
                Common.logging(log_type, crawler, env, f'获取 {user_dict["user_name"]} 公众号视频\n')
                try:
                    cls.get_videoList(log_type=log_type,
                                      crawler=crawler,
                                      rule_dict=rule_dict,
                                      user_dict=user_dict,
                                      env=env)
                    Common.logger(log_type, crawler).info('休眠 60 秒\n')
                    Common.logging(log_type, crawler, env, '休眠 60 秒\n')
                    time.sleep(60)
                except Exception as e:
                    Common.logger(log_type, crawler).info(f'抓取{user_dict["user_name"]}公众号时异常:{e}\n')
                    Common.logging(log_type, crawler, env, f'抓取{user_dict["user_name"]}公众号时异常:{e}\n')
            break


if __name__ == "__main__":
    GongzhonghaoAuthor2.get_token("author", "gongzhonghao", "dev")
    pass