gongzhonghao_author_create_user.py 30 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526
  1. # -*- coding: utf-8 -*-
  2. # @Author: wangkun
  3. # @Time: 2023/3/28
  4. import datetime
  5. import json
  6. import os
  7. import shutil
  8. import sys
  9. import time
  10. from hashlib import md5
  11. import requests
  12. import urllib3
  13. from selenium.webdriver import DesiredCapabilities
  14. from selenium.webdriver.chrome.service import Service
  15. from selenium.webdriver.common.by import By
  16. from selenium import webdriver
  17. sys.path.append(os.getcwd())
  18. from common.common import Common
  19. from common.feishu import Feishu
  20. from common.publish import Publish
  21. from common.getuser import getUser
  22. from common.scheduling_db import MysqlHelper
  23. from common.public import get_config_from_mysql, download_rule, title_like
class GongzhonghaoAuthor1:
    """Crawler for WeChat Official Account ("公众号") videos, token slot #1.

    Reads the account token from MySQL, the target user list from a Feishu
    sheet, pages article lists via the mp.weixin.qq.com admin API, extracts
    video URLs with headless Chrome, then downloads and publishes each video.
    """
    # Platform label written into crawler_video rows and used by dedupe queries.
    platform = "公众号"
  26. # 获取 token
  27. @classmethod
  28. def get_token(cls, log_type, crawler, env):
  29. select_sql = f""" select * from crawler_config where source="{crawler}" and title LIKE "%公众号_1%";"""
  30. configs = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
  31. if len(configs) == 0:
  32. Feishu.bot(log_type, crawler, "公众号_1:未配置token")
  33. time.sleep(60)
  34. return None
  35. token_dict = {
  36. "token_id": configs[0]["id"],
  37. "title": configs[0]["title"].strip(),
  38. "token": dict(eval(configs[0]["config"]))["token"].strip(),
  39. "cookie": dict(eval(configs[0]["config"]))["cookie"].strip(),
  40. "update_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(configs[0]["update_time"]/1000))),
  41. "operator": configs[0]["operator"].strip()
  42. }
  43. # for k, v in token_dict.items():
  44. # print(f"{k}:{type(v)}, {v}")
  45. return token_dict
  46. @classmethod
  47. def get_users(cls, log_type, crawler, sheetid, env):
  48. while True:
  49. user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
  50. if user_sheet is None:
  51. Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 2秒后重试")
  52. Common.logging(log_type, crawler, env, f"user_sheet:{user_sheet}, 2秒后重试")
  53. time.sleep(2)
  54. continue
  55. user_list = []
  56. len_sheet = len(user_sheet)
  57. if len_sheet >= 101:
  58. len_sheet = 101
  59. for i in range(1, len_sheet):
  60. # for i in range(1, 3):
  61. user_name = user_sheet[i][0]
  62. wechat_name = user_sheet[i][2]
  63. if wechat_name is None or wechat_name.strip() == "" or wechat_name.replace(" ", "") == "":
  64. wechat_name = user_name
  65. Common.logger(log_type, crawler).info(f"befor_wechat_name:{type(wechat_name)}, {wechat_name}")
  66. Common.logging(log_type, crawler, env, f"befor_wechat_name:{type(wechat_name)}, {wechat_name}")
  67. our_uid = user_sheet[i][5]
  68. our_user_link = user_sheet[i][6]
  69. user_info_dict = cls.get_user_info(log_type=log_type, crawler=crawler, wechat_name=wechat_name, env=env)
  70. out_uid = user_info_dict["user_id"]
  71. avatar_url = user_info_dict["avatar_url"]
  72. tag1 = user_sheet[i][7]
  73. tag2 = user_sheet[i][8]
  74. tag3 = user_sheet[i][9]
  75. tag4 = user_sheet[i][10]
  76. tag5 = user_sheet[i][11]
  77. tag6 = user_sheet[i][12]
  78. Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息")
  79. Common.logging(log_type, crawler, env, f"正在更新 {user_name} 用户信息")
  80. if out_uid is None or our_uid is None:
  81. # 用来创建our_id的信息
  82. user_dict = {
  83. 'recommendStatus': -6,
  84. 'appRecommendStatus': -6,
  85. 'nickName': user_info_dict["user_name"],
  86. 'avatarUrl': user_info_dict['avatar_url'],
  87. 'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5},{tag6}',
  88. }
  89. our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
  90. Common.logger(log_type, crawler).info(f'新创建的站内UID:{our_uid}')
  91. Common.logging(log_type, crawler, env, f'新创建的站内UID:{our_uid}')
  92. if env == 'prod':
  93. our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
  94. else:
  95. our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
  96. Feishu.update_values(log_type, crawler, "Bzv72P", f'D{i + 1}:G{i + 1}', [
  97. [user_info_dict["user_id"], user_info_dict["avatar_url"], our_uid, our_user_link]])
  98. Common.logger(log_type, crawler).info(f'用户信息创建成功!\n')
  99. Common.logging(log_type, crawler, env, f'用户信息创建成功!\n')
  100. else:
  101. Common.logger(log_type, crawler).info("用户信息已存在\n")
  102. Common.logging(log_type, crawler, env, "用户信息已存在\n")
  103. our_user_dict = {
  104. 'user_name': user_name,
  105. 'user_id': out_uid,
  106. 'wechat_name': wechat_name,
  107. 'our_uid': our_uid,
  108. 'our_user_link': our_user_link,
  109. 'avatar_url': avatar_url,
  110. }
  111. user_list.append(our_user_dict)
  112. return user_list
  113. # 获取用户 fakeid
  114. @classmethod
  115. def get_user_info(cls, log_type, crawler, wechat_name, env):
  116. Common.logger(log_type, crawler).info(f"wechat_name:{wechat_name}")
  117. Common.logging(log_type, crawler, env, f"wechat_name:{wechat_name}")
  118. while True:
  119. token_dict = cls.get_token(log_type, crawler, env)
  120. url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
  121. headers = {
  122. "accept": "*/*",
  123. "accept-encoding": "gzip, deflate, br",
  124. "accept-language": "zh-CN,zh;q=0.9",
  125. "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
  126. "t=media/appmsg_edit_v2&action=edit&isNew=1"
  127. "&type=77&createType=5&token=1011071554&lang=zh_CN",
  128. 'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
  129. "sec-ch-ua-mobile": "?0",
  130. "sec-ch-ua-platform": '"Windows"',
  131. "sec-fetch-dest": "empty",
  132. "sec-fetch-mode": "cors",
  133. "sec-fetch-site": "same-origin",
  134. "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
  135. " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
  136. "x-requested-with": "XMLHttpRequest",
  137. 'cookie': token_dict['cookie'],
  138. }
  139. params = {
  140. "action": "search_biz",
  141. "begin": "0",
  142. "count": "5",
  143. "query": str(wechat_name),
  144. "token": token_dict['token'],
  145. "lang": "zh_CN",
  146. "f": "json",
  147. "ajax": "1",
  148. }
  149. urllib3.disable_warnings()
  150. r = requests.get(url=url, headers=headers, params=params, verify=False)
  151. r.close()
  152. if r.json()["base_resp"]["err_msg"] == "invalid session":
  153. Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
  154. Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
  155. if 20 >= datetime.datetime.now().hour >= 10:
  156. Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
  157. time.sleep(60 * 10)
  158. continue
  159. if r.json()["base_resp"]["err_msg"] == "freq control":
  160. Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
  161. Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
  162. if 20 >= datetime.datetime.now().hour >= 10:
  163. Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
  164. time.sleep(60 * 10)
  165. continue
  166. if "list" not in r.json() or len(r.json()["list"]) == 0:
  167. Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
  168. Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_fakeid:{r.text}\n")
  169. if 20 >= datetime.datetime.now().hour >= 10:
  170. Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
  171. time.sleep(60 * 10)
  172. continue
  173. user_info_dict = {'user_name': r.json()["list"][0]["nickname"],
  174. 'user_id': r.json()["list"][0]["fakeid"],
  175. 'avatar_url': r.json()["list"][0]["round_head_img"]}
  176. return user_info_dict
  177. # 获取腾讯视频下载链接
  178. @classmethod
  179. def get_tencent_video_url(cls, video_id):
  180. url = 'https://vv.video.qq.com/getinfo?vids=' + str(video_id) + '&platform=101001&charge=0&otype=json'
  181. response = requests.get(url=url).text.replace('QZOutputJson=', '').replace('"};', '"}')
  182. response = json.loads(response)
  183. url = response['vl']['vi'][0]['ul']['ui'][0]['url']
  184. fvkey = response['vl']['vi'][0]['fvkey']
  185. video_url = url + str(video_id) + '.mp4?vkey=' + fvkey
  186. return video_url
  187. @classmethod
  188. def get_video_url(cls, article_url, env):
  189. # 打印请求配置
  190. ca = DesiredCapabilities.CHROME
  191. ca["goog:loggingPrefs"] = {"performance": "ALL"}
  192. # 不打开浏览器运行
  193. chrome_options = webdriver.ChromeOptions()
  194. chrome_options.add_argument("headless")
  195. chrome_options.add_argument(
  196. f'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
  197. chrome_options.add_argument("--no-sandbox")
  198. # driver初始化
  199. if env == "prod":
  200. driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
  201. else:
  202. driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service(
  203. '/Users/wangkun/Downloads/chromedriver/chromedriver_v113/chromedriver'))
  204. driver.implicitly_wait(10)
  205. driver.get(article_url)
  206. time.sleep(1)
  207. if len(driver.find_elements(By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]')) != 0:
  208. video_url = driver.find_element(
  209. By.XPATH, '//div[@class="js_video_poster video_poster"]/*[2]').get_attribute('src')
  210. elif len(driver.find_elements(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]')) != 0:
  211. iframe = driver.find_element(By.XPATH, '//span[@class="js_tx_video_container"]/*[1]').get_attribute(
  212. 'src')
  213. video_id = iframe.split('vid=')[-1].split('&')[0]
  214. video_url = cls.get_tencent_video_url(video_id)
  215. else:
  216. video_url = 0
  217. driver.quit()
  218. return video_url
    # Fetch the article list.
    @classmethod
    def get_videoList(cls, log_type, crawler, rule_dict, user_dict, env):
        """Page through one account's article list and process each video.

        Pages 5 articles at a time via the appmsg API. Returns when the list
        is exhausted or an article is older than rule_dict's period.max;
        retries (with Feishu alerts between 10:00 and 20:00) on session
        expiry, frequency control, or a malformed response.
        """
        begin = 0
        while True:
            # Refresh the token every page in case it rotated mid-crawl.
            token_dict = cls.get_token(log_type, crawler, env)
            url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
            headers = {
                "accept": "*/*",
                "accept-encoding": "gzip, deflate, br",
                "accept-language": "zh-CN,zh;q=0.9",
                "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
                           "t=media/appmsg_edit_v2&action=edit&isNew=1"
                           "&type=77&createType=5&token=" + str(token_dict['token']) + "&lang=zh_CN",
                'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
                "sec-ch-ua-mobile": "?0",
                "sec-ch-ua-platform": '"Windows"',
                "sec-fetch-dest": "empty",
                "sec-fetch-mode": "cors",
                "sec-fetch-site": "same-origin",
                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                              " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
                "x-requested-with": "XMLHttpRequest",
                'cookie': token_dict['cookie'],
            }
            params = {
                "action": "list_ex",
                "begin": str(begin),
                "count": "5",
                "fakeid": user_dict['user_id'],
                "type": "9",
                "query": "",
                "token": str(token_dict['token']),
                "lang": "zh_CN",
                "f": "json",
                "ajax": "1",
            }
            urllib3.disable_warnings()
            r = requests.get(url=url, headers=headers, params=params, verify=False)
            r.close()
            # Session expired: alert (daytime only) and retry after 10 min.
            if r.json()["base_resp"]["err_msg"] == "invalid session":
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 10)
                continue
            # Rate limited: same alert-and-retry pattern.
            if r.json()["base_resp"]["err_msg"] == "freq control":
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler,f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 10)
                continue
            # Malformed/blocked response (no article list at all).
            if 'app_msg_list' not in r.json():
                Common.logger(log_type, crawler).warning(f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                Common.logging(log_type, crawler, env, f"status_code:{r.status_code}, get_videoList:{r.text}\n")
                if 20 >= datetime.datetime.now().hour >= 10:
                    Feishu.bot(log_type, crawler, f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']}\n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/")
                time.sleep(60 * 10)
                continue
            # Empty page means we've consumed the whole list — done.
            if len(r.json()['app_msg_list']) == 0:
                Common.logger(log_type, crawler).info('没有更多视频了\n')
                Common.logging(log_type, crawler, env, "没有更多视频了\n")
                return
            else:
                begin += 5
                app_msg_list = r.json()['app_msg_list']
                for article in app_msg_list:
                    # Per-article try/except so one bad article doesn't abort the page.
                    try:
                        create_time = article.get('create_time', 0)
                        publish_time_stamp = int(create_time)
                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
                        article_url = article.get('link', '')
                        video_dict = {
                            'video_id': article.get('aid', ''),
                            'video_title': article.get("title", "").replace(' ', '').replace('"', '').replace("'", ""),
                            'publish_time_stamp': publish_time_stamp,
                            'publish_time_str': publish_time_str,
                            'user_name': user_dict["user_name"],
                            'play_cnt': 0,
                            'comment_cnt': 0,
                            'like_cnt': 0,
                            'share_cnt': 0,
                            'user_id': user_dict['user_id'],
                            'avatar_url': user_dict['avatar_url'],
                            'cover_url': article.get('cover', ''),
                            'article_url': article.get('link', ''),
                            'video_url': cls.get_video_url(article_url, env),
                            'session': f'gongzhonghao-author1-{int(time.time())}'
                        }
                        for k, v in video_dict.items():
                            Common.logger(log_type, crawler).info(f"{k}:{v}")
                        Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
                        # Articles are newest-first, so one stale article ends the crawl.
                        if int(time.time()) - publish_time_stamp > 3600 * 24 * int(rule_dict.get('period', {}).get('max', 1000)):
                            Common.logger(log_type, crawler).info(f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
                            Common.logging(log_type, crawler, env, f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n")
                            return
                        # get_video_url returns 0 when no player was found on the page.
                        if video_dict['article_url'] == 0 or video_dict['video_url'] == 0:
                            Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
                            Common.logging(log_type, crawler, env, "文章涉嫌违反相关法律法规和政策\n")
                        # Title filter-word check.
                        elif any(str(word) if str(word) in video_dict['video_title'] else False
                                 for word in get_config_from_mysql(log_type=log_type,
                                                                   source=crawler,
                                                                   env=env,
                                                                   text="filter",
                                                                   action="")) is True:
                            Common.logger(log_type, crawler).info("标题已中过滤词\n")
                            Common.logging(log_type, crawler, env, "标题已中过滤词\n")
                        # Already-downloaded check.
                        elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
                            Common.logger(log_type, crawler).info("视频已下载\n")
                            Common.logging(log_type, crawler, env, "视频已下载\n")
                        # Title-similarity check.
                        elif title_like(log_type, crawler, video_dict['video_title'], cls.platform, env) is True:
                            Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
                            Common.logging(log_type, crawler, env, f'标题相似度>=80%:{video_dict["video_title"]}\n')
                        else:
                            cls.download_publish(log_type=log_type,
                                                 crawler=crawler,
                                                 video_dict=video_dict,
                                                 rule_dict=rule_dict,
                                                 user_dict=user_dict,
                                                 env=env)
                    except Exception as e:
                        Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                        Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
                Common.logger(log_type, crawler).info('休眠 60 秒\n')
                Common.logging(log_type, crawler, env, '休眠 60 秒\n')
                time.sleep(60)
  350. @classmethod
  351. def repeat_video(cls, log_type, crawler, video_id, env):
  352. sql = f""" select * from crawler_video where platform="公众号" and out_video_id="{video_id}"; """
  353. repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
  354. return len(repeat_video)
    # Download / upload.
    @classmethod
    def download_publish(cls, log_type, crawler, video_dict, rule_dict, user_dict, env):
        """Download one video (and cover), validate, publish, and record it.

        Side effects: writes under ./{crawler}/videos/, uploads via Publish,
        inserts a crawler_video row, and appends a row to the Feishu sheet.
        Returns early (cleaning up the working folder) on empty/missing
        downloads, rule failures, or upload failure.
        """
        # Download the video file.
        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
        # Working folder is keyed on the md5 of the title.
        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
        try:
            # Zero-byte download: discard the working folder and bail out.
            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                # Delete the video folder.
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                Common.logging(log_type, crawler, env, "视频size=0,删除成功\n")
                return
        except FileNotFoundError:
            # Delete the video folder.
            shutil.rmtree(f"./{crawler}/videos/{md_title}")
            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
            Common.logging(log_type, crawler, env, "视频文件不存在,删除文件夹成功\n")
            return
        # Probe width/height/duration.
        # NOTE(review): this path uses the raw title while every other path in
        # this method uses md_title — confirm Common.ffmpeg hashes the title
        # internally, otherwise this looks like a stale path.
        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
        video_dict["video_width"] = ffmpeg_dict["width"]
        video_dict["video_height"] = ffmpeg_dict["height"]
        video_dict["duration"] = ffmpeg_dict["duration"]
        Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
        Common.logging(log_type, crawler, env, f'video_width:{video_dict["video_width"]}')
        Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
        Common.logging(log_type, crawler, env, f'video_height:{video_dict["video_height"]}')
        Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
        Common.logging(log_type, crawler, env, f'duration:{video_dict["duration"]}')
        # Drop videos that fail the configured download rule.
        if download_rule(log_type, crawler, video_dict, rule_dict) is False:
            shutil.rmtree(f"./{crawler}/videos/{md_title}")
            Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
            Common.logging(log_type, crawler, env, "不满足抓取规则,删除成功\n")
            return
        # Download the cover image.
        Common.download_method(log_type=log_type, crawler=crawler, text="cover",
                               title=video_dict["video_title"], url=video_dict["cover_url"])
        # Save video info to "./videos/{video_title}/info.txt".
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
        # Upload the video.
        Common.logger(log_type, crawler).info("开始上传视频...")
        Common.logging(log_type, crawler, env, "开始上传视频...")
        strategy = "定向榜爬虫策略"
        if env == 'prod':
            # Production uploads go through the internal OSS endpoint.
            oss_endpoint = "inner"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy=strategy,
                                                      our_uid=user_dict["our_uid"],
                                                      oss_endpoint=oss_endpoint,
                                                      env=env)
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
        else:
            oss_endpoint = "out"
            our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                      crawler=crawler,
                                                      strategy=strategy,
                                                      our_uid=user_dict["our_uid"],
                                                      oss_endpoint=oss_endpoint,
                                                      env=env)
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
        if our_video_id is None:
            try:
                # Upload failed: delete the video folder.
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                return
            except FileNotFoundError:
                return
        # NOTE(review): the strategy literal below ("定向爬虫策略") differs from
        # the `strategy` variable above ("定向榜爬虫策略") — confirm intentional.
        insert_sql = f""" insert into crawler_video(video_id,
                                                    out_user_id,
                                                    platform,
                                                    strategy,
                                                    out_video_id,
                                                    video_title,
                                                    cover_url,
                                                    video_url,
                                                    duration,
                                                    publish_time,
                                                    play_cnt,
                                                    crawler_rule,
                                                    width,
                                                    height)
                                                    values({our_video_id},
                                                    "{video_dict['user_id']}",
                                                    "{cls.platform}",
                                                    "定向爬虫策略",
                                                    "{video_dict['video_id']}",
                                                    "{video_dict['video_title']}",
                                                    "{video_dict['cover_url']}",
                                                    "{video_dict['video_url']}",
                                                    {int(video_dict['duration'])},
                                                    "{video_dict['publish_time_str']}",
                                                    {int(video_dict['play_cnt'])},
                                                    '{json.dumps(rule_dict)}',
                                                    {int(video_dict['video_width'])},
                                                    {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
        Common.logging(log_type, crawler, env, '视频信息写入数据库成功')
        # Record the video in Feishu.
        Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
        # Video-id worksheet: write the data into the first row.
        upload_time = int(time.time())
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "用户主页",
                   video_dict['video_title'],
                   video_dict['video_id'],
                   our_video_link,
                   int(video_dict['duration']),
                   f"{video_dict['video_width']}*{video_dict['video_height']}",
                   video_dict['publish_time_str'],
                   video_dict['user_name'],
                   video_dict['user_id'],
                   video_dict['avatar_url'],
                   video_dict['cover_url'],
                   video_dict['article_url'],
                   video_dict['video_url']]]
        time.sleep(0.5)
        Feishu.update_values(log_type, crawler, "47e39d", "F2:Z2", values)
        Common.logger(log_type, crawler).info('视频下载/上传成功\n')
        Common.logging(log_type, crawler, env, '视频下载/上传成功\n')
  479. @classmethod
  480. def get_all_videos(cls, log_type, crawler, rule_dict, env):
  481. user_list = cls.get_users(log_type, crawler, "Bzv72P", env)
  482. if user_list is None or len(user_list) == 0:
  483. Common.logger(log_type, crawler).warning(f"抓取用户列表为空\n")
  484. Common.logging(log_type, crawler, env, f"抓取用户列表为空\n")
  485. return
  486. for user_dict in user_list:
  487. try:
  488. Common.logger(log_type, crawler).info(f'获取 {user_dict["user_name"]} 公众号视频\n')
  489. Common.logging(log_type, crawler, env, f'获取 {user_dict["user_name"]} 公众号视频\n')
  490. cls.get_videoList(log_type=log_type,
  491. crawler=crawler,
  492. rule_dict=rule_dict,
  493. user_dict=user_dict,
  494. env=env)
  495. Common.logger(log_type, crawler).info('休眠 60 秒\n')
  496. Common.logging(log_type, crawler, env, '休眠 60 秒\n')
  497. time.sleep(60)
  498. except Exception as e:
  499. Common.logger(log_type, crawler).info(f'抓取{user_dict["user_name"]}公众号时异常:{e}\n')
  500. Common.logging(log_type, crawler, env, f'抓取{user_dict["user_name"]}公众号时异常:{e}\n')
  501. if __name__ == "__main__":
  502. print(GongzhonghaoAuthor1.get_user_info("author", "gongzhonghao", "幸福花朵", "dev"))
  503. pass