# gzh_new.py
# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2022/8/16
import os
import random
import sys
import time

import ffmpeg
import requests
import urllib3

sys.path.append(os.getcwd())
from main.common import Common
from main.feishu_lib import Feishu
from main.publish import Publish
  15. class GZH:
  16. # 翻页参数
  17. begin = 0
  18. # 每个用户抓取文章数量
  19. gzh_count = []
  20. # 获取已下载视频宽高、时长等信息
  21. @classmethod
  22. def get_video_info_from_local(cls, video_path):
  23. probe = ffmpeg.probe(video_path)
  24. # print('video_path: {}'.format(video_path))
  25. # format1 = probe['format']
  26. # bit_rate = int(format1['bit_rate']) / 1000
  27. # duration = format['duration']
  28. # size = int(format1['size']) / 1024 / 1024
  29. video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
  30. if video_stream is None:
  31. print('No video stream found!')
  32. return
  33. width = int(video_stream['width'])
  34. height = int(video_stream['height'])
  35. # num_frames = int(video_stream['nb_frames'])
  36. # fps = int(video_stream['r_frame_rate'].split('/')[0]) / int(video_stream['r_frame_rate'].split('/')[1])
  37. duration = float(video_stream['duration'])
  38. # print('width: {}'.format(width))
  39. # print('height: {}'.format(height))
  40. # print('num_frames: {}'.format(num_frames))
  41. # print('bit_rate: {}k'.format(bit_rate))
  42. # print('fps: {}'.format(fps))
  43. # print('size: {}MB'.format(size))
  44. # print('duration: {}'.format(duration))
  45. return width, height, duration
  46. # 获取 搜索词/token
  47. @classmethod
  48. def get_cookie_token(cls, log_type, text):
  49. try:
  50. sheet = Feishu.get_values_batch(log_type, "gzh", "pxHL2C")
  51. token = sheet[0][1]
  52. cookie = sheet[1][1]
  53. if text == "cookie":
  54. return cookie
  55. elif text == "token":
  56. return token
  57. except Exception as e:
  58. Common.logger(log_type).error("get_cookie_token:{}\n", e)
  59. # 获取视频下载链接
  60. @classmethod
  61. def get_url(cls, log_type, url):
  62. try:
  63. payload = {}
  64. headers = {
  65. 'Cookie': 'rewardsn=; wxtokenkey=777'
  66. }
  67. urllib3.disable_warnings()
  68. response = requests.get(url=url, headers=headers, data=payload, verify=False)
  69. # Common.logger(log_type).info('gzh_response:{}', response.text)
  70. response_list = response.text.splitlines()
  71. video_url_list = []
  72. for m in response_list:
  73. if "mpvideo.qpic.cn" in m:
  74. video_url = m.split("url: '")[1].split("',")[0].replace(r"\x26amp;", "&")
  75. video_url_list.append(video_url)
  76. # Common.logger(log_type).info('video_url_list:{}\n', video_url_list)
  77. if len(video_url_list) == 0:
  78. video_url = 0
  79. else:
  80. video_url = video_url_list[0]
  81. return video_url
  82. except Exception as e:
  83. Common.logger(log_type).error("get_url异常:{}\n", e)
  84. # 获取公众号文章信息,并写入文章列表
  85. @classmethod
  86. def get_gzh_url(cls, log_type, username, userid, head_url):
  87. while True:
  88. try:
  89. url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
  90. headers = {
  91. "accept": "*/*",
  92. "accept-encoding": "gzip, deflate, br",
  93. "accept-language": "zh-CN,zh;q=0.9",
  94. "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
  95. "t=media/appmsg_edit_v2&action=edit&isNew=1"
  96. "&type=77&createType=5&token="+str(cls.get_cookie_token(log_type, "token"))+"&lang=zh_CN",
  97. 'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
  98. "sec-ch-ua-mobile": "?0",
  99. "sec-ch-ua-platform": '"Windows"',
  100. "sec-fetch-dest": "empty",
  101. "sec-fetch-mode": "cors",
  102. "sec-fetch-site": "same-origin",
  103. "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
  104. " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
  105. "x-requested-with": "XMLHttpRequest",
  106. 'cookie': cls.get_cookie_token(log_type, "cookie"),
  107. }
  108. params = {
  109. "action": "list_ex",
  110. "begin": str(cls.begin),
  111. "count": "5",
  112. "fakeid": userid,
  113. "type": "9",
  114. "query": "",
  115. "token": str(cls.get_cookie_token(log_type, "token")),
  116. "lang": "zh_CN",
  117. "f": "json",
  118. "ajax": "1",
  119. }
  120. urllib3.disable_warnings()
  121. r = requests.get(url=url, headers=headers, params=params, verify=False)
  122. cls.begin += 5
  123. if 'app_msg_list' not in r.json() or len(r.json()['app_msg_list']) == 0:
  124. Common.logger(log_type).warning("get_gzh_url:response:{}\n", r.text)
  125. break
  126. else:
  127. app_msg_list = r.json()['app_msg_list']
  128. for gzh_url in app_msg_list:
  129. # print(gzh_url)
  130. # title
  131. if 'title' in gzh_url:
  132. title = gzh_url['title']
  133. else:
  134. title = 0
  135. # aid
  136. if 'aid' in gzh_url:
  137. aid = gzh_url['aid']
  138. else:
  139. aid = 0
  140. # create_time
  141. if 'create_time' in gzh_url:
  142. create_time = gzh_url['create_time']
  143. else:
  144. create_time = 0
  145. # duration
  146. if 'duration' in gzh_url:
  147. duration = gzh_url['duration']
  148. else:
  149. duration = 0
  150. # cover_url
  151. if 'cover' in gzh_url:
  152. cover_url = gzh_url['cover']
  153. else:
  154. cover_url = 0
  155. # gzh_url
  156. if 'link' in gzh_url:
  157. gzh_url = gzh_url['link']
  158. else:
  159. gzh_url = 0
  160. play_cnt = 0
  161. like_cnt = 0
  162. if cls.get_url(log_type, gzh_url) == 0:
  163. video_url = 0
  164. else:
  165. video_url = cls.get_url(log_type, gzh_url)
  166. Common.logger(log_type).info("title:{}", title)
  167. Common.logger(log_type).info("aid:{}", aid)
  168. Common.logger(log_type).info("create_time:{}", create_time)
  169. Common.logger(log_type).info("duration:{}", duration)
  170. Common.logger(log_type).info("cover_url:{}", cover_url)
  171. Common.logger(log_type).info("gzh_url:{}", gzh_url)
  172. Common.logger(log_type).info("video_url:{}", video_url)
  173. # 判断无效文章
  174. if gzh_url == 0 or video_url == 0:
  175. Common.logger(log_type).info("文章无视频 / 视频地址解析失败\n")
  176. elif int(time.time()) - int(create_time) > 3600*24*3:
  177. Common.logger(log_type).info(
  178. "发布时间{}超过 3 天\n", time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(create_time)))
  179. return
  180. # 时长判断
  181. elif int(duration) < 60:
  182. Common.logger(log_type).info("时长:{}<60秒\n", duration)
  183. # 已下载表去重
  184. elif str(aid) in [x for y in Feishu.get_values_batch(log_type, "gzh", "fCs3BT") for x in y]:
  185. Common.logger(log_type).info("文章已下载\n")
  186. # 文章去重
  187. elif str(aid) in [x for y in Feishu.get_values_batch(log_type, "gzh", "P6GKb3") for x in y]:
  188. Common.logger(log_type).info("文章已存在\n")
  189. else:
  190. # 已抓取文章列表添加当前文章ID
  191. cls.gzh_count.append(aid)
  192. # 公众号文章表插入行
  193. upload_time = time.time()
  194. Feishu.insert_columns(log_type, 'gzh', 'P6GKb3', 'ROWS', 1, 2)
  195. # 抓取到的文章写入飞书表
  196. values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(upload_time)),
  197. '公众号',
  198. title,
  199. str(aid),
  200. play_cnt,
  201. like_cnt,
  202. duration,
  203. "宽*高",
  204. time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(create_time)),
  205. username,
  206. userid,
  207. head_url,
  208. cover_url,
  209. gzh_url,
  210. video_url]]
  211. time.sleep(1)
  212. Feishu.update_values(log_type, 'gzh', 'P6GKb3', 'F2:W2', values)
  213. Common.logger(log_type).info("文章写入文档成功\n")
  214. if len(cls.gzh_count) >= 1:
  215. Common.logger(log_type).info("当前用户已抓取:{}条数据\n", len(cls.gzh_count))
  216. cls.gzh_count = []
  217. return
  218. time.sleep(10)
  219. except Exception as e:
  220. Common.logger(log_type).error("get_gzh_url异常:{}\n", e)
    # Download one pending article's cover + video and publish it.
    @classmethod
    def download_publish(cls, log_type, env):
        """Process at most ONE data row of sheet "P6GKb3" per call.

        Every branch inside the loop ends in `return`, so only the first
        data row (index 1) is ever handled; the processed (or invalid)
        row is deleted from the sheet so the next call sees a new row.
        """
        try:
            gzh_sheet = Feishu.get_values_batch(log_type, 'gzh', 'P6GKb3')
            for i in range(1, len(gzh_sheet)):
                # Sanitize the title for use as a directory name.
                # NOTE(review): '“' is replaced twice below — the second
                # .replace('“', '…') can never match anything; presumably
                # '”' was intended — confirm with the author.
                download_title = gzh_sheet[i][7].strip().replace('"', '')\
                    .replace('“', '').replace('“', '…').replace("\n", "") \
                    .replace("/", "").replace("\r", "").replace("#", "") \
                    .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
                    .replace(":", "").replace("*", "").replace("?", "") \
                    .replace("?", "").replace('"', "").replace("<", "") \
                    .replace(">", "").replace("|", "").replace(" ", "")
                download_vid = gzh_sheet[i][8]
                download_play_cnt = gzh_sheet[i][9]
                download_like_cnt = gzh_sheet[i][10]
                download_duration = gzh_sheet[i][11]
                download_send_time = gzh_sheet[i][13]
                download_username = gzh_sheet[i][14]
                download_userid = gzh_sheet[i][15]
                download_head_url = gzh_sheet[i][16]
                download_cover_url = gzh_sheet[i][17]
                download_video_url = gzh_sheet[i][19]
                # Comment/share counts are not collected by this crawler.
                download_video_comment_cnt = 0
                download_video_share_cnt = 0
                Common.logger(log_type).info("download_title:{}", download_title)
                Common.logger(log_type).info("download_send_time:{}", download_send_time)
                Common.logger(log_type).info("download_username:{}", download_username)
                Common.logger(log_type).info("download_video_url:{}", download_video_url)
                # Empty row: delete it and stop.
                if download_video_url is None or download_title is None:
                    Feishu.dimension_range(log_type, 'gzh', 'P6GKb3', 'ROWS', i+1, i+1)
                    Common.logger(log_type).info("空行,删除成功\n")
                    return
                # Already downloaded (matched by video id in sheet "fCs3BT").
                elif str(download_vid) in [x for y in Feishu.get_values_batch(log_type, 'gzh', 'fCs3BT') for x in y]:
                    Feishu.dimension_range(log_type, 'gzh', 'P6GKb3', 'ROWS', i + 1, i + 1)
                    Common.logger(log_type).info("视频已下载\n")
                    return
                # Already downloaded (matched by title).
                elif str(download_title) in [x for y in Feishu.get_values_batch(log_type, 'gzh', 'fCs3BT') for x in y]:
                    Feishu.dimension_range(log_type, 'gzh', 'P6GKb3', 'ROWS', i + 1, i + 1)
                    Common.logger(log_type).info("视频已下载\n")
                    return
                else:
                    # Download the cover image.
                    Common.download_method(log_type=log_type, text="cover",
                                           d_name=str(download_title), d_url=str(download_cover_url))
                    # Download the video file.
                    Common.download_method(log_type=log_type, text="video",
                                           d_name=str(download_title), d_url=str(download_video_url))
                    # Probe the downloaded file for its real width/height.
                    video_info = cls.get_video_info_from_local("./videos/" + download_title + "/video.mp4")
                    download_video_resolution = str(video_info[0]) + "*" + str(video_info[1])
                    # Persist metadata to "./videos/{title}/info.txt" for the publisher.
                    with open("./videos/" + download_title
                              + "/" + "info.txt", "a", encoding="UTF-8") as f_a:
                        f_a.write(str(download_vid) + "\n" +
                                  str(download_title) + "\n" +
                                  str(int(download_duration)) + "\n" +
                                  str(download_play_cnt) + "\n" +
                                  str(download_video_comment_cnt) + "\n" +
                                  str(download_like_cnt) + "\n" +
                                  str(download_video_share_cnt) + "\n" +
                                  str(download_video_resolution) + "\n" +
                                  str(int(time.mktime(
                                      time.strptime(download_send_time, "%Y/%m/%d %H:%M:%S")))) + "\n" +
                                  str(download_username) + "\n" +
                                  str(download_head_url) + "\n" +
                                  str(download_video_url) + "\n" +
                                  str(download_cover_url) + "\n" +
                                  "gongzhonghao\n")
                    Common.logger(log_type).info("==========视频信息已保存至info.txt==========")
                    # Upload the video.
                    Common.logger(log_type).info("开始上传视频:{}".format(download_title))
                    our_video_id = Publish.upload_and_publish(log_type, env, "play")
                    our_video_link = "https://admin.piaoquantv.com/cms/post-detail/" + str(our_video_id) + "/info"
                    Common.logger(log_type).info("视频上传完成:{}", download_title)
                    # Record the published video id in the downloaded-videos sheet.
                    Common.logger(log_type).info("保存视频ID至云文档:{}", download_title)
                    # Insert a fresh first row in sheet "fCs3BT" …
                    Feishu.insert_columns(log_type, "gzh", "fCs3BT", "ROWS", 1, 2)
                    # … then fill it in.
                    upload_time = int(time.time())
                    values = [[time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(upload_time)),
                               "公众号",
                               str(download_title),
                               str(download_vid),
                               our_video_link,
                               download_play_cnt,
                               download_like_cnt,
                               download_duration,
                               str(download_video_resolution),
                               str(download_send_time),
                               str(download_username),
                               str(download_userid),
                               str(download_head_url),
                               str(download_cover_url),
                               str(download_video_url)]]
                    time.sleep(1)
                    Feishu.update_values(log_type, "gzh", "fCs3BT", "D2:W2", values)
                    # Remove the processed row from the pending sheet.
                    Feishu.dimension_range(log_type, "gzh", "P6GKb3", "ROWS", i + 1, i + 1)
                    Common.logger(log_type).info("视频:{},下载/上传成功\n", download_title)
                    return
        except Exception as e:
            Common.logger(log_type).error("download_publish异常:{}\n", e)
            # On failure, drop the top data row so the queue cannot jam.
            Feishu.dimension_range(log_type, "gzh", "P6GKb3", "ROWS", 2, 2)
  336. # 执行下载/上传
  337. @classmethod
  338. def run_download_publish(cls, log_type, env):
  339. try:
  340. while True:
  341. time.sleep(1)
  342. if len(Feishu.get_values_batch(log_type, 'gzh', 'P6GKb3')) == 1:
  343. Common.logger(log_type).info("下载/上传完成\n")
  344. break
  345. else:
  346. cls.download_publish(log_type, env)
  347. except Exception as e:
  348. Common.logger(log_type).error("run_download_publish异常:{}\n", e)
  349. # 根据关键字搜索 UP 主信息,并写入电影票(勿动)
  350. @classmethod
  351. def search_user_by_word(cls, log_type, env):
  352. try:
  353. sheet = Feishu.get_values_batch(log_type, "gzh", "pxHL2C")
  354. for i in range(3, len(sheet)):
  355. word = sheet[i][0]
  356. index = sheet[i][1]
  357. url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
  358. headers = {
  359. "accept": "*/*",
  360. "accept-encoding": "gzip, deflate, br",
  361. "accept-language": "zh-CN,zh;q=0.9",
  362. "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
  363. "t=media/appmsg_edit_v2&action=edit&isNew=1"
  364. "&type=77&createType=5&token=1011071554&lang=zh_CN",
  365. 'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
  366. "sec-ch-ua-mobile": "?0",
  367. "sec-ch-ua-platform": '"Windows"',
  368. "sec-fetch-dest": "empty",
  369. "sec-fetch-mode": "cors",
  370. "sec-fetch-site": "same-origin",
  371. "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
  372. " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
  373. "x-requested-with": "XMLHttpRequest",
  374. 'cookie': cls.get_cookie_token(log_type, "cookie"),
  375. }
  376. params = {
  377. "action": "search_biz",
  378. "begin": "0",
  379. "count": "5",
  380. "query": word,
  381. "token": cls.get_cookie_token(log_type, "token"),
  382. "lang": "zh_CN",
  383. "f": "json",
  384. "ajax": "1",
  385. }
  386. urllib3.disable_warnings()
  387. r = requests.get(url=url, headers=headers, params=params, verify=False)
  388. if "list" not in r.json() or len(r.json()["list"]) == 0:
  389. Common.logger(log_type).warning("search_user_by_word,随机休眠 3-15 分钟:{}\n", r.text)
  390. time.sleep(random.randint(60 * 3, 60 * 15))
  391. else:
  392. fakeid = r.json()["list"][int(index)-1]["fakeid"]
  393. head_url = r.json()["list"][int(index)-1]["round_head_img"]
  394. time.sleep(0.5)
  395. Feishu.update_values(log_type, 'gzh', 'pxHL2C', 'C'+str(i+1)+':C'+str(i+1), [[fakeid]])
  396. Common.logger(log_type).info("{}的fakeid写入飞书成功成功", word)
  397. time.sleep(0.5)
  398. Feishu.update_values(log_type, 'gzh', 'pxHL2C', 'D'+str(i+1)+':D'+str(i+1), [[head_url]])
  399. Common.logger(log_type).info("{}的头像写入飞书成功\n", word)
  400. cls.get_gzh_url(log_type, word, fakeid, head_url)
  401. Common.logger(log_type).info("下载/上传 {} 公众号视频\n", word)
  402. cls.run_download_publish(log_type, env)
  403. Common.logger(log_type).info('{}视频抓取完成,随机休眠 3-15 分钟\n', word)
  404. time.sleep(random.randint(60*3, 60*15))
  405. Common.logger(log_type).info("获取所有用户视频完成\n")
  406. except Exception as e:
  407. Common.logger(log_type).error("search_user_by_word异常:{}\n", e)
if __name__ == "__main__":
    # Manual smoke test: crawl one hard-coded account.
    # GZH.search_user_by_word("gzh")
    # GZH.get_all_gzh('gzh')
    # GZH.download_publish('gzh', 'dev')
    # print(GZH.get_cookie_token('gzh', 'token'))
    GZH.get_gzh_url('gzh', '何静同学', 'MzkyODMzODQ2Mg==', 'http://mmbiz.qpic.cn/mmbiz_png/go7km0I9Dg3NTxRdMs8MIC6DricCibEdH3OVnEFLmspaVB67iaLdje4lCHFsdjqdXpelf5EicPwHfLWibHWCg5R5urg/0?wx_fmt=png')