haitunzhufu_recommend2.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/9/13
import json
import os
import random
import re
import shutil
import sys
import time
from hashlib import md5

from appium import webdriver
from appium.webdriver.extensions.android.nativekey import AndroidKey
from appium.webdriver.webdriver import WebDriver
from bs4 import BeautifulSoup
from selenium.common import NoSuchElementException
from selenium.webdriver.common.by import By

sys.path.append(os.getcwd())
from common.common import Common
from common.feishu import Feishu
from common.publish import Publish
from common.scheduling_db import MysqlHelper


class HTZFRecommend:
    platform = "海豚祝福"
    i = 0                # running counter of videos inspected in the current crawl
    element_list = []    # list items already seen on earlier pages, used for de-duplication

    @classmethod
    def today_download_cnt(cls, log_type, crawler, env):
        select_sql = """ SELECT COUNT(*) FROM crawler_video WHERE platform IN ("haitunzhufu", "海豚祝福") AND DATE(create_time) = CURDATE(); """
        today_download_cnt = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")[0]['COUNT(*)']
        return today_download_cnt
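
    # Note: MysqlHelper.get_values is assumed to return rows as a list of dicts
    # keyed by column name, e.g. [{'COUNT(*)': 12}] for the query above, which
    # is why the day's total is read from row 0. A minimal smoke test:
    #
    #   cnt = HTZFRecommend.today_download_cnt("recommend", "haitunzhufu", "dev")
    #   assert isinstance(cnt, int) and cnt >= 0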

    @classmethod
    def start_wechat(cls, log_type, crawler, videos_cnt, env):
        if env == "dev":
            chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v111/chromedriver"
        else:
            chromedriverExecutable = "/Users/piaoquan/Downloads/chromedriver"
        Common.logger(log_type, crawler).info("启动微信")
        caps = {
            "platformName": "Android",
            "platformVersion": "11",
            "deviceName": "Android",
            "appPackage": "com.tencent.mm",
            "appActivity": ".ui.LauncherUI",
            "noReset": True,
            "resetKeyboard": True,
            "unicodeKeyboard": True,
            "showChromedriverLog": True,
            "autoGrantPermissions": True,
            "printPageSourceOnFailure": True,
            "recreateChromeDriverSessions": True,
            "enableWebviewDetailsCollection": True,
            "newCommandTimeout": 6000,
            "automationName": "UiAutomator2",
            "chromedriverExecutable": chromedriverExecutable,
            "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
        }
        driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
        driver.implicitly_wait(20)
        # Wait for the WeChat home screen; dismiss the system pull-down menu if it pops up
        for i in range(120):
            try:
                if driver.find_element(By.ID, "com.tencent.mm:id/f2s"):
                    break
                elif driver.find_element(By.ID, "com.android.system:id/dismiss_view"):
                    Common.logger(log_type, crawler).info("发现并关闭系统下拉菜单栏")
                    driver.find_element(By.ID, "com.android.system:id/dismiss_view").click()
            except NoSuchElementException:
                pass
        Common.logger(log_type, crawler).info("下滑,展示小程序选择面板")
        size = driver.get_window_size()
        driver.swipe(int(size["width"] * 0.5), int(size["height"] * 0.2),
                     int(size["width"] * 0.5), int(size["height"] * 0.8), 200)
        time.sleep(3)
        Common.logger(log_type, crawler).info('打开小程序"海豚祝福"')
        driver.find_elements(By.XPATH, '//*[@text="海豚祝福"]')[-1].click()
        time.sleep(5)
        cls.get_videoList(log_type=log_type,
                          crawler=crawler,
                          driver=driver,
                          videos_cnt=videos_cnt,
                          env=env)
        time.sleep(1)
        driver.quit()
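
    # Runtime assumptions (not checked here): an Appium server listening on
    # http://localhost:4723/wd/hub, an Android 11 device or emulator with
    # WeChat (com.tencent.mm) installed, and a chromedriver binary at the
    # configured path matching the device's WebView version, e.g. started with:
    #
    #   appium --address 127.0.0.1 --port 4723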

    @classmethod
    def search_elements(cls, driver: WebDriver, xpath):
        time.sleep(1)
        windowHandles = driver.window_handles
        # Walk every window handle and return the first non-empty match;
        # implicitly returns None when the xpath is not found anywhere
        for handle in windowHandles:
            driver.switch_to.window(handle)
            time.sleep(1)
            try:
                elements = driver.find_elements(By.XPATH, xpath)
                if elements:
                    return elements
            except NoSuchElementException:
                pass
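
    # Usage sketch: callers treat a None result as "element gone / page destroyed",
    # e.g. in get_videoList below:
    #
    #   if cls.search_elements(driver, '//*[@class="list"]') is None:
    #       return  # the list page window has been destroyed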

    @classmethod
    def repeat_out_video_id(cls, log_type, crawler, out_video_id, env):
        sql = f""" select * from crawler_video where platform in ("众妙音信", "刚刚都传", "吉祥幸福", "知青天天看", "zhufuquanzi", "祝福圈子", "haitunzhufu", "海豚祝福") and out_video_id="{out_video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
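
    # De-duplication is keyed on out_video_id, which get_videoList derives as
    # md5(video_title), so a title already downloaded under any platform listed
    # in the query above is treated as a repeat.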

    @classmethod
    def get_video_url(cls, driver: WebDriver, video_title_element):
        # Click the title to open the detail page and read the playback URL from
        # the <video id="myVideo"> element; retry up to 3 times
        for i in range(3):
            cls.search_elements(driver, '//*[@class="list"]')
            video_title_element[0].click()
            time.sleep(5)
            video_url_elements = cls.search_elements(driver, '//*[@id="myVideo"]')
            if video_url_elements:
                return video_url_elements[0].get_attribute("src")
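
    # Callers must handle a None return (the player never rendered after three
    # attempts); get_videoList does so by pressing AndroidKey.BACK and moving on.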

    @classmethod
    def swipe_up(cls, driver: WebDriver):
        cls.search_elements(driver, '//*[@class="list"]')
        size = driver.get_window_size()
        driver.swipe(int(size["width"] * 0.5), int(size["height"] * 0.8),
                     int(size["width"] * 0.5), int(size["height"] * 0.55), 200)
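
    # The swipe runs up the screen's vertical centre line, from 80% down to 55%
    # of the window height, over 200 ms. On a 1080x2400 display, for example,
    # that is a drag from (540, 1920) to (540, 1320).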

    @classmethod
    def get_videoList(cls, log_type, crawler, driver: WebDriver, videos_cnt, env):
        driver.implicitly_wait(20)
        webviews = driver.contexts
        Common.logger(log_type, crawler).info(f"webviews:{webviews}")
        # Switch into the mini-program's WebView (assumes it is the second context)
        driver.switch_to.context(webviews[1])
        windowHandles = driver.window_handles
        for handle in windowHandles:
            driver.switch_to.window(handle)
            time.sleep(1)
            try:
                if cls.search_elements(driver, '//*[@class="bottom_scroll"]'):
                    Common.logger(log_type, crawler).info("切换到小程序")
                    break
            except NoSuchElementException:
                time.sleep(1)
        cls.search_elements(driver, '//*[@class="nav cur"]')[-1].click()
        Common.logger(log_type, crawler).info('点击"推荐"列表成功\n')
        # while True:
        for page in range(500):
            Common.logger(log_type, crawler).info(f"正在抓取第{page+1}页")
            if cls.search_elements(driver, '//*[@class="list"]') is None:
                Common.logger(log_type, crawler).info("列表页窗口已销毁\n")
                cls.element_list = []
                cls.i = 0
                return
            # Scroll once so fresh list items render, then parse the page source
            cls.swipe_up(driver)
            time.sleep(0.5)
            page_source = driver.page_source
            soup = BeautifulSoup(page_source, 'html.parser')
            video_list_elements = soup.findAll("wx-view", class_="img_bf")
            # Elements present in video_list_elements but not yet in cls.element_list
            video_list_elements = list(set(video_list_elements).difference(set(cls.element_list)))
            # Union of video_list_elements and cls.element_list, so items are not processed twice
            cls.element_list = list(set(video_list_elements) | set(cls.element_list))
            Common.logger(log_type, crawler).info(f"第{page+1}页共:{len(video_list_elements)}条视频\n")
            if len(video_list_elements) == 0:
                # Nothing new on screen: keep scrolling to load the next batch
                for i in range(10):
                    Common.logger(log_type, crawler).info(f"向上滑动第{i + 1}次")
                    cls.swipe_up(driver)
                    time.sleep(0.5)
                continue
            for i, video_element in enumerate(video_list_elements):
                try:
                    # Stop once today's download quota is reached
                    today_download = cls.today_download_cnt(log_type, crawler, env)
                    if today_download >= videos_cnt:
                        Common.logger(log_type, crawler).info(f"今日已下载视频数:{today_download}")
                        cls.element_list = []
                        cls.i = 0
                        return
                    cls.i += 1
                    Common.logger(log_type, crawler).info(f"第{cls.i}条视频")
                    video_title = video_element.find("wx-view", class_="title").text
                    play_str = video_element.find("wx-view", class_="wan").text
                    # e.g. "3.2万" -> 32000; otherwise strip non-digits and parse as int
                    if "万" in play_str:
                        play_cnt = int(float(re.sub(r"[^\d.]", "", play_str)) * 10000)
                    else:
                        play_cnt = int(re.sub(r"\D", "", play_str) or 0)
                    cover_url = video_element.find("wx-image", class_="img")["src"]
                    out_video_id = md5(video_title.encode('utf8')).hexdigest()
                    video_dict = {
                        "video_title": video_title,
                        "video_id": out_video_id,
                        "play_cnt_str": play_str,
                        "play_cnt": play_cnt,
                        "comment_cnt": 0,
                        "like_cnt": 0,
                        "share_cnt": 0,
                        "publish_time_stamp": int(time.time()),
                        "publish_time_str": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
                        "user_name": "haitunzhufu",
                        "user_id": "haitunzhufu",
                        "cover_url": cover_url,
                        "avatar_url": cover_url,
                        "session": f"haitunzhufu-{int(time.time())}",
                    }
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    if video_title is None or cover_url is None:
                        Common.logger(log_type, crawler).info("无效视频\n")
                        cls.swipe_up(driver)
                        time.sleep(1)
                    elif cls.repeat_out_video_id(log_type=log_type,
                                                 crawler=crawler,
                                                 out_video_id=out_video_id,
                                                 env=env) != 0:
                        Common.logger(log_type, crawler).info('视频已下载\n')
                        cls.swipe_up(driver)
                        time.sleep(1)
                    else:
                        video_title_element = cls.search_elements(driver, f'//*[contains(text(), "{video_title}")]')
                        if video_title_element is None:
                            Common.logger(log_type, crawler).warning(f"未找到该视频标题的element:{video_title_element}")
                            continue
                        Common.logger(log_type, crawler).info("点击标题,进入视频详情页")
                        video_url = cls.get_video_url(driver, video_title_element)
                        if video_url is None:
                            Common.logger(log_type, crawler).info("未获取到视频播放地址\n")
                            driver.press_keycode(AndroidKey.BACK)
                            time.sleep(3)
                            continue
                        video_dict["video_url"] = video_url
                        Common.logger(log_type, crawler).info(f"video_url:{video_url}\n")
                        cls.download_publish(log_type=log_type,
                                             crawler=crawler,
                                             video_dict=video_dict,
                                             env=env)
                        driver.press_keycode(AndroidKey.BACK)
                        time.sleep(3)
                except Exception as e:
                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
            Common.logger(log_type, crawler).info('已抓取完一组视频,休眠5秒\n')
            time.sleep(5)
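
    # Note on the set-based de-duplication above: bs4 Tag objects hash and
    # compare by their serialized markup, so the difference/union drops list
    # items whose HTML was already seen on an earlier pass. Two distinct videos
    # with byte-identical markup would therefore collide, which is assumed to
    # be acceptable here.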

    @classmethod
    def get_our_uid(cls, log_type, crawler, env):
        select_sql = f""" SELECT uid FROM crawler_user_v3 WHERE `source`="{crawler}"; """
        uids = MysqlHelper.get_values(log_type, crawler, select_sql, env, action="")
        uid_list = [uid_dict["uid"] for uid_dict in uids]
        return random.choice(uid_list)
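
    # Picks a random publishing account for this crawler source. random.choice
    # raises IndexError on an empty sequence, so crawler_user_v3 is expected to
    # hold at least one row where source = "haitunzhufu".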

    @classmethod
    def download_publish(cls, log_type, crawler, video_dict, env):
        # Download the video file
        Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'],
                               url=video_dict['video_url'])
        # Probe the downloaded file; a None result means size == 0, so clean up and bail out
        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
        if ffmpeg_dict is None:
            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
            return
        video_dict["duration"] = ffmpeg_dict["duration"]
        video_dict["video_width"] = ffmpeg_dict["width"]
        video_dict["video_height"] = ffmpeg_dict["height"]
        # Download the cover image
        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'],
                               url=video_dict['cover_url'])
        # Save the video metadata to a txt file
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
        # Upload the video
        Common.logger(log_type, crawler).info("开始上传视频...")
        our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                  crawler=crawler,
                                                  strategy="推荐榜爬虫策略",
                                                  our_uid=cls.get_our_uid(log_type, crawler, env),
                                                  env=env,
                                                  oss_endpoint="out")
        if our_video_id is None:
            # Upload failed: remove the local video folder and stop
            shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
            return
        if env == 'dev':
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        Common.logger(log_type, crawler).info("视频上传完成")
        # Save the video info to Feishu: insert a row at the top of the video-ID
        # worksheet, then write the data into it
        Feishu.insert_columns(log_type, crawler, "d51d20", "ROWS", 1, 2)
        upload_time = int(time.time())
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
                   "推荐榜爬虫策略",
                   video_dict["video_title"],
                   video_dict["video_id"],
                   video_dict["play_cnt"],
                   video_dict["duration"],
                   f'{video_dict["video_width"]}*{video_dict["video_height"]}',
                   our_video_link,
                   video_dict["cover_url"],
                   video_dict["video_url"]]]
        time.sleep(1)
        Feishu.update_values(log_type, crawler, "d51d20", "F2:V2", values)
        Common.logger(log_type, crawler).info("视频已保存至飞书文档\n")
        # Save the video info to the database
        rule_dict = {}
        insert_sql = f""" insert into crawler_video(video_id,
                                                    out_user_id,
                                                    platform,
                                                    strategy,
                                                    out_video_id,
                                                    video_title,
                                                    cover_url,
                                                    video_url,
                                                    duration,
                                                    publish_time,
                                                    play_cnt,
                                                    crawler_rule,
                                                    width,
                                                    height)
                          values({our_video_id},
                                 "{video_dict['user_id']}",
                                 "{cls.platform}",
                                 "推荐榜爬虫策略",
                                 "{video_dict['video_id']}",
                                 "{video_dict['video_title']}",
                                 "{video_dict['cover_url']}",
                                 "{video_dict['video_url']}",
                                 {int(video_dict['duration'])},
                                 "{video_dict['publish_time_str']}",
                                 {int(video_dict['play_cnt'])},
                                 '{json.dumps(rule_dict)}',
                                 {int(video_dict['video_width'])},
                                 {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action='')
        Common.logger(log_type, crawler).info('视频信息写入数据库成功!\n')
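
    # download_publish pipeline, in order: download video -> ffmpeg probe
    # (zero-size files are deleted) -> download cover -> save metadata txt ->
    # upload via Publish -> record the result in Feishu and in crawler_video.
    # The insert_sql above is built by string interpolation, so it relies on
    # titles and URLs not containing double quotes.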


if __name__ == "__main__":
    HTZFRecommend.start_wechat("recommend", "haitunzhufu", 5, "dev")
    # HTZFRecommend.today_download_cnt("recommend", "haitunzhufu", "dev")
    # HTZFRecommend.get_play_cnt()
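
# Smoke-test sketch: running this module directly (with the Appium/device setup
# described above and a dev database) crawls the "海豚祝福" recommend feed until
# today's download count reaches 5, then quits the driver.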