# xiaoniangao_plus_scheduling.py
# -*- coding: utf-8 -*-
# @Author: luojunhui
# @Time: 2023/9/27
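"""Appium-based crawler for the "小年糕+" WeChat mini program recommend feed.

Drives WeChat on an Android device, opens the mini program, scrapes the
rendered WebView with BeautifulSoup, and pushes qualifying videos to an MQ
topic for the ETL pipeline.
"""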
import json
import os
import sys
import time
import random
from hashlib import md5

from appium import webdriver
from appium.webdriver.extensions.android.nativekey import AndroidKey
from appium.webdriver.webdriver import WebDriver
# from appium.webdriver.common.touch_action import TouchAction
from bs4 import BeautifulSoup
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By

sys.path.append(os.getcwd())
from common.common import Common
from common.mq import MQ
from common.public import download_rule, get_config_from_mysql
from common.scheduling_db import MysqlHelper


class XiaoNianGaoPlusRecommend:
    platform = "小年糕"
    download_cnt = 0
    element_list = []
    i = 0
    swipe_count = 0
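    # Note: these are class-level (shared) counters; get_videoList resets
    # i, download_cnt and element_list when a crawl round finishes or the
    # mini program window is destroyed, swipe_count only grows per process.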

    @classmethod
    def start_wechat(cls, log_type, crawler, env, rule_dict, our_uid):
        """Launch WeChat via Appium, open the "小年糕+" mini program, then crawl."""
        # dev and prod currently share the same chromedriver path
        if env == "dev":
            chromedriverExecutable = "/Users/a123456/Downloads/chromedriver_v111/chromedriver"
        else:
            chromedriverExecutable = "/Users/a123456/Downloads/chromedriver_v111/chromedriver"
        Common.logger(log_type, crawler).info("启动微信")
        Common.logging(log_type, crawler, env, '启动微信')
        # WeChat session capabilities
        caps = {
            "platformName": "Android",
            "deviceName": "Android",
            "platformVersion": "13",
            # "udid": "emulator-5554",
            "appPackage": "com.tencent.mm",
            "appActivity": ".ui.LauncherUI",
            "autoGrantPermissions": True,
            "noReset": True,
            "resetKeyboard": True,
            "unicodeKeyboard": True,
            "showChromedriverLog": True,
            "printPageSourceOnFailure": True,
            "recreateChromeDriverSessions": True,
            "enableWebviewDetailsCollection": True,
            "setWebContentsDebuggingEnabled": True,
            "newCommandTimeout": 6000,
            "automationName": "UiAutomator2",
            "chromedriverExecutable": chromedriverExecutable,
            "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
        }
        driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
        driver.implicitly_wait(30)
        # action = TouchAction(driver)
        for i in range(120):
            try:
                if driver.find_elements(By.ID, "com.tencent.mm:id/f2s"):
                    Common.logger(log_type, crawler).info("微信启动成功")
                    Common.logging(log_type, crawler, env, '微信启动成功')
                    break
                elif driver.find_elements(By.ID, "com.android.systemui:id/dismiss_view"):
                    Common.logger(log_type, crawler).info("发现并关闭系统下拉菜单")
                    Common.logging(log_type, crawler, env, '发现并关闭系统下拉菜单')
                    driver.find_element(By.ID, "com.android.systemui:id/dismiss_view").click()
                else:
                    pass
            except NoSuchElementException:
                time.sleep(1)
        Common.logger(log_type, crawler).info("下滑,展示小程序选择面板")
        # Common.logging(log_type, crawler, env, '下滑,展示小程序选择面板')
        size = driver.get_window_size()
        driver.swipe(int(size['width'] * 0.5), int(size['height'] * 0.2),
                     int(size['width'] * 0.5), int(size['height'] * 0.8), 200)
        time.sleep(1)
        Common.logger(log_type, crawler).info('打开小程序"小年糕+"')
        # Common.logging(log_type, crawler, env, '打开小程序"小年糕+"')
        driver.find_elements(By.XPATH, '//*[@text="小年糕+"]')[-1].click()
        time.sleep(5)
        cls.get_videoList(log_type, crawler, driver, env, rule_dict, our_uid)
        time.sleep(1)
        driver.quit()

    @classmethod
    def search_elements(cls, driver: WebDriver, xpath):
        """Look for `xpath` in every window handle; return the elements or None."""
        time.sleep(1)
        windowHandles = driver.window_handles
        for handle in windowHandles:
            driver.switch_to.window(handle)
            time.sleep(1)
            try:
                elements = driver.find_elements(By.XPATH, xpath)
                if elements:
                    return elements
            except NoSuchElementException:
                pass
        return None

    @classmethod
    def check_to_applet(cls, log_type, crawler, env, driver: WebDriver, xpath):
        """Switch into the mini program's WebView context and wait until `xpath` appears."""
        time.sleep(1)
        webViews = driver.contexts
        driver.switch_to.context(webViews[1])
        windowHandles = driver.window_handles
        for handle in windowHandles:
            driver.switch_to.window(handle)
            time.sleep(1)
            try:
                driver.find_element(By.XPATH, xpath)
                Common.logger(log_type, crawler).info("切换到WebView成功\n")
                Common.logging(log_type, crawler, env, '切换到WebView成功\n')
                return
            except NoSuchElementException:
                time.sleep(1)
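
    # Note: `driver.contexts` on an Appium session typically looks like
    # ['NATIVE_APP', 'WEBVIEW_com.tencent.mm:appbrand0', ...]; index 1 is
    # assumed here to be the mini program's WebView. A more defensive sketch
    # would pick the context by name, e.g.:
    #     webview = next(c for c in driver.contexts if "WEBVIEW" in c)
    #     driver.switch_to.context(webview)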

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        """Return how many rows already exist for this out_video_id (0 means new)."""
        sql = f""" select * from crawler_video where platform in ("众妙音信", "刚刚都传", "吉祥幸福", "知青天天看", "zhufuquanzi", "祝福圈子", "haitunzhufu", "海豚祝福", "小年糕") and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
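
    # Interpolating `video_id` into the SQL string is injection-prone. Here
    # the id is an md5 hex digest produced by this crawler, so the practical
    # risk is low, but if MysqlHelper supports parameter binding (an
    # assumption -- check its API), a safer sketch would be:
    #     sql = "select * from crawler_video where ... and out_video_id=%s;"
    #     MysqlHelper.get_values(log_type, crawler, sql, env, params=(video_id,))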

    @classmethod
    def swipe_up(cls, driver: WebDriver):
        """Swipe up roughly half a screen to load more of the feed."""
        cls.search_elements(driver, '//*[@class="list-list--list"]')
        size = driver.get_window_size()
        driver.swipe(int(size["width"] * 0.5), int(size["height"] * 0.8),
                     int(size["width"] * 0.5), int(size["height"] * 0.4), 200)
        cls.swipe_count += 1

    @classmethod
    def get_video_url(cls, log_type, crawler, driver: WebDriver, video_title_element, env):
        """Open the video detail page and return the wx-video src, or None after 3 tries."""
        for i in range(3):
            cls.search_elements(driver, '//*[@class="list-list--list"]')
            Common.logger(log_type, crawler).info(f"video_title_element:{video_title_element[0]}")
            time.sleep(1)
            Common.logger(log_type, crawler).info("滑动标题至可见状态")
            driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'});",
                                  video_title_element[0])
            time.sleep(3)
            Common.logger(log_type, crawler).info("点击标题")
            video_title_element[0].click()
            cls.check_to_applet(log_type=log_type, crawler=crawler, driver=driver, env=env,
                                xpath=r'//wx-video[@class="dynamic-index--video-item dynamic-index--video"]')
            Common.logger(log_type, crawler).info("点击标题完成")
            time.sleep(10)
            video_url_elements = cls.search_elements(
                driver, '//wx-video[@class="dynamic-index--video-item dynamic-index--video"]')
            if video_url_elements:
                return video_url_elements[0].get_attribute("src")
        return None

    @classmethod
    def get_videoList(cls, log_type, crawler, driver: WebDriver, env, rule_dict, our_uid):
        """Scroll the recommend feed, parse each video card, and enqueue qualifying videos."""
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        driver.implicitly_wait(20)
        # Switch into the web_view
        cls.check_to_applet(log_type=log_type, crawler=crawler, env=env, driver=driver,
                            xpath='//*[@class="tab-bar--tab tab-bar--tab-selected"]')
        time.sleep(1)
        page = 0
        while True:
            if cls.search_elements(driver, '//*[@class="list-list--list"]') is None:
                Common.logger(log_type, crawler).info("窗口已销毁\n")
                Common.logging(log_type, crawler, env, '窗口已销毁\n')
                cls.i = 0
                cls.download_cnt = 0
                cls.element_list = []
                return
            cls.swipe_up(driver)
            page_source = driver.page_source
            soup = BeautifulSoup(page_source, 'html.parser')
            video_list_elements = soup.findAll("wx-view", class_="expose--adapt-parent")
            # Keep only the elements not seen in previous rounds
            video_list_elements = list(set(video_list_elements).difference(set(cls.element_list)))
            # Remember everything seen so far (union with cls.element_list)
            cls.element_list = list(set(video_list_elements) | set(cls.element_list))
            Common.logger(log_type, crawler).info(f"正在抓取第{page + 1}页,共:{len(video_list_elements)}条视频")
            Common.logging(log_type, crawler, env, f"正在抓取第{page + 1}页,共:{len(video_list_elements)}条视频")
            if len(video_list_elements) == 0:
                # Nothing new on screen: swipe a few times to force fresh cards
                for i in range(10):
                    Common.logger(log_type, crawler).info(f"向上滑动第{i + 1}次")
                    cls.swipe_up(driver)
                    time.sleep(0.5)
                continue
            for i, video_element in enumerate(video_list_elements):
                try:
                    Common.logger(log_type, crawler).info(f"本轮已抓取{cls.download_cnt}条视频\n")
                    Common.logging(log_type, crawler, env, f"本轮已抓取{cls.download_cnt}条视频\n")
                    if cls.download_cnt >= int(rule_dict.get("videos_cnt", {}).get("min", 10)):
                        cls.i = 0
                        cls.download_cnt = 0
                        cls.element_list = []
                        return
                    cls.i += 1
                    Common.logger(log_type, crawler).info(f"第{cls.i}条视频")
                    Common.logging(log_type, crawler, env, f"第{cls.i}条视频")
                    # Title
                    video_title = video_element.find("wx-view", class_="dynamic--title").text
                    # Play-count string, e.g. "1000+次播放"
                    play_str = video_element.find("wx-view", class_="dynamic--views").text
                    info_list = video_element.findAll("wx-view", class_="dynamic--commerce-btn-text")
                    # Like count
                    like_str = info_list[1].text
                    # Comment count
                    comment_str = info_list[2].text
                    # Video duration, "mm:ss"
                    duration_str = video_element.find("wx-view", class_="dynamic--duration").text
                    user_name = video_element.find("wx-view", class_="dynamic--nick-top").text
                    # Avatar URL
                    avatar_url = video_element.find("wx-image", class_="avatar--avatar")["src"]
                    # Cover URL
                    cover_url = video_element.find("wx-image", class_="dynamic--bg-image")["src"]
                    play_cnt = int(play_str.replace("+", "").replace("次播放", ""))
                    duration = int(duration_str.split(":")[0].strip()) * 60 + int(duration_str.split(":")[-1].strip())
                    # "点赞"/"评论" placeholder text means the counter is still zero;
                    # "万" marks ten-thousands (may carry a decimal, e.g. "1.2万")
                    if "点赞" in like_str:
                        like_cnt = 0
                    elif "万" in like_str:
                        like_cnt = int(float(like_str.split("万")[0]) * 10000)
                    else:
                        like_cnt = int(like_str)
                    if "评论" in comment_str:
                        comment_cnt = 0
                    elif "万" in comment_str:
                        comment_cnt = int(float(comment_str.split("万")[0]) * 10000)
                    else:
                        comment_cnt = int(comment_str)
                    # The feed exposes no stable ids, so hash title/nickname as stand-ins
                    out_video_id = md5(video_title.encode('utf8')).hexdigest()
                    out_user_id = md5(user_name.encode('utf8')).hexdigest()
                    video_dict = {
                        "video_title": video_title,
                        "video_id": out_video_id,
                        "duration_str": duration_str,
                        "duration": duration,
                        "play_str": play_str,
                        "play_cnt": play_cnt,
                        "like_str": like_str,
                        "like_cnt": like_cnt,
                        "comment_cnt": comment_cnt,
                        "share_cnt": 0,
                        "user_name": user_name,
                        "user_id": out_user_id,
                        'publish_time_stamp': int(time.time()),
                        'publish_time_str': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
                        "avatar_url": avatar_url,
                        "cover_url": cover_url,
                        "session": f"xiaoniangao-{int(time.time())}"
                    }
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
                    if video_title is None or cover_url is None:
                        Common.logger(log_type, crawler).info("无效视频\n")
                        Common.logging(log_type, crawler, env, '无效视频\n')
                        cls.swipe_up(driver)
                        time.sleep(0.5)
                    elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict,
                                       rule_dict=rule_dict) is False:
                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
                        Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                        cls.swipe_up(driver)
                        time.sleep(0.5)
                    elif any(str(word) in video_dict["video_title"]
                             for word in get_config_from_mysql(log_type=log_type,
                                                               source=crawler,
                                                               env=env,
                                                               text="filter",
                                                               action="")):
                        Common.logger(log_type, crawler).info('已中过滤词\n')
                        Common.logging(log_type, crawler, env, '已中过滤词\n')
                        cls.swipe_up(driver)
                        time.sleep(0.5)
                    elif cls.repeat_video(log_type, crawler, out_video_id, env) != 0:
                        Common.logger(log_type, crawler).info('视频已下载\n')
                        Common.logging(log_type, crawler, env, '视频已下载\n')
                        cls.swipe_up(driver)
                        time.sleep(5)
                    else:
                        video_title_element = cls.search_elements(driver, f'//*[contains(text(), "{video_title}")]')
                        if video_title_element is None:
                            Common.logger(log_type, crawler).warning(f"未找到该视频标题的element:{video_title_element}")
                            Common.logging(log_type, crawler, env, f"未找到该视频标题的element:{video_title_element}")
                            continue
                        Common.logger(log_type, crawler).info("点击标题,进入视频详情页")
                        Common.logging(log_type, crawler, env, "点击标题,进入视频详情页")
                        video_url = cls.get_video_url(log_type, crawler, driver, video_title_element, env=env)
                        if video_url is None:
                            Common.logger(log_type, crawler).info("未获取到视频播放地址\n")
                            driver.press_keycode(AndroidKey.BACK)
                            time.sleep(5)
                            continue
                        video_dict['video_url'] = video_url
                        Common.logger(log_type, crawler).info(f"video_url:{video_url}")
                        video_dict["platform"] = crawler
                        video_dict["strategy"] = log_type
                        video_dict["out_video_id"] = video_dict["video_id"]
                        video_dict["crawler_rule"] = json.dumps(rule_dict)
                        video_dict["user_id"] = our_uid
                        video_dict["publish_time"] = video_dict["publish_time_str"]
                        mq.send_msg(video_dict)
                        cls.download_cnt += 1
                        driver.press_keycode(AndroidKey.BACK)
                        time.sleep(5)
                        cls.swipe_up(driver)
                        if cls.swipe_count > 200:
                            Common.logger(log_type, crawler).info("一共滑动超过200次")
                            return
                except Exception as e:
                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                    Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
            Common.logger(log_type, crawler).info("已抓取完一组,休眠 5 秒\n")
            Common.logging(log_type, crawler, env, "已抓取完一组,休眠 5 秒\n")
            time.sleep(5)
            page += 1


def scroll_down(action, driver: WebDriver):
    """Swipe from the upper quarter to the lower quarter of the screen to show the next video.

    Relies on the deprecated TouchAction API (its import is commented out
    above); kept for reference but currently unused.
    """
    time.sleep(1)
    width = driver.get_window_size()['width']    # screen width
    height = driver.get_window_size()['height']  # screen height
    action.press(x=int(0.5 * width), y=int(0.25 * height))
    action.wait(ms=random.randint(200, 400))
    action.move_to(x=int(0.5 * width), y=int(0.75 * height))
    action.release()
    action.perform()
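

# TouchAction was removed from recent Appium Python clients; the W3C Actions
# API is the supported replacement. Below is a minimal sketch of the same
# gesture, assuming a Selenium 4.x client -- a hypothetical helper that is not
# wired into the crawler.
def scroll_down_w3c(driver: WebDriver):
    """Same gesture as scroll_down, expressed with W3C pointer actions."""
    from selenium.webdriver.common.action_chains import ActionChains
    from selenium.webdriver.common.actions import interaction
    from selenium.webdriver.common.actions.action_builder import ActionBuilder
    from selenium.webdriver.common.actions.pointer_input import PointerInput

    size = driver.get_window_size()
    actions = ActionChains(driver)
    # Use a touch pointer instead of the default mouse pointer
    actions.w3c_actions = ActionBuilder(driver, mouse=PointerInput(interaction.POINTER_TOUCH, "touch"))
    actions.w3c_actions.pointer_action.move_to_location(int(0.5 * size['width']), int(0.25 * size['height']))
    actions.w3c_actions.pointer_action.pointer_down()
    actions.w3c_actions.pointer_action.pause(random.uniform(0.2, 0.4))
    actions.w3c_actions.pointer_action.move_to_location(int(0.5 * size['width']), int(0.75 * size['height']))
    actions.w3c_actions.pointer_action.release()
    actions.perform()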


if __name__ == "__main__":
    rule_dict1 = {"period": {"min": 365, "max": 365},
                  "duration": {"min": 30, "max": 1800},
                  "favorite_cnt": {"min": 999999, "max": 0},
                  "videos_cnt": {"min": 100, "max": 0},
                  "share_cnt": {"min": 999999, "max": 0}}
    XiaoNianGaoPlusRecommend.start_wechat("recommend", "xiaoniangao", "dev", rule_dict1, 6267141)
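
# Running this entry point assumes an Appium server listening on
# http://localhost:4723/wd/hub and an Android 13 device or emulator with
# WeChat installed (see the capabilities in start_wechat). How thresholds such
# as {"min": 999999, "max": 0} are interpreted is up to
# common.public.download_rule, which receives rule_dict unchanged.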