# -*- coding: utf-8 -*-
# @Time: 2023/10/31
import json
import os
from datetime import date, timedelta
import requests
import sys
import time
from hashlib import md5
from appium import webdriver
from appium.webdriver.extensions.android.nativekey import AndroidKey
from appium.webdriver.webdriver import WebDriver
from bs4 import BeautifulSoup
from selenium.common import NoSuchElementException
from selenium.webdriver.common.by import By

sys.path.append(os.getcwd())
from common.common import Common
from common.mq import MQ
from common.public import download_rule, get_config_from_mysql
from common.scheduling_db import MysqlHelper


def get_redirect_url(url):
    """Follow a single 301/302 redirect and return the target URL."""
    res = requests.get(url, allow_redirects=False)
    if res.status_code in (301, 302):
        return res.headers["Location"]
    return url
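
# Usage sketch (hypothetical URL): get_redirect_url("https://v.example.com/abc")
# returns the Location header of a 301/302 response; any other status code
# returns the input URL unchanged.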


class XiaoNianGaoPlusRecommend:
    platform = "小年糕-user"
    download_cnt = 0   # videos pushed to MQ in the current round
    element_list = []  # de-duplication cache of elements already processed
    i = 0              # running index of the current video

    @classmethod
    def start_wechat(cls, log_type, crawler, env, rule_dict, our_uid):
        # dev and prod currently point to the same local ChromeDriver binary
        if env == "dev":
            chromedriverExecutable = "/Users/tzld/Downloads/chromedriver_v111/chromedriver"
        else:
            chromedriverExecutable = "/Users/tzld/Downloads/chromedriver_v111/chromedriver"
        Common.logger(log_type, crawler).info("Starting WeChat")
        Common.logging(log_type, crawler, env, "Starting WeChat")
        caps = {
            "platformName": "Android",
            "deviceName": "Android",
            # "platformVersion": "11",
            # "udid": "emulator-5554",
            "appPackage": "com.tencent.mm",
            "appActivity": ".ui.LauncherUI",
            "autoGrantPermissions": "true",
            "noReset": True,
            "resetKeyboard": True,
            "unicodeKeyboard": True,
            "showChromedriverLog": True,
            "printPageSourceOnFailure": True,
            "recreateChromeDriverSessions": True,
            "enableWebviewDetailsCollection": True,
            "setWebContentsDebuggingEnabled": True,
            "newCommandTimeout": 6000,
            "automationName": "UiAutomator2",
            "chromedriverExecutable": chromedriverExecutable,
            "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
        }
        driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
        driver.implicitly_wait(30)
        # Wait up to ~120 iterations for the WeChat home screen; dismiss the
        # system pull-down menu if it covers the app.
        for i in range(120):
            try:
                if driver.find_elements(By.ID, "com.tencent.mm:id/f2s"):
                    Common.logger(log_type, crawler).info("WeChat started successfully")
                    Common.logging(log_type, crawler, env, "WeChat started successfully")
                    break
                elif driver.find_element(By.ID, "com.android.systemui:id/dismiss_view"):
                    Common.logger(log_type, crawler).info("Found and closed the system pull-down menu")
                    Common.logging(log_type, crawler, env, "Found and closed the system pull-down menu")
                    driver.find_element(By.ID, "com.android.systemui:id/dismiss_view").click()
                else:
                    pass
            except NoSuchElementException:
                time.sleep(1)
        Common.logger(log_type, crawler).info("Swiping down to open the mini-program panel")
        Common.logging(log_type, crawler, env, "Swiping down to open the mini-program panel")
        size = driver.get_window_size()
        driver.swipe(int(size["width"] * 0.5), int(size["height"] * 0.2),
                     int(size["width"] * 0.5), int(size["height"] * 0.8), 200)
        time.sleep(1)
        Common.logger(log_type, crawler).info('Opening the "小年糕+" mini-program')
        Common.logging(log_type, crawler, env, 'Opening the "小年糕+" mini-program')
        driver.find_elements(By.XPATH, '//*[@text="小年糕+"]')[-1].click()
        time.sleep(5)
        cls.get_videoList(log_type, crawler, driver, env, rule_dict, our_uid)
        time.sleep(1)
        driver.quit()
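
    # Note: the capabilities above assume a local Appium server at
    # http://localhost:4723/wd/hub and a ChromeDriver build matching the
    # WebView of the installed WeChat version; "com.tencent.mm:appbrand0" is
    # the Android process that hosts WeChat mini-programs.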

    @classmethod
    def search_elements(cls, driver: WebDriver, xpath):
        """Search every window handle for elements matching the XPath.

        Returns the matching elements, or None if nothing is found.
        """
        time.sleep(1)
        windowHandles = driver.window_handles
        for handle in windowHandles:
            driver.switch_to.window(handle)
            time.sleep(1)
            try:
                elements = driver.find_elements(By.XPATH, xpath)
                if elements:
                    return elements
            except NoSuchElementException:
                pass
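
    # Example: search_elements(driver, '//*[@class="list-list--list"]')
    # returns the feed-list nodes of the current page, or None — which
    # get_videoList below treats as "the window has been destroyed".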

    @classmethod
    def check_to_applet(cls, log_type, crawler, env, driver: WebDriver):
        """Switch the driver into the mini-program WebView context."""
        time.sleep(1)
        webViews = driver.contexts
        Common.logger(log_type, crawler).info(f"webViews:{webViews}")
        Common.logging(log_type, crawler, env, f"webViews:{webViews}")
        driver.switch_to.context(webViews[1])
        windowHandles = driver.window_handles
        for handle in windowHandles:
            driver.switch_to.window(handle)
            time.sleep(1)
            try:
                driver.find_element(By.XPATH, '//*[@class="tab-bar--tab tab-bar--tab-selected"]')
                Common.logger(log_type, crawler).info("Switched to the mini-program\n")
                Common.logging(log_type, crawler, env, "Switched to the mini-program\n")
                return
            except NoSuchElementException:
                time.sleep(1)
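
    # Assumption: with Appium against WeChat, contexts[0] is typically
    # NATIVE_APP and contexts[1] the mini-program WebView, which is why
    # webViews[1] is selected above.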

    @classmethod
    def repeat_video(cls, log_type, crawler, video_id, env):
        """Return the number of rows already stored for this out_video_id."""
        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
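
    # Example: repeat_video("recommend", "xiaoniangao", out_video_id, "dev")
    # returns a non-zero count when the id already exists in crawler_video,
    # i.e. the video was crawled before.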

    @classmethod
    def swipe_up(cls, driver: WebDriver):
        """Scroll the feed up by roughly 40% of the screen height."""
        cls.search_elements(driver, '//*[@class="videoplay"]')
        size = driver.get_window_size()
        driver.swipe(int(size["width"] * 0.7), int(size["height"] * 0.8),
                     int(size["width"] * 0.7), int(size["height"] * 0.4), 200)

    @classmethod
    def get_video_url(cls, log_type, crawler, driver: WebDriver, video_title_element):
        """Click the video title and read the src of the resulting player.

        Retries up to three times; returns None if no player element appears.
        """
        for i in range(3):
            cls.search_elements(driver, '//wx-view[@class="expose--adapt-parent"]')
            Common.logger(log_type, crawler).info(f"video_title_element:{video_title_element[0]}")
            time.sleep(1)
            Common.logger(log_type, crawler).info("Scrolling the title into view")
            driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'});",
                                  video_title_element[0])
            time.sleep(3)
            Common.logger(log_type, crawler).info("Clicking the title")
            video_title_element[0].click()
            # driver.execute_script("arguments[0].click();", video_title_element[0])
            Common.logger(log_type, crawler).info("Title clicked")
            time.sleep(1)
            video_url_elements = cls.search_elements(driver,
                                                     '//wx-video[@class="video-player--video"]')
            if video_url_elements:
                return video_url_elements[0].get_attribute("src")

    @classmethod
    def get_user(cls, video_element, driver, log_type, crawler, env):
        """Open the author's profile page and collect its video elements.

        Returns (video_url_elements, user_name), or None when the play count
        is below the threshold or the username element cannot be found.
        """
        # play-count string, e.g. "30000+次播放"
        play_str = video_element.find("wx-view", class_="dynamic--views").text
        user_name = video_element.find("wx-view", class_="dynamic--nick-top").text
        play_cnt = int(play_str.replace("+", "").replace("次播放", ""))
        # skip users whose exposed play count is below 30,000
        if play_cnt < 30000:
            Common.logger(log_type, crawler).warning(f"Play count below threshold: {play_cnt}")
            Common.logging(log_type, crawler, env, f"Play count below threshold: {play_cnt}")
            return
        video_name_element = cls.search_elements(driver, f'//*[contains(text(), "{user_name}")]')
        if video_name_element is None:
            Common.logger(log_type, crawler).warning(f"Element for username {user_name} not found")
            Common.logging(log_type, crawler, env, f"Element for username {user_name} not found")
            return
        Common.logger(log_type, crawler).info("Clicking the username to open the user's home page")
        Common.logging(log_type, crawler, env, "Clicking the username to open the user's home page")
        cls.search_elements(driver, '//*[@class="list-list--list"]')
        Common.logger(log_type, crawler).info(f"video_name_element:{video_name_element[0]}")
        time.sleep(1)
        Common.logger(log_type, crawler).info("Scrolling the username into view")
        driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'});",
                              video_name_element[0])
        time.sleep(3)
        video_name_element[0].click()
        Common.logger(log_type, crawler).info("Username clicked")
        time.sleep(30)
        cls.search_elements(driver, '//wx-view[@class="expose--adapt-parent"]')
        page_source = driver.page_source
        soup = BeautifulSoup(page_source, "html.parser")
        video_url_elements = soup.findAll("wx-view", class_="expose--adapt-parent")
        # keep only elements we have not processed yet, then remember them
        video_url_elements = list(set(video_url_elements).difference(set(cls.element_list)))
        cls.element_list = list(set(video_url_elements) | set(cls.element_list))
        if video_url_elements:
            return video_url_elements, user_name
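
    # get_user and get_videoList share cls.element_list, so an element that
    # surfaced on a profile page is not re-processed when it also appears in
    # the recommendation feed.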

    @classmethod
    def get_videoList(cls, log_type, crawler, driver: WebDriver, env, rule_dict, our_uid):
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        driver.implicitly_wait(20)
        cls.check_to_applet(log_type=log_type, crawler=crawler, env=env, driver=driver)
        time.sleep(1)
        page = 0
        while True:
            if cls.search_elements(driver, '//*[@class="list-list--list"]') is None:
                Common.logger(log_type, crawler).info("Window has been destroyed\n")
                Common.logging(log_type, crawler, env, "Window has been destroyed\n")
                cls.i = 0
                cls.download_cnt = 0
                cls.element_list = []
                return
            cls.swipe_up(driver)
            page_source = driver.page_source
            soup = BeautifulSoup(page_source, "html.parser")
            video_list_elements = soup.findAll("wx-view", class_="expose--adapt-parent")
            # keep only elements we have not processed yet, then remember them
            video_list_elements = list(set(video_list_elements).difference(set(cls.element_list)))
            cls.element_list = list(set(video_list_elements) | set(cls.element_list))
            Common.logger(log_type, crawler).info(f"Crawling page {page + 1}, {len(video_list_elements)} videos in total")
            Common.logging(log_type, crawler, env, f"Crawling page {page + 1}, {len(video_list_elements)} videos in total")
            if len(video_list_elements) == 0:
                for i in range(10):
                    Common.logger(log_type, crawler).info(f"Swiping up, attempt {i + 1}")
                    cls.swipe_up(driver)
                    time.sleep(0.5)
                continue
            for video_element in video_list_elements:
                try:
                    # get_user returns None when the user is skipped, so guard before unpacking
                    user_result = cls.get_user(video_element, driver, log_type, crawler, env)
                    if user_result is None:
                        continue
                    video_url_elements, user_name = user_result
                    for user_video_element in video_url_elements:
                        try:
                            Common.logger(log_type, crawler).info(f"{cls.download_cnt} videos crawled in this round\n")
                            Common.logging(log_type, crawler, env, f"{cls.download_cnt} videos crawled in this round\n")
                            if cls.download_cnt >= int(rule_dict.get("videos_cnt", {}).get("min", 10)):
                                cls.i = 0
                                cls.download_cnt = 0
                                cls.element_list = []
                                return
                            cls.i += 1
                            Common.logger(log_type, crawler).info(f"Video {cls.i}")
                            Common.logging(log_type, crawler, env, f"Video {cls.i}")
                            video_title = user_video_element.find("wx-view", class_="album--album-cover-title").text
                            # play-count string
                            play_str = user_video_element.find("wx-view", class_="album--album-cover-views").text
                            # cover URL
                            cover_url = user_video_element.find("wx-image", class_="album--album-cover-bg")["src"]
                            play_cnt = int(play_str.replace("+", "").replace("次播放", ""))
                            # publish date, formatted "YYYY.MM.DD", so string comparison works
                            play_time = user_video_element.find("wx-view", class_="album--album-time").text
                            date_three_days_ago_string = (date.today() + timedelta(days=-3)).strftime("%Y.%m.%d")
                            if play_time <= date_three_days_ago_string:
                                Common.logger(log_type, crawler).info(f"Published more than 3 days ago: {play_time}\n")
                                continue
                            out_video_id = md5(video_title.encode("utf8")).hexdigest() + "user"
                            out_user_id = md5(user_name.encode("utf8")).hexdigest() + "user"
                            video_dict = {
                                "video_title": video_title,
                                "video_id": out_video_id,
                                "duration_str": "",
                                "duration": 0,
                                "play_str": play_str,
                                "play_cnt": play_cnt,
                                "like_str": "",
                                "like_cnt": 0,
                                "comment_cnt": 0,
                                "share_cnt": 0,
                                "user_name": user_name,
                                "user_id": out_user_id,
                                "publish_time_stamp": int(time.time()),
                                "publish_time_str": time.strftime("%Y-%m-%d %H:%M:%S",
                                                                  time.localtime(int(time.time()))),
                                "avatar_url": cover_url,
                                "cover_url": cover_url,
                                "session": f"xiaoniangao-{int(time.time())}"
                            }
                            for k, v in video_dict.items():
                                Common.logger(log_type, crawler).info(f"{k}:{v}")
                            Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
                            if video_title is None or cover_url is None:
                                Common.logger(log_type, crawler).info("Invalid video\n")
                                Common.logging(log_type, crawler, env, "Invalid video\n")
                                cls.swipe_up(driver)
                                time.sleep(0.5)
                            elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict,
                                               rule_dict=rule_dict) is False:
                                Common.logger(log_type, crawler).info("Crawl rule not satisfied\n")
                                Common.logging(log_type, crawler, env, "Crawl rule not satisfied\n")
                                cls.swipe_up(driver)
                                time.sleep(0.5)
                            elif any(str(word) in video_dict["video_title"]
                                     for word in get_config_from_mysql(log_type=log_type,
                                                                       source=crawler,
                                                                       env=env,
                                                                       text="filter",
                                                                       action="")):
                                Common.logger(log_type, crawler).info("Title hit a filter word\n")
                                Common.logging(log_type, crawler, env, "Title hit a filter word\n")
                                cls.swipe_up(driver)
                                time.sleep(0.5)
                            elif cls.repeat_video(log_type, crawler, out_video_id, env) != 0:
                                Common.logger(log_type, crawler).info("Video already downloaded\n")
                                Common.logging(log_type, crawler, env, "Video already downloaded\n")
                                cls.swipe_up(driver)
                                time.sleep(5)
                            else:
                                video_title_element = cls.search_elements(driver,
                                                                          f'//*[contains(text(), "{video_title}")]')
                                if video_title_element is None:
                                    Common.logger(log_type, crawler).warning(f"Element for video title {video_title} not found")
                                    Common.logging(log_type, crawler, env, f"Element for video title {video_title} not found")
                                    continue
                                Common.logger(log_type, crawler).info("Clicking the title to open the video detail page")
                                Common.logging(log_type, crawler, env, "Clicking the title to open the video detail page")
                                video_url = cls.get_video_url(log_type, crawler, driver, video_title_element)
                                if video_url is None:
                                    Common.logger(log_type, crawler).info("Video play URL not found\n")
                                    driver.press_keycode(AndroidKey.BACK)
                                    time.sleep(5)
                                    continue
                                video_url = get_redirect_url(video_url)
                                video_dict["video_url"] = video_url
                                Common.logger(log_type, crawler).info(f"video_url:{video_url}")
                                video_dict["platform"] = crawler
                                video_dict["strategy"] = log_type
                                video_dict["out_video_id"] = video_dict["video_id"]
                                video_dict["crawler_rule"] = json.dumps(rule_dict)
                                # replace the hashed user_id with our internal uid before sending
                                video_dict["user_id"] = our_uid
                                video_dict["publish_time"] = video_dict["publish_time_str"]
                                mq.send_msg(video_dict)
                                cls.download_cnt += 1
                                driver.press_keycode(AndroidKey.BACK)
                                time.sleep(5)
                        except Exception as e:
                            Common.logger(log_type, crawler).error(f"Exception while crawling a single video: {e}\n")
                            Common.logging(log_type, crawler, env, f"Exception while crawling a single video: {e}\n")
                            driver.press_keycode(AndroidKey.BACK)
                            time.sleep(5)
                except Exception as e:
                    Common.logger(log_type, crawler).error(f"Exception while crawling a user's video list: {e}\n")
                    Common.logging(log_type, crawler, env, f"Exception while crawling a user's video list: {e}\n")
            Common.logger(log_type, crawler).info("Finished one batch, sleeping 5 seconds\n")
            Common.logging(log_type, crawler, env, "Finished one batch, sleeping 5 seconds\n")
            time.sleep(5)
            page += 1


if __name__ == "__main__":
    rule_dict1 = {"period": {"min": 0, "max": 365},
                  "duration": {"min": 0, "max": 1800},
                  "favorite_cnt": {"min": 0, "max": 0},
                  "videos_cnt": {"min": 10, "max": 20},
                  "share_cnt": {"min": 0, "max": 0}}
    XiaoNianGaoPlusRecommend.start_wechat("recommend", "xiaoniangao", "dev", rule_dict1, 6267141)
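
# Running this module directly is a sketch that assumes a configured
# environment: an Appium server at http://localhost:4723/wd/hub, an Android
# device or emulator with WeChat and the "小年糕+" mini-program available, and
# MySQL / MQ backends reachable through the common.* helpers.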