# xiaoniangao_plus.py
  1. # -*- coding: utf-8 -*-
  2. # @Author: luojunhui
  3. # @Time: 2023/12/18
  4. import json
  5. import os
  6. import random
  7. import sys
  8. import time
  9. import uuid
  10. from hashlib import md5
  11. from appium import webdriver
  12. from appium.webdriver.extensions.android.nativekey import AndroidKey
  13. from bs4 import BeautifulSoup
  14. from selenium.common.exceptions import NoSuchElementException
  15. from selenium.webdriver.common.by import By
  16. sys.path.append(os.getcwd())
  17. from application.functions import get_redirect_url
  18. from application.pipeline import PiaoQuanPipelineTest
  19. from application.common.messageQueue import MQ
  20. from application.common.log import Local, AliyunLogger
class XiaoNianGaoPlusRecommend(object):
    """
    Xiaoniangao+ on-device crawler.

    Drives the WeChat Android app through Appium, opens the "小年糕+"
    mini-program, and scrapes its recommended-video feed.
    """

    def __init__(self, log_type, crawler, env, rule_dict, our_uid):
        # Message-queue handle; left unset here (assigned elsewhere, if at all).
        self.mq = None
        self.platform = "xiaoniangaoplus"
        self.download_cnt = 0    # videos accepted in the current batch
        self.element_list = []   # cache of feed elements seen in this batch
        self.count = 0           # videos scanned in the current batch
        self.swipe_count = 0     # number of upward swipes performed
        self.log_type = log_type
        self.crawler = crawler
        self.env = env
        self.rule_dict = rule_dict
        self.our_uid = our_uid
        # NOTE(review): machine-specific hard-coded chromedriver path —
        # consider making this configurable.
        chromedriverExecutable = "/Users/luojunhui/Downloads/chromedriver_mac_116/chromedriver"
        print("启动微信")
        # Desired capabilities for driving WeChat via Appium / UiAutomator2.
        caps = {
            "platformName": "Android",
            "devicesName": "Android",
            "appPackage": "com.tencent.mm",
            "appActivity": ".ui.LauncherUI",
            "autoGrantPermissions": True,
            "noReset": True,
            # NOTE(review): Appium spells these "resetKeyboard" / "unicodeKeyboard";
            # the lower-case keys below are likely ignored — confirm.
            "resetkeyboard": True,
            "unicodekeyboard": True,
            "showChromedriverLog": True,
            "printPageSourceOnFailure": True,
            "recreateChromeDriverSessions": True,
            "enableWebviewDetailsCollection": True,
            "setWebContentsDebuggingEnabled": True,
            "newCommandTimeout": 6000,
            "automationName": "UiAutomator2",
            "chromedriverExecutable": chromedriverExecutable,
            # Target the mini-program (appbrand) process for webview debugging.
            "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
        }
        try:
            self.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
        except Exception as e:
            print(e)
            # Bail out silently: the instance is left without a driver.
            return
        self.driver.implicitly_wait(30)
        # Poll up to 10 times for WeChat's main UI to appear.
        for i in range(10):
            try:
                if self.driver.find_elements(By.ID, "com.tencent.mm:id/f2s"):
                    print("启动微信成功")
                    break
                elif self.driver.find_element(
                    By.ID, "com.android.systemui:id/dismiss_view"
                ):
                    print("发现并关闭系统下拉菜单")
                    # Swipe up to dismiss the system pull-down menu.
                    size = self.driver.get_window_size()
                    self.driver.swipe(
                        int(size["width"] * 0.5),
                        int(size["height"] * 0.8),
                        int(size["width"] * 0.5),
                        int(size["height"] * 0.2),
                        200,
                    )
                else:
                    pass
            except Exception as e:
                print(f"打开微信异常:{e}")
                time.sleep(1)
        # Swipe down to reveal the mini-program drawer.
        size = self.driver.get_window_size()
        self.driver.swipe(
            int(size["width"] * 0.5),
            int(size["height"] * 0.2),
            int(size["width"] * 0.5),
            int(size["height"] * 0.8),
            200,
        )
        time.sleep(1)
        # Open the most recent "小年糕+" entry in the drawer.
        self.driver.find_elements(By.XPATH, '//*[@text="小年糕+"]')[-1].click()
        print("打开小程序小年糕+成功")
        time.sleep(5)
        # Run one crawl batch, then tear down the session.
        self.get_videoList()
        time.sleep(1)
        self.driver.quit()
  102. def search_elements(self, xpath):
  103. time.sleep(1)
  104. windowHandles = self.driver.window_handles
  105. for handle in windowHandles:
  106. self.driver.switch_to.window(handle)
  107. time.sleep(1)
  108. try:
  109. elements = self.driver.find_elements(By.XPATH, xpath)
  110. if elements:
  111. return elements
  112. except NoSuchElementException:
  113. pass
  114. def check_to_applet(self, xpath):
  115. time.sleep(1)
  116. webViews = self.driver.contexts
  117. self.driver.switch_to.context(webViews[-1])
  118. windowHandles = self.driver.window_handles
  119. for handle in windowHandles:
  120. self.driver.switch_to.window(handle)
  121. time.sleep(1)
  122. try:
  123. self.driver.find_element(By.XPATH, xpath)
  124. print("切换到WebView成功\n")
  125. return
  126. except NoSuchElementException:
  127. time.sleep(1)
  128. def swipe_up(self):
  129. self.search_elements('//*[@class="list-list--list"]')
  130. size = self.driver.get_window_size()
  131. self.driver.swipe(
  132. int(size["width"] * 0.5),
  133. int(size["height"] * 0.8),
  134. int(size["width"] * 0.5),
  135. int(size["height"] * 0.442),
  136. 200,
  137. )
  138. self.swipe_count += 1
  139. def get_video_url(self, video_title_element):
  140. for i in range(3):
  141. self.search_elements('//*[@class="list-list--list"]')
  142. time.sleep(1)
  143. self.driver.execute_script(
  144. "arguments[0].scrollIntoView({block:'center',inline:'center'});",
  145. video_title_element[0],
  146. )
  147. time.sleep(3)
  148. video_title_element[0].click()
  149. self.check_to_applet(
  150. xpath=r'//wx-video[@class="dynamic-index--video-item dynamic-index--video"]'
  151. )
  152. time.sleep(10)
  153. video_url_elements = self.search_elements(
  154. '//wx-video[@class="dynamic-index--video-item dynamic-index--video"]'
  155. )
  156. return video_url_elements[0].get_attribute("src")
  157. def parse_detail(self, index):
  158. page_source = self.driver.page_source
  159. soup = BeautifulSoup(page_source, "html.parser")
  160. soup.prettify()
  161. video_list = soup.findAll(
  162. name="wx-view", attrs={"class": "expose--adapt-parent"}
  163. )
  164. index = index + 1
  165. element_list = [i for i in video_list][index:]
  166. return element_list[0]
  167. def get_video_info_2(self, video_element):
  168. if self.download_cnt >= int(
  169. self.rule_dict.get("videos_cnt", {}).get("min", 10)
  170. ):
  171. self.count = 0
  172. self.download_cnt = 0
  173. self.element_list = []
  174. return
  175. self.count += 1
  176. # 获取 trace_id, 并且把该 id 当做视频生命周期唯一索引
  177. trace_id = self.crawler + str(uuid.uuid1())
  178. print("扫描到一条视频")
  179. # 标题
  180. video_title = video_element.find("wx-view", class_="dynamic--title").text
  181. # 播放量字符串
  182. play_str = video_element.find("wx-view", class_="dynamic--views").text
  183. info_list = video_element.findAll(
  184. "wx-view", class_="dynamic--commerce-btn-text"
  185. )
  186. # 点赞数量
  187. like_str = info_list[1].text
  188. # 评论数量
  189. comment_str = info_list[2].text
  190. # 视频时长
  191. duration_str = video_element.find("wx-view", class_="dynamic--duration").text
  192. user_name = video_element.find("wx-view", class_="dynamic--nick-top").text
  193. # 头像 URL
  194. avatar_url = video_element.find("wx-image", class_="avatar--avatar")["src"]
  195. # 封面 URL
  196. cover_url = video_element.find("wx-image", class_="dynamic--bg-image")["src"]
  197. play_cnt = int(play_str.replace("+", "").replace("次播放", ""))
  198. duration = int(duration_str.split(":")[0].strip()) * 60 + int(
  199. duration_str.split(":")[-1].strip()
  200. )
  201. if "点赞" in like_str:
  202. like_cnt = 0
  203. elif "万" in like_str:
  204. like_cnt = int(like_str.split("万")[0]) * 10000
  205. else:
  206. like_cnt = int(like_str)
  207. if "评论" in comment_str:
  208. comment_cnt = 0
  209. elif "万" in comment_str:
  210. comment_cnt = int(comment_str.split("万")[0]) * 10000
  211. else:
  212. comment_cnt = int(comment_str)
  213. out_video_id = md5(video_title.encode("utf8")).hexdigest()
  214. out_user_id = md5(user_name.encode("utf8")).hexdigest()
  215. video_dict = {
  216. "video_title": video_title,
  217. "video_id": out_video_id,
  218. "out_video_id": out_video_id,
  219. "duration_str": duration_str,
  220. "duration": duration,
  221. "play_str": play_str,
  222. "play_cnt": play_cnt,
  223. "like_str": like_str,
  224. "like_cnt": like_cnt,
  225. "comment_cnt": comment_cnt,
  226. "share_cnt": 0,
  227. "user_name": user_name,
  228. "user_id": out_user_id,
  229. "publish_time_stamp": int(time.time()),
  230. "publish_time_str": time.strftime(
  231. "%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))
  232. ),
  233. "update_time_stamp": int(time.time()),
  234. "avatar_url": avatar_url,
  235. "cover_url": cover_url,
  236. "session": f"xiaoniangao-{int(time.time())}",
  237. }
  238. print(json.dumps(video_dict, ensure_ascii=False, indent=4))
  239. Local.logger(platform=self.platform, mode=self.log_type).info(
  240. "scan_data_" + json.dumps(video_dict, ensure_ascii=False))
  241. AliyunLogger(platform=self.platform, mode=self.log_type).logging(
  242. code="7000",
  243. message="监控到一条视频",
  244. data=video_dict
  245. )
  246. # pipeline = PiaoQuanPipelineTest(
  247. # platform=self.crawler,
  248. # mode=self.log_type,
  249. # item=video_dict,
  250. # rule_dict=self.rule_dict,
  251. # env=self.env,
  252. # trace_id=trace_id,
  253. # )
  254. # flag = pipeline.process_item()
  255. # if flag:
  256. # video_title_element = self.search_elements(
  257. # f'//*[contains(text(), "{video_title}")]'
  258. # )
  259. # if video_title_element is None:
  260. # return
  261. # print("点击标题,进入视频详情页")
  262. # video_url = self.get_video_url(video_title_element)
  263. # print(video_url)
  264. # video_url = get_redirect_url(video_url)
  265. # print(video_url)
  266. # if video_url is None:
  267. # self.driver.press_keycode(AndroidKey.BACK)
  268. # time.sleep(5)
  269. # return
  270. # video_dict["video_url"] = video_url
  271. # video_dict["platform"] = self.crawler
  272. # video_dict["strategy"] = self.log_type
  273. # video_dict["out_video_id"] = video_dict["video_id"]
  274. # video_dict["crawler_rule"] = json.dumps(self.rule_dict)
  275. # video_dict["user_id"] = self.our_uid
  276. # video_dict["publish_time"] = video_dict["publish_time_str"]
  277. # print(json.dumps(video_dict, ensure_ascii=False, indent=4))
  278. # self.download_cnt += 1
  279. # self.driver.press_keycode(AndroidKey.BACK)
  280. # time.sleep(5)
  281. #
  282. def get_video_info(self, video_element):
  283. try:
  284. self.get_video_info_2(video_element)
  285. except Exception as e:
  286. self.driver.press_keycode(AndroidKey.BACK)
  287. print(f"抓取单条视频异常:{e}\n")
  288. def get_videoList(self):
  289. """
  290. 获取视频列表
  291. :return:
  292. """
  293. # while True:
  294. self.driver.implicitly_wait(20)
  295. # 切换到 web_view
  296. self.check_to_applet(xpath='//*[@class="tab-bar--tab tab-bar--tab-selected"]')
  297. print("切换到 webview 成功")
  298. time.sleep(1)
  299. if self.search_elements('//*[@class="list-list--list"]') is None:
  300. print("窗口已销毁")
  301. self.count = 0
  302. self.download_cnt = 0
  303. self.element_list = []
  304. return
  305. print("开始获取视频信息")
  306. for i in range(50):
  307. print("下滑{}次".format(i))
  308. element = self.parse_detail(i)
  309. self.get_video_info(element)
  310. self.swipe_up()
  311. time.sleep(random.randint(1, 5))
  312. # if self.swipe_count > 100:
  313. # return
  314. print("已抓取完一组,休眠 600 秒\n")
  315. # time.sleep(600)