# kanyikan_recommend_offline.py
  1. # -*- coding: utf-8 -*-
  2. # @Time: 2023/9/6
  3. import json
  4. import os
  5. import re
  6. import sys
  7. import time
  8. from appium import webdriver
  9. from appium.webdriver.extensions.android.nativekey import AndroidKey
  10. from appium.webdriver.webdriver import WebDriver
  11. from bs4 import BeautifulSoup
  12. from selenium.common import NoSuchElementException
  13. from selenium.webdriver.common.by import By
  14. sys.path.append(os.getcwd())
  15. from common.common import Common
  16. from common.mq import MQ
  17. from common.public import download_rule, get_config_from_mysql
  18. from common.scheduling_db import MysqlHelper
  19. class KanyikanRecommend:
  20. platform = "看一看+"
  21. download_cnt = 0
  22. element_list = []
  23. i = 0
  24. @classmethod
  25. def start_wechat(cls, log_type, crawler, env, rule_dict, our_uid):
  26. if env == "dev":
  27. chromedriverExecutable = "/Users/tzld/Downloads/chromedriver_v111/chromedriver"
  28. else:
  29. chromedriverExecutable = "/Users/crawler/Downloads/chromedriver_v111/chromedriver"
  30. Common.logger(log_type, crawler).info("启动微信")
  31. Common.logging(log_type, crawler, env, '启动微信')
  32. caps = {
  33. "platformName": "Android",
  34. "devicesName": "Android",
  35. # "platformVersion": "11",
  36. # "udid": "emulator-5554",
  37. "appPackage": "com.tencent.mm",
  38. "appActivity": ".ui.LauncherUI",
  39. "autoGrantPermissions": "true",
  40. "noReset": True,
  41. "resetkeyboard": True,
  42. "unicodekeyboard": True,
  43. "showChromedriverLog": True,
  44. "printPageSourceOnFailure": True,
  45. "recreateChromeDriverSessions": True,
  46. "enableWebviewDetailsCollection": True,
  47. "setWebContentsDebuggingEnabled": True,
  48. "newCommandTimeout": 6000,
  49. "automationName": "UiAutomator2",
  50. "chromedriverExecutable": chromedriverExecutable,
  51. "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
  52. }
  53. driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
  54. driver.implicitly_wait(30)
  55. for i in range(120):
  56. try:
  57. if driver.find_elements(By.ID, "com.tencent.mm:id/f2s"):
  58. Common.logger(log_type, crawler).info("微信启动成功")
  59. Common.logging(log_type, crawler, env, '微信启动成功')
  60. break
  61. elif driver.find_element(By.ID, "com.android.systemui:id/dismiss_view"):
  62. Common.logger(log_type, crawler).info("发现并关闭系统下拉菜单")
  63. Common.logging(log_type, crawler, env, '发现并关闭系统下拉菜单')
  64. driver.find_element(By.ID, "com.android.system:id/dismiss_view").click()
  65. else:
  66. pass
  67. except NoSuchElementException:
  68. time.sleep(1)
  69. Common.logger(log_type, crawler).info("下滑,展示小程序选择面板")
  70. Common.logging(log_type, crawler, env, '下滑,展示小程序选择面板')
  71. size = driver.get_window_size()
  72. driver.swipe(int(size['width'] * 0.5), int(size['height'] * 0.2),
  73. int(size['width'] * 0.5), int(size['height'] * 0.8), 200)
  74. time.sleep(1)
  75. Common.logger(log_type, crawler).info('打开小程序"看一看+"')
  76. Common.logging(log_type, crawler, env, '打开小程序"看一看+"')
  77. driver.find_elements(By.XPATH, '//*[@text="看一看+"]')[-1].click()
  78. time.sleep(5)
  79. cls.get_videoList(log_type, crawler, driver, env, rule_dict, our_uid)
  80. time.sleep(1)
  81. driver.quit()
  82. @classmethod
  83. def search_elements(cls, driver: WebDriver, xpath):
  84. time.sleep(1)
  85. windowHandles = driver.window_handles
  86. for handle in windowHandles:
  87. driver.switch_to.window(handle)
  88. time.sleep(1)
  89. try:
  90. elements = driver.find_elements(By.XPATH, xpath)
  91. if elements:
  92. return elements
  93. except NoSuchElementException:
  94. pass
  95. @classmethod
  96. def check_to_applet(cls, log_type, crawler, env, driver: WebDriver):
  97. time.sleep(1)
  98. webViews = driver.contexts
  99. Common.logger(log_type, crawler).info(f"webViews:{webViews}")
  100. Common.logging(log_type, crawler, env, f"webViews:{webViews}")
  101. driver.switch_to.context(webViews[1])
  102. windowHandles = driver.window_handles
  103. for handle in windowHandles:
  104. driver.switch_to.window(handle)
  105. time.sleep(1)
  106. try:
  107. driver.find_element(By.XPATH, "//wx-view[@class='new-bottom-tabs']//wx-view[1]")
  108. Common.logger(log_type, crawler).info("切换到小程序成功\n")
  109. Common.logging(log_type, crawler, env, '切换到小程序成功\n')
  110. return
  111. except NoSuchElementException:
  112. time.sleep(1)
  113. @classmethod
  114. def repeat_video(cls, log_type, crawler, video_id, env):
  115. sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{video_id}"; """
  116. repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
  117. return len(repeat_video)
  118. @classmethod
  119. def swipe_up(cls, driver: WebDriver):
  120. cls.search_elements(driver, '//*[@class="snapshot--title snapshot--shadow"]')
  121. size = driver.get_window_size()
  122. driver.swipe(int(size["width"] * 0.7), int(size["height"] * 0.8),
  123. int(size["width"] * 0.7), int(size["height"] * 0.4), 200)
  124. @classmethod
  125. def get_video_url(cls, log_type, crawler, driver: WebDriver, video_title_element):
  126. for i in range(3):
  127. cls.search_elements(driver, '//*[@class="snapshot--title snapshot--shadow"]')
  128. Common.logger(log_type, crawler).info(f"video_title_element:{video_title_element[0]}")
  129. time.sleep(1)
  130. Common.logger(log_type, crawler).info("滑动标题至可见状态")
  131. driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'});", video_title_element[0])
  132. time.sleep(3)
  133. Common.logger(log_type, crawler).info("点击标题")
  134. video_title_element[0].click()
  135. Common.logger(log_type, crawler).info("点击标题完成")
  136. time.sleep(1)
  137. video_url_elements = cls.search_elements(driver, "//wx-video[@class='videofull--video']")
  138. if video_url_elements:
  139. return video_url_elements[0].get_attribute("src")
  140. @classmethod
  141. def get_videoList(cls, log_type, crawler, driver: WebDriver, env, rule_dict, our_uid):
  142. mq = MQ(topic_name="topic_crawler_etl_" + env)
  143. driver.implicitly_wait(20)
  144. # 鼠标左键点击, 1为x坐标, 2为y坐标
  145. cls.check_to_applet(log_type=log_type, crawler=crawler, env=env, driver=driver)
  146. time.sleep(1)
  147. page = 0
  148. while True:
  149. if cls.search_elements(driver, "//wx-view[@class='new-bottom-tabs']//wx-view[1]") is None:
  150. Common.logger(log_type, crawler).info("窗口已销毁\n")
  151. Common.logging(log_type, crawler, env, '窗口已销毁\n')
  152. cls.i = 0
  153. cls.download_cnt = 0
  154. cls.element_list = []
  155. return
  156. cls.swipe_up(driver)
  157. page_source = driver.page_source
  158. soup = BeautifulSoup(page_source, 'html.parser')
  159. soup.prettify()
  160. video_list_elements = soup.findAll("wx-snapshotcomp", class_="snap-shot-comp")
  161. # video_list_elements 有,cls.element_list 中没有的元素
  162. video_list_elements = list(set(video_list_elements).difference(set(cls.element_list)))
  163. # video_list_elements 与 cls.element_list 的并集
  164. cls.element_list = list(set(video_list_elements) | set(cls.element_list))
  165. Common.logger(log_type, crawler).info(f"正在抓取第{page + 1}页,共:{len(video_list_elements)}条视频")
  166. Common.logging(log_type, crawler, env, f"正在抓取第{page + 1}页,共:{len(video_list_elements)}条视频")
  167. if len(video_list_elements) == 0:
  168. for i in range(10):
  169. Common.logger(log_type, crawler).info(f"向上滑动第{i + 1}次")
  170. cls.swipe_up(driver)
  171. time.sleep(0.5)
  172. continue
  173. for i, video_element in enumerate(video_list_elements):
  174. try:
  175. Common.logger(log_type, crawler).info(f"本轮已抓取{cls.download_cnt}条视频\n")
  176. Common.logging(log_type, crawler, env, f"本轮已抓取{cls.download_cnt}条视频\n")
  177. if cls.download_cnt >= int(rule_dict.get("videos_cnt", {}).get("min", 10)):
  178. cls.i = 0
  179. cls.download_cnt = 0
  180. cls.element_list = []
  181. return
  182. cls.i += 1
  183. Common.logger(log_type, crawler).info(f"第{cls.i}条视频")
  184. Common.logging(log_type, crawler, env, f"第{cls.i}条视频")
  185. video_title = video_element.find("wx-view", class_="snapshot--title snapshot--shadow").text
  186. cover_url = video_element.find("wx-image", class_="snapshot--snapshoot")["src"]
  187. user_name = video_element.find("wx-view", class_="snapshot--author-name").text
  188. out_video_id = video_element.get("id")
  189. play_str = video_element.find("wx-text", class_="snapshot--comment-text").select_one(
  190. "span:nth-of-type(2)").text.strip()
  191. play_cnt = play_str.replace("+", "").replace("播放", "")
  192. if "万" in play_cnt:
  193. match = re.search(r'(\d+(?:\.\d+)?)万', play_cnt)
  194. if match:
  195. number = float(match.group(1))
  196. play_cnt = int(number * 10000)
  197. else:
  198. play_cnt = int(play_cnt)
  199. video_dict = {
  200. "video_title": video_title,
  201. "video_id": out_video_id,
  202. "duration": 0,
  203. "play_str": play_str,
  204. "play_cnt": play_cnt,
  205. "user_name": user_name,
  206. "user_id": '',
  207. 'publish_time_stamp': int(time.time()),
  208. 'publish_time_str': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
  209. "avatar_url": cover_url,
  210. "cover_url": cover_url,
  211. "session": ""
  212. }
  213. for k, v in video_dict.items():
  214. Common.logger(log_type, crawler).info(f"{k}:{v}")
  215. Common.logging(log_type, crawler, env, f"video_dict:{video_dict}")
  216. # Common.logger(log_type, crawler).info(f"==========分割线==========\n")
  217. if video_title is None or cover_url is None:
  218. Common.logger(log_type, crawler).info("无效视频\n")
  219. Common.logging(log_type, crawler, env, '无效视频\n')
  220. cls.swipe_up(driver)
  221. time.sleep(0.5)
  222. elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict,
  223. rule_dict=rule_dict) is False:
  224. Common.logger(log_type, crawler).info("不满足抓取规则\n")
  225. Common.logging(log_type, crawler, env, "不满足抓取规则\n")
  226. cls.swipe_up(driver)
  227. time.sleep(0.5)
  228. elif any(str(word) if str(word) in video_dict["video_title"] else False
  229. for word in get_config_from_mysql(log_type=log_type,
  230. source=crawler,
  231. env=env,
  232. text="filter",
  233. action="")) is True:
  234. Common.logger(log_type, crawler).info('已中过滤词\n')
  235. Common.logging(log_type, crawler, env, '已中过滤词\n')
  236. cls.swipe_up(driver)
  237. time.sleep(0.5)
  238. elif cls.repeat_video(log_type, crawler, out_video_id, env) != 0:
  239. Common.logger(log_type, crawler).info('视频已下载\n')
  240. Common.logging(log_type, crawler, env, '视频已下载\n')
  241. cls.swipe_up(driver)
  242. time.sleep(5)
  243. else:
  244. video_title_element = cls.search_elements(driver, f'//*[contains(text(), "{video_title}")]')
  245. if video_title_element is None:
  246. Common.logger(log_type, crawler).warning(f"未找到该视频标题的element:{video_title_element}")
  247. Common.logging(log_type, crawler, env, f"未找到该视频标题的element:{video_title_element}")
  248. continue
  249. Common.logger(log_type, crawler).info("点击标题,进入视频详情页")
  250. Common.logging(log_type, crawler, env, "点击标题,进入视频详情页")
  251. video_url = cls.get_video_url(log_type, crawler, driver, video_title_element)
  252. if video_url is None:
  253. Common.logger(log_type, crawler).info("未获取到视频播放地址\n")
  254. driver.press_keycode(AndroidKey.BACK)
  255. time.sleep(5)
  256. continue
  257. Common.logger(log_type, crawler).info(f"video_url:{video_url}")
  258. comment_cnt = driver.find_element(By.XPATH,
  259. "//wx-swiper-item[@data-key='A']//wx-view[@class='videofull--comments']//span[2]").text
  260. like_cnt = driver.find_element(By.XPATH,
  261. "//wx-swiper-item[@data-key='A']//wx-view[@class='videofull--large-btn-box videofull--share-model-timeline']//wx-view[2]//wx-text[1]//span[2]").text
  262. share_cnt = driver.find_element(By.XPATH,
  263. "//wx-swiper-item[@data-key='A']//wx-view[@class='videofull--large-btn-box videofull--share-model-timeline']//wx-view[3]//wx-text[1]//span[2]").text
  264. # 评论
  265. comment_cnt = comment_cnt.replace("+", "")
  266. if "万" in comment_cnt:
  267. comment_cnt = int(comment_cnt.split("万")[0]) * 10000
  268. elif comment_cnt == "":
  269. comment_cnt = 0
  270. else:
  271. comment_cnt = int(comment_cnt)
  272. # 点赞
  273. like_cnt = like_cnt.replace("+", "")
  274. if "万" in like_cnt:
  275. like_cnt = int(like_cnt.split("万")[0]) * 10000
  276. elif like_cnt == "":
  277. like_cnt = 0
  278. else:
  279. like_cnt = int(like_cnt)
  280. # 分享
  281. share_cnt = share_cnt.replace("+", "")
  282. if "万" in share_cnt:
  283. share_cnt = int(share_cnt.split("万")[0]) * 10000
  284. elif share_cnt == "":
  285. share_cnt = 0
  286. else:
  287. share_cnt = int(share_cnt)
  288. video_dict['like_cnt'] = like_cnt
  289. video_dict['comment_cnt'] = comment_cnt
  290. video_dict['share_cnt'] = share_cnt
  291. video_dict['video_url'] = video_url
  292. video_dict["platform"] = crawler
  293. video_dict["strategy"] = log_type
  294. video_dict["out_video_id"] = video_dict["video_id"]
  295. video_dict["crawler_rule"] = json.dumps(rule_dict)
  296. video_dict["user_id"] = our_uid
  297. video_dict["publish_time"] = video_dict["publish_time_str"]
  298. mq.send_msg(video_dict)
  299. cls.download_cnt += 1
  300. driver.press_keycode(AndroidKey.BACK)
  301. time.sleep(5)
  302. cls.swipe_up(driver)
  303. except Exception as e:
  304. Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
  305. Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
  306. Common.logger(log_type, crawler).info("已抓取完一组,休眠 5 秒\n")
  307. Common.logging(log_type, crawler, env, "已抓取完一组,休眠 5 秒\n")
  308. time.sleep(5)
  309. page += 1
  310. if __name__ == "__main__":
  311. rule_dict1 = {"period": {"min": 0, "max": 365},
  312. "duration": {"min": 0, "max": 1800},
  313. "favorite_cnt": {"min": 0, "max": 0},
  314. "videos_cnt": {"min": 10, "max": 20},
  315. "share_cnt": {"min": 0, "max": 0}}
  316. KanyikanRecommend.start_wechat("recommend", "kanyikan", "dev", rule_dict1, 6267141)