xiaoniangao_plus_scheduling2.py

# -*- coding: utf-8 -*-
# @Author: luojunhui
# @Time: 2023/9/27
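#
# Appium-driven crawler for the "小年糕+" WeChat mini program: it launches
# WeChat on an Android device, opens the mini program, scrolls the recommend
# feed, parses each video card out of the WebView DOM, applies rule/filter/
# dedupe checks, and publishes qualifying videos to an MQ topic for ETL.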

import json
import multiprocessing
import os
import sys
import time
from hashlib import md5

import requests
from appium import webdriver
from appium.webdriver.extensions.android.nativekey import AndroidKey
from appium.webdriver.webdriver import WebDriver
from bs4 import BeautifulSoup
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By

sys.path.append(os.getcwd())
from common.common import Common
from common.mq import MQ
from common.public import download_rule, get_config_from_mysql
from common.scheduling_db import MysqlHelper
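

# Resolve one level of HTTP redirect (301/302) without following it
# automatically; returns the Location header, or the original URL when the
# response is not a redirect.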
def get_redirect_url(url):
    res = requests.get(url, allow_redirects=False)
    if res.status_code in (301, 302):
        return res.headers["Location"]
    return url


class XiaoNianGaoPlusRecommend:
    env = None
    driver = None
    log_type = None

    def __init__(self, log_type, crawler, env, rule_dict, our_uid):
        self.mq = None
        self.platform = "小年糕"
        self.download_cnt = 0
        self.element_list = []
        self.count = 0
        self.swipe_count = 0
        self.log_type = log_type
        self.crawler = crawler
        self.env = env
        self.rule_dict = rule_dict
        self.our_uid = our_uid
        if self.env == "dev":
            chromedriverExecutable = "/Users/a123456/Downloads/chromedriver_V111/chromedriver"
        else:
            chromedriverExecutable = "/Users/a123456/Downloads/chromedriver_v111/chromedriver"
        Common.logger(self.log_type, self.crawler).info("启动微信")
        Common.logging(self.log_type, self.crawler, self.env, '启动微信')
        # WeChat Appium capabilities
        caps = {
            "platformName": "Android",
            "deviceName": "Android",
            "platformVersion": "13",
            # "udid": "emulator-5554",
            "appPackage": "com.tencent.mm",
            "appActivity": ".ui.LauncherUI",
            "autoGrantPermissions": True,
            "noReset": True,
            "resetKeyboard": True,
            "unicodeKeyboard": True,
            "showChromedriverLog": True,
            "printPageSourceOnFailure": True,
            "recreateChromeDriverSessions": True,
            "enableWebviewDetailsCollection": True,
            "setWebContentsDebuggingEnabled": True,
            "newCommandTimeout": 6000,
            "automationName": "UiAutomator2",
            "chromedriverExecutable": chromedriverExecutable,
            "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
        }
        self.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
        self.driver.implicitly_wait(30)
        for i in range(120):
            try:
                if self.driver.find_elements(By.ID, "com.tencent.mm:id/f2s"):
                    Common.logger(self.log_type, self.crawler).info("微信启动成功")
                    Common.logging(self.log_type, self.crawler, self.env, '微信启动成功')
                    break
                elif self.driver.find_elements(By.ID, "com.android.systemui:id/dismiss_view"):
                    Common.logger(self.log_type, self.crawler).info("发现并关闭系统下拉菜单")
                    Common.logging(self.log_type, self.crawler, self.env, '发现并关闭系统下拉菜单')
                    self.driver.find_element(By.ID, "com.android.systemui:id/dismiss_view").click()
            except NoSuchElementException:
                time.sleep(1)
        Common.logger(self.log_type, self.crawler).info("下滑,展示小程序选择面板")
        # Common.logging(self.log_type, self.crawler, self.env, '下滑,展示小程序选择面板')
        size = self.driver.get_window_size()
        self.driver.swipe(int(size['width'] * 0.5), int(size['height'] * 0.2),
                          int(size['width'] * 0.5), int(size['height'] * 0.8), 200)
        time.sleep(1)
        Common.logger(self.log_type, self.crawler).info('打开小程序"小年糕+"')
        # Common.logging(self.log_type, self.crawler, self.env, '打开小程序"小年糕+"')
        self.driver.find_elements(By.XPATH, '//*[@text="小年糕+"]')[-1].click()
        time.sleep(5)
        self.get_videoList()
        time.sleep(1)
        self.driver.quit()
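
    # Walk every window handle and return the first non-empty list of
    # elements matching the XPath; returns None when no window has a match.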
    def search_elements(self, xpath):
        time.sleep(1)
        windowHandles = self.driver.window_handles
        for handle in windowHandles:
            self.driver.switch_to.window(handle)
            time.sleep(1)
            try:
                elements = self.driver.find_elements(By.XPATH, xpath)
                if elements:
                    return elements
            except NoSuchElementException:
                pass
        return None
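
    # Switch into the last WebView context, then probe window handles until
    # the given XPath resolves, confirming the mini program page has loaded.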
    def check_to_applet(self, xpath):
        time.sleep(1)
        webViews = self.driver.contexts
        self.driver.switch_to.context(webViews[-1])
        windowHandles = self.driver.window_handles
        for handle in windowHandles:
            self.driver.switch_to.window(handle)
            time.sleep(1)
            try:
                self.driver.find_element(By.XPATH, xpath)
                Common.logger(self.log_type, self.crawler).info("切换到WebView成功\n")
                Common.logging(self.log_type, self.crawler, self.env, '切换到WebView成功\n')
                return
            except NoSuchElementException:
                time.sleep(1)
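
    # Dedupe check: number of crawler_video rows whose out_video_id matches,
    # across the sibling platforms that share the table.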
    def repeat_video(self, video_id):
        sql = f""" select * from crawler_video where platform in ("众妙音信", "刚刚都传", "吉祥幸福", "知青天天看", "zhufuquanzi", "祝福圈子", "haitunzhufu", "海豚祝福", "小年糕") and out_video_id="{video_id}"; """
        repeat_video = MysqlHelper.get_values(self.log_type, self.crawler, sql, self.env)
        return len(repeat_video)
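
    # One upward swipe of roughly a third of the screen height.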
    def swipe_up(self):
        self.search_elements('//*[@class="list-list--list"]')
        size = self.driver.get_window_size()
        self.driver.swipe(int(size["width"] * 0.5), int(size["height"] * 0.8),
                          int(size["width"] * 0.5), int(size["height"] * 0.442), 200)
        self.swipe_count += 1
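
    # Scroll the title into view, tap it to open the detail page, wait for
    # the <wx-video> element and return its src; retries up to 3 times.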
    def get_video_url(self, video_title_element):
        for i in range(3):
            self.search_elements('//*[@class="list-list--list"]')
            Common.logger(self.log_type, self.crawler).info(f"video_title_element:{video_title_element[0]}")
            time.sleep(1)
            Common.logger(self.log_type, self.crawler).info("滑动标题至可见状态")
            self.driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'});",
                                       video_title_element[0])
            time.sleep(3)
            Common.logger(self.log_type, self.crawler).info("点击标题")
            video_title_element[0].click()
            self.check_to_applet(xpath='//wx-video[@class="dynamic-index--video-item dynamic-index--video"]')
            Common.logger(self.log_type, self.crawler).info("点击标题完成")
            time.sleep(10)
            video_url_elements = self.search_elements(
                '//wx-video[@class="dynamic-index--video-item dynamic-index--video"]')
            if video_url_elements:
                return video_url_elements[0].get_attribute("src")
        return None
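
    # Re-parse the live page source with BeautifulSoup and return the
    # index-th video card (wx-view.expose--adapt-parent) from the feed.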
    def parse_detail(self, index):
        page_source = self.driver.page_source
        soup = BeautifulSoup(page_source, "html.parser")
        video_list = soup.findAll(name="wx-view", attrs={"class": "expose--adapt-parent"})
        element_list = video_list[index:]
        return element_list[0]
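
    # Parse one video card into video_dict, apply the download rules, filter
    # words and dedupe checks, then open the detail page, resolve the real
    # video URL and publish the record to MQ.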
    def get_video_info_2(self, video_element):
        Common.logger(self.log_type, self.crawler).info(f"本轮已抓取{self.download_cnt}条视频\n")
        # Common.logging(self.log_type, self.crawler, self.env, f"本轮已抓取{self.download_cnt}条视频\n")
        if self.download_cnt >= int(self.rule_dict.get("videos_cnt", {}).get("min", 10)):
            self.count = 0
            self.download_cnt = 0
            self.element_list = []
            return
        self.count += 1
        Common.logger(self.log_type, self.crawler).info(f"第{self.count}条视频")
        # Common.logging(self.log_type, self.crawler, self.env, f"第{self.count}条视频")
        # title
        video_title = video_element.find("wx-view", class_="dynamic--title").text
        # raw play-count string, e.g. "100次播放"
        play_str = video_element.find("wx-view", class_="dynamic--views").text
        info_list = video_element.findAll("wx-view", class_="dynamic--commerce-btn-text")
        # like count
        like_str = info_list[1].text
        # comment count
        comment_str = info_list[2].text
        # video duration
        duration_str = video_element.find("wx-view", class_="dynamic--duration").text
        user_name = video_element.find("wx-view", class_="dynamic--nick-top").text
        # avatar URL
        avatar_url = video_element.find("wx-image", class_="avatar--avatar")["src"]
        # cover URL
        cover_url = video_element.find("wx-image", class_="dynamic--bg-image")["src"]
        play_cnt = int(play_str.replace("+", "").replace("次播放", ""))
        duration = int(duration_str.split(":")[0].strip()) * 60 + int(duration_str.split(":")[-1].strip())
        if "点赞" in like_str:
            like_cnt = 0
        elif "万" in like_str:
            # counts like "1.2万" contain a decimal, so parse as float before scaling
            like_cnt = int(float(like_str.split("万")[0]) * 10000)
        else:
            like_cnt = int(like_str)
        if "评论" in comment_str:
            comment_cnt = 0
        elif "万" in comment_str:
            comment_cnt = int(float(comment_str.split("万")[0]) * 10000)
        else:
            comment_cnt = int(comment_str)
        out_video_id = md5(video_title.encode('utf8')).hexdigest()
        out_user_id = md5(user_name.encode('utf8')).hexdigest()
        video_dict = {
            "video_title": video_title,
            "video_id": out_video_id,
            "duration_str": duration_str,
            "duration": duration,
            "play_str": play_str,
            "play_cnt": play_cnt,
            "like_str": like_str,
            "like_cnt": like_cnt,
            "comment_cnt": comment_cnt,
            "share_cnt": 0,
            "user_name": user_name,
            "user_id": out_user_id,
            "publish_time_stamp": int(time.time()),
            "publish_time_str": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
            "avatar_url": avatar_url,
            "cover_url": cover_url,
            "session": f"xiaoniangao-{int(time.time())}"
        }
        for k, v in video_dict.items():
            Common.logger(self.log_type, self.crawler).info(f"{k}:{v}")
        Common.logging(self.log_type, self.crawler, self.env, f"video_dict:{video_dict}")
        # Common.logger(self.log_type, self.crawler).info(f"==========分割线==========\n")
        if video_title is None or cover_url is None:
            Common.logger(self.log_type, self.crawler).info("无效视频\n")
            Common.logging(self.log_type, self.crawler, self.env, '无效视频\n')
            # self.swipe_up()
            time.sleep(0.5)
        elif download_rule(log_type=self.log_type,
                           crawler=self.crawler,
                           video_dict=video_dict,
                           rule_dict=self.rule_dict) is False:
            Common.logger(self.log_type, self.crawler).info("不满足抓取规则\n")
            Common.logging(self.log_type, self.crawler, self.env, "不满足抓取规则\n")
            # self.swipe_up()
            time.sleep(0.5)
        elif any(str(word) in video_dict["video_title"]
                 for word in get_config_from_mysql(log_type=self.log_type,
                                                   source=self.crawler,
                                                   env=self.env,
                                                   text="filter",
                                                   action="")):
            Common.logger(self.log_type, self.crawler).info('已中过滤词\n')
            Common.logging(self.log_type, self.crawler, self.env, '已中过滤词\n')
            # self.swipe_up()
            time.sleep(0.5)
        elif self.repeat_video(out_video_id) != 0:
            Common.logger(self.log_type, self.crawler).info('视频已下载\n')
            Common.logging(self.log_type, self.crawler, self.env, '视频已下载\n')
            # self.swipe_up()
            time.sleep(5)
        else:
            video_title_element = self.search_elements(f'//*[contains(text(), "{video_title}")]')
            if video_title_element is None:
                Common.logger(self.log_type, self.crawler).warning(
                    f"未找到该视频标题的element:{video_title_element}")
                Common.logging(self.log_type, self.crawler, self.env,
                               f"未找到该视频标题的element:{video_title_element}")
                return
            Common.logger(self.log_type, self.crawler).info("点击标题,进入视频详情页")
            Common.logging(self.log_type, self.crawler, self.env, "点击标题,进入视频详情页")
            video_url = self.get_video_url(video_title_element)
            if video_url is None:
                Common.logger(self.log_type, self.crawler).info("未获取到视频播放地址\n")
                self.driver.press_keycode(AndroidKey.BACK)
                time.sleep(5)
                return
            # only follow the redirect once we know the URL is not None
            video_url = get_redirect_url(video_url)
            video_dict['video_url'] = video_url
            Common.logger(self.log_type, self.crawler).info(f"video_url:{video_url}")
            video_dict["platform"] = self.crawler
            video_dict["strategy"] = self.log_type
            video_dict["out_video_id"] = video_dict["video_id"]
            video_dict["crawler_rule"] = json.dumps(self.rule_dict)
            video_dict["user_id"] = self.our_uid
            video_dict["publish_time"] = video_dict["publish_time_str"]
            self.mq.send_msg(video_dict)
            # print(video_dict)
            self.download_cnt += 1
            self.driver.press_keycode(AndroidKey.BACK)
            time.sleep(5)
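
    # Thin wrapper so that one failing card logs an error instead of killing
    # the whole crawl loop.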
    def get_video_info(self, video_element):
        try:
            self.get_video_info_2(video_element)
        except Exception as e:
            Common.logger(self.log_type, self.crawler).error(f"抓取单条视频异常:{e}\n")
            # Common.logging(self.log_type, self.crawler, self.env, f"抓取单条视频异常:{e}\n")
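
    # Main crawl loop: switch into the WebView, then alternate parse_detail /
    # swipe_up for up to 50 cards, with a hard stop after 100 swipes.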
    def get_videoList(self):
        self.mq = MQ(topic_name="topic_crawler_etl_" + self.env)
        self.driver.implicitly_wait(20)
        # switch to the web_view context
        self.check_to_applet(xpath='//*[@class="tab-bar--tab tab-bar--tab-selected"]')
        print("切换到 webview 成功")
        time.sleep(1)
        page = 0
        if self.search_elements('//*[@class="list-list--list"]') is None:
            Common.logger(self.log_type, self.crawler).info("窗口已销毁\n")
            Common.logging(self.log_type, self.crawler, self.env, '窗口已销毁\n')
            self.count = 0
            self.download_cnt = 0
            self.element_list = []
            return
        print("开始获取视频信息")
        for i in range(50):
            print("下滑{}次".format(i))
            element = self.parse_detail(i)
            self.get_video_info(element)
            self.swipe_up()
            time.sleep(1)
            if self.swipe_count > 100:
                return
        print("下滑完成")
        # time.sleep(100)
        Common.logger(self.log_type, self.crawler).info("已抓取完一组,休眠 5 秒\n")
        Common.logging(self.log_type, self.crawler, self.env, "已抓取完一组,休眠 5 秒\n")
        time.sleep(5)
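

# Entry point: one recommend-feed crawl session against the dev environment.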
def run():
    rule_dict1 = {"period": {"min": 365, "max": 365},
                  "duration": {"min": 30, "max": 1800},
                  "favorite_cnt": {"min": 0, "max": 0},
                  "videos_cnt": {"min": 5000, "max": 0},
                  "share_cnt": {"min": 0, "max": 0}}
    XiaoNianGaoPlusRecommend("recommend", "xiaoniangao", "dev", rule_dict1, 6267141)
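

# Watchdog: if the crawler process dies, clear adb port forwards and start a
# fresh process; liveness is checked once a minute.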
if __name__ == "__main__":
    process = multiprocessing.Process(target=run)
    process.start()
    while True:
        if not process.is_alive():
            print("正在重启")
            process.terminate()
            time.sleep(60)
            os.system("adb forward --remove-all")
            process = multiprocessing.Process(target=run)
            process.start()
        time.sleep(60)