# shipinhao_search_scheduling.py
  1. # -*- coding: utf-8 -*-
  2. # @Author: wangkun
  3. # @Time: 2023/4/25
  4. import datetime
  5. import json
  6. import os
  7. import sys
  8. import time
  9. from datetime import date, timedelta
  10. from hashlib import md5
  11. from appium import webdriver
  12. from appium.webdriver.extensions.android.nativekey import AndroidKey
  13. from appium.webdriver.webdriver import WebDriver
  14. from selenium.common import NoSuchElementException
  15. from selenium.webdriver.common.by import By
  16. sys.path.append(os.getcwd())
  17. from common.public import download_rule
  18. from common.mq import MQ
  19. from common.common import Common
  20. from common.scheduling_db import MysqlHelper
class ShipinhaoSearchScheduling:
    """Appium-driven crawler that searches WeChat Channels (视频号) for a set of
    search words and publishes qualifying videos to the ETL message queue.
    """
    platform = "视频号"
    # Running index of the result card currently scrolled into view (per search word).
    i = 0
    # Number of videos sent to MQ for the current search word; reset per word.
    download_cnt = 0
  25. @classmethod
  26. def start_wechat(cls, log_type, crawler, rule_dict, user_dict, env):
  27. Common.logger(log_type, crawler).info('启动微信')
  28. Common.logging(log_type, crawler, env, '启动微信')
  29. if env == "dev":
  30. chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver"
  31. else:
  32. chromedriverExecutable = "/Users/piaoquan/Downloads/chromedriver/chromedriver_v111/chromedriver"
  33. caps = {
  34. "platformName": "Android", # 手机操作系统 Android / iOS
  35. "deviceName": "Android", # 连接的设备名(模拟器或真机),安卓可以随便写
  36. "platforVersion": "13", # 手机对应的系统版本(Android 13)
  37. "appPackage": "com.tencent.mm", # 被测APP的包名,乐活圈 Android
  38. "appActivity": ".ui.LauncherUI", # 启动的Activity名
  39. "autoGrantPermissions": True, # 让 appium 自动授权 base 权限,
  40. # 如果 noReset 为 True,则该条不生效(该参数为 Android 独有),对应的值为 True 或 False
  41. "unicodekeyboard": True, # 使用自带输入法,输入中文时填True
  42. "resetkeyboard": True, # 执行完程序恢复原来输入法
  43. "noReset": True, # 不重置APP
  44. "recreateChromeDriverSessions": True, # 切换到非 chrome-Driver 会 kill 掉 session,就不需要手动 kill 了
  45. "printPageSourceOnFailure": True, # 找不到元素时,appium log 会完整记录当前页面的 pagesource
  46. "newCommandTimeout": 6000, # 初始等待时间
  47. "automationName": "UiAutomator2", # 使用引擎,默认为 Appium,
  48. # 其中 Appium、UiAutomator2、Selendroid、Espresso 用于 Android,XCUITest 用于 iOS
  49. "showChromedriverLog": True,
  50. # "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
  51. "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
  52. # "chromeOptions": {"androidProcess": "com.tencent.mm:toolsmp"},
  53. # "chromeOptions": {"androidProcess": "com.tencent.mm"},
  54. 'enableWebviewDetailsCollection': True,
  55. 'setWebContentsDebuggingEnabled': True,
  56. 'chromedriverExecutable': chromedriverExecutable,
  57. }
  58. driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
  59. driver.implicitly_wait(10)
  60. # Common.logger(log_type, crawler).info("点击微信")
  61. # if len(driver.find_elements(By.ID, 'android:id/text1')) != 0:
  62. # driver.find_elements(By.ID, 'android:id/text1')[0].click()
  63. # Common.logger(log_type, crawler).info("等待 5s")
  64. time.sleep(5)
  65. cls.search_video(log_type=log_type,
  66. crawler=crawler,
  67. rule_dict=rule_dict,
  68. user_dict=user_dict,
  69. driver=driver,
  70. env=env)
  71. cls.close_wechat(log_type=log_type,
  72. crawler=crawler,
  73. env=env,
  74. driver=driver)
  75. @classmethod
  76. def close_wechat(cls, log_type, crawler, env, driver: WebDriver):
  77. driver.quit()
  78. Common.logger(log_type, crawler).info(f"微信退出成功\n")
  79. Common.logging(log_type, crawler, env, f"微信退出成功\n")
  80. @classmethod
  81. def is_contain_chinese(cls, strword):
  82. for ch in strword:
  83. if u'\u4e00' <= ch <= u'\u9fff':
  84. return True
  85. return False
  86. # 查找元素
  87. @classmethod
  88. def search_elements(cls, driver: WebDriver, xpath):
  89. time.sleep(1)
  90. windowHandles = driver.window_handles
  91. for handle in windowHandles:
  92. driver.switch_to.window(handle)
  93. time.sleep(1)
  94. try:
  95. elements = driver.find_elements(By.XPATH, xpath)
  96. if elements:
  97. return elements
  98. except NoSuchElementException:
  99. pass
  100. @classmethod
  101. def check_to_webview(cls, log_type, crawler, driver: WebDriver):
  102. webviews = driver.contexts
  103. Common.logger(log_type, crawler).info(f"webviews:{webviews}")
  104. driver.switch_to.context(webviews[1])
  105. Common.logger(log_type, crawler).info(driver.current_context)
  106. time.sleep(1)
  107. windowHandles = driver.window_handles
  108. for handle in windowHandles:
  109. try:
  110. driver.switch_to.window(handle)
  111. time.sleep(1)
  112. driver.find_element(By.XPATH, '//div[@class="unit"]')
  113. Common.logger(log_type, crawler).info('切换 webview 成功')
  114. return "成功"
  115. except Exception:
  116. Common.logger(log_type, crawler).info("切换 webview 失败")
  117. @classmethod
  118. def repeat_out_video_id(cls, log_type, crawler, out_video_id, env):
  119. sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{out_video_id}"; """
  120. repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
  121. return len(repeat_video)
  122. @classmethod
  123. def repeat_video_url(cls, log_type, crawler, video_url, env):
  124. sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and video_url="{video_url}"; """
  125. repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
  126. return len(repeat_video)
    @classmethod
    def search_video(cls, log_type, crawler, rule_dict, driver: WebDriver, user_dict, env):
        """Drive the WeChat UI to search for user_dict["link"], scrape each result
        card, and publish qualifying videos to the ETL message queue.

        :param log_type: logger channel name
        :param crawler: crawler key used for logging and DB lookups
        :param rule_dict: crawl rules; videos_cnt.min caps how many videos to fetch
        :param driver: live Appium WebDriver session
        :param user_dict: scheduling record; "link" is the search word, "uid" the owner id
        :param env: environment suffix for the MQ topic name
        """
        mq = MQ(topic_name="topic_crawler_etl_" + env)
        # Tap WeChat's search box and type the search word
        driver.implicitly_wait(10)
        Common.logger(log_type, crawler).info("点击搜索框")
        Common.logging(log_type, crawler, env, "点击搜索框")
        driver.find_element(By.ID, 'com.tencent.mm:id/j5t').click()  # WeChat 8.0.30
        # driver.find_element(By.ID, 'com.tencent.mm:id/he6').click()  # WeChat 8.0.16
        time.sleep(0.5)
        driver.find_element(By.ID, 'com.tencent.mm:id/cd7').clear().send_keys(user_dict["link"])  # WeChat 8.0.30
        # driver.find_element(By.ID, 'com.tencent.mm:id/bxz').clear().send_keys(word)  # WeChat 8.0.16
        driver.press_keycode(AndroidKey.ENTER)
        Common.logger(log_type, crawler).info("进入搜索词页面")
        Common.logging(log_type, crawler, env, "进入搜索词页面")
        driver.find_elements(By.ID, 'com.tencent.mm:id/br8')[0].click()  # WeChat 8.0.30
        # driver.find_elements(By.ID, 'com.tencent.mm:id/jkg')[0].click()  # WeChat 8.0.16
        time.sleep(5)
        # Switch into the search-result webview; bail out if that fails
        check_to_webview = cls.check_to_webview(log_type, crawler, driver)
        if check_to_webview is None:
            Common.logger(log_type, crawler).info("切换到视频号 webview 失败\n")
            Common.logging(log_type, crawler, env, "切换到视频号 webview 失败\n")
            return
        time.sleep(1)
        # Switch to the "视频号" (Channels) result tab
        shipinhao_tags = cls.search_elements(driver, '//div[@class="unit"]/*[2]')
        Common.logger(log_type, crawler).info('点击"视频号"分类')
        Common.logging(log_type, crawler, env, '点击"视频号"分类')
        shipinhao_tags[0].click()
        time.sleep(5)
        videos_cnt = rule_dict.get('videos_cnt', {}).get('min', 30)
        # index: offset of the first not-yet-processed result card
        index = 0
        while True:
            if cls.search_elements(driver, '//*[@class="mixed-box__bd"]') is None:
                Common.logger(log_type, crawler).info('窗口已销毁\n')
                Common.logging(log_type, crawler, env, '窗口已销毁\n')
                return
            Common.logger(log_type, crawler).info('获取视频列表\n')
            Common.logging(log_type, crawler, env, '获取视频列表\n')
            video_elements = cls.search_elements(driver, '//div[@class="rich-media active__absolute"]')
            if video_elements is None:
                Common.logger(log_type, crawler).warning(f'video_elements:{video_elements}')
                Common.logging(log_type, crawler, env, f'video_elements:{video_elements}')
                return
            # Only the cards that appeared since the previous pass
            video_element_temp = video_elements[index:]
            if len(video_element_temp) == 0:
                Common.logger(log_type, crawler).info('到底啦~~~~~~~~~~~~~\n')
                Common.logging(log_type, crawler, env, '到底啦~~~~~~~~~~~~~\n')
                return
            for i, video_element in enumerate(video_element_temp):
                try:
                    Common.logger(log_type, crawler).info(f"download_cnt:{cls.download_cnt}")
                    Common.logging(log_type, crawler, env, f"download_cnt:{cls.download_cnt}")
                    # Stop once enough videos were collected for this search word
                    if cls.download_cnt >= int(videos_cnt):
                        Common.logger(log_type, crawler).info(f'搜索词:{user_dict["link"]},已抓取视频数:{cls.download_cnt}')
                        Common.logging(log_type, crawler, env, f'搜索词:{user_dict["link"]},已抓取视频数:{cls.download_cnt}')
                        cls.download_cnt = 0
                        return
                    if video_element is None:
                        Common.logger(log_type, crawler).info('到底啦~\n')
                        Common.logging(log_type, crawler, env, '到底啦~\n')
                        return
                    cls.i += 1
                    cls.search_elements(driver, '//*[@class="rich-media active__absolute"]')
                    Common.logger(log_type, crawler).info(f'拖动"视频"列表第{cls.i}个至屏幕中间')
                    Common.logging(log_type, crawler, env, f'拖动"视频"列表第{cls.i}个至屏幕中间')
                    time.sleep(3)
                    # Scroll the card into view so its lazy-loaded attributes are populated
                    driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})", video_element)
                    if len(video_element.find_elements(By.XPATH, "//*[@text='没有更多的搜索结果']")) != 0:
                        Common.logger(log_type, crawler).info("没有更多的搜索结果\n")
                        Common.logging(log_type, crawler, env, "没有更多的搜索结果\n")
                        return
                    # NOTE(review): these XPaths match across the whole page, so the
                    # absolute position index + i selects this card's fields.
                    video_title = \
                        video_element.find_elements(By.XPATH, '//div[@class="rich-media__title ellipsis_2"]/span')[
                            index + i].text[:40]
                    video_url = video_element.find_elements(By.XPATH, '//div[@class="video-player"]')[
                        index + i].get_attribute('src')
                    cover_url = video_element.find_elements(By.XPATH, '//div[@class="video-player__bd"]')[
                        index + i].get_attribute('style')
                    # cover image URL is embedded in the inline style: url("...")
                    cover_url = cover_url.split('url("')[-1].split('")')[0]
                    duration = video_element.find_elements(By.XPATH, '//div[@class="video-player-mask__text"]')[
                        index + i].text
                    # "MM:SS" -> seconds
                    duration = int(duration.split(':')[0]) * 60 + int(duration.split(':')[-1])
                    user_name = video_element.find_elements(By.XPATH, '//div[@class="rich-media__source__title"]')[
                        index + i].text
                    avatar_url = video_element.find_elements(By.XPATH,
                                                             '//div[@class="ui-image-image ui-image rich-media__source__thumb"]')[
                        index + i].get_attribute('style')
                    avatar_url = avatar_url.split('url("')[-1].split('")')[0]
                    # Deterministic ids derived from the title / author name
                    out_video_id = md5(video_title.encode('utf8')).hexdigest()
                    out_user_id = md5(user_name.encode('utf8')).hexdigest()
                    video_dict = {
                        "video_title": video_title,
                        "video_id": out_video_id,
                        "play_cnt": 0,
                        "duration": duration,
                        # "duration": 60,
                        "user_name": user_name,
                        "user_id": out_user_id,
                        "avatar_url": avatar_url,
                        "cover_url": cover_url,
                        "video_url": video_url,
                        "session": f"shipinhao-search-{int(time.time())}"
                    }
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    Common.logging(log_type, crawler, env, f"{video_dict}")
                    if video_title is None or video_url is None:
                        Common.logger(log_type, crawler).info("无效视频\n")
                        Common.logging(log_type, crawler, env, "无效视频\n")
                    elif cls.repeat_out_video_id(log_type, crawler, out_video_id, env) != 0:
                        Common.logger(log_type, crawler).info('视频已下载\n')
                        Common.logging(log_type, crawler, env, '视频已下载\n')
                    elif cls.repeat_video_url(log_type, crawler, video_url, env) != 0:
                        Common.logger(log_type, crawler).info('视频已下载\n')
                        Common.logging(log_type, crawler, env, '视频已下载\n')
                    else:
                        # Open the player to read engagement counts and publish time
                        video_element.click()
                        time.sleep(3)
                        video_info_dict = cls.get_video_info(driver)
                        video_dict["like_cnt"] = video_info_dict["like_cnt"]
                        video_dict["share_cnt"] = video_info_dict["share_cnt"]
                        video_dict["favorite_cnt"] = video_info_dict["favorite_cnt"]
                        video_dict["comment_cnt"] = video_info_dict["comment_cnt"]
                        video_dict["publish_time_str"] = video_info_dict["publish_time_str"] + " 00:00:00"
                        video_dict["publish_time_stamp"] = video_info_dict["publish_time_stamp"]
                        Common.logger(log_type, crawler).info(f'publish_time:{video_dict["publish_time_str"]}')
                        Common.logging(log_type, crawler, env, f'publish_time:{video_dict["publish_time_str"]}')
                        if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
                            Common.logger(log_type, crawler).info("不满足抓取规则\n")
                            Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                        else:
                            # Reshape the record into the MQ/ETL schema and enqueue it
                            video_dict["out_user_id"] = video_dict["user_id"]
                            video_dict["platform"] = crawler
                            video_dict["strategy"] = log_type
                            video_dict["out_video_id"] = video_dict["video_id"]
                            video_dict["width"] = 0
                            video_dict["height"] = 0
                            video_dict["crawler_rule"] = json.dumps(rule_dict)
                            video_dict["user_id"] = user_dict["uid"]
                            video_dict["publish_time"] = video_dict["publish_time_str"]
                            mq.send_msg(video_dict)
                            cls.download_cnt += 1
                except Exception as e:
                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                    Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
            Common.logger(log_type, crawler).info('已抓取完一组视频,休眠1秒\n')
            Common.logging(log_type, crawler, env, '已抓取完一组视频,休眠1秒\n')
            time.sleep(1)
            index = index + len(video_element_temp)
    @classmethod
    def get_video_info(cls, driver: WebDriver):
        """Read like/share/favorite/comment counts and the publish date from the
        native player UI, then switch back into the webview.

        Returns a dict with like_cnt, share_cnt, favorite_cnt, comment_cnt,
        publish_time_str ("YYYY-MM-DD") and publish_time_stamp (epoch seconds).
        Element IDs match WeChat 8.0.30.
        """
        # Switch back to the native app to read the player controls
        driver.switch_to.context('NATIVE_APP')
        # Likes
        like_id = driver.find_element(By.ID, 'com.tencent.mm:id/k04')  # WeChat 8.0.30
        like_cnt = like_id.get_attribute('name')
        if '万' in like_cnt:
            # "1.2万" (= 10k units) -> 12000; also matches "1.2万+" since split('万') drops the '+'
            like_cnt = int(float(like_cnt.split('万')[0]) * 10000)
        elif '万+' in like_cnt:
            # NOTE(review): unreachable — any "万+" string already matched the '万' branch above
            like_cnt = int(float(like_cnt.split('万+')[0]) * 10000)
        elif like_cnt == "" or like_cnt == "喜欢" or like_cnt == "火" or cls.is_contain_chinese(like_cnt) is True:
            # Placeholder label instead of a number -> treat as zero
            like_cnt = 0
        else:
            like_cnt = int(float(like_cnt))
        # Shares
        share_id = driver.find_element(By.ID, 'com.tencent.mm:id/jhv')
        share_cnt = share_id.get_attribute('name')
        if '万' in share_cnt:
            share_cnt = int(float(share_cnt.split('万')[0]) * 10000)
        elif '万+' in share_cnt:
            # NOTE(review): unreachable — see like_cnt above
            share_cnt = int(float(share_cnt.split('万+')[0]) * 10000)
        elif share_cnt == "" or share_cnt == "转发" or cls.is_contain_chinese(share_cnt) is True:
            share_cnt = 0
        else:
            share_cnt = int(float(share_cnt))
        # Favorites
        favorite_id = driver.find_element(By.ID, 'com.tencent.mm:id/fnp')
        favorite_cnt = favorite_id.get_attribute('name')
        if '万' in favorite_cnt:
            favorite_cnt = int(float(favorite_cnt.split('万')[0]) * 10000)
        elif '万+' in favorite_cnt:
            # NOTE(review): unreachable — see like_cnt above
            favorite_cnt = int(float(favorite_cnt.split('万+')[0]) * 10000)
        elif favorite_cnt == "" or favorite_cnt == "收藏" or favorite_cnt == "推荐" or favorite_cnt == "火" or cls.is_contain_chinese(
                favorite_cnt) is True:
            favorite_cnt = 0
        else:
            favorite_cnt = int(float(favorite_cnt))
        # Comments
        comment_id = driver.find_element(By.ID, 'com.tencent.mm:id/bje')
        comment_cnt = comment_id.get_attribute('name')
        if '万' in comment_cnt:
            comment_cnt = int(float(comment_cnt.split('万')[0]) * 10000)
        elif '万+' in comment_cnt:
            # NOTE(review): unreachable — see like_cnt above
            comment_cnt = int(float(comment_cnt.split('万+')[0]) * 10000)
        elif comment_cnt == "" or comment_cnt == "评论" or cls.is_contain_chinese(comment_cnt) is True:
            comment_cnt = 0
        else:
            comment_cnt = int(float(comment_cnt))
        # Publish time: open the comment sheet, which shows the relative/absolute date
        comment_id.click()
        time.sleep(1)
        publish_time = driver.find_element(By.ID, "com.tencent.mm:id/bre").get_attribute("name")
        if "秒" in publish_time or "分钟" in publish_time or "小时" in publish_time:
            # seconds/minutes/hours ago -> today
            publish_time_str = (date.today() + timedelta(days=0)).strftime("%Y-%m-%d")
        elif "天前" in publish_time:
            # "N天前" (N days ago)
            days = int(publish_time.replace("天前", ""))
            publish_time_str = (date.today() + timedelta(days=-days)).strftime("%Y-%m-%d")
        elif "年" in publish_time:
            # "YYYY年M月D日" -> zero-padded ISO date
            # publish_time_str = publish_time.replace("年", "-").replace("月", "-").replace("日", "")
            year_str = publish_time.split("年")[0]
            month_str = publish_time.split("年")[-1].split("月")[0]
            day_str = publish_time.split("月")[-1].split("日")[0]
            if int(month_str) < 10:
                month_str = f"0{month_str}"
            if int(day_str) < 10:
                day_str = f"0{day_str}"
            publish_time_str = f"{year_str}-{month_str}-{day_str}"
        else:
            # "M月D日" without a year -> assume the current year
            year_str = str(datetime.datetime.now().year)
            month_str = publish_time.split("月")[0]
            day_str = publish_time.split("月")[-1].split("日")[0]
            if int(month_str) < 10:
                month_str = f"0{month_str}"
            if int(day_str) < 10:
                day_str = f"0{day_str}"
            publish_time_str = f"{year_str}-{month_str}-{day_str}"
            # publish_time_str = f'2023-{publish_time.replace("月", "-").replace("日", "")}'
        publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y-%m-%d")))
        # Close the comment sheet
        driver.find_element(By.ID, "com.tencent.mm:id/be_").click()
        time.sleep(0.5)
        # Leave the player (back button in the UI)
        driver.find_element(By.ID, "com.tencent.mm:id/a2z").click()
        time.sleep(0.5)
        # driver.press_keycode(AndroidKey.BACK)
        # cls.check_to_webview(log_type=log_type, crawler=crawler, driver=driver)
        # Switch back into the webview for the next result card
        webviews = driver.contexts
        driver.switch_to.context(webviews[1])
        video_dict = {
            "like_cnt": like_cnt,
            "share_cnt": share_cnt,
            "favorite_cnt": favorite_cnt,
            "comment_cnt": comment_cnt,
            "publish_time_str": publish_time_str,
            "publish_time_stamp": publish_time_stamp,
        }
        return video_dict
  378. @classmethod
  379. def get_search_videos(cls, log_type, crawler, rule_dict, user_list, env):
  380. Common.logger(log_type, crawler).info(f"搜索词总数:{len(user_list)}\n")
  381. Common.logging(log_type, crawler, env, f"搜索词总数:{len(user_list)}\n")
  382. if len(user_list) == 0:
  383. return
  384. for user_dict in user_list:
  385. try:
  386. cls.i = 0
  387. cls.download_cnt = 0
  388. Common.logger(log_type, crawler).info(f"开始抓取 {user_dict['link']}\n")
  389. Common.logging(log_type, crawler, env, f"开始抓取 {user_dict['link']}\n")
  390. cls.start_wechat(log_type=log_type,
  391. crawler=crawler,
  392. rule_dict=rule_dict,
  393. user_dict=user_dict,
  394. env=env)
  395. except Exception as e:
  396. Common.logger(log_type, crawler).error(f"抓取 {user_dict['link']} 时异常:{e}\n")
  397. Common.logging(log_type, crawler, env, f"抓取 {user_dict['link']} 时异常:{e}\n")
if __name__ == '__main__':
    # Ad-hoc manual check: query the dev DB for a known out_video_id.
    print(ShipinhaoSearchScheduling.repeat_out_video_id(log_type="search",
                                                        crawler="shipinhao",
                                                        out_video_id="123",
                                                        env="dev"))
    pass