# zhiqingtiantiankan_recommend.py (22 KB)
  1. # -*- coding: utf-8 -*-
  2. # @Author: wangkun
  3. # @Time: 2023/4/18
  4. import json
  5. import os
  6. import shutil
  7. import sys
  8. import time
  9. from hashlib import md5
  10. from appium import webdriver
  11. from appium.webdriver.common.touch_action import TouchAction
  12. from appium.webdriver.extensions.android.nativekey import AndroidKey
  13. from appium.webdriver.webdriver import WebDriver
  14. from selenium.common.exceptions import NoSuchElementException
  15. from selenium.webdriver.common.by import By
  16. sys.path.append(os.getcwd())
  17. from common.common import Common
  18. from common.feishu import Feishu
  19. from common.publish import Publish
  20. from common.scheduling_db import MysqlHelper
  21. class ZhiqingtiantiankanRecommend:
  22. platform = "知青天天看"
  23. i = 0
  24. @classmethod
  25. def zhiqingtiantiankan_config(cls, log_type, crawler, text, env):
  26. select_sql = f"""select * from crawler_config where source="zhiqingtiantiankan" """
  27. contents = MysqlHelper.get_values(log_type, crawler, select_sql, env, action='')
  28. title_list = []
  29. filter_list = []
  30. for content in contents:
  31. config = content['config']
  32. config_dict = eval(config)
  33. for k, v in config_dict.items():
  34. if k == "title":
  35. title_list_config = v.split(",")
  36. for title in title_list_config:
  37. title_list.append(title)
  38. if k == "filter":
  39. filter_list_config = v.split(",")
  40. for filter_word in filter_list_config:
  41. filter_list.append(filter_word)
  42. if text == "title":
  43. return title_list
  44. elif text == "filter":
  45. return filter_list
  46. @classmethod
  47. def start_wechat(cls, log_type, crawler, env):
  48. try:
  49. Common.logger(log_type, crawler).info('启动微信')
  50. if env == "dev":
  51. chromedriverExecutable = '/Users/wangkun/Downloads/chromedriver/chromedriver_v111/chromedriver'
  52. # chromedriverExecutable = 'C:\\chromedriver\\chromedriver.exe' # 阿里云 Windows
  53. else:
  54. chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver' # Mac 爬虫机器
  55. # chromedriverExecutable = 'C:\\chromedriver\\chromedriver.exe' # 阿里云 Windows
  56. caps = {
  57. "platformName": "Android", # 手机操作系统 Android / iOS
  58. "deviceName": "Android", # 连接的设备名(模拟器或真机),安卓可以随便写
  59. # "udid": "emulator-5554", # 指定 adb devices 中的哪一台设备
  60. "platforVersion": "11", # 手机对应的系统版本
  61. "appPackage": "com.tencent.mm", # 被测APP的包名
  62. "appActivity": ".ui.LauncherUI", # 启动的Activity名
  63. "autoGrantPermissions": "true", # 让 appium 自动授权 base 权限,
  64. # 如果 noReset 为 True,则该条不生效(该参数为 Android 独有),对应的值为 True 或 False
  65. "unicodekeyboard": True, # 使用自带输入法,输入中文时填True
  66. "resetkeyboard": True, # 执行完程序恢复原来输入法
  67. "noReset": True, # 不重置APP
  68. "printPageSourceOnFailure": True, # 找不到元素时,appium log 会完整记录当前页面的 pagesource
  69. "newCommandTimeout": 6000, # 初始等待时间
  70. "automationName": "UiAutomator2", # 使用引擎,默认为 Appium,
  71. # 其中 Appium、UiAutomator2、Selendroid、Espresso 用于 Android,XCUITest 用于 iOS
  72. "showChromedriverLog": True,
  73. 'enableWebviewDetailsCollection': True,
  74. 'setWebContentsDebuggingEnabled': True,
  75. 'recreateChromeDriverSessions': True,
  76. 'chromedriverExecutable': chromedriverExecutable,
  77. "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
  78. # "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
  79. 'browserName': ''
  80. }
  81. driver = webdriver.Remote("http://localhost:4723/wd/hub", caps) # 指定 Appium 端口号:4723
  82. driver.implicitly_wait(30)
  83. # 向下滑动页面,展示出小程序选择面板
  84. for i in range(120):
  85. try:
  86. # 发现微信消息 TAB,代表微信已启动成功
  87. if driver.find_elements(By.ID, 'com.tencent.mm:id/f2s'):
  88. break
  89. # 发现并关闭系统菜单栏
  90. elif driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view'):
  91. Common.logger(log_type, crawler).info('发现并关闭系统下拉菜单栏')
  92. driver.find_element(By.ID, 'com.android.systemui:id/dismiss_view').click()
  93. else:
  94. pass
  95. except NoSuchElementException:
  96. time.sleep(1)
  97. Common.logger(log_type, crawler).info('下滑,展示小程序选择面板')
  98. size = driver.get_window_size()
  99. driver.swipe(int(size['width'] * 0.5), int(size['height'] * 0.2), int(size['width'] * 0.5),
  100. int(size['height'] * 0.8), 200)
  101. # 打开小程序"知青天天看"
  102. time.sleep(5)
  103. Common.logger(log_type, crawler).info('打开小程序"知青天天看"')
  104. driver.find_elements(By.XPATH, '//*[@text="知青天天看"]')[-1].click()
  105. # 获取视频信息
  106. # time.sleep(30)
  107. time.sleep(10)
  108. cls.get_videoList(log_type, crawler, driver, env)
  109. # 退出微信
  110. cls.quit(log_type, crawler, driver)
  111. except Exception as e:
  112. Common.logger(log_type, crawler).error('start_wechat异常:{}\n', e)
  113. # 退出 APP
  114. @classmethod
  115. def quit(cls, log_type, crawler, driver: WebDriver):
  116. driver.quit()
  117. Common.logger(log_type, crawler).info('退出微信APP成功\n')
  118. # 切换 Handle
  119. @classmethod
  120. def search_elements(cls, driver: WebDriver, xpath):
  121. windowHandles = driver.window_handles
  122. for handle in windowHandles:
  123. driver.switch_to.window(handle)
  124. time.sleep(1)
  125. try:
  126. elements = driver.find_elements(By.XPATH, xpath)
  127. if elements:
  128. return elements
  129. except NoSuchElementException:
  130. pass
  131. @classmethod
  132. def repeat_out_video_id(cls, log_type, crawler, out_video_id, env):
  133. sql = f""" select * from crawler_video where platform in ("众妙音信", "刚刚都传", "吉祥幸福", "知青天天看", "zhufuquanzi", "祝福圈子", "haitunzhufu", "海豚祝福") and out_video_id="{out_video_id}"; """
  134. repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
  135. return len(repeat_video)
  136. @classmethod
  137. def repeat_video_url(cls, log_type, crawler, video_url, env):
  138. sql = f""" select * from crawler_video where platform="{cls.platform}" and video_url="{video_url}"; """
  139. repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
  140. return len(repeat_video)
  141. @classmethod
  142. def check_to_applet(cls, log_type, crawler, driver: WebDriver):
  143. while True:
  144. webview = driver.contexts
  145. Common.logger(log_type, crawler).info(f"webview:{webview}")
  146. driver.switch_to.context(webview[1])
  147. windowHandles = driver.window_handles
  148. for handle in windowHandles:
  149. driver.switch_to.window(handle)
  150. time.sleep(1)
  151. try:
  152. applet = driver.find_element(By.XPATH, '//wx-view[@class="u-title navbar--u-title u-line-1 navbar--u-line-1 data-v-febd4d40 navbar--data-v-febd4d40"]')
  153. if applet:
  154. Common.logger(log_type, crawler).info('切换到小程序成功\n')
  155. return
  156. except NoSuchElementException:
  157. time.sleep(1)
  158. Common.logger(log_type, crawler).info("切换到小程序失败\n")
  159. break
  160. @classmethod
  161. def find_ad(cls, log_type, crawler, driver: WebDriver):
  162. windowHandles = driver.window_handles
  163. # Common.logger(log_type, crawler).info('windowHandles:{}', windowHandles)
  164. # 遍历所有的handles,找到当前页面所在的handle:如果pageSource有包含你想要的元素,就是所要找的handle
  165. # 小程序的页面来回切换也需要:遍历所有的handles,切换到元素所在的handle
  166. for handle in windowHandles:
  167. driver.switch_to.window(handle)
  168. time.sleep(1)
  169. try:
  170. Common.logger(log_type, crawler).info("寻找广告~~~~~~")
  171. ad_element = driver.find_element(By.XPATH, '//div[@class="ad-text"]')
  172. if ad_element:
  173. Common.logger(log_type, crawler).info("发现广告")
  174. for i in range(20):
  175. if driver.find_element(By.XPATH, '//div[@id="count_down_container"]/*[1]').text == "已完成浏览":
  176. Common.logger(log_type, crawler).info("广告播放完毕,点击返回")
  177. driver.press_keycode(AndroidKey.BACK)
  178. return
  179. else:
  180. Common.logger(log_type, crawler).info("广告未播放完毕,等待 1 秒")
  181. time.sleep(1)
  182. else:
  183. Common.logger(log_type, crawler).info("未发现广告, 退出")
  184. return
  185. except NoSuchElementException:
  186. time.sleep(1)
  187. @classmethod
  188. def get_video_url(cls, log_type, crawler, driver: WebDriver, video_element):
  189. video_element.click()
  190. time.sleep(5)
  191. cls.close_native_ad(log_type, crawler, driver)
  192. windowHandles = driver.window_handles
  193. for handle in windowHandles:
  194. driver.switch_to.window(handle)
  195. time.sleep(1)
  196. try:
  197. video_url_element = driver.find_element(By.XPATH, '//*[@class="wx-swiper-slide-frame"]/*[2]//*[@class="video_item videoswiper--video_item"]')
  198. video_url = video_url_element.get_attribute("src")
  199. # cls.find_ad(log_type, crawler, driver)
  200. return video_url
  201. except NoSuchElementException:
  202. time.sleep(1)
  203. @classmethod
  204. def close_native_ad(cls, log_type, crawler, driver: WebDriver):
  205. Common.logger(log_type, crawler).info('关闭广告')
  206. size = driver.get_window_size()
  207. time.sleep(3)
  208. TouchAction(driver).tap(x=int(size['width'] * 0.4), y=int(size['height'] * 0.1)).perform()
  209. @classmethod
  210. def get_videoList(cls, log_type, crawler, driver: WebDriver, env):
  211. driver.implicitly_wait(20)
  212. # 关闭广告
  213. # cls.close_native_ad(log_type, crawler, driver)
  214. # 切换到小程序
  215. cls.check_to_applet(log_type, crawler, driver)
  216. time.sleep(5)
  217. index = 0
  218. while True:
  219. try:
  220. if cls.search_elements(driver, '//wx-view[@class="listbox"]') is None:
  221. Common.logger(log_type, crawler).info('窗口已销毁\n')
  222. return
  223. Common.logger(log_type, crawler).info('获取视频列表\n')
  224. video_elements = cls.search_elements(driver, '//wx-view[@class="videolist-box videolist--videolist-box"]')
  225. if video_elements is None:
  226. Common.logger(log_type, crawler).warning(f'video_elements:{video_elements}')
  227. return
  228. video_element_temp = video_elements[index:]
  229. if len(video_element_temp) == 0:
  230. Common.logger(log_type, crawler).info('到底啦~~~~~~~~~~~~~\n')
  231. return
  232. for i, video_element in enumerate(video_element_temp):
  233. if video_element is None:
  234. Common.logger(log_type, crawler).info('到底啦~\n')
  235. return
  236. cls.i += 1
  237. cls.search_elements(driver, '//wx-view[@class="videolist-box videolist--videolist-box"]')
  238. Common.logger(log_type, crawler).info(f'拖动"视频"列表第{cls.i}个至屏幕中间')
  239. time.sleep(3)
  240. driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})", video_element)
  241. # video_title = video_element.find_elements(By.XPATH, '//wx-view[@class="video_title videolist--video_title"]')[cls.i-1].text
  242. # cover_url = video_element.find_elements(By.XPATH, '//wx-image[@class="itemimage videolist--itemimage"]')[cls.i-1].get_attribute('src')
  243. # play_cnt = video_element.find_elements(By.XPATH, '//wx-view[@class="clickbox videolist--clickbox"]')[cls.i-1].text
  244. video_title = video_element.find_elements(By.XPATH, '//wx-view[@class="video_title videolist--video_title"]')[index+i].text
  245. cover_url = video_element.find_elements(By.XPATH, '//wx-image[@class="itemimage videolist--itemimage"]')[index+i].get_attribute('src')
  246. play_cnt = video_element.find_elements(By.XPATH, '//wx-view[@class="clickbox videolist--clickbox"]')[index+i].text
  247. play_cnt = int(float(play_cnt.replace("阅读数", "").strip().split("万")[0]) * 10000)
  248. out_video_id = md5(video_title.encode('utf8')).hexdigest()
  249. video_dict = {
  250. 'video_title': video_title,
  251. 'video_id': out_video_id,
  252. 'play_cnt': play_cnt,
  253. 'comment_cnt': 0,
  254. 'like_cnt': 0,
  255. 'share_cnt': 0,
  256. 'publish_time_stamp': int(time.time()),
  257. 'publish_time_str': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
  258. 'user_name': "zhiqingtiantiankan",
  259. 'user_id': "zhiqingtiantiankan",
  260. 'avatar_url': cover_url,
  261. 'cover_url': cover_url,
  262. 'session': f"zhiqingtiantiankan-{int(time.time())}"
  263. }
  264. for k, v in video_dict.items():
  265. Common.logger(log_type, crawler).info(f"{k}:{v}")
  266. if video_title is None or cover_url is None:
  267. Common.logger(log_type, crawler).info("无效视频\n")
  268. elif any(str(word) if str(word) in video_title else False for word in
  269. cls.zhiqingtiantiankan_config(log_type, crawler, "filter", env)) is True:
  270. Common.logger(log_type, crawler).info('已中过滤词\n')
  271. elif cls.repeat_out_video_id(log_type, crawler, out_video_id, env) != 0:
  272. Common.logger(log_type, crawler).info('视频已下载\n')
  273. else:
  274. video_url = cls.get_video_url(log_type, crawler, driver, video_element)
  275. if video_url is None:
  276. Common.logger(log_type, crawler).info("未获取到视频播放地址\n")
  277. driver.press_keycode(AndroidKey.BACK)
  278. elif cls.repeat_video_url(log_type, crawler, video_url, env) != 0:
  279. Common.logger(log_type, crawler).info('视频已下载\n')
  280. driver.press_keycode(AndroidKey.BACK)
  281. else:
  282. video_dict["video_url"] = video_url
  283. Common.logger(log_type, crawler).info(f"video_url:{video_url}\n")
  284. # driver.press_keycode(AndroidKey.BACK)
  285. cls.download_publish(log_type, crawler, video_dict, env, driver)
  286. Common.logger(log_type, crawler).info('已抓取完一组视频,休眠10秒\n')
  287. time.sleep(10)
  288. index = index + len(video_element_temp)
  289. except Exception as e:
  290. Common.logger(log_type, crawler).info(f"get_videoList:{e}\n")
  291. cls.i = 0
  292. return
  293. @classmethod
  294. def download_publish(cls, log_type, crawler, video_dict, env, driver: WebDriver):
  295. # 下载视频
  296. Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'],
  297. url=video_dict['video_url'])
  298. ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
  299. if ffmpeg_dict is None:
  300. md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
  301. shutil.rmtree(f"./{crawler}/videos/{md_title}/")
  302. Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
  303. return
  304. video_dict["duration"] = ffmpeg_dict["duration"]
  305. video_dict["video_width"] = ffmpeg_dict["width"]
  306. video_dict["video_height"] = ffmpeg_dict["height"]
  307. # 下载封面
  308. Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'],
  309. url=video_dict['cover_url'])
  310. # 保存视频信息至txt
  311. Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
  312. # 上传视频
  313. Common.logger(log_type, crawler).info("开始上传视频...")
  314. our_video_id = Publish.upload_and_publish(log_type=log_type,
  315. crawler=crawler,
  316. strategy="推荐榜爬虫策略",
  317. our_uid="recommend",
  318. env=env,
  319. oss_endpoint="out")
  320. if env == 'dev':
  321. our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
  322. else:
  323. our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
  324. Common.logger(log_type, crawler).info("视频上传完成")
  325. if our_video_id is None:
  326. # 删除视频文件夹
  327. shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
  328. return
  329. # 视频信息保存至飞书
  330. Feishu.insert_columns(log_type, crawler, "1a88b3", "ROWS", 1, 2)
  331. # 视频ID工作表,首行写入数据
  332. upload_time = int(time.time())
  333. values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
  334. "推荐榜爬虫策略",
  335. video_dict["video_title"],
  336. video_dict["video_id"],
  337. video_dict["play_cnt"],
  338. video_dict["duration"],
  339. f'{video_dict["video_width"]}*{video_dict["video_height"]}',
  340. our_video_link,
  341. video_dict["cover_url"],
  342. video_dict["video_url"]]]
  343. time.sleep(1)
  344. Feishu.update_values(log_type, crawler, "1a88b3", "F2:V2", values)
  345. Common.logger(log_type, crawler).info(f"视频已保存至飞书文档\n")
  346. rule_dict = {}
  347. # 视频信息保存数据库
  348. insert_sql = f""" insert into crawler_video(video_id,
  349. out_user_id,
  350. platform,
  351. strategy,
  352. out_video_id,
  353. video_title,
  354. cover_url,
  355. video_url,
  356. duration,
  357. publish_time,
  358. play_cnt,
  359. crawler_rule,
  360. width,
  361. height)
  362. values({our_video_id},
  363. "{video_dict['user_id']}",
  364. "{cls.platform}",
  365. "推荐榜爬虫策略",
  366. "{video_dict['video_id']}",
  367. "{video_dict['video_title']}",
  368. "{video_dict['cover_url']}",
  369. "{video_dict['video_url']}",
  370. {int(video_dict['duration'])},
  371. "{video_dict['publish_time_str']}",
  372. {int(video_dict['play_cnt'])},
  373. '{json.dumps(rule_dict)}',
  374. {int(video_dict['video_width'])},
  375. {int(video_dict['video_height'])}) """
  376. Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
  377. MysqlHelper.update_values(log_type, crawler, insert_sql, env, action='')
  378. Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
  379. driver.press_keycode(AndroidKey.BACK)
  380. if __name__ == '__main__':
  381. ZhiqingtiantiankanRecommend.start_wechat('recommend', 'zhiqingtiantiankan', 'prod')
  382. pass