shipinhao_search_scheduling.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/4/25
import datetime
import json
import os
import shutil
import sys
import time
from datetime import date, timedelta
from hashlib import md5
from appium import webdriver
from appium.webdriver.extensions.android.nativekey import AndroidKey
from appium.webdriver.webdriver import WebDriver
from selenium.common import NoSuchElementException
from selenium.webdriver.common.by import By
sys.path.append(os.getcwd())
from common.feishu import Feishu
from common.publish import Publish
from common.common import Common
from common.getuser import getUser
from common.scheduling_db import MysqlHelper


class ShipinhaoSearchScheduling:
    platform = "视频号"
    i = 0
    download_cnt = 0

    # Baseline threshold rules
    @staticmethod
    def download_rule(log_type, crawler, video_dict, rule_dict):
        """
        Basic rules for deciding whether to download a video.
        :param log_type: log type
        :param crawler: which crawler
        :param video_dict: video info, dict
        :param rule_dict: rule info, dict
        :return: True if all rules are satisfied, otherwise False
        """
        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
        if rule_play_cnt_max == 0:
            rule_play_cnt_max = 100000000
        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
        if rule_duration_max == 0:
            rule_duration_max = 100000000
        # rule_period_min = rule_dict.get('period', {}).get('min', 0)
        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
        # if rule_period_max == 0:
        #     rule_period_max = 100000000
        # rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
        # rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
        # if rule_fans_cnt_max == 0:
        #     rule_fans_cnt_max = 100000000
        # rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
        # rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
        # if rule_videos_cnt_max == 0:
        #     rule_videos_cnt_max = 100000000
        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
        if rule_like_cnt_max == 0:
            rule_like_cnt_max = 100000000
        rule_width_min = rule_dict.get('width', {}).get('min', 0)
        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
        if rule_width_max == 0:
            rule_width_max = 100000000
        rule_height_min = rule_dict.get('height', {}).get('min', 0)
        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
        if rule_height_max == 0:
            rule_height_max = 100000000
        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
        if rule_share_cnt_max == 0:
            rule_share_cnt_max = 100000000
        rule_favorite_cnt_min = rule_dict.get('favorite_cnt', {}).get('min', 0)
        rule_favorite_cnt_max = rule_dict.get('favorite_cnt', {}).get('max', 100000000)
        if rule_favorite_cnt_max == 0:
            rule_favorite_cnt_max = 100000000
        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
        if rule_comment_cnt_max == 0:
            rule_comment_cnt_max = 100000000
        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 0)
        if rule_publish_time_max == 0:
            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
        Common.logger(log_type, crawler).info(
            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_favorite_cnt_max:{int(rule_favorite_cnt_max)} >= favorite_cnt:{int(video_dict["favorite_cnt"])} >= rule_favorite_cnt_min:{int(rule_favorite_cnt_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
        Common.logger(log_type, crawler).info(
            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"]) * 1000} >= rule_publish_time_min:{int(rule_publish_time_min)}')
        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
                and int(rule_favorite_cnt_max) >= int(video_dict['favorite_cnt']) >= int(rule_favorite_cnt_min) \
                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) * 1000 >= int(rule_publish_time_min):
            return True
        else:
            return False
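
    # Worked example (illustrative values, not from the source): with
    # rule_dict = {"duration": {"min": 30, "max": 600}, "share_cnt": {"min": 3000, "max": 0}},
    # the share_cnt max of 0 is widened to 100000000 above, so a video with
    # duration=45 and share_cnt=5000 passes, while one with duration=20 fails.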

    @classmethod
    def start_wechat(cls, log_type, crawler, word, rule_dict, our_uid, oss_endpoint, env):
        Common.logger(log_type, crawler).info('启动微信')
        if env == "dev":
            chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver"
        else:
            chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
        caps = {
            "platformName": "Android",  # Mobile OS: Android / iOS
            "deviceName": "Android",  # Name of the connected device (emulator or real device); arbitrary on Android
            "platformVersion": "13",  # OS version of the device (Android 13)
            "appPackage": "com.tencent.mm",  # Package name of the app under test (WeChat)
            "appActivity": ".ui.LauncherUI",  # Activity to launch
            "autoGrantPermissions": True,  # Let Appium grant base permissions automatically;
            # has no effect when noReset is True (Android-only); value is True or False
            "unicodeKeyboard": True,  # Use Appium's IME; set True when typing Chinese
            "resetKeyboard": True,  # Restore the original IME after the run
            "noReset": True,  # Do not reset the app
            "recreateChromeDriverSessions": True,  # Kill the chromedriver session when leaving a webview, so no manual kill is needed
            "printPageSourceOnFailure": True,  # Log the full page source when an element is not found
            "newCommandTimeout": 6000,  # Initial wait time
            "automationName": "UiAutomator2",  # Automation engine; default is Appium.
            # Appium, UiAutomator2, Selendroid and Espresso target Android; XCUITest targets iOS
            "showChromedriverLog": True,
            # "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
            "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
            'enableWebviewDetailsCollection': True,
            'setWebContentsDebuggingEnabled': True,
            'chromedriverExecutable': chromedriverExecutable,
        }
        driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
        driver.implicitly_wait(10)
        if len(driver.find_elements(By.ID, 'android:id/text1')) != 0:
            driver.find_elements(By.ID, 'android:id/text1')[0].click()
        time.sleep(5)
        cls.search_video(log_type=log_type,
                         crawler=crawler,
                         word=word,
                         rule_dict=rule_dict,
                         our_uid=our_uid,
                         oss_endpoint=oss_endpoint,
                         driver=driver,
                         env=env)
        cls.close_wechat(log_type=log_type,
                         crawler=crawler,
                         driver=driver)
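
    # The hub URL above ("http://localhost:4723/wd/hub") assumes an Appium 1.x
    # server; Appium 2.x drops the /wd/hub base path by default, so the URL
    # there would be "http://localhost:4723".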

    @classmethod
    def close_wechat(cls, log_type, crawler, driver: WebDriver):
        driver.quit()
        Common.logger(log_type, crawler).info(f"微信退出成功\n")

    @classmethod
    def is_contain_chinese(cls, strword):
        for ch in strword:
            if u'\u4e00' <= ch <= u'\u9fff':
                return True
        return False

    # Look up elements across all window handles
    @classmethod
    def search_elements(cls, driver: WebDriver, xpath):
        time.sleep(1)
        windowHandles = driver.window_handles
        for handle in windowHandles:
            driver.switch_to.window(handle)
            time.sleep(1)
            try:
                elements = driver.find_elements(By.XPATH, xpath)
                if elements:
                    return elements
            except NoSuchElementException:
                pass
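
    # search_elements() returns None implicitly when the xpath matches in no
    # window handle; callers depend on that (e.g. the "窗口已销毁" check in
    # search_video below), so no explicit sentinel is returned.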

    @classmethod
    def check_to_webview(cls, log_type, crawler, driver: WebDriver):
        # Common.logger(log_type, crawler).info('切换到webview')
        webviews = driver.contexts
        driver.switch_to.context(webviews[1])
        time.sleep(1)
        windowHandles = driver.window_handles
        for handle in windowHandles:
            driver.switch_to.window(handle)
            try:
                shipinhao_webview = driver.find_element(By.XPATH, '//div[@class="unit"]')
                if shipinhao_webview:
                    Common.logger(log_type, crawler).info('切换到视频号 webview 成功')
                    return "成功"
            except Exception as e:
                Common.logger(log_type, crawler).info(f"{e}\n")
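
    # driver.contexts is typically ["NATIVE_APP", "WEBVIEW_com.tencent.mm:tools", ...];
    # webviews[1] is assumed to be the WeChat tools webview hosting the search page.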

    @classmethod
    def repeat_out_video_id(cls, log_type, crawler, out_video_id, env):
        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{out_video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    @classmethod
    def repeat_video_url(cls, log_type, crawler, video_url, env):
        sql = f""" select * from crawler_video where platform="{cls.platform}" and video_url="{video_url}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)
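
    # Both repeat_* helpers return the number of matching rows in crawler_video,
    # so any non-zero result means the video was already crawled and is skipped.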

    @classmethod
    def search_video(cls, log_type, crawler, word, rule_dict, driver: WebDriver, our_uid, oss_endpoint, env):
        # Tap the WeChat search box and type the search word
        driver.implicitly_wait(10)
        driver.find_element(By.ID, 'com.tencent.mm:id/j5t').click()
        time.sleep(0.5)
        Common.logger(log_type, crawler).info(f'输入搜索词:{word}')
        # WebElement.clear() returns None, so it cannot be chained with send_keys()
        search_box = driver.find_element(By.ID, 'com.tencent.mm:id/cd7')
        search_box.clear()
        search_box.send_keys(word)
        driver.press_keycode(AndroidKey.ENTER)
        # driver.find_elements(By.ID, 'com.tencent.mm:id/oi4')[0].click()
        driver.find_element(By.ID, 'com.tencent.mm:id/m94').click()
        time.sleep(5)
        # Switch to the webview of the WeChat search-result page
        check_to_webview = cls.check_to_webview(log_type, crawler, driver)
        if check_to_webview is None:
            Common.logger(log_type, crawler).info("切换到视频号 webview 失败\n")
            return
        time.sleep(1)
        # Switch to the "视频号" (Channels) tab
        shipinhao_tags = cls.search_elements(driver, '//div[@class="unit"]/*[2]')
        Common.logger(log_type, crawler).info('点击"视频号"分类')
        shipinhao_tags[0].click()
        time.sleep(5)
        videos_cnt = rule_dict.get('videos_cnt', {}).get('min', 0)
        index = 0
        while True:
            if cls.search_elements(driver, '//*[@class="double-rich double-rich_vertical"]') is None:
                Common.logger(log_type, crawler).info('窗口已销毁\n')
                return
            Common.logger(log_type, crawler).info('获取视频列表\n')
            video_elements = cls.search_elements(driver, '//div[@class="vc active__mask"]')
            if video_elements is None:
                Common.logger(log_type, crawler).warning(f'video_elements:{video_elements}')
                return
            video_element_temp = video_elements[index:]
            if len(video_element_temp) == 0:
                Common.logger(log_type, crawler).info('到底啦~~~~~~~~~~~~~\n')
                return
            for i, video_element in enumerate(video_element_temp):
                try:
                    Common.logger(log_type, crawler).info(f"download_cnt:{cls.download_cnt}")
                    if cls.download_cnt >= int(videos_cnt):
                        Common.logger(log_type, crawler).info(f'搜索词:"{word}",已抓取视频数:{cls.download_cnt}')
                        cls.download_cnt = 0
                        return
                    if video_element is None:
                        Common.logger(log_type, crawler).info('到底啦~\n')
                        return
                    cls.i += 1
                    cls.search_elements(driver, '//div[@class="vc active__mask"]')
                    Common.logger(log_type, crawler).info(f'拖动"视频"列表第{cls.i}个至屏幕中间')
                    time.sleep(3)
                    driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})",
                                          video_element)
                    if len(video_element.find_elements(By.XPATH, "//*[@text='没有更多的搜索结果']")) != 0:
                        Common.logger(log_type, crawler).info("没有更多的搜索结果\n")
                        return
                    video_title = video_element.find_elements(By.XPATH, '//div[@class="title ellipsis_2"]/*[2]')[index + i].text
                    video_url = video_element.find_elements(By.XPATH, '//div[@class="video-player"]')[index + i].get_attribute('src')
                    cover_url = video_element.find_elements(By.XPATH, '//div[@class="video-player__bd"]')[index + i].get_attribute('style')
                    cover_url = cover_url.split('url("')[-1].split('")')[0]
                    duration = video_element.find_elements(By.XPATH, '//div[@class="play-mask__text"]/*[2]')[index + i].text
                    duration = int(duration.split(':')[0]) * 60 + int(duration.split(':')[-1])
                    user_name = video_element.find_elements(By.XPATH, '//p[@class="vc-source__text"]')[index + i].text
                    avatar_url = video_element.find_elements(By.XPATH, '//div[@class="ui-image-image ui-image vc-source__thumb"]')[index + i].get_attribute('style')
                    avatar_url = avatar_url.split('url("')[-1].split('")')[0]
                    out_video_id = md5(video_title.encode('utf8')).hexdigest()
                    out_user_id = md5(user_name.encode('utf8')).hexdigest()
                    video_dict = {
                        "video_title": video_title,
                        "video_id": out_video_id,
                        "play_cnt": 0,
                        "duration": duration,
                        "user_name": user_name,
                        "user_id": out_user_id,
                        "avatar_url": avatar_url,
                        "cover_url": cover_url,
                        "video_url": video_url,
                        "session": f"shipinhao-search-{int(time.time())}"
                    }
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    if video_title is None or video_url is None:
                        Common.logger(log_type, crawler).info("无效视频\n")
                    elif cls.repeat_out_video_id(log_type, crawler, out_video_id, env) != 0:
                        Common.logger(log_type, crawler).info('视频已下载\n')
                    elif cls.repeat_video_url(log_type, crawler, video_url, env) != 0:
                        Common.logger(log_type, crawler).info('视频已下载\n')
                    else:
                        video_element.click()
                        time.sleep(3)
                        video_info_dict = cls.get_video_info(driver)
                        video_dict["like_cnt"] = video_info_dict["like_cnt"]
                        video_dict["share_cnt"] = video_info_dict["share_cnt"]
                        video_dict["favorite_cnt"] = video_info_dict["favorite_cnt"]
                        video_dict["comment_cnt"] = video_info_dict["comment_cnt"]
                        video_dict["publish_time_str"] = video_info_dict["publish_time_str"]
                        video_dict["publish_time_stamp"] = video_info_dict["publish_time_stamp"]
                        cls.download_publish(log_type=log_type,
                                             crawler=crawler,
                                             word=word,
                                             rule_dict=rule_dict,
                                             video_dict=video_dict,
                                             our_uid=our_uid,
                                             oss_endpoint=oss_endpoint,
                                             env=env)
                except Exception as e:
                    Common.logger(log_type, crawler).error(f"抓取单条视频时异常:{e}\n")
            Common.logger(log_type, crawler).info('已抓取完一组视频,休眠1秒\n')
            time.sleep(1)
            index = index + len(video_element_temp)
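
    # Pagination note: `index` advances by the number of cards handled in each
    # pass, so after WeChat lazily appends more results, the next pass of the
    # while-loop only visits the newly loaded cards.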

    @classmethod
    def download_publish(cls, log_type, crawler, word, rule_dict, video_dict, our_uid, oss_endpoint, env):
        # Download the video
        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
        # Get video width/height via ffmpeg
        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
        if ffmpeg_dict is None:
            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
            return
        video_dict["video_width"] = ffmpeg_dict["width"]
        video_dict["video_height"] = ffmpeg_dict["height"]
        # Rule check
        if cls.download_rule(log_type=log_type,
                             crawler=crawler,
                             video_dict=video_dict,
                             rule_dict=rule_dict) is False:
            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
            Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
            return
        # Download the cover
        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_dict["video_title"], url=video_dict["cover_url"])
        # Save video info to "./videos/{download_video_title}/info.txt"
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
        # Upload the video
        Common.logger(log_type, crawler).info("开始上传视频...")
        our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                  crawler=crawler,
                                                  strategy="搜索爬虫策略",
                                                  our_uid=our_uid,
                                                  env=env,
                                                  oss_endpoint=oss_endpoint)
        if env == "dev":
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        Common.logger(log_type, crawler).info("视频上传完成")
        if our_video_id is None:
            try:
                # Delete the video folder
                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id}, 删除成功\n")
                return
            except FileNotFoundError:
                return
        insert_sql = f""" insert into crawler_video(video_id,
                                                    out_user_id,
                                                    platform,
                                                    strategy,
                                                    out_video_id,
                                                    video_title,
                                                    cover_url,
                                                    video_url,
                                                    duration,
                                                    publish_time,
                                                    play_cnt,
                                                    crawler_rule,
                                                    width,
                                                    height)
                          values({our_video_id},
                                 "{video_dict['user_id']}",
                                 "{cls.platform}",
                                 "搜索爬虫策略",
                                 "{video_dict['video_id']}",
                                 "{video_dict['video_title']}",
                                 "{video_dict['cover_url']}",
                                 "{video_dict['video_url']}",
                                 {int(video_dict['duration'])},
                                 "{video_dict['publish_time_str']}",
                                 {int(video_dict['play_cnt'])},
                                 '{json.dumps(rule_dict)}',
                                 {int(video_dict['video_width'])},
                                 {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
        # Write to the Feishu sheet
        Feishu.insert_columns(log_type, crawler, "xYWCzf", "ROWS", 1, 2)
        time.sleep(0.5)
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
                   "搜索爬虫策略",
                   word,
                   video_dict["video_title"],
                   our_video_link,
                   video_dict["duration"],
                   video_dict["like_cnt"],
                   video_dict["share_cnt"],
                   video_dict["favorite_cnt"],
                   video_dict["comment_cnt"],
                   f'{video_dict["video_width"]}*{video_dict["video_height"]}',
                   video_dict["publish_time_str"],
                   video_dict["user_name"],
                   video_dict["avatar_url"],
                   video_dict["cover_url"],
                   video_dict["video_url"]]]
        Feishu.update_values(log_type, crawler, "xYWCzf", "F2:Z2", values)
        Common.logger(log_type, crawler).info("写入飞书成功\n")
        cls.download_cnt += 1
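
    # Pipeline order: download video -> probe width/height with ffmpeg -> apply
    # download_rule (dimensions are only known after ffmpeg) -> download cover,
    # save info.txt -> upload -> insert into MySQL -> append a Feishu sheet row
    # -> bump download_cnt.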

    @classmethod
    def get_video_info(cls, driver: WebDriver):
        # Common.logger(log_type, crawler).info('切回NATIVE_APP')
        driver.switch_to.context('NATIVE_APP')
        # For each counter below, the "万"/"万+" forms are checked before the
        # is_contain_chinese() guard: "万" is itself a CJK character, so the
        # guard would otherwise zero every count of 10,000 or more.
        # Likes
        like_id = driver.find_element(By.ID, 'com.tencent.mm:id/k04')
        like_cnt = like_id.get_attribute('name')
        if '万+' in like_cnt:
            like_cnt = int(float(like_cnt.split('万')[0]) * 10000)
        elif '万' in like_cnt:
            like_cnt = int(float(like_cnt.split('万')[0]) * 10000)
        elif like_cnt == "" or like_cnt == "喜欢" or like_cnt == "火" or cls.is_contain_chinese(like_cnt) is True:
            like_cnt = 0
        else:
            like_cnt = int(float(like_cnt))
        # Shares
        share_id = driver.find_element(By.ID, 'com.tencent.mm:id/jhv')
        share_cnt = share_id.get_attribute('name')
        if '万+' in share_cnt:
            share_cnt = int(float(share_cnt.split('万')[0]) * 10000)
        elif '万' in share_cnt:
            share_cnt = int(float(share_cnt.split('万')[0]) * 10000)
        elif share_cnt == "" or share_cnt == "转发" or cls.is_contain_chinese(share_cnt) is True:
            share_cnt = 0
        else:
            share_cnt = int(float(share_cnt))
        # Favorites
        favorite_id = driver.find_element(By.ID, 'com.tencent.mm:id/fnp')
        favorite_cnt = favorite_id.get_attribute('name')
        if '万+' in favorite_cnt:
            favorite_cnt = int(float(favorite_cnt.split('万')[0]) * 10000)
        elif '万' in favorite_cnt:
            favorite_cnt = int(float(favorite_cnt.split('万')[0]) * 10000)
        elif favorite_cnt == "" or favorite_cnt == "收藏" or favorite_cnt == "推荐" or favorite_cnt == "火" or cls.is_contain_chinese(favorite_cnt) is True:
            favorite_cnt = 0
        else:
            favorite_cnt = int(float(favorite_cnt))
        # Comments
        comment_id = driver.find_element(By.ID, 'com.tencent.mm:id/bje')
        comment_cnt = comment_id.get_attribute('name')
        if '万+' in comment_cnt:
            comment_cnt = int(float(comment_cnt.split('万')[0]) * 10000)
        elif '万' in comment_cnt:
            comment_cnt = int(float(comment_cnt.split('万')[0]) * 10000)
        elif comment_cnt == "" or comment_cnt == "评论" or cls.is_contain_chinese(comment_cnt) is True:
            comment_cnt = 0
        else:
            comment_cnt = int(float(comment_cnt))
        # Publish time
        comment_id.click()
        time.sleep(1)
        publish_time = driver.find_element(By.ID, "com.tencent.mm:id/bre").get_attribute("name")
        if "秒" in publish_time or "分钟" in publish_time or "小时" in publish_time:
            publish_time_str = (date.today() + timedelta(days=0)).strftime("%Y-%m-%d")
        elif "天前" in publish_time:
            days = int(publish_time.replace("天前", ""))
            publish_time_str = (date.today() + timedelta(days=-days)).strftime("%Y-%m-%d")
        elif "年" in publish_time:
            # publish_time_str = publish_time.replace("年", "-").replace("月", "-").replace("日", "")
            year_str = publish_time.split("年")[0]
            month_str = publish_time.split("年")[-1].split("月")[0]
            day_str = publish_time.split("月")[-1].split("日")[0]
            if int(month_str) < 10:
                month_str = f"0{month_str}"
            if int(day_str) < 10:
                day_str = f"0{day_str}"
            publish_time_str = f"{year_str}-{month_str}-{day_str}"
        else:
            year_str = str(datetime.datetime.now().year)
            month_str = publish_time.split("月")[0]
            day_str = publish_time.split("月")[-1].split("日")[0]
            if int(month_str) < 10:
                month_str = f"0{month_str}"
            if int(day_str) < 10:
                day_str = f"0{day_str}"
            publish_time_str = f"{year_str}-{month_str}-{day_str}"
            # publish_time_str = f'2023-{publish_time.replace("月", "-").replace("日", "")}'
        publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y-%m-%d")))
        # Collapse the comment panel
        # Common.logger(log_type, crawler).info("收起评论")
        driver.find_element(By.ID, "com.tencent.mm:id/be_").click()
        time.sleep(0.5)
        # Return to the webview
        # Common.logger(log_type, crawler).info(f"操作手机返回按键")
        driver.find_element(By.ID, "com.tencent.mm:id/a2z").click()
        time.sleep(0.5)
        # driver.press_keycode(AndroidKey.BACK)
        # cls.check_to_webview(log_type=log_type, crawler=crawler, driver=driver)
        webviews = driver.contexts
        driver.switch_to.context(webviews[1])
        video_dict = {
            "like_cnt": like_cnt,
            "share_cnt": share_cnt,
            "favorite_cnt": favorite_cnt,
            "comment_cnt": comment_cnt,
            "publish_time_str": publish_time_str,
            "publish_time_stamp": publish_time_stamp,
        }
        return video_dict
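
    # Parsing examples (assumed UI strings): "3500" -> 3500, "1.2万" -> 12000,
    # "10万+" -> 100000; placeholder labels such as "喜欢"/"评论" and any other
    # Chinese-only text are treated as 0.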

    @classmethod
    def get_users(cls, log_type, crawler, sheetid, env):
        while True:
            user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
            if user_sheet is None:
                Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 3秒钟后重试")
                time.sleep(3)
                continue
            our_user_list = []
            # for i in range(1, len(user_sheet)):
            for i in range(1, 3):
                search_word = user_sheet[i][4]
                our_uid = user_sheet[i][6]
                tag1 = user_sheet[i][8]
                tag2 = user_sheet[i][9]
                tag3 = user_sheet[i][10]
                tag4 = user_sheet[i][11]
                tag5 = user_sheet[i][12]
                tag6 = user_sheet[i][13]
                tag7 = user_sheet[i][14]
                Common.logger(log_type, crawler).info(f"正在更新 {search_word} 搜索词信息")
                if our_uid is None:
                    default_user = getUser.get_default_user()
                    # Info used to create the our_uid account
                    user_dict = {
                        'recommendStatus': -6,
                        'appRecommendStatus': -6,
                        'nickName': default_user['nickName'],
                        'avatarUrl': default_user['avatarUrl'],
                        'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5},{tag6},{tag7}',
                    }
                    our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
                    if env == 'prod':
                        our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
                    else:
                        our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
                    Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
                                         [[our_uid, our_user_link]])
                    Common.logger(log_type, crawler).info(f'站内用户主页创建成功:{our_user_link}\n')
                our_user_dict = {
                    'out_uid': '',
                    'search_word': search_word,
                    'our_uid': our_uid,
                    'our_user_link': f'https://admin.piaoquantv.com/ums/user/{our_uid}/post',
                }
                our_user_list.append(our_user_dict)
            return our_user_list
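
    # Note: get_users() above reads only sheet rows 2-3 (`range(1, 3)`); the
    # commented-out `range(1, len(user_sheet))` would process every data row.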

    @classmethod
    def get_search_videos(cls, log_type, crawler, rule_dict, oss_endpoint, env):
        user_list = cls.get_users(log_type, crawler, "wNgi6Z", env)
        for user in user_list:
            cls.i = 0
            cls.download_cnt = 0
            search_word = user["search_word"]
            our_uid = user["our_uid"]
            Common.logger(log_type, crawler).info(f"开始抓取搜索词:{search_word}")
            try:
                cls.start_wechat(log_type=log_type,
                                 crawler=crawler,
                                 word=search_word,
                                 rule_dict=rule_dict,
                                 our_uid=our_uid,
                                 oss_endpoint=oss_endpoint,
                                 env=env)
            except Exception as e:
                Common.logger(log_type, crawler).error(f"抓取{user['search_word']}时异常:{e}\n")


if __name__ == '__main__':
    # ShipinhaoSearchScheduling.get_search_videos(log_type="search",
    #                                             crawler="shipinhao",
    #                                             rule_dict='[{"videos_cnt":{"min":10,"max":0}},{"duration":{"min":30,"max":600}},{"share_cnt":{"min":3000,"max":0}},{"favorite_cnt":{"min":1000,"max":0}},{"publish_time":{"min":1672502400000,"max":0}}]',
    #                                             oss_endpoint="out",
    #                                             env="dev")
    # print(ShipinhaoSearchScheduling.get_users("search", "shipinhao", "wNgi6Z", "dev"))
    # print((date.today() + timedelta(days=0)).strftime("%Y-%m-%d"))
    print(ShipinhaoSearchScheduling.repeat_out_video_id(log_type="search",
                                                        crawler="shipinhao",
                                                        out_video_id="123",
                                                        env="dev"))
    pass