shipinhao_search.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2023/4/25
import datetime
import json
import os
import shutil
import sys
import time
from datetime import date, timedelta
from hashlib import md5
from appium import webdriver
from appium.webdriver.extensions.android.nativekey import AndroidKey
from appium.webdriver.webdriver import WebDriver
from selenium.common import NoSuchElementException
from selenium.webdriver.common.by import By
sys.path.append(os.getcwd())
from common.feishu import Feishu
from common.publish import Publish
from common.common import Common
from common.getuser import getUser
from common.scheduling_db import MysqlHelper


class ShipinhaoSearch:
    platform = "视频号"
    i = 0
    download_cnt = 0

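    # Reads the crawl-rule thresholds (duration, counts, publish time, resolution)
    # from the Feishu sheet "YhfkNY" and packs them into one rule dict.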
    @staticmethod
    def rule_dict(log_type, crawler):
        while True:
            shipinhao_rule_sheet = Feishu.get_values_batch(log_type, crawler, "YhfkNY")
            if shipinhao_rule_sheet is None:
                Common.logger(log_type, crawler).warning(f"shipinhao_rule_sheet:{shipinhao_rule_sheet}\n")
                time.sleep(3)
                continue
            rule_duration_min = int(shipinhao_rule_sheet[1][0])
            rule_duration_max = int(shipinhao_rule_sheet[1][2])
            rule_share_cnt_min = int(shipinhao_rule_sheet[2][0])
            rule_share_cnt_max = int(shipinhao_rule_sheet[2][2])
            rule_favorite_cnt_min = int(shipinhao_rule_sheet[3][0])
            rule_favorite_cnt_max = int(shipinhao_rule_sheet[3][2])
            rule_publish_time_min = shipinhao_rule_sheet[4][0]
            rule_publish_time_min_str = f"{str(rule_publish_time_min)[:4]}-{str(rule_publish_time_min)[4:6]}-{str(rule_publish_time_min)[6:]}"
            rule_publish_time_min = int(time.mktime(time.strptime(rule_publish_time_min_str, "%Y-%m-%d")))
            rule_publish_time_max = shipinhao_rule_sheet[4][2]
            rule_publish_time_max_str = f"{str(rule_publish_time_max)[:4]}-{str(rule_publish_time_max)[4:6]}-{str(rule_publish_time_max)[6:]}"
            rule_publish_time_max = int(time.mktime(time.strptime(rule_publish_time_max_str, "%Y-%m-%d")))
            # Reuse the sheet fetched above; re-fetching per field can return None mid-way
            videos_cnt = shipinhao_rule_sheet[5][2]
            rule_like_cnt_min = int(shipinhao_rule_sheet[6][0])
            rule_like_cnt_max = int(shipinhao_rule_sheet[6][2])
            rule_comment_cnt_min = int(shipinhao_rule_sheet[7][0])
            rule_comment_cnt_max = int(shipinhao_rule_sheet[7][2])
            rule_width_min = int(shipinhao_rule_sheet[8][0])
            rule_width_max = int(shipinhao_rule_sheet[8][2])
            rule_height_min = int(shipinhao_rule_sheet[9][0])
            rule_height_max = int(shipinhao_rule_sheet[9][2])
            rule_dict = {
                "duration": {"min": rule_duration_min, "max": rule_duration_max},
                "share_cnt": {"min": rule_share_cnt_min, "max": rule_share_cnt_max},
                "favorite_cnt": {"min": rule_favorite_cnt_min, "max": rule_favorite_cnt_max},
                "publish_time": {"min": rule_publish_time_min, "max": rule_publish_time_max},
                "videos_cnt": {"min": videos_cnt},
                "like_cnt": {"min": rule_like_cnt_min, "max": rule_like_cnt_max},
                "comment_cnt": {"min": rule_comment_cnt_min, "max": rule_comment_cnt_max},
                "width": {"min": rule_width_min, "max": rule_width_max},
                "height": {"min": rule_height_min, "max": rule_height_max},
            }
            return rule_dict

    # Baseline threshold rules
    @staticmethod
    def download_rule(log_type, crawler, video_dict):
        """
        Baseline rules a video must satisfy before download
        :param log_type: log type
        :param crawler: which crawler
        :param video_dict: video info, dict
        :return: True if the rules are satisfied, otherwise False
        """
        while True:
            shipinhao_rule_sheet = Feishu.get_values_batch(log_type, crawler, "YhfkNY")
            if shipinhao_rule_sheet is None:
                Common.logger(log_type, crawler).warning(f"shipinhao_rule_sheet:{shipinhao_rule_sheet}\n")
                time.sleep(3)
                continue
            rule_duration_min = int(shipinhao_rule_sheet[1][0])
            rule_duration_max = int(shipinhao_rule_sheet[1][2])
            rule_share_cnt_min = int(shipinhao_rule_sheet[2][0])
            rule_share_cnt_max = int(shipinhao_rule_sheet[2][2])
            rule_favorite_cnt_min = int(shipinhao_rule_sheet[3][0])
            rule_favorite_cnt_max = int(shipinhao_rule_sheet[3][2])
            rule_publish_time_min = shipinhao_rule_sheet[4][0]
            rule_publish_time_min_str = f"{str(rule_publish_time_min)[:4]}-{str(rule_publish_time_min)[4:6]}-{str(rule_publish_time_min)[6:]}"
            rule_publish_time_min = int(time.mktime(time.strptime(rule_publish_time_min_str, "%Y-%m-%d")))
            rule_publish_time_max = shipinhao_rule_sheet[4][2]
            rule_publish_time_max_str = f"{str(rule_publish_time_max)[:4]}-{str(rule_publish_time_max)[4:6]}-{str(rule_publish_time_max)[6:]}"
            rule_publish_time_max = int(time.mktime(time.strptime(rule_publish_time_max_str, "%Y-%m-%d")))
            # videos_cnt = Feishu.get_values_batch(log_type, crawler, "YhfkNY")[5][2]
            rule_like_cnt_min = int(shipinhao_rule_sheet[6][0])
            rule_like_cnt_max = int(shipinhao_rule_sheet[6][2])
            rule_comment_cnt_min = int(shipinhao_rule_sheet[7][0])
            rule_comment_cnt_max = int(shipinhao_rule_sheet[7][2])
            Common.logger(log_type, crawler).info(
                f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
            Common.logger(log_type, crawler).info(
                f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
            Common.logger(log_type, crawler).info(
                f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
            Common.logger(log_type, crawler).info(
                f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
            Common.logger(log_type, crawler).info(
                f'rule_favorite_cnt_max:{int(rule_favorite_cnt_max)} >= favorite_cnt:{int(video_dict["favorite_cnt"])} >= rule_favorite_cnt_min:{int(rule_favorite_cnt_min)}')
            Common.logger(log_type, crawler).info(
                f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"])} >= rule_publish_time_min:{int(rule_publish_time_min)}')
            if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
                    and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
                    and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
                    and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
                    and int(rule_favorite_cnt_max) >= int(video_dict['favorite_cnt']) >= int(rule_favorite_cnt_min) \
                    and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) >= int(rule_publish_time_min):
                return True
            else:
                return False

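    # Resolution gate: width and height must both fall inside the sheet-configured ranges.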
    @staticmethod
    def width_height_rule(log_type, crawler, width, height):
        while True:
            shipinhao_rule_sheet = Feishu.get_values_batch(log_type, crawler, "YhfkNY")
            if shipinhao_rule_sheet is None:
                Common.logger(log_type, crawler).warning(f"shipinhao_rule_sheet:{shipinhao_rule_sheet}\n")
                time.sleep(3)
                continue
            rule_width_min = int(shipinhao_rule_sheet[8][0])
            rule_width_max = int(shipinhao_rule_sheet[8][2])
            rule_height_min = int(shipinhao_rule_sheet[9][0])
            rule_height_max = int(shipinhao_rule_sheet[9][2])
            Common.logger(log_type, crawler).info(
                f'rule_width_max:{int(rule_width_max)} >= width:{int(width)} >= rule_width_min:{int(rule_width_min)}')
            Common.logger(log_type, crawler).info(
                f'rule_height_max:{int(rule_height_max)} >= height:{int(height)} >= rule_height_min:{int(rule_height_min)}')
            if rule_width_max >= int(width) >= rule_width_min and rule_height_max >= int(height) >= rule_height_min:
                return True
            else:
                return False

    @staticmethod
    def videos_cnt(log_type, crawler):
        while True:
            shipinhao_rule_sheet = Feishu.get_values_batch(log_type, crawler, "YhfkNY")
            if shipinhao_rule_sheet is None:
                Common.logger(log_type, crawler).warning(f"shipinhao_rule_sheet:{shipinhao_rule_sheet}\n")
                time.sleep(3)
                continue
            videos_cnt = shipinhao_rule_sheet[5][2]
            return int(videos_cnt)

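    # Launches WeChat on the connected Android device via Appium, runs one search
    # pass for `word`, then tears the session down.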
    @classmethod
    def start_wechat(cls, log_type, crawler, word, our_uid, env):
        Common.logger(log_type, crawler).info('启动微信')
        Common.logging(log_type, crawler, env, '启动微信')
        if env == "dev":
            chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver"
        else:
            chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
        caps = {
            "platformName": "Android",  # Device OS: Android / iOS
            "deviceName": "Android",  # Device name (emulator or real device); arbitrary on Android
            "platformVersion": "13",  # OS version of the device (Android 13)
            "appPackage": "com.tencent.mm",  # Package name of the app under test (WeChat)
            "appActivity": ".ui.LauncherUI",  # Activity to launch
            "autoGrantPermissions": True,  # Let Appium auto-grant base permissions;
            # has no effect when noReset is True (Android-only); value is True or False
            "unicodeKeyboard": True,  # Use Appium's IME; set True when typing Chinese
            "resetKeyboard": True,  # Restore the original IME after the run
            "noReset": True,  # Do not reset the app state
            "recreateChromeDriverSessions": True,  # Kill the chromedriver session when leaving a webview context, so no manual kill is needed
            "printPageSourceOnFailure": True,  # Log the full page source when an element cannot be found
            "newCommandTimeout": 6000,  # How long Appium waits for a new command before ending the session
            "automationName": "UiAutomator2",  # Automation engine; default is Appium.
            # Appium, UiAutomator2, Selendroid and Espresso target Android; XCUITest targets iOS
            "showChromedriverLog": True,
            # "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
            # "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
            "chromeOptions": {"androidProcess": "com.tencent.mm:toolsmp"},
            # "chromeOptions": {"androidProcess": "com.tencent.mm"},
            'enableWebviewDetailsCollection': True,
            'setWebContentsDebuggingEnabled': True,
            'chromedriverExecutable': chromedriverExecutable,
        }
        driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
        driver.implicitly_wait(10)
        # Common.logger(log_type, crawler).info("点击微信")
        # if len(driver.find_elements(By.ID, 'android:id/text1')) != 0:
        #     driver.find_elements(By.ID, 'android:id/text1')[0].click()
        # Common.logger(log_type, crawler).info("等待 5s")
        time.sleep(5)
        cls.search_video(log_type=log_type,
                         crawler=crawler,
                         word=word,
                         our_uid=our_uid,
                         driver=driver,
                         env=env)
        cls.close_wechat(log_type=log_type,
                         crawler=crawler,
                         env=env,
                         driver=driver)

    @classmethod
    def close_wechat(cls, log_type, crawler, env, driver: WebDriver):
        driver.quit()
        Common.logger(log_type, crawler).info(f"微信退出成功\n")
        Common.logging(log_type, crawler, env, f"微信退出成功\n")

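    # Helper: True if the string contains any CJK character,
    # e.g. is_contain_chinese("喜欢") is True, is_contain_chinese("123") is False.
    # get_video_info uses this to treat non-numeric counter labels as 0.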
    @classmethod
    def is_contain_chinese(cls, strword):
        for ch in strword:
            if u'\u4e00' <= ch <= u'\u9fff':
                return True
        return False

    # Find elements: walk every window handle until the xpath matches somewhere
    @classmethod
    def search_elements(cls, driver: WebDriver, xpath):
        time.sleep(1)
        windowHandles = driver.window_handles
        for handle in windowHandles:
            driver.switch_to.window(handle)
            time.sleep(1)
            try:
                elements = driver.find_elements(By.XPATH, xpath)
                if elements:
                    return elements
            except NoSuchElementException:
                pass
        # Implicitly returns None when no handle contains the element

    @classmethod
    def check_to_webview(cls, log_type, crawler, driver: WebDriver):
        webviews = driver.contexts
        Common.logger(log_type, crawler).info(f"webviews:{webviews}")
        driver.switch_to.context(webviews[1])
        time.sleep(1)
        windowHandles = driver.window_handles
        for handle in windowHandles:
            driver.switch_to.window(handle)
            time.sleep(1)
            # find_elements (plural), so a missing node falls through to the failure
            # log instead of raising NoSuchElementException
            if len(driver.find_elements(By.XPATH, '//div[@class="unit"]')) != 0:
                Common.logger(log_type, crawler).info('切换 webview 成功')
                return "成功"
            else:
                Common.logger(log_type, crawler).info("切换 webview 失败")

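    # Dedup helpers: count rows already in crawler_video with the same
    # out_video_id / video_url; a non-zero result means the video was crawled before.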
    @classmethod
    def repeat_out_video_id(cls, log_type, crawler, out_video_id, env):
        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{out_video_id}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

    @classmethod
    def repeat_video_url(cls, log_type, crawler, video_url, env):
        sql = f""" select * from crawler_video where platform="{cls.platform}" and video_url="{video_url}"; """
        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
        return len(repeat_video)

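    # Full pipeline for one qualifying video: download the file, probe it with ffmpeg,
    # apply the resolution rule, upload to the platform, record it in MySQL, then
    # append a row to the Feishu sheet.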
    @classmethod
    def download_publish(cls, log_type, crawler, word, video_dict, our_uid, env):
        # Download the video
        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
        # Get video width/height via ffmpeg
        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
        if ffmpeg_dict is None:
            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
            Common.logging(log_type, crawler, env, "视频size=0,删除成功\n")
            return
        video_dict["video_width"] = ffmpeg_dict["width"]
        video_dict["video_height"] = ffmpeg_dict["height"]
        # Rule check
        if cls.width_height_rule(log_type, crawler, video_dict["video_width"], video_dict["video_height"]) is False:
            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
            Common.logger(log_type, crawler).info("宽高不满足抓取规则,删除成功\n")
            Common.logging(log_type, crawler, env, "宽高不满足抓取规则,删除成功\n")
            return
        # Download the cover
        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_dict["video_title"], url=video_dict["cover_url"])
        # Save video info to "./videos/{download_video_title}/info.txt"
        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
        # Upload the video
        Common.logger(log_type, crawler).info("开始上传视频...")
        Common.logging(log_type, crawler, env, "开始上传视频...")
        our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                  crawler=crawler,
                                                  strategy="搜索爬虫策略",
                                                  our_uid=our_uid,
                                                  env=env,
                                                  oss_endpoint="out")
        if env == "dev":
            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        else:
            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
        Common.logger(log_type, crawler).info("视频上传完成")
        Common.logging(log_type, crawler, env, "视频上传完成")
        if our_video_id is None:
            try:
                # Remove the video folder
                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
                shutil.rmtree(f"./{crawler}/videos/{md_title}")
                Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id}, 删除成功\n")
                Common.logging(log_type, crawler, env, f"our_video_id:{our_video_id}, 删除成功\n")
                return
            except FileNotFoundError:
                return
        rule_dict = cls.rule_dict(log_type, crawler)
        insert_sql = f""" insert into crawler_video(video_id,
                                                    out_user_id,
                                                    platform,
                                                    strategy,
                                                    out_video_id,
                                                    video_title,
                                                    cover_url,
                                                    video_url,
                                                    duration,
                                                    publish_time,
                                                    play_cnt,
                                                    crawler_rule,
                                                    width,
                                                    height)
                          values({our_video_id},
                                 "{video_dict['user_id']}",
                                 "{cls.platform}",
                                 "搜索爬虫策略",
                                 "{video_dict['video_id']}",
                                 "{video_dict['video_title']}",
                                 "{video_dict['cover_url']}",
                                 "{video_dict['video_url']}",
                                 {int(video_dict['duration'])},
                                 "{video_dict['publish_time_str']}",
                                 {int(video_dict['play_cnt'])},
                                 '{json.dumps(rule_dict)}',
                                 {int(video_dict['video_width'])},
                                 {int(video_dict['video_height'])}) """
        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
        Common.logging(log_type, crawler, env, '视频信息插入数据库成功!')
        # Write to Feishu
        Feishu.insert_columns(log_type, crawler, "xYWCzf", "ROWS", 1, 2)
        time.sleep(0.5)
        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
                   "搜索爬虫策略",
                   word,
                   video_dict["video_title"],
                   our_video_link,
                   video_dict["duration"],
                   video_dict["like_cnt"],
                   video_dict["share_cnt"],
                   video_dict["favorite_cnt"],
                   video_dict["comment_cnt"],
                   f'{video_dict["video_width"]}*{video_dict["video_height"]}',
                   video_dict["publish_time_str"],
                   video_dict["user_name"],
                   video_dict["avatar_url"],
                   video_dict["cover_url"],
                   video_dict["video_url"]]]
        Feishu.update_values(log_type, crawler, "xYWCzf", "F2:Z2", values)
        Common.logger(log_type, crawler).info("写入飞书成功\n")
        Common.logging(log_type, crawler, env, "写入飞书成功\n")
        cls.download_cnt += 1

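    # Reads engagement counters and the publish time from the native video page.
    # Counter labels come back as strings like "1.2万"; pure-text labels (e.g. "喜欢")
    # mean the counter is hidden, so they count as 0. The publish time may be
    # relative ("3天前") or absolute ("2022年1月5日" / "1月5日").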
    @classmethod
    def get_video_info(cls, driver: WebDriver):
        # Common.logger(log_type, crawler).info('切回NATIVE_APP')
        driver.switch_to.context('NATIVE_APP')
        # Likes. Check the "万" (×10,000) suffix before the generic Chinese-text
        # check, otherwise values like "1.2万" are zeroed and the 万 branches never run
        like_id = driver.find_element(By.ID, 'com.tencent.mm:id/k04')
        like_cnt = like_id.get_attribute('name')
        if like_cnt == "" or like_cnt == "喜欢" or like_cnt == "火":
            like_cnt = 0
        elif '万+' in like_cnt:
            like_cnt = int(float(like_cnt.split('万+')[0]) * 10000)
        elif '万' in like_cnt:
            like_cnt = int(float(like_cnt.split('万')[0]) * 10000)
        elif cls.is_contain_chinese(like_cnt) is True:
            like_cnt = 0
        else:
            like_cnt = int(float(like_cnt))
        # Shares
        share_id = driver.find_element(By.ID, 'com.tencent.mm:id/jhv')
        share_cnt = share_id.get_attribute('name')
        if share_cnt == "" or share_cnt == "转发":
            share_cnt = 0
        elif '万+' in share_cnt:
            share_cnt = int(float(share_cnt.split('万+')[0]) * 10000)
        elif '万' in share_cnt:
            share_cnt = int(float(share_cnt.split('万')[0]) * 10000)
        elif cls.is_contain_chinese(share_cnt) is True:
            share_cnt = 0
        else:
            share_cnt = int(float(share_cnt))
        # Favorites
        favorite_id = driver.find_element(By.ID, 'com.tencent.mm:id/fnp')
        favorite_cnt = favorite_id.get_attribute('name')
        if favorite_cnt == "" or favorite_cnt == "收藏" or favorite_cnt == "推荐" or favorite_cnt == "火":
            favorite_cnt = 0
        elif '万+' in favorite_cnt:
            favorite_cnt = int(float(favorite_cnt.split('万+')[0]) * 10000)
        elif '万' in favorite_cnt:
            favorite_cnt = int(float(favorite_cnt.split('万')[0]) * 10000)
        elif cls.is_contain_chinese(favorite_cnt) is True:
            favorite_cnt = 0
        else:
            favorite_cnt = int(float(favorite_cnt))
        # Comments
        comment_id = driver.find_element(By.ID, 'com.tencent.mm:id/bje')
        comment_cnt = comment_id.get_attribute('name')
        if comment_cnt == "" or comment_cnt == "评论":
            comment_cnt = 0
        elif '万+' in comment_cnt:
            comment_cnt = int(float(comment_cnt.split('万+')[0]) * 10000)
        elif '万' in comment_cnt:
            comment_cnt = int(float(comment_cnt.split('万')[0]) * 10000)
        elif cls.is_contain_chinese(comment_cnt) is True:
            comment_cnt = 0
        else:
            comment_cnt = int(float(comment_cnt))
        # Publish time
        comment_id.click()
        time.sleep(1)
        publish_time = driver.find_element(By.ID, "com.tencent.mm:id/bre").get_attribute("name")
        if "秒" in publish_time or "分钟" in publish_time or "小时" in publish_time:
            publish_time_str = (date.today() + timedelta(days=0)).strftime("%Y-%m-%d")
        elif "天前" in publish_time:
            days = int(publish_time.replace("天前", ""))
            publish_time_str = (date.today() + timedelta(days=-days)).strftime("%Y-%m-%d")
        elif "年" in publish_time:
            # publish_time_str = publish_time.replace("年", "-").replace("月", "-").replace("日", "")
            year_str = publish_time.split("年")[0]
            month_str = publish_time.split("年")[-1].split("月")[0]
            day_str = publish_time.split("月")[-1].split("日")[0]
            if int(month_str) < 10:
                month_str = f"0{month_str}"
            if int(day_str) < 10:
                day_str = f"0{day_str}"
            publish_time_str = f"{year_str}-{month_str}-{day_str}"
        else:
            year_str = str(datetime.datetime.now().year)
            month_str = publish_time.split("月")[0]
            day_str = publish_time.split("月")[-1].split("日")[0]
            if int(month_str) < 10:
                month_str = f"0{month_str}"
            if int(day_str) < 10:
                day_str = f"0{day_str}"
            publish_time_str = f"{year_str}-{month_str}-{day_str}"
            # publish_time_str = f'2023-{publish_time.replace("月", "-").replace("日", "")}'
        publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y-%m-%d")))
        # Collapse the comment panel
        # Common.logger(log_type, crawler).info("收起评论")
        driver.find_element(By.ID, "com.tencent.mm:id/be_").click()
        time.sleep(0.5)
        # Back to the webview
        # Common.logger(log_type, crawler).info(f"操作手机返回按键")
        driver.find_element(By.ID, "com.tencent.mm:id/a2z").click()
        time.sleep(0.5)
        # driver.press_keycode(AndroidKey.BACK)
        # cls.check_to_webview(log_type=log_type, crawler=crawler, driver=driver)
        webviews = driver.contexts
        driver.switch_to.context(webviews[1])
        video_dict = {
            "like_cnt": like_cnt,
            "share_cnt": share_cnt,
            "favorite_cnt": favorite_cnt,
            "comment_cnt": comment_cnt,
            "publish_time_str": publish_time_str,
            "publish_time_stamp": publish_time_stamp,
        }
        return video_dict

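    # Loads search words from the Feishu user sheet and makes sure each row has a
    # site account (our_uid); creates one via getUser when the column is empty.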
    @classmethod
    def get_users(cls, log_type, crawler, sheetid, env):
        while True:
            user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
            if user_sheet is None:
                Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 3秒钟后重试")
                Common.logging(log_type, crawler, env, f"user_sheet:{user_sheet}, 3秒钟后重试")
                time.sleep(3)
                continue
            our_user_list = []
            for i in range(1, len(user_sheet)):
                # for i in range(1, 3):
                search_word = user_sheet[i][4]
                our_uid = user_sheet[i][6]
                tag1 = user_sheet[i][8]
                tag2 = user_sheet[i][9]
                tag3 = user_sheet[i][10]
                tag4 = user_sheet[i][11]
                tag5 = user_sheet[i][12]
                Common.logger(log_type, crawler).info(f"正在更新 {search_word} 搜索词信息")
                Common.logging(log_type, crawler, env, f"正在更新 {search_word} 搜索词信息")
                if our_uid is None:
                    default_user = getUser.get_default_user()
                    # Info used to create the our_uid account
                    user_dict = {
                        'recommendStatus': -6,
                        'appRecommendStatus': -6,
                        'nickName': default_user['nickName'],
                        'avatarUrl': default_user['avatarUrl'],
                        'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5}',
                    }
                    our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
                    if env == 'prod':
                        our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
                    else:
                        our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
                    Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
                                         [[our_uid, our_user_link]])
                    Common.logger(log_type, crawler).info(f'站内用户主页创建成功:{our_user_link}\n')
                    Common.logging(log_type, crawler, env, f'站内用户主页创建成功:{our_user_link}\n')
                our_user_dict = {
                    'out_uid': '',
                    'search_word': search_word,
                    'our_uid': our_uid,
                    'our_user_link': f'https://admin.piaoquantv.com/ums/user/{our_uid}/post',
                }
                our_user_list.append(our_user_dict)
            return our_user_list

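    # Drives the in-app search: types the word, switches to the results webview,
    # opens the "视频号" tab, then scrolls through the result cards, scraping
    # metadata and handing qualifying videos to download_publish.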
    @classmethod
    def search_video(cls, log_type, crawler, word, driver: WebDriver, our_uid, env):
        # Tap the WeChat search box and type the search word
        driver.implicitly_wait(10)
        Common.logger(log_type, crawler).info("点击搜索框")
        Common.logging(log_type, crawler, env, "点击搜索框")
        # driver.find_element(By.ID, 'com.tencent.mm:id/j5t').click()  # WeChat 8.0.30
        driver.find_element(By.ID, 'com.tencent.mm:id/he6').click()  # WeChat 8.0.16
        time.sleep(0.5)
        # driver.find_element(By.ID, 'com.tencent.mm:id/cd7').clear().send_keys(word)  # WeChat 8.0.30
        driver.find_element(By.ID, 'com.tencent.mm:id/bxz').clear().send_keys(word)  # WeChat 8.0.16
        driver.press_keycode(AndroidKey.ENTER)
        Common.logger(log_type, crawler).info("进入搜索词页面")
        Common.logging(log_type, crawler, env, "进入搜索词页面")
        # driver.find_elements(By.ID, 'com.tencent.mm:id/br8')[0].click()  # WeChat 8.0.30
        driver.find_elements(By.ID, 'com.tencent.mm:id/jkg')[0].click()  # WeChat 8.0.16
        time.sleep(5)
        # Switch to the WeChat search-results webview
        check_to_webview = cls.check_to_webview(log_type, crawler, driver)
        if check_to_webview is None:
            Common.logger(log_type, crawler).info("切换到视频号 webview 失败\n")
            Common.logging(log_type, crawler, env, "切换到视频号 webview 失败\n")
            return
        time.sleep(1)
        # Switch to the "视频号" (Channels) tab
        shipinhao_tags = cls.search_elements(driver, '//div[@class="unit"]/*[2]')
        Common.logger(log_type, crawler).info('点击"视频号"分类')
        Common.logging(log_type, crawler, env, '点击"视频号"分类')
        shipinhao_tags[0].click()
        time.sleep(5)
        index = 0
        while True:
            if cls.search_elements(driver, '//*[@class="mixed-box__bd"]') is None:
                Common.logger(log_type, crawler).info('窗口已销毁\n')
                Common.logging(log_type, crawler, env, '窗口已销毁\n')
                return
            Common.logger(log_type, crawler).info('获取视频列表\n')
            Common.logging(log_type, crawler, env, '获取视频列表\n')
            video_elements = cls.search_elements(driver, '//div[@class="rich-media active__absolute"]')
            if video_elements is None:
                Common.logger(log_type, crawler).warning(f'video_elements:{video_elements}')
                Common.logging(log_type, crawler, env, f'video_elements:{video_elements}')
                return
            video_element_temp = video_elements[index:]
            if len(video_element_temp) == 0:
                Common.logger(log_type, crawler).info('到底啦~~~~~~~~~~~~~\n')
                Common.logging(log_type, crawler, env, '到底啦~~~~~~~~~~~~~\n')
                return
            for i, video_element in enumerate(video_element_temp):
                try:
                    Common.logger(log_type, crawler).info(f"download_cnt:{cls.download_cnt}")
                    Common.logging(log_type, crawler, env, f"download_cnt:{cls.download_cnt}")
                    if cls.download_cnt >= cls.videos_cnt(log_type, crawler):
                        Common.logger(log_type, crawler).info(f'搜索词:"{word}",已抓取视频数:{cls.download_cnt}')
                        Common.logging(log_type, crawler, env, f'搜索词:"{word}",已抓取视频数:{cls.download_cnt}')
                        cls.download_cnt = 0
                        return
                    if video_element is None:
                        Common.logger(log_type, crawler).info('到底啦~\n')
                        Common.logging(log_type, crawler, env, '到底啦~\n')
                        return
                    cls.i += 1
                    cls.search_elements(driver, '//*[@class="rich-media active__absolute"]')
                    Common.logger(log_type, crawler).info(f'拖动"视频"列表第{cls.i}个至屏幕中间')
                    Common.logging(log_type, crawler, env, f'拖动"视频"列表第{cls.i}个至屏幕中间')
                    time.sleep(3)
                    driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})",
                                          video_element)
                    if len(video_element.find_elements(By.XPATH, "//*[@text='没有更多的搜索结果']")) != 0:
                        Common.logger(log_type, crawler).info("没有更多的搜索结果\n")
                        Common.logging(log_type, crawler, env, "没有更多的搜索结果\n")
                        return
                    video_title = video_element.find_elements(By.XPATH, '//div[@class="rich-media__title ellipsis_2"]/span')[index + i].text[:40]
                    video_url = video_element.find_elements(By.XPATH, '//div[@class="video-player"]')[index + i].get_attribute('src')
                    cover_url = video_element.find_elements(By.XPATH, '//div[@class="video-player__bd"]')[index + i].get_attribute('style')
                    cover_url = cover_url.split('url("')[-1].split('")')[0]
                    duration = video_element.find_elements(By.XPATH, '//div[@class="video-player-mask__text"]')[index + i].text
                    duration = int(duration.split(':')[0]) * 60 + int(duration.split(':')[-1])
                    user_name = video_element.find_elements(By.XPATH, '//div[@class="rich-media__source__title"]')[index + i].text
                    avatar_url = video_element.find_elements(By.XPATH, '//div[@class="ui-image-image ui-image rich-media__source__thumb"]')[index + i].get_attribute('style')
                    avatar_url = avatar_url.split('url("')[-1].split('")')[0]
                    out_video_id = md5(video_title.encode('utf8')).hexdigest()
                    out_user_id = md5(user_name.encode('utf8')).hexdigest()
                    video_dict = {
                        "video_title": video_title,
                        "video_id": out_video_id,
                        "play_cnt": 0,
                        "duration": duration,
                        "user_name": user_name,
                        "user_id": out_user_id,
                        "avatar_url": avatar_url,
                        "cover_url": cover_url,
                        "video_url": video_url,
                        "session": f"shipinhao-search-{int(time.time())}"
                    }
                    for k, v in video_dict.items():
                        Common.logger(log_type, crawler).info(f"{k}:{v}")
                    Common.logging(log_type, crawler, env, f"{video_dict}")
                    if video_title is None or video_url is None:
                        Common.logger(log_type, crawler).info("无效视频\n")
                        Common.logging(log_type, crawler, env, "无效视频\n")
                    elif cls.repeat_out_video_id(log_type, crawler, out_video_id, env) != 0:
                        Common.logger(log_type, crawler).info('视频已下载\n')
                        Common.logging(log_type, crawler, env, '视频已下载\n')
                    elif cls.repeat_video_url(log_type, crawler, video_url, env) != 0:
                        Common.logger(log_type, crawler).info('视频已下载\n')
                        Common.logging(log_type, crawler, env, '视频已下载\n')
                    else:
                        video_element.click()
                        time.sleep(3)
                        video_info_dict = cls.get_video_info(driver)
                        video_dict["like_cnt"] = video_info_dict["like_cnt"]
                        video_dict["share_cnt"] = video_info_dict["share_cnt"]
                        video_dict["favorite_cnt"] = video_info_dict["favorite_cnt"]
                        video_dict["comment_cnt"] = video_info_dict["comment_cnt"]
                        video_dict["publish_time_str"] = video_info_dict["publish_time_str"]
                        video_dict["publish_time_stamp"] = video_info_dict["publish_time_stamp"]
                        Common.logger(log_type, crawler).info(f'publish_time:{video_dict["publish_time_str"]}')
                        Common.logging(log_type, crawler, env, f'publish_time:{video_dict["publish_time_str"]}')
                        if cls.download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict) is False:
                            Common.logger(log_type, crawler).info("不满足抓取规则\n")
                            Common.logging(log_type, crawler, env, "不满足抓取规则\n")
                        else:
                            cls.download_publish(log_type=log_type,
                                                 crawler=crawler,
                                                 word=word,
                                                 video_dict=video_dict,
                                                 our_uid=our_uid,
                                                 env=env)
                except Exception as e:
                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                    Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
            Common.logger(log_type, crawler).info('已抓取完一组视频,休眠1秒\n')
            Common.logging(log_type, crawler, env, '已抓取完一组视频,休眠1秒\n')
            time.sleep(1)
            index = index + len(video_element_temp)

    @classmethod
    def get_search_videos(cls, log_type, crawler, env):
        user_list = cls.get_users(log_type, crawler, "wNgi6Z", env)
        for user in user_list:
            # try:
            cls.i = 0
            cls.download_cnt = 0
            search_word = user["search_word"]
            our_uid = user["our_uid"]
            Common.logger(log_type, crawler).info(f"开始抓取:{search_word}")
            Common.logging(log_type, crawler, env, f"开始抓取:{search_word}")
            cls.start_wechat(log_type=log_type,
                             crawler=crawler,
                             word=search_word,
                             our_uid=our_uid,
                             env=env)
            # except Exception as e:
            #     Common.logger(log_type, crawler).error(f"抓取{user['search_word']}时异常:{e}\n")
            #     Common.logging(log_type, crawler, env, f"抓取{user['search_word']}时异常:{e}\n")


if __name__ == '__main__':
    # print(ShipinhaoSearch.get_users("search", "shipinhao", "wNgi6Z", "prod"))
    # print(type(str(date.today())))
    pass
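    # A minimal sketch of a manual run (assumed argument values; requires an Appium
    # server on localhost:4723 with an Android device attached and WeChat installed):
    # ShipinhaoSearch.get_search_videos(log_type="search", crawler="shipinhao", env="prod")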