|
@@ -238,7 +238,6 @@ class ShipinhaoSearch:
|
|
|
|
|
|
@classmethod
|
|
|
def check_to_webview(cls, log_type, crawler, driver: WebDriver):
|
|
|
- # Common.logger(log_type, crawler).info('切换到webview')
|
|
|
webviews = driver.contexts
|
|
|
Common.logger(log_type, crawler).info(f"webviews:{webviews}")
|
|
|
driver.switch_to.context(webviews[1])
|
|
@@ -246,13 +245,11 @@ class ShipinhaoSearch:
|
|
|
windowHandles = driver.window_handles
|
|
|
for handle in windowHandles:
|
|
|
driver.switch_to.window(handle)
|
|
|
- # try:
|
|
|
- shipinhao_webview = driver.find_element(By.XPATH, '//div[@class="unit"]')
|
|
|
- if shipinhao_webview:
|
|
|
- Common.logger(log_type, crawler).info('切换到视频号 webview 成功')
|
|
|
+ if driver.find_elements(By.XPATH, '//div[@class="unit"]'):
|
|
|
+ Common.logger(log_type, crawler).info('切换 webview 成功')
|
|
|
return "成功"
|
|
|
- # except Exception as e:
|
|
|
- # Common.logger(log_type, crawler).info(f"切换失败")
|
|
|
+ else:
|
|
|
+ Common.logger(log_type, crawler).info("切换 webview 失败")
|
|
|
|
|
|
@classmethod
|
|
|
def repeat_out_video_id(cls, log_type, crawler, out_video_id, env):
|
|
@@ -266,135 +263,6 @@ class ShipinhaoSearch:
|
|
|
repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
|
|
|
return len(repeat_video)
|
|
|
|
|
|
- @classmethod
|
|
|
- def search_video(cls, log_type, crawler, word, driver: WebDriver, our_uid, env):
|
|
|
- # 点击微信搜索框,并输入搜索词
|
|
|
- driver.implicitly_wait(10)
|
|
|
- Common.logger(log_type, crawler).info("点击微信搜索框")
|
|
|
- driver.find_element(By.ID, 'com.tencent.mm:id/j5t').click()
|
|
|
- time.sleep(0.5)
|
|
|
- # Common.logger(log_type, crawler).info(f'输入搜索词:{word}')
|
|
|
- driver.find_element(By.ID, 'com.tencent.mm:id/cd7').clear().send_keys(word)
|
|
|
- driver.press_keycode(AndroidKey.ENTER)
|
|
|
- # driver.find_elements(By.ID, 'com.tencent.mm:id/oi4')[0].click()
|
|
|
- Common.logger(log_type, crawler).info("进入搜索词页面")
|
|
|
- # driver.find_elements(By.ID, 'com.tencent.mm:id/oi4')[0].click()
|
|
|
- driver.find_elements(By.ID, 'com.tencent.mm:id/br8')[0].click()
|
|
|
- time.sleep(5)
|
|
|
-
|
|
|
- # 切换到微信搜索结果页 webview
|
|
|
- check_to_webview = cls.check_to_webview(log_type, crawler, driver)
|
|
|
- if check_to_webview is None:
|
|
|
- Common.logger(log_type, crawler).info("切换到视频号 webview 失败\n")
|
|
|
- return
|
|
|
- time.sleep(1)
|
|
|
-
|
|
|
- # 切换到"视频号"分类
|
|
|
- shipinhao_tags = cls.search_elements(driver, '//div[@class="unit"]/*[2]')
|
|
|
- Common.logger(log_type, crawler).info('点击"视频号"分类')
|
|
|
- shipinhao_tags[0].click()
|
|
|
- time.sleep(5)
|
|
|
-
|
|
|
- index = 0
|
|
|
- while True:
|
|
|
- if cls.search_elements(driver, '//*[@class="mixed-box__bd"]') is None:
|
|
|
- Common.logger(log_type, crawler).info('窗口已销毁\n')
|
|
|
- return
|
|
|
-
|
|
|
- Common.logger(log_type, crawler).info('获取视频列表\n')
|
|
|
- # video_elements = cls.search_elements(driver, '//div[@class="vc active__mask"]')
|
|
|
- video_elements = cls.search_elements(driver, '//div[@class="rich-media active__absolute"]')
|
|
|
- if video_elements is None:
|
|
|
- Common.logger(log_type, crawler).warning(f'video_elements:{video_elements}')
|
|
|
- return
|
|
|
-
|
|
|
- video_element_temp = video_elements[index:]
|
|
|
- if len(video_element_temp) == 0:
|
|
|
- Common.logger(log_type, crawler).info('到底啦~~~~~~~~~~~~~\n')
|
|
|
- return
|
|
|
-
|
|
|
- for i, video_element in enumerate(video_element_temp):
|
|
|
- # try:
|
|
|
- Common.logger(log_type, crawler).info(f"download_cnt:{cls.download_cnt}")
|
|
|
- if cls.download_cnt >= cls.videos_cnt(log_type, crawler):
|
|
|
- Common.logger(log_type, crawler).info(f'搜索词:"{word}",已抓取视频数:{cls.download_cnt}')
|
|
|
- cls.download_cnt = 0
|
|
|
- return
|
|
|
-
|
|
|
- if video_element is None:
|
|
|
- Common.logger(log_type, crawler).info('到底啦~\n')
|
|
|
- return
|
|
|
-
|
|
|
- cls.i += 1
|
|
|
- cls.search_elements(driver, '//*[@class="rich-media active__absolute"]')
|
|
|
-
|
|
|
- Common.logger(log_type, crawler).info(f'拖动"视频"列表第{cls.i}个至屏幕中间')
|
|
|
- time.sleep(3)
|
|
|
- driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})",
|
|
|
- video_element)
|
|
|
- if len(video_element.find_elements(By.XPATH, "//*[@text='没有更多的搜索结果']")) != 0:
|
|
|
- Common.logger(log_type, crawler).info("没有更多的搜索结果\n")
|
|
|
- return
|
|
|
- # video_title = video_element.find_elements(By.XPATH, '//div[@class="title ellipsis_2"]/*[2]')[index + i].text[:40]
|
|
|
- video_title = video_element.find_elements(By.XPATH, '//div[@class="rich-media__title ellipsis_2"]/span')[index + i].text[:40]
|
|
|
- video_url = video_element.find_elements(By.XPATH, '//div[@class="video-player"]')[index+i].get_attribute('src')
|
|
|
- cover_url = video_element.find_elements(By.XPATH, '//div[@class="video-player__bd"]')[index+i].get_attribute('style')
|
|
|
- cover_url = cover_url.split('url("')[-1].split('")')[0]
|
|
|
- duration = video_element.find_elements(By.XPATH, '//div[@class="video-player-mask__text"]')[index+i].text
|
|
|
- duration = int(duration.split(':')[0]) * 60 + int(duration.split(':')[-1])
|
|
|
- user_name = video_element.find_elements(By.XPATH, '//div[@class="rich-media__source__title"]')[index+i].text
|
|
|
- avatar_url = video_element.find_elements(By.XPATH, '//div[@class="ui-image-image ui-image rich-media__source__thumb"]')[index+i].get_attribute('style')
|
|
|
- avatar_url = avatar_url.split('url("')[-1].split('")')[0]
|
|
|
- out_video_id = md5(video_title.encode('utf8')).hexdigest()
|
|
|
- out_user_id = md5(user_name.encode('utf8')).hexdigest()
|
|
|
-
|
|
|
- video_dict = {
|
|
|
- "video_title": video_title,
|
|
|
- "video_id": out_video_id,
|
|
|
- "play_cnt": 0,
|
|
|
- "duration": duration,
|
|
|
- "user_name": user_name,
|
|
|
- "user_id": out_user_id,
|
|
|
- "avatar_url": avatar_url,
|
|
|
- "cover_url": cover_url,
|
|
|
- "video_url": video_url,
|
|
|
- "session": f"shipinhao-search-{int(time.time())}"
|
|
|
- }
|
|
|
- for k, v in video_dict.items():
|
|
|
- Common.logger(log_type, crawler).info(f"{k}:{v}")
|
|
|
- if video_title is None or video_url is None:
|
|
|
- Common.logger(log_type, crawler).info("无效视频\n")
|
|
|
- elif cls.repeat_out_video_id(log_type, crawler, out_video_id, env) != 0:
|
|
|
- Common.logger(log_type, crawler).info('视频已下载\n')
|
|
|
- elif cls.repeat_video_url(log_type, crawler, video_url, env) != 0:
|
|
|
- Common.logger(log_type, crawler).info('视频已下载\n')
|
|
|
- else:
|
|
|
- video_element.click()
|
|
|
- time.sleep(3)
|
|
|
- video_info_dict = cls.get_video_info(driver)
|
|
|
- video_dict["like_cnt"] = video_info_dict["like_cnt"]
|
|
|
- video_dict["share_cnt"] = video_info_dict["share_cnt"]
|
|
|
- video_dict["favorite_cnt"] = video_info_dict["favorite_cnt"]
|
|
|
- video_dict["comment_cnt"] = video_info_dict["comment_cnt"]
|
|
|
- video_dict["publish_time_str"] = video_info_dict["publish_time_str"]
|
|
|
- video_dict["publish_time_stamp"] = video_info_dict["publish_time_stamp"]
|
|
|
- Common.logger(log_type, crawler).info(f'publish_time:{video_dict["publish_time_str"]}')
|
|
|
- if cls.download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict) is False:
|
|
|
- Common.logger(log_type, crawler).info("不满足抓取规则\n")
|
|
|
- else:
|
|
|
- cls.download_publish(log_type=log_type,
|
|
|
- crawler=crawler,
|
|
|
- word=word,
|
|
|
- video_dict=video_dict,
|
|
|
- our_uid=our_uid,
|
|
|
- env=env)
|
|
|
- # except Exception as e:
|
|
|
- # Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
|
|
|
-
|
|
|
- Common.logger(log_type, crawler).info('已抓取完一组视频,休眠1秒\n')
|
|
|
- time.sleep(1)
|
|
|
- index = index + len(video_element_temp)
|
|
|
-
|
|
|
@classmethod
|
|
|
def download_publish(cls, log_type, crawler, word, video_dict, our_uid, env):
|
|
|
# 下载视频
|
|
@@ -657,38 +525,152 @@ class ShipinhaoSearch:
|
|
|
return our_user_list
|
|
|
|
|
|
|
|
|
+ @classmethod
|
|
|
+ def search_video(cls, log_type, crawler, word, driver: WebDriver, our_uid, env):
|
|
|
+ # 点击微信搜索框,并输入搜索词
|
|
|
+ driver.implicitly_wait(10)
|
|
|
+ Common.logger(log_type, crawler).info("点击搜索框")
|
|
|
+ driver.find_element(By.ID, 'com.tencent.mm:id/j5t').click()
|
|
|
+ time.sleep(0.5)
|
|
|
+ driver.find_element(By.ID, 'com.tencent.mm:id/cd7').clear().send_keys(word)
|
|
|
+ driver.press_keycode(AndroidKey.ENTER)
|
|
|
+ Common.logger(log_type, crawler).info("进入搜索词页面")
|
|
|
+ driver.find_elements(By.ID, 'com.tencent.mm:id/br8')[0].click()
|
|
|
+ time.sleep(5)
|
|
|
+
|
|
|
+ # 切换到微信搜索结果页 webview
|
|
|
+ check_to_webview = cls.check_to_webview(log_type, crawler, driver)
|
|
|
+ if check_to_webview is None:
|
|
|
+ Common.logger(log_type, crawler).info("切换到视频号 webview 失败\n")
|
|
|
+ return
|
|
|
+ time.sleep(1)
|
|
|
+
|
|
|
+ # 切换到"视频号"分类
|
|
|
+ shipinhao_tags = cls.search_elements(driver, '//div[@class="unit"]/*[2]')
|
|
|
+ Common.logger(log_type, crawler).info('点击"视频号"分类')
|
|
|
+ shipinhao_tags[0].click()
|
|
|
+ time.sleep(5)
|
|
|
+
|
|
|
+ index = 0
|
|
|
+ while True:
|
|
|
+ if cls.search_elements(driver, '//*[@class="mixed-box__bd"]') is None:
|
|
|
+ Common.logger(log_type, crawler).info('窗口已销毁\n')
|
|
|
+ return
|
|
|
+
|
|
|
+ Common.logger(log_type, crawler).info('获取视频列表\n')
|
|
|
+ video_elements = cls.search_elements(driver, '//div[@class="rich-media active__absolute"]')
|
|
|
+ if video_elements is None:
|
|
|
+ Common.logger(log_type, crawler).warning(f'video_elements:{video_elements}')
|
|
|
+ return
|
|
|
+
|
|
|
+ video_element_temp = video_elements[index:]
|
|
|
+ if len(video_element_temp) == 0:
|
|
|
+ Common.logger(log_type, crawler).info('到底啦~~~~~~~~~~~~~\n')
|
|
|
+ return
|
|
|
+
|
|
|
+ for i, video_element in enumerate(video_element_temp):
|
|
|
+ try:
|
|
|
+ Common.logger(log_type, crawler).info(f"download_cnt:{cls.download_cnt}")
|
|
|
+ if cls.download_cnt >= cls.videos_cnt(log_type, crawler):
|
|
|
+ Common.logger(log_type, crawler).info(f'搜索词:"{word}",已抓取视频数:{cls.download_cnt}')
|
|
|
+ cls.download_cnt = 0
|
|
|
+ return
|
|
|
+
|
|
|
+ if video_element is None:
|
|
|
+ Common.logger(log_type, crawler).info('到底啦~\n')
|
|
|
+ return
|
|
|
+
|
|
|
+ cls.i += 1
|
|
|
+ cls.search_elements(driver, '//*[@class="rich-media active__absolute"]')
|
|
|
+
|
|
|
+ Common.logger(log_type, crawler).info(f'拖动"视频"列表第{cls.i}个至屏幕中间')
|
|
|
+ time.sleep(3)
|
|
|
+ driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})",
|
|
|
+ video_element)
|
|
|
+ if len(video_element.find_elements(By.XPATH, "//*[@text='没有更多的搜索结果']")) != 0:
|
|
|
+ Common.logger(log_type, crawler).info("没有更多的搜索结果\n")
|
|
|
+ return
|
|
|
+ video_title = video_element.find_elements(By.XPATH, '//div[@class="rich-media__title ellipsis_2"]/span')[index + i].text[:40]
|
|
|
+ video_url = video_element.find_elements(By.XPATH, '//div[@class="video-player"]')[index+i].get_attribute('src')
|
|
|
+ cover_url = video_element.find_elements(By.XPATH, '//div[@class="video-player__bd"]')[index+i].get_attribute('style')
|
|
|
+ cover_url = cover_url.split('url("')[-1].split('")')[0]
|
|
|
+ duration = video_element.find_elements(By.XPATH, '//div[@class="video-player-mask__text"]')[index+i].text
|
|
|
+ duration = int(duration.split(':')[0]) * 60 + int(duration.split(':')[-1])
|
|
|
+ user_name = video_element.find_elements(By.XPATH, '//div[@class="rich-media__source__title"]')[index+i].text
|
|
|
+ avatar_url = video_element.find_elements(By.XPATH, '//div[@class="ui-image-image ui-image rich-media__source__thumb"]')[index+i].get_attribute('style')
|
|
|
+ avatar_url = avatar_url.split('url("')[-1].split('")')[0]
|
|
|
+ out_video_id = md5(video_title.encode('utf8')).hexdigest()
|
|
|
+ out_user_id = md5(user_name.encode('utf8')).hexdigest()
|
|
|
+
|
|
|
+ video_dict = {
|
|
|
+ "video_title": video_title,
|
|
|
+ "video_id": out_video_id,
|
|
|
+ "play_cnt": 0,
|
|
|
+ "duration": duration,
|
|
|
+ "user_name": user_name,
|
|
|
+ "user_id": out_user_id,
|
|
|
+ "avatar_url": avatar_url,
|
|
|
+ "cover_url": cover_url,
|
|
|
+ "video_url": video_url,
|
|
|
+ "session": f"shipinhao-search-{int(time.time())}"
|
|
|
+ }
|
|
|
+ for k, v in video_dict.items():
|
|
|
+ Common.logger(log_type, crawler).info(f"{k}:{v}")
|
|
|
+ if video_title is None or video_url is None:
|
|
|
+ Common.logger(log_type, crawler).info("无效视频\n")
|
|
|
+ elif cls.repeat_out_video_id(log_type, crawler, out_video_id, env) != 0:
|
|
|
+ Common.logger(log_type, crawler).info('视频已下载\n')
|
|
|
+ elif cls.repeat_video_url(log_type, crawler, video_url, env) != 0:
|
|
|
+ Common.logger(log_type, crawler).info('视频已下载\n')
|
|
|
+ else:
|
|
|
+ video_element.click()
|
|
|
+ time.sleep(3)
|
|
|
+ video_info_dict = cls.get_video_info(driver)
|
|
|
+ video_dict["like_cnt"] = video_info_dict["like_cnt"]
|
|
|
+ video_dict["share_cnt"] = video_info_dict["share_cnt"]
|
|
|
+ video_dict["favorite_cnt"] = video_info_dict["favorite_cnt"]
|
|
|
+ video_dict["comment_cnt"] = video_info_dict["comment_cnt"]
|
|
|
+ video_dict["publish_time_str"] = video_info_dict["publish_time_str"]
|
|
|
+ video_dict["publish_time_stamp"] = video_info_dict["publish_time_stamp"]
|
|
|
+ Common.logger(log_type, crawler).info(f'publish_time:{video_dict["publish_time_str"]}')
|
|
|
+ if cls.download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict) is False:
|
|
|
+ Common.logger(log_type, crawler).info("不满足抓取规则\n")
|
|
|
+ else:
|
|
|
+ cls.download_publish(log_type=log_type,
|
|
|
+ crawler=crawler,
|
|
|
+ word=word,
|
|
|
+ video_dict=video_dict,
|
|
|
+ our_uid=our_uid,
|
|
|
+ env=env)
|
|
|
+ except Exception as e:
|
|
|
+ Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
|
|
|
+
|
|
|
+ Common.logger(log_type, crawler).info('已抓取完一组视频,休眠1秒\n')
|
|
|
+ time.sleep(1)
|
|
|
+ index = index + len(video_element_temp)
|
|
|
+
|
|
|
+
|
|
|
@classmethod
|
|
|
def get_search_videos(cls, log_type, crawler, env):
|
|
|
user_list = cls.get_users(log_type, crawler, "wNgi6Z", env)
|
|
|
for user in user_list:
|
|
|
- # try:
|
|
|
- cls.i = 0
|
|
|
- cls.download_cnt = 0
|
|
|
- search_word = user["search_word"]
|
|
|
- our_uid = user["our_uid"]
|
|
|
- Common.logger(log_type, crawler).info(f"开始抓取搜索词:{search_word}")
|
|
|
-
|
|
|
- cls.start_wechat(log_type=log_type,
|
|
|
- crawler=crawler,
|
|
|
- word=search_word,
|
|
|
- our_uid=our_uid,
|
|
|
- env=env)
|
|
|
- # except Exception as e:
|
|
|
- # Common.logger(log_type, crawler).error(f"抓取{user['search_word']}时异常:{e}\n")
|
|
|
+ try:
|
|
|
+ cls.i = 0
|
|
|
+ cls.download_cnt = 0
|
|
|
+ search_word = user["search_word"]
|
|
|
+ our_uid = user["our_uid"]
|
|
|
+ Common.logger(log_type, crawler).info(f"开始抓取:{search_word}")
|
|
|
+
|
|
|
+ cls.start_wechat(log_type=log_type,
|
|
|
+ crawler=crawler,
|
|
|
+ word=search_word,
|
|
|
+ our_uid=our_uid,
|
|
|
+ env=env)
|
|
|
+ except Exception as e:
|
|
|
+ Common.logger(log_type, crawler).error(f"抓取{user['search_word']}时异常:{e}\n")
|
|
|
|
|
|
|
|
|
if __name__ == '__main__':
|
|
|
- # ShipinhaoSearchScheduling.get_search_videos(log_type="search",
|
|
|
- # crawler="shipinhao",
|
|
|
- # rule_dict='[{"videos_cnt":{"min":10,"max":0}},{"duration":{"min":30,"max":600}},{"share_cnt":{"min":3000,"max":0}},{"favorite_cnt":{"min":1000,"max":0}},{"publish_time":{"min":1672502400000,"max":0}}]',
|
|
|
- # oss_endpoint="out",
|
|
|
- # env="dev")
|
|
|
- print(ShipinhaoSearch.get_users("search", "shipinhao", "wNgi6Z", "prod"))
|
|
|
- # print((date.today() + timedelta(days=0)).strftime("%Y-%m-%d"))
|
|
|
- # print(ShipinhaoSearchScheduling.repeat_out_video_id(log_type="search",
|
|
|
- # crawler="shipinhao",
|
|
|
- # out_video_id="123",
|
|
|
- # env="dev"))
|
|
|
- # ShipinhaoSearch.download_rule(log_type="search", crawler="shipinhao", video_dict={})
|
|
|
- # print(ShipinhaoSearch.rule_dict(log_type="search", crawler="shipinhao"))
|
|
|
+ # print(ShipinhaoSearch.get_users("search", "shipinhao", "wNgi6Z", "prod"))
|
|
|
+ # print(type(str(date.today())))
|
|
|
pass
|