@@ -4,7 +4,6 @@
 import datetime
 import json
 import os
-import shutil
 import sys
 import time
 from datetime import date, timedelta
@@ -15,10 +14,9 @@ from appium.webdriver.webdriver import WebDriver
 from selenium.common import NoSuchElementException
 from selenium.webdriver.common.by import By
 sys.path.append(os.getcwd())
-from common.feishu import Feishu
-from common.publish import Publish
+from common.public import download_rule
+from common.mq import MQ
 from common.common import Common
-from common.getuser import getUser
 from common.scheduling_db import MysqlHelper
@@ -27,116 +25,14 @@ class ShipinhaoSearchScheduling:
     i = 0
     download_cnt = 0
 
-    # Basic threshold rules
-    @staticmethod
-    def download_rule(log_type, crawler, video_dict, rule_dict):
-        """
-        Basic rules for deciding whether to download a video
-        :param log_type: log type
-        :param crawler: which crawler
-        :param video_dict: video info, dict
-        :param rule_dict: rule info, dict
-        :return: True if the rules are satisfied, otherwise False
-        """
-        rule_play_cnt_min = rule_dict.get('play_cnt', {}).get('min', 0)
-        rule_play_cnt_max = rule_dict.get('play_cnt', {}).get('max', 100000000)
-        if rule_play_cnt_max == 0:
-            rule_play_cnt_max = 100000000
-
-        rule_duration_min = rule_dict.get('duration', {}).get('min', 0)
-        rule_duration_max = rule_dict.get('duration', {}).get('max', 100000000)
-        if rule_duration_max == 0:
-            rule_duration_max = 100000000
-
-        # rule_period_min = rule_dict.get('period', {}).get('min', 0)
-        # rule_period_max = rule_dict.get('period', {}).get('max', 100000000)
-        # if rule_period_max == 0:
-        #     rule_period_max = 100000000
-
-        # rule_fans_cnt_min = rule_dict.get('fans_cnt', {}).get('min', 0)
-        # rule_fans_cnt_max = rule_dict.get('fans_cnt', {}).get('max', 100000000)
-        # if rule_fans_cnt_max == 0:
-        #     rule_fans_cnt_max = 100000000
-
-        # rule_videos_cnt_min = rule_dict.get('videos_cnt', {}).get('min', 0)
-        # rule_videos_cnt_max = rule_dict.get('videos_cnt', {}).get('max', 100000000)
-        # if rule_videos_cnt_max == 0:
-        #     rule_videos_cnt_max = 100000000
-
-        rule_like_cnt_min = rule_dict.get('like_cnt', {}).get('min', 0)
-        rule_like_cnt_max = rule_dict.get('like_cnt', {}).get('max', 100000000)
-        if rule_like_cnt_max == 0:
-            rule_like_cnt_max = 100000000
-
-        rule_width_min = rule_dict.get('width', {}).get('min', 0)
-        rule_width_max = rule_dict.get('width', {}).get('max', 100000000)
-        if rule_width_max == 0:
-            rule_width_max = 100000000
-
-        rule_height_min = rule_dict.get('height', {}).get('min', 0)
-        rule_height_max = rule_dict.get('height', {}).get('max', 100000000)
-        if rule_height_max == 0:
-            rule_height_max = 100000000
-
-        rule_share_cnt_min = rule_dict.get('share_cnt', {}).get('min', 0)
-        rule_share_cnt_max = rule_dict.get('share_cnt', {}).get('max', 100000000)
-        if rule_share_cnt_max == 0:
-            rule_share_cnt_max = 100000000
-
-        rule_favorite_cnt_min = rule_dict.get('favorite_cnt', {}).get('min', 0)
-        rule_favorite_cnt_max = rule_dict.get('favorite_cnt', {}).get('max', 100000000)
-        if rule_favorite_cnt_max == 0:
-            rule_favorite_cnt_max = 100000000
-
-        rule_comment_cnt_min = rule_dict.get('comment_cnt', {}).get('min', 0)
-        rule_comment_cnt_max = rule_dict.get('comment_cnt', {}).get('max', 100000000)
-        if rule_comment_cnt_max == 0:
-            rule_comment_cnt_max = 100000000
-
-        rule_publish_time_min = rule_dict.get('publish_time', {}).get('min', 0)
-        rule_publish_time_max = rule_dict.get('publish_time', {}).get('max', 0)
-        if rule_publish_time_max == 0:
-            rule_publish_time_max = 4102415999000  # 2099-12-31 23:59:59
-
-        Common.logger(log_type, crawler).info(
-            f'rule_duration_max:{rule_duration_max} >= duration:{int(float(video_dict["duration"]))} >= rule_duration_min:{int(rule_duration_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_play_cnt_max:{int(rule_play_cnt_max)} >= play_cnt:{int(video_dict["play_cnt"])} >= rule_play_cnt_min:{int(rule_play_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_like_cnt_max:{int(rule_like_cnt_max)} >= like_cnt:{int(video_dict["like_cnt"])} >= rule_like_cnt_min:{int(rule_like_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_comment_cnt_max:{int(rule_comment_cnt_max)} >= comment_cnt:{int(video_dict["comment_cnt"])} >= rule_comment_cnt_min:{int(rule_comment_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_share_cnt_max:{int(rule_share_cnt_max)} >= share_cnt:{int(video_dict["share_cnt"])} >= rule_share_cnt_min:{int(rule_share_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_favorite_cnt_max:{int(rule_favorite_cnt_max)} >= favorite_cnt:{int(video_dict["favorite_cnt"])} >= rule_favorite_cnt_min:{int(rule_favorite_cnt_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_width_max:{int(rule_width_max)} >= video_width:{int(video_dict["video_width"])} >= rule_width_min:{int(rule_width_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_height_max:{int(rule_height_max)} >= video_height:{int(video_dict["video_height"])} >= rule_height_min:{int(rule_height_min)}')
-        Common.logger(log_type, crawler).info(
-            f'rule_publish_time_max:{int(rule_publish_time_max)} >= publish_time_stamp:{int(video_dict["publish_time_stamp"]) * 1000} >= rule_publish_time_min:{int(rule_publish_time_min)}')
-
-        if int(rule_duration_max) >= int(float(video_dict["duration"])) >= int(rule_duration_min) \
-                and int(rule_play_cnt_max) >= int(video_dict['play_cnt']) >= int(rule_play_cnt_min) \
-                and int(rule_like_cnt_max) >= int(video_dict['like_cnt']) >= int(rule_like_cnt_min) \
-                and int(rule_comment_cnt_max) >= int(video_dict['comment_cnt']) >= int(rule_comment_cnt_min) \
-                and int(rule_share_cnt_max) >= int(video_dict['share_cnt']) >= int(rule_share_cnt_min) \
-                and int(rule_favorite_cnt_max) >= int(video_dict['favorite_cnt']) >= int(rule_favorite_cnt_min) \
-                and int(rule_width_max) >= int(video_dict['video_width']) >= int(rule_width_min) \
-                and int(rule_height_max) >= int(video_dict['video_height']) >= int(rule_height_min) \
-                and int(rule_publish_time_max) >= int(video_dict['publish_time_stamp']) * 1000 >= int(rule_publish_time_min):
-            return True
-        else:
-            return False
-
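[Annotation] The per-field checks removed above all share one shape: read the metric's min/max bounds, treat a max of 0 as unbounded via a large sentinel, then range-check the value. That shape is presumably what the shared `common.public.download_rule` imported above now centralizes; a condensed sketch (names assumed, and the publish_time millisecond conversion omitted; this is not the actual implementation in `common/public.py`):

    def download_rule_sketch(video_dict, rule_dict):
        # Each rule entry is {"metric": {"min": x, "max": y}}; max == 0 means
        # "unbounded", mirroring the sentinel substitution in the removed method.
        for key, bounds in rule_dict.items():
            min_v = bounds.get("min", 0)
            max_v = bounds.get("max", 100000000) or 100000000
            value = int(float(video_dict.get(key, 0)))
            if not (min_v <= value <= max_v):
                return False
        return True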
     @classmethod
-    def start_wechat(cls, log_type, crawler, word, rule_dict, our_uid, oss_endpoint, env):
+    def start_wechat(cls, log_type, crawler, rule_dict, user_dict, env):
         Common.logger(log_type, crawler).info('启动微信')
+        Common.logging(log_type, crawler, env, '启动微信')
         if env == "dev":
             chromedriverExecutable = "/Users/wangkun/Downloads/chromedriver/chromedriver_v107/chromedriver"
         else:
-            chromedriverExecutable = '/Users/piaoquan/Downloads/chromedriver'
+            chromedriverExecutable = "/Users/piaoquan/Downloads/chromedriver/chromedriver_v111/chromedriver"
         caps = {
             "platformName": "Android",  # mobile OS: Android / iOS
             "deviceName": "Android",  # name of the connected device (emulator or real device); any value works on Android
@@ -156,31 +52,35 @@ class ShipinhaoSearchScheduling:
             "showChromedriverLog": True,
             # "chromeOptions": {"androidProcess": "com.tencent.mm:appbrand0"},
             "chromeOptions": {"androidProcess": "com.tencent.mm:tools"},
+            # "chromeOptions": {"androidProcess": "com.tencent.mm:toolsmp"},
+            # "chromeOptions": {"androidProcess": "com.tencent.mm"},
             'enableWebviewDetailsCollection': True,
             'setWebContentsDebuggingEnabled': True,
             'chromedriverExecutable': chromedriverExecutable,
         }
         driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
         driver.implicitly_wait(10)
-        if len(driver.find_elements(By.ID, 'android:id/text1')) != 0:
-            driver.find_elements(By.ID, 'android:id/text1')[0].click()
+        # Common.logger(log_type, crawler).info("点击微信")
+        # if len(driver.find_elements(By.ID, 'android:id/text1')) != 0:
+        #     driver.find_elements(By.ID, 'android:id/text1')[0].click()
+        # Common.logger(log_type, crawler).info("等待 5s")
         time.sleep(5)
         cls.search_video(log_type=log_type,
                          crawler=crawler,
-                         word=word,
                          rule_dict=rule_dict,
-                         our_uid=our_uid,
-                         oss_endpoint=oss_endpoint,
+                         user_dict=user_dict,
                          driver=driver,
                          env=env)
         cls.close_wechat(log_type=log_type,
                          crawler=crawler,
+                         env=env,
                          driver=driver)
 
     @classmethod
-    def close_wechat(cls, log_type, crawler, driver: WebDriver):
+    def close_wechat(cls, log_type, crawler, env, driver: WebDriver):
         driver.quit()
         Common.logger(log_type, crawler).info(f"微信退出成功\n")
+        Common.logging(log_type, crawler, env, f"微信退出成功\n")
 
     @classmethod
     def is_contain_chinese(cls, strword):
@@ -206,108 +106,132 @@ class ShipinhaoSearchScheduling:
     @classmethod
     def check_to_webview(cls, log_type, crawler, driver: WebDriver):
-        # Common.logger(log_type, crawler).info('切换到webview')
         webviews = driver.contexts
+        Common.logger(log_type, crawler).info(f"webviews:{webviews}")
         driver.switch_to.context(webviews[1])
+        Common.logger(log_type, crawler).info(driver.current_context)
         time.sleep(1)
         windowHandles = driver.window_handles
         for handle in windowHandles:
-            driver.switch_to.window(handle)
             try:
-                shipinhao_webview = driver.find_element(By.XPATH, '//div[@class="unit"]')
-                if shipinhao_webview:
-                    Common.logger(log_type, crawler).info('切换到视频号 webview 成功')
-                    return "成功"
-            except Exception as e:
-                Common.logger(log_type, crawler).info(f"{e}\n")
+                driver.switch_to.window(handle)
+                time.sleep(1)
+                driver.find_element(By.XPATH, '//div[@class="unit"]')
+                Common.logger(log_type, crawler).info('切换 webview 成功')
+                return "成功"
+            except Exception:
+                Common.logger(log_type, crawler).info("切换 webview 失败")
 
     @classmethod
     def repeat_out_video_id(cls, log_type, crawler, out_video_id, env):
-        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{out_video_id}"; """
+        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and out_video_id="{out_video_id}"; """
         repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)
 
     @classmethod
     def repeat_video_url(cls, log_type, crawler, video_url, env):
-        sql = f""" select * from crawler_video where platform="{cls.platform}" and video_url="{video_url}"; """
+        sql = f""" select * from crawler_video where platform in ("{crawler}","{cls.platform}") and video_url="{video_url}"; """
         repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)
 
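[Annotation] Both dedup helpers count existing `crawler_video` rows for the same platform plus external id or URL; any nonzero count means "already crawled". Note they build SQL by f-string interpolation, so a title or URL containing a double quote would break the statement. If `MysqlHelper` ever supports bound parameters, a safer variant could look like this sketch (the `params=` keyword is hypothetical, not an existing `MysqlHelper` API):

    # Hypothetical parameterized variant; MysqlHelper.get_values() as used in
    # this file takes a raw SQL string, so the params= keyword is an assumption.
    sql = """select 1 from crawler_video
             where platform in (%s, %s) and out_video_id = %s limit 1;"""
    rows = MysqlHelper.get_values(log_type, crawler, sql, env,
                                  params=(crawler, cls.platform, out_video_id))
    return len(rows)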
     @classmethod
-    def search_video(cls, log_type, crawler, word, rule_dict, driver: WebDriver, our_uid, oss_endpoint, env):
+    def search_video(cls, log_type, crawler, rule_dict, driver: WebDriver, user_dict, env):
+        mq = MQ(topic_name="topic_crawler_etl_" + env)
         # Tap the WeChat search box and type the search term
         driver.implicitly_wait(10)
-        driver.find_element(By.ID, 'com.tencent.mm:id/j5t').click()
+        Common.logger(log_type, crawler).info("点击搜索框")
+        Common.logging(log_type, crawler, env, "点击搜索框")
+        driver.find_element(By.ID, 'com.tencent.mm:id/j5t').click()  # WeChat 8.0.30
+        # driver.find_element(By.ID, 'com.tencent.mm:id/he6').click()  # WeChat 8.0.16
         time.sleep(0.5)
-        Common.logger(log_type, crawler).info(f'输入搜索词:{word}')
-        driver.find_element(By.ID, 'com.tencent.mm:id/cd7').clear().send_keys(word)
+        driver.find_element(By.ID, 'com.tencent.mm:id/cd7').clear().send_keys(user_dict["link"])  # WeChat 8.0.30
+        # driver.find_element(By.ID, 'com.tencent.mm:id/bxz').clear().send_keys(word)  # WeChat 8.0.16
         driver.press_keycode(AndroidKey.ENTER)
-        # driver.find_elements(By.ID, 'com.tencent.mm:id/oi4')[0].click()
-        driver.find_element(By.ID, 'com.tencent.mm:id/m94').click()
+        Common.logger(log_type, crawler).info("进入搜索词页面")
+        Common.logging(log_type, crawler, env, "进入搜索词页面")
+        driver.find_elements(By.ID, 'com.tencent.mm:id/br8')[0].click()  # WeChat 8.0.30
+        # driver.find_elements(By.ID, 'com.tencent.mm:id/jkg')[0].click()  # WeChat 8.0.16
         time.sleep(5)
 
         # Switch to the webview of the WeChat search results page
         check_to_webview = cls.check_to_webview(log_type, crawler, driver)
         if check_to_webview is None:
             Common.logger(log_type, crawler).info("切换到视频号 webview 失败\n")
+            Common.logging(log_type, crawler, env, "切换到视频号 webview 失败\n")
             return
         time.sleep(1)
 
         # Switch to the "视频号" (Channels) tab
         shipinhao_tags = cls.search_elements(driver, '//div[@class="unit"]/*[2]')
         Common.logger(log_type, crawler).info('点击"视频号"分类')
+        Common.logging(log_type, crawler, env, '点击"视频号"分类')
         shipinhao_tags[0].click()
         time.sleep(5)
 
-        videos_cnt = rule_dict.get('videos_cnt', {}).get('min', 0)
+        videos_cnt = rule_dict.get('videos_cnt', {}).get('min', 30)
         index = 0
         while True:
-
-            if cls.search_elements(driver, '//*[@class="double-rich double-rich_vertical"]') is None:
+            if cls.search_elements(driver, '//*[@class="mixed-box__bd"]') is None:
                 Common.logger(log_type, crawler).info('窗口已销毁\n')
+                Common.logging(log_type, crawler, env, '窗口已销毁\n')
                 return
 
             Common.logger(log_type, crawler).info('获取视频列表\n')
-            video_elements = cls.search_elements(driver, '//div[@class="vc active__mask"]')
+            Common.logging(log_type, crawler, env, '获取视频列表\n')
+            video_elements = cls.search_elements(driver, '//div[@class="rich-media active__absolute"]')
             if video_elements is None:
                 Common.logger(log_type, crawler).warning(f'video_elements:{video_elements}')
+                Common.logging(log_type, crawler, env, f'video_elements:{video_elements}')
                 return
 
             video_element_temp = video_elements[index:]
            if len(video_element_temp) == 0:
                 Common.logger(log_type, crawler).info('到底啦~~~~~~~~~~~~~\n')
+                Common.logging(log_type, crawler, env, '到底啦~~~~~~~~~~~~~\n')
                 return
 
             for i, video_element in enumerate(video_element_temp):
                 try:
                     Common.logger(log_type, crawler).info(f"download_cnt:{cls.download_cnt}")
+                    Common.logging(log_type, crawler, env, f"download_cnt:{cls.download_cnt}")
                     if cls.download_cnt >= int(videos_cnt):
-                        Common.logger(log_type, crawler).info(f'搜索词:"{word}",已抓取视频数:{cls.download_cnt}')
+                        Common.logger(log_type, crawler).info(f'搜索词:{user_dict["link"]},已抓取视频数:{cls.download_cnt}')
+                        Common.logging(log_type, crawler, env, f'搜索词:{user_dict["link"]},已抓取视频数:{cls.download_cnt}')
                         cls.download_cnt = 0
                         return
 
                     if video_element is None:
                         Common.logger(log_type, crawler).info('到底啦~\n')
+                        Common.logging(log_type, crawler, env, '到底啦~\n')
                         return
 
                     cls.i += 1
-                    cls.search_elements(driver, '//div[@class="vc active__mask"]')
+                    cls.search_elements(driver, '//*[@class="rich-media active__absolute"]')
 
                     Common.logger(log_type, crawler).info(f'拖动"视频"列表第{cls.i}个至屏幕中间')
+                    Common.logging(log_type, crawler, env, f'拖动"视频"列表第{cls.i}个至屏幕中间')
                     time.sleep(3)
-                    driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})",
-                                          video_element)
+                    driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})", video_element)
                     if len(video_element.find_elements(By.XPATH, "//*[@text='没有更多的搜索结果']")) != 0:
                         Common.logger(log_type, crawler).info("没有更多的搜索结果\n")
+                        Common.logging(log_type, crawler, env, "没有更多的搜索结果\n")
                         return
-                    video_title = video_element.find_elements(By.XPATH, '//div[@class="title ellipsis_2"]/*[2]')[index + i].text
-                    video_url = video_element.find_elements(By.XPATH, '//div[@class="video-player"]')[index + i].get_attribute('src')
-                    cover_url = video_element.find_elements(By.XPATH, '//div[@class="video-player__bd"]')[index + i].get_attribute('style')
+                    video_title = \
+                        video_element.find_elements(By.XPATH, '//div[@class="rich-media__title ellipsis_2"]/span')[
+                            index + i].text[:40]
+                    video_url = video_element.find_elements(By.XPATH, '//div[@class="video-player"]')[
+                        index + i].get_attribute('src')
+                    cover_url = video_element.find_elements(By.XPATH, '//div[@class="video-player__bd"]')[
+                        index + i].get_attribute('style')
                     cover_url = cover_url.split('url("')[-1].split('")')[0]
-                    duration = video_element.find_elements(By.XPATH, '//div[@class="play-mask__text"]/*[2]')[index + i].text
+                    duration = video_element.find_elements(By.XPATH, '//div[@class="video-player-mask__text"]')[
+                        index + i].text
                     duration = int(duration.split(':')[0]) * 60 + int(duration.split(':')[-1])
-                    user_name = video_element.find_elements(By.XPATH, '//p[@class="vc-source__text"]')[index + i].text
-                    avatar_url = video_element.find_elements(By.XPATH, '//div[@class="ui-image-image ui-image vc-source__thumb"]')[index + i].get_attribute('style')
+                    user_name = video_element.find_elements(By.XPATH, '//div[@class="rich-media__source__title"]')[
+                        index + i].text
+                    avatar_url = video_element.find_elements(By.XPATH,
+                                                             '//div[@class="ui-image-image ui-image rich-media__source__thumb"]')[
+                        index + i].get_attribute('style')
                     avatar_url = avatar_url.split('url("')[-1].split('")')[0]
                     out_video_id = md5(video_title.encode('utf8')).hexdigest()
                     out_user_id = md5(user_name.encode('utf8')).hexdigest()
@@ -317,6 +241,7 @@ class ShipinhaoSearchScheduling:
"video_id": out_video_id,
|
|
|
"play_cnt": 0,
|
|
|
"duration": duration,
|
|
|
+ # "duration": 60,
|
|
|
"user_name": user_name,
|
|
|
"user_id": out_user_id,
|
|
|
"avatar_url": avatar_url,
|
|
@@ -326,12 +251,16 @@ class ShipinhaoSearchScheduling:
|
|
|
}
|
|
|
for k, v in video_dict.items():
|
|
|
Common.logger(log_type, crawler).info(f"{k}:{v}")
|
|
|
+ Common.logging(log_type, crawler, env, f"{video_dict}")
|
|
|
if video_title is None or video_url is None:
|
|
|
Common.logger(log_type, crawler).info("无效视频\n")
|
|
|
+ Common.logging(log_type, crawler, env, "无效视频\n")
|
|
|
elif cls.repeat_out_video_id(log_type, crawler, out_video_id, env) != 0:
|
|
|
Common.logger(log_type, crawler).info('视频已下载\n')
|
|
|
+ Common.logging(log_type, crawler, env, '视频已下载\n')
|
|
|
elif cls.repeat_video_url(log_type, crawler, video_url, env) != 0:
|
|
|
Common.logger(log_type, crawler).info('视频已下载\n')
|
|
|
+ Common.logging(log_type, crawler, env, '视频已下载\n')
|
|
|
else:
|
|
|
video_element.click()
|
|
|
time.sleep(3)
|
|
@@ -340,183 +269,85 @@ class ShipinhaoSearchScheduling:
                         video_dict["share_cnt"] = video_info_dict["share_cnt"]
                         video_dict["favorite_cnt"] = video_info_dict["favorite_cnt"]
                         video_dict["comment_cnt"] = video_info_dict["comment_cnt"]
-                        video_dict["publish_time_str"] = video_info_dict["publish_time_str"]
+                        video_dict["publish_time_str"] = video_info_dict["publish_time_str"] + " 00:00:00"
                         video_dict["publish_time_stamp"] = video_info_dict["publish_time_stamp"]
-
-                        cls.download_publish(log_type=log_type,
-                                             crawler=crawler,
-                                             word=word,
-                                             rule_dict=rule_dict,
-                                             video_dict=video_dict,
-                                             our_uid=our_uid,
-                                             oss_endpoint=oss_endpoint,
-                                             env=env)
+                        Common.logger(log_type, crawler).info(f'publish_time:{video_dict["publish_time_str"]}')
+                        Common.logging(log_type, crawler, env, f'publish_time:{video_dict["publish_time_str"]}')
+                        if download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                            Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                            Common.logging(log_type, crawler, env, "不满足抓取规则\n")
+                        else:
+                            video_dict["out_user_id"] = video_dict["user_id"]
+                            video_dict["platform"] = crawler
+                            video_dict["strategy"] = log_type
+                            video_dict["out_video_id"] = video_dict["video_id"]
+                            video_dict["width"] = 0
+                            video_dict["height"] = 0
+                            video_dict["crawler_rule"] = json.dumps(rule_dict)
+                            video_dict["user_id"] = user_dict["uid"]
+                            video_dict["publish_time"] = video_dict["publish_time_str"]
+                            mq.send_msg(video_dict)
+                            cls.download_cnt += 1
                 except Exception as e:
-                    Common.logger(log_type, crawler).error(f"抓取单条视频时异常:{e}\n")
+                    Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+                    Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
 
             Common.logger(log_type, crawler).info('已抓取完一组视频,休眠1秒\n')
+            Common.logging(log_type, crawler, env, '已抓取完一组视频,休眠1秒\n')
            time.sleep(1)
             index = index + len(video_element_temp)
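[Annotation] Instead of downloading, probing with ffmpeg, uploading, and writing MySQL/Feishu in-process (the `download_publish` method removed below), each qualifying video is now handed to the ETL pipeline as one message. A minimal sketch of that handoff as this diff uses it (field values are illustrative; `MQ.send_msg` is inferred from its usage here, not from `common/mq` documentation):

    mq = MQ(topic_name="topic_crawler_etl_" + env)  # e.g. topic_crawler_etl_dev
    video_dict.update({
        "out_user_id": video_dict["user_id"],    # original author id
        "platform": crawler,                     # e.g. "shipinhao"
        "strategy": log_type,                    # e.g. "search"
        "out_video_id": video_dict["video_id"],
        "width": 0, "height": 0,                 # resolution left for ETL to fill in
        "crawler_rule": json.dumps(rule_dict),
        "user_id": user_dict["uid"],             # site uid the post is published under
        "publish_time": video_dict["publish_time_str"],
    })
    mq.send_msg(video_dict)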
 
-    @classmethod
-    def download_publish(cls, log_type, crawler, word, rule_dict, video_dict, our_uid, oss_endpoint, env):
-        # Download the video
-        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
-
-        # Get video width/height via ffmpeg
-        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
-        if ffmpeg_dict is None:
-            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
-            Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
-            return
-        video_dict["video_width"] = ffmpeg_dict["width"]
-        video_dict["video_height"] = ffmpeg_dict["height"]
-
-        # Rule check
-        if cls.download_rule(log_type=log_type,
-                             crawler=crawler,
-                             video_dict=video_dict,
-                             rule_dict=rule_dict) is False:
-            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-            shutil.rmtree(f"./{crawler}/videos/{md_title}/")
-            Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
-            return
-
-        # Download the cover image
-        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_dict["video_title"], url=video_dict["cover_url"])
-        # Save video info to "./videos/{download_video_title}/info.txt"
-        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
-
-        # Upload the video
-        Common.logger(log_type, crawler).info("开始上传视频...")
-        our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                  crawler=crawler,
-                                                  strategy="搜索爬虫策略",
-                                                  our_uid=our_uid,
-                                                  env=env,
-                                                  oss_endpoint=oss_endpoint)
-        if env == "dev":
-            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-        else:
-            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-        Common.logger(log_type, crawler).info("视频上传完成")
-
-        if our_video_id is None:
-            try:
-                # Delete the video folder
-                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-                shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id}, 删除成功\n")
-                return
-            except FileNotFoundError:
-                return
-
-        insert_sql = f""" insert into crawler_video(video_id,
-                                                    out_user_id,
-                                                    platform,
-                                                    strategy,
-                                                    out_video_id,
-                                                    video_title,
-                                                    cover_url,
-                                                    video_url,
-                                                    duration,
-                                                    publish_time,
-                                                    play_cnt,
-                                                    crawler_rule,
-                                                    width,
-                                                    height)
-                                                    values({our_video_id},
-                                                    "{video_dict['user_id']}",
-                                                    "{cls.platform}",
-                                                    "搜索爬虫策略",
-                                                    "{video_dict['video_id']}",
-                                                    "{video_dict['video_title']}",
-                                                    "{video_dict['cover_url']}",
-                                                    "{video_dict['video_url']}",
-                                                    {int(video_dict['duration'])},
-                                                    "{video_dict['publish_time_str']}",
-                                                    {int(video_dict['play_cnt'])},
-                                                    '{json.dumps(rule_dict)}',
-                                                    {int(video_dict['video_width'])},
-                                                    {int(video_dict['video_height'])}) """
-        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
-        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
-
-        # Write to the Feishu sheet
-        Feishu.insert_columns(log_type, crawler, "xYWCzf", "ROWS", 1, 2)
-        time.sleep(0.5)
-        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
-                   "搜索爬虫策略",
-                   word,
-                   video_dict["video_title"],
-                   our_video_link,
-                   video_dict["duration"],
-                   video_dict["like_cnt"],
-                   video_dict["share_cnt"],
-                   video_dict["favorite_cnt"],
-                   video_dict["comment_cnt"],
-                   f'{video_dict["video_width"]}*{video_dict["video_height"]}',
-                   video_dict["publish_time_str"],
-                   video_dict["user_name"],
-                   video_dict["avatar_url"],
-                   video_dict["cover_url"],
-                   video_dict["video_url"]]]
-        Feishu.update_values(log_type, crawler, "xYWCzf", "F2:Z2", values)
-        Common.logger(log_type, crawler).info("写入飞书成功\n")
-        cls.download_cnt += 1
 
     @classmethod
     def get_video_info(cls, driver: WebDriver):
         # Common.logger(log_type, crawler).info('切回NATIVE_APP')
         driver.switch_to.context('NATIVE_APP')
 
         # Like count
-        like_id = driver.find_element(By.ID, 'com.tencent.mm:id/k04')
+        like_id = driver.find_element(By.ID, 'com.tencent.mm:id/k04')  # WeChat version 8.0.30
         like_cnt = like_id.get_attribute('name')
-        if like_cnt == "" or like_cnt == "喜欢" or like_cnt == "火" or cls.is_contain_chinese(like_cnt) is True:
-            like_cnt = 0
-        elif '万' in like_cnt:
+        if '万' in like_cnt:
             like_cnt = int(float(like_cnt.split('万')[0]) * 10000)
         elif '万+' in like_cnt:
             like_cnt = int(float(like_cnt.split('万+')[0]) * 10000)
+        elif like_cnt == "" or like_cnt == "喜欢" or like_cnt == "火" or cls.is_contain_chinese(like_cnt) is True:
+            like_cnt = 0
         else:
             like_cnt = int(float(like_cnt))
 
         # Share count
         share_id = driver.find_element(By.ID, 'com.tencent.mm:id/jhv')
         share_cnt = share_id.get_attribute('name')
-        if share_cnt == "" or share_cnt == "转发" or cls.is_contain_chinese(share_cnt) is True:
-            share_cnt = 0
-        elif '万' in share_cnt:
+        if '万' in share_cnt:
             share_cnt = int(float(share_cnt.split('万')[0]) * 10000)
         elif '万+' in share_cnt:
             share_cnt = int(float(share_cnt.split('万+')[0]) * 10000)
+        elif share_cnt == "" or share_cnt == "转发" or cls.is_contain_chinese(share_cnt) is True:
+            share_cnt = 0
         else:
             share_cnt = int(float(share_cnt))
 
         # Favorite count
         favorite_id = driver.find_element(By.ID, 'com.tencent.mm:id/fnp')
         favorite_cnt = favorite_id.get_attribute('name')
-        if favorite_cnt == "" or favorite_cnt == "收藏" or favorite_cnt == "推荐" or favorite_cnt == "火" or cls.is_contain_chinese(favorite_cnt) is True:
-            favorite_cnt = 0
-        elif '万' in favorite_cnt:
+        if '万' in favorite_cnt:
             favorite_cnt = int(float(favorite_cnt.split('万')[0]) * 10000)
         elif '万+' in favorite_cnt:
             favorite_cnt = int(float(favorite_cnt.split('万+')[0]) * 10000)
+        elif favorite_cnt == "" or favorite_cnt == "收藏" or favorite_cnt == "推荐" or favorite_cnt == "火" \
+                or cls.is_contain_chinese(favorite_cnt) is True:
+            favorite_cnt = 0
         else:
             favorite_cnt = int(float(favorite_cnt))
 
         # Comment count
         comment_id = driver.find_element(By.ID, 'com.tencent.mm:id/bje')
         comment_cnt = comment_id.get_attribute('name')
-        if comment_cnt == "" or comment_cnt == "评论" or cls.is_contain_chinese(comment_cnt) is True:
-            comment_cnt = 0
-        elif '万' in comment_cnt:
+        if '万' in comment_cnt:
             comment_cnt = int(float(comment_cnt.split('万')[0]) * 10000)
         elif '万+' in comment_cnt:
             comment_cnt = int(float(comment_cnt.split('万+')[0]) * 10000)
+        elif comment_cnt == "" or comment_cnt == "评论" or cls.is_contain_chinese(comment_cnt) is True:
+            comment_cnt = 0
         else:
             comment_cnt = int(float(comment_cnt))
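[Annotation] The four blocks above repeat one parsing rule: a counter reads as a bare number, as "1.2万"/"1.2万+" (×10000), or as a placeholder label (喜欢/转发/收藏/评论/推荐/火) that means zero. A helper capturing that rule might look like the following sketch (not part of this diff; it mirrors the branch order used above, where the 万 check must run before the Chinese-character check):

    def parse_cnt(raw):
        # "1.2万" and "1.2万+" both mean 1.2 * 10000; split('万') drops the trailing "+".
        if '万' in raw:
            return int(float(raw.split('万')[0]) * 10000)
        # Empty strings and placeholder labels mean no count is shown.
        if raw == "" or ShipinhaoSearchScheduling.is_contain_chinese(raw):
            return 0
        return int(float(raw))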
@@ -575,84 +406,28 @@ class ShipinhaoSearchScheduling:
         return video_dict
 
     @classmethod
-    def get_users(cls, log_type, crawler, sheetid, env):
-        while True:
-            user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
-            if user_sheet is None:
-                Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet}, 3秒钟后重试")
-                time.sleep(3)
-                continue
-            our_user_list = []
-            # for i in range(1, len(user_sheet)):
-            for i in range(1, 3):
-                search_word = user_sheet[i][4]
-                our_uid = user_sheet[i][6]
-                tag1 = user_sheet[i][8]
-                tag2 = user_sheet[i][9]
-                tag3 = user_sheet[i][10]
-                tag4 = user_sheet[i][11]
-                tag5 = user_sheet[i][12]
-                tag6 = user_sheet[i][13]
-                tag7 = user_sheet[i][14]
-                Common.logger(log_type, crawler).info(f"正在更新 {search_word} 搜索词信息")
-                if our_uid is None:
-                    default_user = getUser.get_default_user()
-                    # info used to create the our_uid
-                    user_dict = {
-                        'recommendStatus': -6,
-                        'appRecommendStatus': -6,
-                        'nickName': default_user['nickName'],
-                        'avatarUrl': default_user['avatarUrl'],
-                        'tagName': f'{tag1},{tag2},{tag3},{tag4},{tag5},{tag6},{tag7}',
-                    }
-                    our_uid = getUser.create_uid(log_type, crawler, user_dict, env)
-                    if env == 'prod':
-                        our_user_link = f'https://admin.piaoquantv.com/ums/user/{our_uid}/post'
-                    else:
-                        our_user_link = f'https://testadmin.piaoquantv.com/ums/user/{our_uid}/post'
-                    Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
-                                         [[our_uid, our_user_link]])
-                    Common.logger(log_type, crawler).info(f'站内用户主页创建成功:{our_user_link}\n')
-                our_user_dict = {
-                    'out_uid': '',
-                    'search_word': search_word,
-                    'our_uid': our_uid,
-                    'our_user_link': f'https://admin.piaoquantv.com/ums/user/{our_uid}/post',
-                }
-                our_user_list.append(our_user_dict)
-
-            return our_user_list
-
-    @classmethod
-    def get_search_videos(cls, log_type, crawler, rule_dict, oss_endpoint, env):
-        user_list = cls.get_users(log_type, crawler, "wNgi6Z", env)
-        for user in user_list:
-            cls.i = 0
-            cls.download_cnt = 0
-            search_word = user["search_word"]
-            our_uid = user["our_uid"]
-            Common.logger(log_type, crawler).info(f"开始抓取搜索词:{search_word}")
+    def get_search_videos(cls, log_type, crawler, rule_dict, user_list, env):
+        Common.logger(log_type, crawler).info(f"搜索词总数:{len(user_list)}\n")
+        Common.logging(log_type, crawler, env, f"搜索词总数:{len(user_list)}\n")
+        if len(user_list) == 0:
+            return
+        for user_dict in user_list:
             try:
+                cls.i = 0
+                cls.download_cnt = 0
+                Common.logger(log_type, crawler).info(f"开始抓取 {user_dict['link']}\n")
+                Common.logging(log_type, crawler, env, f"开始抓取 {user_dict['link']}\n")
                 cls.start_wechat(log_type=log_type,
                                  crawler=crawler,
-                                 word=search_word,
                                  rule_dict=rule_dict,
-                                 our_uid=our_uid,
-                                 oss_endpoint=oss_endpoint,
+                                 user_dict=user_dict,
                                  env=env)
             except Exception as e:
-                Common.logger(log_type, crawler).error(f"抓取{user['search_word']}时异常:{e}\n")
+                Common.logger(log_type, crawler).error(f"抓取 {user_dict['link']} 时异常:{e}\n")
+                Common.logging(log_type, crawler, env, f"抓取 {user_dict['link']} 时异常:{e}\n")
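[Annotation] With `get_users` and the Feishu sheet lookup gone, callers now pass the keyword list in directly; each entry must carry at least the `link` (search keyword) and `uid` fields this file reads. A hypothetical invocation, with the rule_dict shape taken from the commented-out example in the main block below and illustrative keyword/uid values:

    ShipinhaoSearchScheduling.get_search_videos(
        log_type="search",
        crawler="shipinhao",
        rule_dict={"videos_cnt": {"min": 10, "max": 0}, "duration": {"min": 30, "max": 600}},
        user_list=[{"link": "健康养生", "uid": 6267140}],  # 'link' = search keyword, 'uid' = target site uid
        env="dev")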
 
 
 if __name__ == '__main__':
-    # ShipinhaoSearchScheduling.get_search_videos(log_type="search",
-    #                                             crawler="shipinhao",
-    #                                             rule_dict='[{"videos_cnt":{"min":10,"max":0}},{"duration":{"min":30,"max":600}},{"share_cnt":{"min":3000,"max":0}},{"favorite_cnt":{"min":1000,"max":0}},{"publish_time":{"min":1672502400000,"max":0}}]',
-    #                                             oss_endpoint="out",
-    #                                             env="dev")
-    # print(ShipinhaoSearchScheduling.get_users("search", "shipinhao", "wNgi6Z", "dev"))
-    # print((date.today() + timedelta(days=0)).strftime("%Y-%m-%d"))
     print(ShipinhaoSearchScheduling.repeat_out_video_id(log_type="search",
                                                         crawler="shipinhao",
                                                         out_video_id="123",
|