@@ -9,63 +9,92 @@ import shutil
 import string
 import sys
 import time
+from datetime import date, timedelta
 import requests
 import urllib3
 from requests.adapters import HTTPAdapter
-
-from selenium.webdriver import DesiredCapabilities
-from selenium.webdriver.chrome.service import Service
-from selenium.webdriver.common.by import By
-from selenium import webdriver
-from lxml import etree
-
 sys.path.append(os.getcwd())
-from common.db import MysqlHelper
-from common.users import Users
+from common.scheduling_db import MysqlHelper
 from common.common import Common
 from common.feishu import Feishu
 from common.publish import Publish
 
 
-class Follow:
+class SchedulingFollow:
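+    # Scheduling-driven variant of the Follow crawler: the user list and the
+    # download thresholds come from a scheduling task dict plus MySQL instead
+    # of Feishu sheets (summary inferred from the changes below).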
     # Pagination parameter for a user's homepage video list
     offset = 0
-
     platform = "西瓜视频"
-    tag = "西瓜视频爬虫,定向爬虫策略"
 
     @classmethod
-    def get_rule(cls, log_type, crawler):
-        try:
-            while True:
-                rule_sheet = Feishu.get_values_batch(log_type, crawler, "4kxd31")
-                if rule_sheet is None:
-                    Common.logger(log_type, crawler).warning("rule_sheet is None! 10秒后重新获取")
-                    time.sleep(10)
-                    continue
-                rule_dict = {
-                    "play_cnt": int(rule_sheet[1][2]),
-                    "comment_cnt": int(rule_sheet[2][2]),
-                    "like_cnt": int(rule_sheet[3][2]),
-                    "duration": int(rule_sheet[4][2]),
-                    "publish_time": int(rule_sheet[5][2]),
-                    "video_width": int(rule_sheet[6][2]),
-                    "video_height": int(rule_sheet[7][2]),
-                }
-                return rule_dict
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
+    def get_users(cls, log_type, crawler, task, env, machine):
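+        # task['spider_link'] is expected to hold profile URLs such as
+        # "https://www.ixigua.com/home/95420624045" (shape assumed from the
+        # parsing below); the trailing ID becomes the out-of-site uid.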
+        link_list = task['spider_link']
+        user_list = []
+        for link in link_list:
+            out_uid = int(link.split("https://www.ixigua.com/home/")[-1].replace("/", "").strip())
+            sql = f""" select * from crawler_author_map where spider_link="{link}" """
+            our_user_info = MysqlHelper.get_values(log_type=log_type, crawler=crawler, sql=sql, env=env, machine=machine)
+            if len(our_user_info) == 0:
+                our_uid = 0
+                Common.logger(log_type, crawler).info(f"没有站内虚拟账号: {link}\n")
+            else:
+                our_uid = our_user_info[0]["media_id"]
+            user_dict = {
+                "out_uid": out_uid,
+                "our_uid": our_uid
+            }
+            user_list.append(user_dict)
+        Common.logger(log_type, crawler).info(f"user_list:{user_list}")
+        return user_list
 
     # Download rules
     @classmethod
-    def download_rule(cls, video_info_dict, rule_dict):
-        if video_info_dict['play_cnt'] >= rule_dict['play_cnt']:
-            if video_info_dict['comment_cnt'] >= rule_dict['comment_cnt']:
-                if video_info_dict['like_cnt'] >= rule_dict['like_cnt']:
-                    if video_info_dict['duration'] >= rule_dict['duration']:
-                        if video_info_dict['video_width'] >= rule_dict['video_width'] \
-                                or video_info_dict['video_height'] >= rule_dict['video_height']:
-                            return True
+    def download_rule_scheduling(cls, video_info_dict, task):
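+        # Sketch of the task fields this method reads (field names from the
+        # code below; the concrete values are illustrative only):
+        #   task = {"play_cnt": {"min": 100}, "video_like": {"min": 10},
+        #           "share_cnt": {"min": 0}, "video_width": {"min": 0},
+        #           "video_height": {"min": 0},
+        #           "duration_min": 10, "duration_max": 600}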
+        # Each threshold falls back to a permissive default when the task omits it
+        try:
+            play_cnt_min = int(task['play_cnt']['min'])
+        except (KeyError, TypeError, ValueError):
+            play_cnt_min = 0
+
+        try:
+            video_like_min = int(task['video_like']['min'])
+        except (KeyError, TypeError, ValueError):
+            video_like_min = 0
+
+        try:
+            share_cnt_min = int(task['share_cnt']['min'])
+        except (KeyError, TypeError, ValueError):
+            share_cnt_min = 0
+
+        try:
+            video_width_min = int(task['video_width']['min'])
+        except (KeyError, TypeError, ValueError):
+            video_width_min = 0
+
+        try:
+            video_height_min = int(task['video_height']['min'])
+        except (KeyError, TypeError, ValueError):
+            video_height_min = 0
+
+        try:
+            duration_min = int(task['duration_min'])
+        except (KeyError, TypeError, ValueError):
+            duration_min = 0
+
+        try:
+            duration_max = int(task['duration_max'])
+        except (KeyError, TypeError, ValueError):
+            duration_max = 1000000000
+
+        if int(video_info_dict['play_cnt']) >= play_cnt_min:
+            if int(video_info_dict['like_cnt']) >= video_like_min:
+                if int(video_info_dict['share_cnt']) >= share_cnt_min:
+                    if duration_max >= int(video_info_dict['duration']) >= duration_min:
+                        if int(video_info_dict['video_width']) >= video_width_min:
+                            if int(video_info_dict['video_height']) >= video_height_min:
+                                return True
+                            else:
+                                return False
                         else:
                             return False
                     else:
@@ -97,101 +126,6 @@ class Follow:
         except Exception as e:
             Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')
 
-    @classmethod
-    def get_out_user_info(cls, log_type, crawler, out_uid):
-        try:
-            headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
-                       'referer': f'https://www.ixigua.com/home/{out_uid}',
-                       'Cookie': f'ixigua-a-s=1; support_webp=true; support_avif=false; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; __ac_signature={cls.random_signature()}; MONITOR_WEB_ID=67cb5099-a022-4ec3-bb8e-c4de6ba51dd0; s_v_web_id=verify_lef4i99x_32SosrdH_Qrtk_4LJn_8S7q_fhu16xe3s8ZV; tt_scid=QLJjPuHf6wxVqu6IIq6gHiJXQpVrCwrdhjH2zpm7-E3ZniE1RXBcP6M8b41FJOdo41e1; ttwid=1%7CHHtv2QqpSGuSu8r-zXF1QoWsvjmNi1SJrqOrZzg-UCY%7C1677047013%7C5866a444e5ae10a9df8c11551db75010fb77b657f214ccf84e503fae8d313d09; msToken=PerXJcDdIsZ6zXkGITsftXX4mDaVaW21GuqtzSVdctH46oXXT2GcELIs9f0XW2hunRzP6KVHLZaYElRvNYflLKUXih7lC27XKxs3HjdZiXPK9NQaoKbLfA==; ixigua-a-s=1',}
-            url = f"https://www.ixigua.com/home/{out_uid}"
-            urllib3.disable_warnings()
-            s = requests.session()
-            # max_retries=3 重试3次
-            s.mount('http://', HTTPAdapter(max_retries=3))
-            s.mount('https://', HTTPAdapter(max_retries=3))
-            response = s.get(url=url, headers=headers, proxies=Common.tunnel_proxies(), verify=False, timeout=5).text
-            html = etree.HTML(response)
-            out_follow_str = html.xpath('//div[@class="userDetailV3__header__detail2"]/*[1]/span')[0].text.encode('raw_unicode_escape').decode()
-            out_fans_str = html.xpath('//div[@class="userDetailV3__header__detail2"]/*[2]/span')[0].text.encode('raw_unicode_escape').decode()
-            out_like_str = html.xpath('//div[@class="userDetailV3__header__detail2"]/*[3]/span')[0].text.encode('raw_unicode_escape').decode()
-            out_avatar_url = f"""https:{html.xpath('//span[@class="component-avatar__inner"]//img/@src')[0]}"""
-            if "万" in out_follow_str:
-                out_follow = int(float(out_follow_str.split("万")[0])*10000)
-            else:
-                out_follow = int(out_follow_str.replace(",", ""))
-            if "万" in out_fans_str:
-                out_fans = int(float(out_fans_str.split("万")[0])*10000)
-            else:
-                out_fans = int(out_fans_str.replace(",", ""))
-            if "万" in out_like_str:
-                out_like = int(float(out_like_str.split("万")[0])*10000)
-            else:
-                out_like = int(out_like_str.replace(",", ""))
-            out_user_dict = {
-                "out_follow": out_follow,
-                "out_fans": out_fans,
-                "out_like": out_like,
-                "out_avatar_url": out_avatar_url,
-            }
-            # for k, v in out_user_dict.items():
-            #     print(f"{k}:{v}")
-            return out_user_dict
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f"get_out_user_info:{e}\n")
-
-    # 获取用户信息(字典格式). 注意:部分 user_id 字符类型是 int / str
-    @classmethod
-    def get_user_list(cls, log_type, crawler, sheetid, env, machine):
-        try:
-            while True:
-                user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
-                if user_sheet is None:
-                    Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet} 10秒钟后重试")
-                    continue
-                our_user_list = []
-                for i in range(1, len(user_sheet)):
-                    out_uid = user_sheet[i][2]
-                    user_name = user_sheet[i][3]
-                    our_uid = user_sheet[i][6]
-                    our_user_link = user_sheet[i][7]
-                    if out_uid is None or user_name is None:
-                        Common.logger(log_type, crawler).info("空行\n")
-                    else:
-                        Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息\n")
-                        if our_uid is None:
-                            out_user_info = cls.get_out_user_info(log_type, crawler, out_uid)
-                            out_user_dict = {
-                                "out_uid": out_uid,
-                                "user_name": user_name,
-                                "out_avatar_url": out_user_info["out_avatar_url"],
-                                "out_create_time": '',
-                                "out_tag": '',
-                                "out_play_cnt": 0,
-                                "out_fans": out_user_info["out_fans"],
-                                "out_follow": out_user_info["out_follow"],
-                                "out_friend": 0,
-                                "out_like": out_user_info["out_like"],
-                                "platform": cls.platform,
-                                "tag": cls.tag,
-                            }
-                            our_user_dict = Users.create_user(log_type=log_type, crawler=crawler, out_user_dict=out_user_dict, env=env, machine=machine)
-                            our_uid = our_user_dict['our_uid']
-                            our_user_link = our_user_dict['our_user_link']
-                            Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}', [[our_uid, our_user_link]])
-                            Common.logger(log_type, crawler).info(f'站内用户信息写入飞书成功!\n')
-                            our_user_list.append(our_user_dict)
-                        else:
-                            our_user_dict = {
-                                'out_uid': out_uid,
-                                'user_name': user_name,
-                                'our_uid': our_uid,
-                                'our_user_link': our_user_link,
-                            }
-                            our_user_list.append(our_user_dict)
-                return our_user_list
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f'get_user_id_from_feishu异常:{e}\n')
-
     @classmethod
     def random_signature(cls):
         src_digits = string.digits  # digits
@@ -218,39 +152,6 @@ class Follow:
         new_password = new_password_start + 'y' + new_password_end
         return new_password
 
-    @classmethod
-    def get_signature(cls, log_type, crawler, out_uid, machine):
-        try:
-            # 打印请求配置
-            ca = DesiredCapabilities.CHROME
-            ca["goog:loggingPrefs"] = {"performance": "ALL"}
-
-            # 不打开浏览器运行
-            chrome_options = webdriver.ChromeOptions()
-            chrome_options.add_argument("--headless")
-            chrome_options.add_argument('--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36')
-            chrome_options.add_argument("--no-sandbox")
-
-            # driver初始化
-            if machine == 'aliyun' or machine == 'aliyun_hk':
-                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options)
-            elif machine == 'macpro':
-                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options,
-                                          service=Service('/Users/lieyunye/Downloads/chromedriver_v86/chromedriver'))
-            elif machine == 'macair':
-                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options,
-                                          service=Service('/Users/piaoquan/Downloads/chromedriver'))
-            else:
-                driver = webdriver.Chrome(desired_capabilities=ca, options=chrome_options, service=Service('/Users/wangkun/Downloads/chromedriver/chromedriver_v110/chromedriver'))
-            driver.implicitly_wait(10)
-            driver.get(f'https://www.ixigua.com/home/{out_uid}/')
-            time.sleep(3)
-            data_src = driver.find_elements(By.XPATH, '//img[@class="tt-img BU-MagicImage tt-img-loaded"]')[1].get_attribute("data-src")
-            signature = data_src.split("x-signature=")[-1]
-            return signature
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f'get_signature异常:{e}\n')
-
     # Fetch video details
     @classmethod
     def get_video_url(cls, log_type, crawler, gid):
@@ -652,7 +553,7 @@ class Follow:
             Common.logger(log_type, crawler).error(f'get_video_url:{e}\n')
 
     @classmethod
-    def get_videolist(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine):
+    def get_videolist(cls, log_type, crawler, task, our_uid, out_uid, oss_endpoint, env, machine):
         try:
             signature = cls.random_signature()
             while True:
@@ -664,26 +565,11 @@ class Follow:
                     'maxBehotTime': '0',
                     'order': 'new',
                     'isHome': '0',
-                    # 'msToken': 'G0eRzNkw189a8TLaXjc6nTHVMQwh9XcxVAqTbGKi7iPJdQcLwS3-XRrJ3MZ7QBfqErpxp3EX1WtvWOIcZ3NIgr41hgcd-v64so_RRj3YCRw1UsKW8mIssNLlIMspsg==',
-                    # 'X-Bogus': 'DFSzswVuEkUANjW9ShFTgR/F6qHt',
                     '_signature': signature,
                 }
                 headers = {
-                    # 'authority': 'www.ixigua.com',
-                    # 'accept': 'application/json, text/plain, */*',
-                    # 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
-                    # 'cache-control': 'no-cache',
-                    # 'cookie': f'MONITOR_WEB_ID=7168304743566296612; __ac_signature={signature}; ixigua-a-s=1; support_webp=true; support_avif=false; csrf_session_id=a5355d954d3c63ed1ba35faada452b4d; msToken=G0eRzNkw189a8TLaXjc6nTHVMQwh9XcxVAqTbGKi7iPJdQcLwS3-XRrJ3MZ7QBfqErpxp3EX1WtvWOIcZ3NIgr41hgcd-v64so_RRj3YCRw1UsKW8mIssNLlIMspsg==; tt_scid=o4agqz7u9SKPwfBoPt6S82Cw0q.9KDtqmNe0JHxMqmpxNHQWq1BmrQdgVU6jEoX7ed99; ttwid=1%7CHHtv2QqpSGuSu8r-zXF1QoWsvjmNi1SJrqOrZzg-UCY%7C1676618894%7Cee5ad95378275f282f230a7ffa9947ae7eff40d0829c5a2568672a6dc90a1c96; ixigua-a-s=1',
-                    # 'pragma': 'no-cache',
                     'referer': f'https://www.ixigua.com/home/{out_uid}/video/?preActiveKey=hotsoon&list_entrance=userdetail',
-                    # 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
-                    # 'sec-ch-ua-mobile': '?0',
-                    # 'sec-ch-ua-platform': '"macOS"',
-                    # 'sec-fetch-dest': 'empty',
-                    # 'sec-fetch-mode': 'cors',
-                    # 'sec-fetch-site': 'same-origin',
                     'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
-                    # 'x-secsdk-csrf-token': '00010000000119e3f9454d1dcbb288704cda1960f241e2d19bd21f2fd283520c3615a990ac5a17448bfbb902a249'
                 }
                 urllib3.disable_warnings()
                 s = requests.session()
@@ -804,20 +690,23 @@ class Follow:
                     else:
                         cover_url = videoList[i]['video_detail_info']['detail_video_large_image']['url_list'][0]['url']
 
-                    while True:
-                        rule_dict = cls.get_rule(log_type, crawler)
-                        if rule_dict is None:
-                            Common.logger(log_type, crawler).warning(f"rule_dict:{rule_dict}, 10秒后重试")
-                            time.sleep(10)
-                        else:
-                            break
+                    min_publish_time = int(task["min_publish_time"])
+                    min_publish_day = int(task["min_publish_day"])
+                    min_publish_day = (date.today() + timedelta(days=-min_publish_day)).strftime("%Y-%m-%d")
+                    min_publish_day = int(time.mktime(time.strptime(min_publish_day, "%Y-%m-%d")))
+                    # An explicit min_publish_time (epoch seconds) takes precedence
+                    # over the day-based cutoff whenever it is set
+                    if min_publish_time > 0:
+                        publish_time_rule = min_publish_time
+                    else:
+                        publish_time_rule = min_publish_day
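+                    # publish_time_rule is an epoch-seconds floor: videos published
+                    # before it are skipped, and for non-pinned videos the crawl for
+                    # this user stops (see the elif branches below)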
 
                     if gid == 0 or video_id == 0 or cover_url == 0:
                         Common.logger(log_type, crawler).info('无效视频\n')
-                    elif is_top is True and int(time.time()) - int(publish_time) > 3600 * 24 * rule_dict['publish_time']:
-                        Common.logger(log_type, crawler).info(f'置顶视频,且发布时间:{publish_time_str} 超过{rule_dict["publish_time"]}天\n')
-                    elif int(time.time()) - int(publish_time) > 3600 * 24 * rule_dict['publish_time']:
-                        Common.logger(log_type, crawler).info(f'发布时间:{publish_time_str}超过{rule_dict["publish_time"]}天\n')
+                    elif is_top is True and int(publish_time) < publish_time_rule:
+                        Common.logger(log_type, crawler).info('置顶视频,且发布时间超过抓取时间\n')
+                    elif int(publish_time) < publish_time_rule:
+                        Common.logger(log_type, crawler).info('发布时间超过抓取时间\n')
                         cls.offset = 0
                         return
                     else:
@@ -852,8 +741,8 @@ class Follow:
                         cls.download_publish(log_type=log_type,
                                              crawler=crawler,
                                              video_dict=video_dict,
-                                             rule_dict=rule_dict,
-                                             strategy=strategy,
+                                             task=task,
+                                             strategy=task["task_name"],
                                              our_uid=our_uid,
                                              oss_endpoint=oss_endpoint,
                                              env=env,
@@ -869,22 +758,14 @@ class Follow:
 
     # Download / upload
     @classmethod
-    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
+    def download_publish(cls, log_type, crawler, strategy, video_dict, task, our_uid, oss_endpoint, env, machine):
         try:
-            if cls.download_rule(video_dict, rule_dict) is False:
+            if cls.download_rule_scheduling(video_dict, task) is False:
                 Common.logger(log_type, crawler).info('不满足抓取规则\n')
-            elif any(word if word in video_dict['video_title'] else False for word in cls.filter_words(log_type, crawler)) is True:
-                Common.logger(log_type, crawler).info('标题已中过滤词:{}\n', video_dict['video_title'])
+            elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type, crawler)):
+                Common.logger(log_type, crawler).info(f"标题已中过滤词:{video_dict['video_title']}\n")
             elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
                 Common.logger(log_type, crawler).info('视频已下载\n')
-            # elif str(video_dict['video_id']) in [x for y in Feishu.get_values_batch(log_type, 'xigua', 'e075e9') for x in y]:
-            #     Common.logger(log_type, crawler).info('视频已下载\n')
-            # elif str(video_dict['video_id']) in [x for y in Feishu.get_values_batch(log_type, 'xigua', '3Ul6wZ') for x in y]:
-            #     Common.logger(log_type, crawler).info('视频已下载\n')
-            # elif str(video_dict['video_id']) in [x for y in Feishu.get_values_batch(log_type, 'xigua', 'QOWqMo') for x in y]:
-            #     Common.logger(log_type, crawler).info('视频已下载\n')
-            # elif str(video_dict['video_id']) in [x for y in Feishu.get_values_batch(log_type, 'xigua', 'wjhpDs') for x in y]:
-            #     Common.logger(log_type, crawler).info('视频已存在\n')
             else:
                 # Download the video
                 Common.download_method(log_type=log_type, crawler=crawler, text='xigua_video', title=video_dict['video_title'], url=video_dict['video_url'])
@@ -948,6 +829,15 @@ class Follow:
                 Feishu.update_values(log_type, 'xigua', "e075e9", "F2:Z2", values)
                 Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
 
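+                # Snapshot of the thresholds applied to this video, rebuilt from
+                # the task (purpose assumed: persisted together with the video
+                # record by the insert below)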
+                rule_dict = {
+                    "play_cnt": task["play_cnt"],
+                    "video_width": task["video_width"],
+                    "video_height": task["video_height"],
+                    "video_like": task["video_like"],
+                    "share_cnt": task["share_cnt"],
+                    "duration": {"min": task["duration_min"], "max": task["duration_max"]}
+                }
+
                 # Save video info to the database
                 insert_sql = f""" insert into crawler_video(video_id,
                                                             user_id,
@@ -986,41 +876,40 @@ class Follow:
             Common.logger(log_type, crawler).error(f'download_publish异常:{e}\n')
 
     @classmethod
-    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
+    def get_follow_videos(cls, log_type, crawler, task, oss_endpoint, env, machine):
         try:
-            user_list = cls.get_user_list(log_type=log_type, crawler=crawler, sheetid="5tlTYB", env=env, machine=machine)
+            user_list = cls.get_users(log_type=log_type,
+                                      crawler=crawler,
+                                      task=task,
+                                      env=env,
+                                      machine=machine)
             for user in user_list:
                 out_uid = user["out_uid"]
-                user_name = user["user_name"]
-                our_uid = user["our_uid"]
-                Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
-                cls.get_videolist(log_type=log_type,
-                                  crawler=crawler,
-                                  strategy=strategy,
-                                  our_uid=our_uid,
-                                  out_uid=out_uid,
-                                  oss_endpoint=oss_endpoint,
-                                  env=env,
-                                  machine=machine)
-                cls.offset = 0
-                time.sleep(1)
+                our_uid = int(user["our_uid"])
+                if our_uid == 0:
+                    # No in-site account is bound to this author yet, so skip it
+                    continue
+                Common.logger(log_type, crawler).info(f"开始抓取 {out_uid} 用户主页视频\n")
+                cls.get_videolist(log_type=log_type,
+                                  crawler=crawler,
+                                  task=task,
+                                  our_uid=our_uid,
+                                  out_uid=out_uid,
+                                  oss_endpoint=oss_endpoint,
+                                  env=env,
+                                  machine=machine)
+                cls.offset = 0
+                time.sleep(1)
         except Exception as e:
             Common.logger(log_type, crawler).error(f"get_follow_videos:{e}\n")
 
 
 if __name__ == '__main__':
-    # print(Follow.get_signature("follow", "xigua", "95420624045", "local"))
-    # Follow.get_videolist(log_type="follow",
-    #                      crawler="xigua",
-    #                      strategy="定向爬虫策略",
-    #                      our_uid="6267141",
-    #                      out_uid="95420624045",
-    #                      oss_endpoint="out",
-    #                      env="dev",
-    #                      machine="local")
-    # print(Follow.random_signature())
-    rule = Follow.get_rule("follow", "xigua")
-    print(type(rule))
-    print(type(json.dumps(rule)))
-    print(json.dumps(rule))
+    # SchedulingFollow.get_users(log_type="follow",
+    #                            crawler="xigua",
+    #                            task={"spider_link": ["https://www.ixigua.com/home/95420624045",
+    #                                                  "https://www.ixigua.com/home/6431477489"]},
+    #                            env="dev",
+    #                            machine="local")
+
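+    # End-to-end sketch (commented out; the task dict shape is assumed from
+    # the fields this class reads, and the values are illustrative only):
+    # task = {"task_name": "xigua_follow_demo",
+    #         "spider_link": ["https://www.ixigua.com/home/95420624045"],
+    #         "play_cnt": {"min": 100}, "video_like": {"min": 0},
+    #         "share_cnt": {"min": 0}, "video_width": {"min": 0},
+    #         "video_height": {"min": 0}, "duration_min": 10,
+    #         "duration_max": 600, "min_publish_time": 0, "min_publish_day": 30}
+    # SchedulingFollow.get_follow_videos(log_type="follow", crawler="xigua",
+    #                                    task=task, oss_endpoint="out",
+    #                                    env="dev", machine="local")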
+    print(SchedulingFollow.repeat_video("follow", "xigua", "v0201ag10000ce3jcjbc77u8jsplpgrg", "dev", "local"))
     pass