@@ -13,15 +13,17 @@ import json
 import urllib3
 from requests.adapters import HTTPAdapter
+
 sys.path.append(os.getcwd())
 from common.common import Common
 from common.feishu import Feishu
 from common.getuser import getUser
 from common.db import MysqlHelper
 from common.publish import Publish
+from common.userAgent import get_random_user_agent
 
 
-class Follow:
+class KuaiShouFollow:
     platform = "快手"
     tag = "快手爬虫,定向爬虫策略"
 
@@ -121,27 +123,27 @@ class Follow:
         try:
             url = "https://www.kuaishou.com/graphql"
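+            # The visionProfile operation returns the author's profile fields and
+            # ownerCount (fan / photo / follow totals) rather than a page of videos.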
             payload = json.dumps({
-                "operationName": "visionProfilePhotoList",
+                "operationName": "visionProfile",
                 "variables": {
-                    "userId": out_uid,
-                    "pcursor": "",
-                    "page": "profile"
+                    "userId": out_uid
                 },
-                "query": "fragment photoContent on PhotoEntity {\n id\n duration\n caption\n originCaption\n likeCount\n viewCount\n realLikeCount\n coverUrl\n photoUrl\n photoH265Url\n manifest\n manifestH265\n videoResource\n coverUrls {\n url\n __typename\n }\n timestamp\n expTag\n animatedCoverUrl\n distance\n videoRatio\n liked\n stereoType\n profileUserTopPhoto\n musicBlocked\n __typename\n}\n\nfragment feedContent on Feed {\n type\n author {\n id\n name\n headerUrl\n following\n headerUrls {\n url\n __typename\n }\n __typename\n }\n photo {\n ...photoContent\n __typename\n }\n canAddComment\n llsid\n status\n currentPcursor\n tags {\n type\n name\n __typename\n }\n __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n result\n llsid\n webPageArea\n feeds {\n ...feedContent\n __typename\n }\n hostName\n pcursor\n __typename\n }\n}\n"
+                "query": "query visionProfile($userId: String) {\n visionProfile(userId: $userId) {\n result\n hostName\n userProfile {\n ownerCount {\n fan\n photo\n follow\n photo_public\n __typename\n }\n profile {\n gender\n user_name\n user_id\n headurl\n user_text\n user_profile_bg_url\n __typename\n }\n isFollowing\n __typename\n }\n __typename\n }\n}\n"
             })
             headers = {
-                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:109.0) Gecko/20100101 Firefox/111.0',
-                'Accept': '*/*',
-                'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
-                'Accept-Encoding': 'gzip, deflate, br',
-                'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
-                'content-type': 'application/json',
-                'Origin': 'https://www.kuaishou.com',
+                'Accept-Language': 'zh-CN,zh;q=0.9',
                 'Connection': 'keep-alive',
-                'Cookie': 'kpf=PC_WEB; clientid=3; did=web_3f264336f6a6c191cd36fb15e87ab708; kpn=KUAISHOU_VISION',
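+                # This cookie carries a logged-in web session (userId / kuaishou.server.web_st),
+                # replacing the anonymous visitor cookie used before.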
+                'Cookie': 'kpf=PC_WEB; clientid=3; did=web_3910d9d51a5f6ac9cce8cb25cc0780ff; userId=1321650328; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqAB7bjkVapctEeh2Vh6_w9YmSXFnxBUvy3dAIZZmpyd9sAJciYB48W_Ch7rN3r1mhKtJCtBGFqMX-cTy1RoGLLmaKpwQTvxjew1nsH6JacRsJf6qB0N273lzzmGeXQPxb-MVwqtoyvxL8bLJ0DcldtHb1Q36U4efpRFse9WYLL9PtlsEprI6xORB6a009HLlRKiKMzma5s_nhdwr5xt1QwnphoStVKEb-xUGkLo9u0A7O3lj4AGIiDTIZw_4BbSmp0oOBtTtItbuywLAU3zSIErl1q6F5AW8SgFMAE; kuaishou.server.web_ph=0efe610176ceb53a303c1256cd0f6ff0325e; kpn=KUAISHOU_VISION',
+                'Origin': 'https://www.kuaishou.com',
+                'Referer': 'https://www.kuaishou.com/profile/{}'.format(out_uid),
                 'Sec-Fetch-Dest': 'empty',
                 'Sec-Fetch-Mode': 'cors',
-                'Sec-Fetch-Site': 'same-origin'
+                'Sec-Fetch-Site': 'same-origin',
+                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+                'accept': '*/*',
+                'content-type': 'application/json',
+                'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
+                'sec-ch-ua-mobile': '?0',
+                'sec-ch-ua-platform': '"macOS"'
             }
             urllib3.disable_warnings()
             s = requests.session()
@@ -244,7 +246,7 @@ class Follow:
                     "tag": cls.tag,
                 }
                 our_user_dict = getUser.create_user(log_type=log_type, crawler=crawler,
-                                                        out_user_dict=out_user_dict, env=env, machine=machine)
+                                                    out_user_dict=out_user_dict, env=env, machine=machine)
                 our_uid = our_user_dict['our_uid']
                 our_user_link = our_user_dict['our_user_link']
                 Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
@@ -355,264 +357,265 @@ class Follow:
 
     @classmethod
     def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine, pcursor=""):
-            download_cnt_1, download_cnt_2 = 0, 0
+        download_cnt_1, download_cnt_2 = 0, 0
+
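+        # Retry until both rule sets are available: the crawl below cannot run
+        # without download rules, so wait 10s between attempts.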
+        while True:
+            rule_dict_1 = cls.get_rule(log_type, crawler, 1)
+            rule_dict_2 = cls.get_rule(log_type, crawler, 2)
+            if rule_dict_1 is None or rule_dict_2 is None:
+                Common.logger(log_type, crawler).warning(f"rule_dict is None, 10秒后重试")
+                time.sleep(10)
+            else:
+                break
 
-            while True:
-                rule_dict_1 = cls.get_rule(log_type, crawler, 1)
-                rule_dict_2 = cls.get_rule(log_type, crawler, 2)
-                if rule_dict_1 is None or rule_dict_2 is None:
-                    Common.logger(log_type, crawler).warning(f"rule_dict is None, 10秒后重试")
-                    time.sleep(10)
-                else:
-                    break
+        try:
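+            # download_cnt rules arrive as comparator strings such as "<=5"; stripping
+            # "=", "<", ">" and taking the last character recovers the limit, so only
+            # single-digit limits survive this parsing.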
+            if download_cnt_1 >= int(
+                    rule_dict_1['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[
+                        -1]) and download_cnt_2 >= int(
+                rule_dict_2['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[-1]):
+                Common.logger(log_type, crawler).info(
+                    f"规则1已下载{download_cnt_1}条视频,规则2已下载{download_cnt_2}条视频\n")
+                return
 
-            try:
-                if download_cnt_1 >= int(
-                        rule_dict_1['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[
-                            -1]) and download_cnt_2 >= int(
-                    rule_dict_2['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">", "")[-1]):
-                    Common.logger(log_type, crawler).info(
-                        f"规则1已下载{download_cnt_1}条视频,规则2已下载{download_cnt_2}条视频\n")
-                    return
+            url = "https://www.kuaishou.com/graphql"
+            payload = json.dumps({
+                "operationName": "visionProfilePhotoList",
+                "variables": {
+                    "userId": out_uid,
+                    "pcursor": pcursor,
+                    "page": "profile"
+                },
+                "query": "fragment photoContent on PhotoEntity {\n id\n duration\n caption\n originCaption\n likeCount\n viewCount\n realLikeCount\n coverUrl\n photoUrl\n photoH265Url\n manifest\n manifestH265\n videoResource\n coverUrls {\n url\n __typename\n }\n timestamp\n expTag\n animatedCoverUrl\n distance\n videoRatio\n liked\n stereoType\n profileUserTopPhoto\n musicBlocked\n __typename\n}\n\nfragment feedContent on Feed {\n type\n author {\n id\n name\n headerUrl\n following\n headerUrls {\n url\n __typename\n }\n __typename\n }\n photo {\n ...photoContent\n __typename\n }\n canAddComment\n llsid\n status\n currentPcursor\n tags {\n type\n name\n __typename\n }\n __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n result\n llsid\n webPageArea\n feeds {\n ...feedContent\n __typename\n }\n hostName\n pcursor\n __typename\n }\n}\n"
+            })
 
-                url = "https://www.kuaishou.com/graphql"
-                payload = json.dumps({
-                    "operationName": "visionProfilePhotoList",
-                    "variables": {
-                        "userId": out_uid,
-                        "pcursor": pcursor,
-                        "page": "profile"
-                    },
-                    "query": "fragment photoContent on PhotoEntity {\n id\n duration\n caption\n originCaption\n likeCount\n viewCount\n realLikeCount\n coverUrl\n photoUrl\n photoH265Url\n manifest\n manifestH265\n videoResource\n coverUrls {\n url\n __typename\n }\n timestamp\n expTag\n animatedCoverUrl\n distance\n videoRatio\n liked\n stereoType\n profileUserTopPhoto\n musicBlocked\n __typename\n}\n\nfragment feedContent on Feed {\n type\n author {\n id\n name\n headerUrl\n following\n headerUrls {\n url\n __typename\n }\n __typename\n }\n photo {\n ...photoContent\n __typename\n }\n canAddComment\n llsid\n status\n currentPcursor\n tags {\n type\n name\n __typename\n }\n __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n result\n llsid\n webPageArea\n feeds {\n ...feedContent\n __typename\n }\n hostName\n pcursor\n __typename\n }\n}\n"
-                })
-
-                headers = {
-                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:109.0) Gecko/20100101 Firefox/111.0',
-                    'Accept': '*/*',
-                    'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
-                    'Accept-Encoding': 'gzip, deflate, br',
-                    'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
-                    'content-type': 'application/json',
-                    'Origin': 'https://www.kuaishou.com',
-                    'Connection': 'keep-alive',
-                    'Cookie': 'kpf=PC_WEB; clientid=3; did=web_3f264336f6a6c191cd36fb15e87ab708; kpn=KUAISHOU_VISION',
-                    'Sec-Fetch-Dest': 'empty',
-                    'Sec-Fetch-Mode': 'cors',
-                    'Sec-Fetch-Site': 'same-origin'
-                }
-                urllib3.disable_warnings()
-                s = requests.session()
-                # max_retries=3: retry up to 3 times
-                s.mount('http://', HTTPAdapter(max_retries=3))
-                s.mount('https://', HTTPAdapter(max_retries=3))
-                response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False,
-                                  timeout=10)
-                response.close()
-                # Common.logger(log_type, crawler).info(f"get_videoList:{response.text}\n")
-                if response.status_code != 200:
-                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.text}\n")
-                    return
-                elif 'data' not in response.json():
-                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
-                    return
-                elif 'visionProfilePhotoList' not in response.json()['data']:
-                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
-                    return
-                elif 'feeds' not in response.json()['data']['visionProfilePhotoList']:
-                    Common.logger(log_type, crawler).warning(
-                        f"get_videoList_response:{response.json()['data']['visionProfilePhotoList']}\n")
-                    return
-                elif len(response.json()['data']['visionProfilePhotoList']['feeds']) == 0:
-                    Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
-                    return
-                else:
-                    feeds = response.json()['data']['visionProfilePhotoList']['feeds']
-                    pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
-                    # Common.logger(log_type, crawler).info(f"feeds0: {feeds}\n")
-                    for i in range(len(feeds)):
-                        if 'photo' not in feeds[i]:
-                            Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]}\n")
-                            break
-
-                        # video_title
-                        if 'caption' not in feeds[i]['photo']:
-                            video_title = cls.random_title(log_type, crawler)
-                        elif feeds[i]['photo']['caption'].strip() == "":
-                            video_title = cls.random_title(log_type, crawler)
-                        else:
-                            video_title = cls.video_title(log_type, crawler, feeds[i]['photo']['caption'])
-
-                        if 'videoResource' not in feeds[i]['photo'] \
-                                and 'manifest' not in feeds[i]['photo'] \
-                                and 'manifestH265' not in feeds[i]['photo']:
-                            Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
-                            break
-                        videoResource = feeds[i]['photo']['videoResource']
-
-                        if 'h264' not in videoResource and 'hevc' not in videoResource:
-                            Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
-                            break
-
-                        # video_id
-                        if 'h264' in videoResource and 'videoId' in videoResource['h264']:
-                            video_id = videoResource['h264']['videoId']
-                        elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
-                            video_id = videoResource['hevc']['videoId']
-                        else:
-                            video_id = ""
+            headers = {
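+                # Rotate desktop User-Agents via common.userAgent instead of pinning
+                # a single hardcoded browser string.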
+                'User-Agent': get_random_user_agent('pc'),
+                'Accept': '*/*',
+                'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
+                'Accept-Encoding': 'gzip, deflate, br',
+                'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
+                'content-type': 'application/json',
+                'Origin': 'https://www.kuaishou.com',
+                'Connection': 'keep-alive',
+                'Cookie': 'kpf=PC_WEB; clientid=3; did=web_3a3ea737874da28fd2e54f304a192eff; kpn=KUAISHOU_VISION',
+                'Sec-Fetch-Dest': 'empty',
+                'Sec-Fetch-Mode': 'cors',
+                'Sec-Fetch-Site': 'same-origin'
+            }
+            urllib3.disable_warnings()
+            s = requests.session()
+            # max_retries=3: retry up to 3 times
+            s.mount('http://', HTTPAdapter(max_retries=3))
+            s.mount('https://', HTTPAdapter(max_retries=3))
+            response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False,
+                              timeout=10)
+            response.close()
+            # Common.logger(log_type, crawler).info(f"get_videoList:{response.text}\n")
+            if response.status_code != 200:
+                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.text}\n")
+                return
+            elif 'data' not in response.json():
+                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
+                return
+            elif 'visionProfilePhotoList' not in response.json()['data']:
+                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
+                return
+            elif 'feeds' not in response.json()['data']['visionProfilePhotoList']:
+                Common.logger(log_type, crawler).warning(
+                    f"get_videoList_response:{response.json()['data']['visionProfilePhotoList']}\n")
+                return
+            elif len(response.json()['data']['visionProfilePhotoList']['feeds']) == 0:
+                Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
+                return
+            else:
+                feeds = response.json()['data']['visionProfilePhotoList']['feeds']
+                pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
+                # Common.logger(log_type, crawler).info(f"feeds0: {feeds}\n")
+                for i in range(len(feeds)):
+                    if 'photo' not in feeds[i]:
+                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]}\n")
+                        break
+
+                    # video_title
+                    if 'caption' not in feeds[i]['photo']:
+                        video_title = cls.random_title(log_type, crawler)
+                    elif feeds[i]['photo']['caption'].strip() == "":
+                        video_title = cls.random_title(log_type, crawler)
+                    else:
+                        video_title = cls.video_title(log_type, crawler, feeds[i]['photo']['caption'])
+
+                    if 'videoResource' not in feeds[i]['photo'] \
+                            and 'manifest' not in feeds[i]['photo'] \
+                            and 'manifestH265' not in feeds[i]['photo']:
+                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
+                        break
+                    videoResource = feeds[i]['photo']['videoResource']
+
+                    if 'h264' not in videoResource and 'hevc' not in videoResource:
+                        Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
+                        break
+
+                    # video_id
+                    if 'h264' in videoResource and 'videoId' in videoResource['h264']:
+                        video_id = videoResource['h264']['videoId']
+                    elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
+                        video_id = videoResource['hevc']['videoId']
+                    else:
+                        video_id = ""
 
-                        # play_cnt
-                        if 'viewCount' not in feeds[i]['photo']:
-                            play_cnt = 0
-                        else:
-                            play_cnt = int(feeds[i]['photo']['viewCount'])
+                    # play_cnt
+                    if 'viewCount' not in feeds[i]['photo']:
+                        play_cnt = 0
+                    else:
+                        play_cnt = int(feeds[i]['photo']['viewCount'])
 
-                        # like_cnt
-                        if 'realLikeCount' not in feeds[i]['photo']:
-                            like_cnt = 0
-                        else:
-                            like_cnt = feeds[i]['photo']['realLikeCount']
+                    # like_cnt
+                    if 'realLikeCount' not in feeds[i]['photo']:
+                        like_cnt = 0
+                    else:
+                        like_cnt = feeds[i]['photo']['realLikeCount']
 
-                        # publish_time
-                        if 'timestamp' not in feeds[i]['photo']:
-                            publish_time_stamp = 0
-                            publish_time_str = ''
-                            publish_time = 0
-                        else:
-                            publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
-                            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                            publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))
+                    # publish_time
+                    if 'timestamp' not in feeds[i]['photo']:
+                        publish_time_stamp = 0
+                        publish_time_str = ''
+                        publish_time = 0
+                    else:
+                        publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                        publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))
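+                        # publish_time ends up as the video's age in whole days, which
+                        # the publish_time rules below compare against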
 
-                        # duration
-                        if 'duration' not in feeds[i]['photo']:
-                            duration = 0
-                        else:
-                            duration = int(int(feeds[i]['photo']['duration']) / 1000)
-
-                        # video_width / video_height / video_url
-                        mapping = {}
-                        for item in ['width', 'height']:
-                            try:
-                                val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
-                            except Exception:
-                                val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
-                            except:
-                                val = ''
-                            mapping[item] = val
-                        video_width = int(mapping['width']) if mapping['width'] != '' else 0
-                        video_height = int(mapping['height']) if mapping['height'] != '' else 0
-                        # cover_url
-                        if 'coverUrl' not in feeds[i]['photo']:
-                            cover_url = ""
-                        else:
-                            cover_url = feeds[i]['photo']['coverUrl']
+                    # duration
+                    if 'duration' not in feeds[i]['photo']:
+                        duration = 0
+                    else:
+                        duration = int(int(feeds[i]['photo']['duration']) / 1000)
 
-                        # user_name / avatar_url
-                        try:
-                            user_name = feeds[i]['author']['name']
-                            avatar_url = feeds[i]['author']['headerUrl']
-                        except Exception:
-                            user_name = ''
-                            avatar_url = ''
-
-                        video_url = feeds[i]['photo']['photoUrl']
-                        video_dict = {'video_title': video_title,
-                                      'video_id': video_id,
-                                      'play_cnt': play_cnt,
-                                      'comment_cnt': 0,
-                                      'like_cnt': like_cnt,
-                                      'share_cnt': 0,
-                                      'video_width': video_width,
-                                      'video_height': video_height,
-                                      'duration': duration,
-                                      'publish_time': publish_time,
-                                      'publish_time_stamp': publish_time_stamp,
-                                      'publish_time_str': publish_time_str,
-                                      'user_name': user_name,
-                                      'user_id': out_uid,
-                                      'avatar_url': avatar_url,
-                                      'cover_url': cover_url,
-                                      'video_url': video_url,
-                                      'session': f"kuaishou{int(time.time())}"}
-
-                        rule_1 = cls.download_rule(video_dict, rule_dict_1)
-                        Common.logger(log_type, crawler).info(f"video_title:{video_title}")
-                        Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
-
-                        Common.logger(log_type, crawler).info(
-                            f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"publish_time:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
-                        Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")
-
-                        rule_2 = cls.download_rule(video_dict, rule_dict_2)
-                        Common.logger(log_type, crawler).info(
-                            f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
-                        Common.logger(log_type, crawler).info(
-                            f"publish_time:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
-                        Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
-
-                        if video_title == "" or video_url == "":
-                            Common.logger(log_type, crawler).info("无效视频\n")
-                            continue
-                        elif rule_1 is True:
-                            if download_cnt_1 < int(
-                                    rule_dict_1['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">",
-                                                                                                                  "")[
-                                        -1]):
-                                download_finished = cls.download_publish(log_type=log_type,
-                                                                         crawler=crawler,
-                                                                         strategy=strategy,
-                                                                         video_dict=video_dict,
-                                                                         rule_dict=rule_dict_1,
-                                                                         our_uid=our_uid,
-                                                                         oss_endpoint=oss_endpoint,
-                                                                         env=env,
-                                                                         machine=machine)
-                                if download_finished is True:
-                                    download_cnt_1 += 1
-                        elif rule_2 is True:
-                            if download_cnt_2 < int(
-                                    rule_dict_2['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">",
-                                                                                                                  "")[
-                                        -1]):
-                                download_finished = cls.download_publish(log_type=log_type,
-                                                                         crawler=crawler,
-                                                                         strategy=strategy,
-                                                                         video_dict=video_dict,
-                                                                         rule_dict=rule_dict_2,
-                                                                         our_uid=our_uid,
-                                                                         oss_endpoint=oss_endpoint,
-                                                                         env=env,
-                                                                         machine=machine)
-                                if download_finished is True:
-                                    download_cnt_2 += 1
-                        else:
-                            Common.logger(log_type, crawler).info("不满足下载规则\n")
-                        # Common.logger(log_type, crawler).info(f"feeds: {feeds}\n")
+                    # video_width / video_height / video_url
+                    mapping = {}
+                    for item in ['width', 'height']:
+                        try:
+                            val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
+                        except Exception:
+                            # hevc fallback, wrapped in its own try so a missing track
+                            # yields '' instead of raising out of the handler
+                            try:
+                                val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
+                            except Exception:
+                                val = ''
+                        mapping[item] = val
+                    video_width = int(mapping['width']) if mapping['width'] != '' else 0
+                    video_height = int(mapping['height']) if mapping['height'] != '' else 0
+                    # cover_url
+                    if 'coverUrl' not in feeds[i]['photo']:
+                        cover_url = ""
+                    else:
+                        cover_url = feeds[i]['photo']['coverUrl']
+
+                    # user_name / avatar_url
+                    try:
+                        user_name = feeds[i]['author']['name']
+                        avatar_url = feeds[i]['author']['headerUrl']
+                    except Exception:
+                        user_name = ''
+                        avatar_url = ''
+
+                    video_url = feeds[i]['photo']['photoUrl']
+                    video_dict = {'video_title': video_title,
+                                  'video_id': video_id,
+                                  'play_cnt': play_cnt,
+                                  'comment_cnt': 0,
+                                  'like_cnt': like_cnt,
+                                  'share_cnt': 0,
+                                  'video_width': video_width,
+                                  'video_height': video_height,
+                                  'duration': duration,
+                                  'publish_time': publish_time,
+                                  'publish_time_stamp': publish_time_stamp,
+                                  'publish_time_str': publish_time_str,
+                                  'user_name': user_name,
+                                  'user_id': out_uid,
+                                  'avatar_url': avatar_url,
+                                  'cover_url': cover_url,
+                                  'video_url': video_url,
+                                  'session': f"kuaishou{int(time.time())}"}
+
+                    rule_1 = cls.download_rule(video_dict, rule_dict_1)
+                    Common.logger(log_type, crawler).info(f"video_title:{video_title}")
+                    Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
 
-                    if pcursor == "no_more":
-                        Common.logger(log_type, crawler).info(f"作者,{out_uid},已经到底了,没有更多内容了\n")
-                        return
-                    cls.get_videoList(log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine, pcursor=pcursor)
-            except Exception as e:
-                Common.logger(log_type, crawler).error(f"get_videoList:{e}\n")
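+                    # Each rule value is a comparator string such as ">=1000"; joining
+                    # str(value) + str(rule) and passing it to eval() performs the check.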
+                    Common.logger(log_type, crawler).info(
+                        f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
+                    Common.logger(log_type, crawler).info(
+                        f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
+                    Common.logger(log_type, crawler).info(
+                        f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
+                    Common.logger(log_type, crawler).info(
+                        f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
+                    Common.logger(log_type, crawler).info(
+                        f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
+                    Common.logger(log_type, crawler).info(
+                        f"publish_time:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
+                    Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")
+
+                    rule_2 = cls.download_rule(video_dict, rule_dict_2)
+                    Common.logger(log_type, crawler).info(
+                        f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
+                    Common.logger(log_type, crawler).info(
+                        f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
+                    Common.logger(log_type, crawler).info(
+                        f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
+                    Common.logger(log_type, crawler).info(
+                        f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
+                    Common.logger(log_type, crawler).info(
+                        f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
+                    Common.logger(log_type, crawler).info(
+                        f"publish_time:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
+                    Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
+
+                    if video_title == "" or video_url == "":
+                        Common.logger(log_type, crawler).info("无效视频\n")
+                        continue
+                    elif rule_1 is True:
+                        if download_cnt_1 < int(
+                                rule_dict_1['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">",
+                                                                                                              "")[
+                                    -1]):
+                            download_finished = cls.download_publish(log_type=log_type,
+                                                                     crawler=crawler,
+                                                                     strategy=strategy,
+                                                                     video_dict=video_dict,
+                                                                     rule_dict=rule_dict_1,
+                                                                     our_uid=our_uid,
+                                                                     oss_endpoint=oss_endpoint,
+                                                                     env=env,
+                                                                     machine=machine)
+                            if download_finished is True:
+                                download_cnt_1 += 1
+                    elif rule_2 is True:
+                        if download_cnt_2 < int(
+                                rule_dict_2['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">",
+                                                                                                              "")[
+                                    -1]):
+                            download_finished = cls.download_publish(log_type=log_type,
+                                                                     crawler=crawler,
+                                                                     strategy=strategy,
+                                                                     video_dict=video_dict,
+                                                                     rule_dict=rule_dict_2,
+                                                                     our_uid=our_uid,
+                                                                     oss_endpoint=oss_endpoint,
+                                                                     env=env,
+                                                                     machine=machine)
+                            if download_finished is True:
+                                download_cnt_2 += 1
+                    else:
+                        Common.logger(log_type, crawler).info("不满足下载规则\n")
+                    # Common.logger(log_type, crawler).info(f"feeds: {feeds}\n")
+
+                if pcursor == "no_more":
+                    Common.logger(log_type, crawler).info(f"作者,{out_uid},已经到底了,没有更多内容了\n")
+                    return
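+                # Page through by recursing with the server-issued pcursor; every page
+                # adds a stack frame, so very long profiles recurse deeply.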
+                cls.get_videoList(log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine,
+                                  pcursor=pcursor)
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_videoList:{e}\n")
 
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, video_title, publish_time, env, machine):
@@ -765,16 +768,14 @@ class Follow:
 
 
 if __name__ == "__main__":
-    pass
-    Follow.get_videoList(log_type="follow",
-                         crawler="kuaishou",
-                         strategy="定向爬虫策略",
-                         our_uid="54719554",
-                         out_uid="3xv5xwkfm9y9n86",
-                         oss_endpoint="out",
-                         env="dev",
-                         machine="local")
-
-    # print(Follow.get_out_user_info("follow", "kuaishou", "3xgh4ja9be3wcaw"))
+    # KuaiShouFollow.get_videoList(log_type="follow",
+    #                              crawler="kuaishou",
+    #                              strategy="定向爬虫策略",
+    #                              our_uid="54719554",
+    #                              out_uid="3xnk3wbm3vfiha6",
+    #                              oss_endpoint="out",
+    #                              env="dev",
+    #                              machine="local")
+
+    print(KuaiShouFollow.get_out_user_info("follow", "kuaishou", "3xnk3wbm3vfiha6"))
     # print(Follow.get_out_user_info("follow", "kuaishou", "3x5wgjhfc7tx8ue"))
-