wangkun 2 years ago
parent
commit
6e5bdfd708

BIN
.DS_Store


+ 1 - 1
common/publish.py

@@ -351,5 +351,5 @@ class Publish:
                     Common.logger(log_type, crawler).error('file not a dir = {}'.format(fi_d))
             except Exception as e:
                 # 删除视频文件夹
-                shutil.rmtree(f"./{crawler}/videos/{fv}/")
+                # shutil.rmtree(f"./{crawler}/videos/{fv}/")
                 Common.logger(log_type, crawler).exception('upload_and_publish error', e)
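
This hunk disables the rmtree call inside the exception handler, so a failed upload no longer deletes the downloaded folder before the error is logged. If cleanup is still wanted once debugging is done, a guarded delete along these lines avoids crashing when the folder was never created (a sketch only; the directory layout is taken from the commented-out line and the helper name is hypothetical):

import os
import shutil

def remove_video_dir(crawler: str, folder: str) -> None:
    """Best-effort removal of one downloaded-video folder."""
    target = f"./{crawler}/videos/{folder}/"
    if os.path.exists(target):
        # ignore_errors=True keeps the caller alive even if a file is still open
        shutil.rmtree(target, ignore_errors=True)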

BIN
kuaishou/.DS_Store


+ 158 - 0
kuaishou/kuaishou_follow/insert_videos.py

@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/27
+import json
+import os
+import sys
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.db import MysqlHelper
+from common.feishu import Feishu
+
+
+class Insert:
+    @classmethod
+    def insert_video_from_feishu_to_mysql(cls, log_type, crawler, env, machine):
+        kuaishou_sheetid_list = ["fYdA8F", "3cd128", "31kOdu"]
+        for sheetid in kuaishou_sheetid_list:
+            kuaishou_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+            # Common.logger(log_type, crawler).info(f"kuaishou_sheet:{kuaishou_sheet}")
+            for i in range(1, len(kuaishou_sheet)):
+            # for i in range(1, 3):
+                if kuaishou_sheet[i][5] is None:
+                    continue
+                if kuaishou_sheet[i][9] is None:
+                    video_id = 0
+                else:
+                    video_id = kuaishou_sheet[i][9].replace("https://admin.piaoquantv.com/cms/post-detail/", "").replace("/info", "")
+                if video_id == "None":
+                    continue
+                video_id = int(video_id)
+                user_id = 0
+                out_user_id = str(kuaishou_sheet[i][18])
+                platform = "快手"
+                strategy = "定向爬虫策略"
+                out_video_id = str(kuaishou_sheet[i][7])
+                video_title = str(kuaishou_sheet[i][8])
+                cover_url = str(kuaishou_sheet[i][20])
+                video_url = str(kuaishou_sheet[i][21])
+                duration = int(kuaishou_sheet[i][14])
+                publish_time = str(kuaishou_sheet[i][16].replace("/", "-"))
+                play_cnt = int(kuaishou_sheet[i][10])
+                like_cnt = int(kuaishou_sheet[i][12])
+                share_cnt = int(kuaishou_sheet[i][13])
+                # collection_cnt = 0
+                comment_cnt = int(kuaishou_sheet[i][11])
+                crawler_rule = json.dumps({"play_cnt": 5000, "comment_cnt": 0, "like_cnt": 5000, "share_cnt": 1000, "duration": 40, "publish_time": 7, "video_width": 0, "video_height": 0})
+                width = int(kuaishou_sheet[i][15].split("*")[0])
+                height = int(kuaishou_sheet[i][15].split("*")[1])
+
+                # print(f"video_id:{video_id}, type:{type(video_id)}")
+                # print(f"user_id:{user_id}, type:{type(user_id)}")
+                # print(f"out_user_id:{out_user_id}, type:{type(out_user_id)}")
+                # print(f"platform:{platform}, type:{type(platform)}")
+                # print(f"strategy:{strategy}, type:{type(strategy)}")
+                # print(f"out_video_id:{out_video_id}, type:{type(out_video_id)}")
+                # print(f"video_title:{video_title}, type:{type(video_title)}")
+                # print(f"cover_url:{cover_url}, type:{type(cover_url)}")
+                # print(f"video_url:{video_url}, type:{type(video_url)}")
+                # print(f"duration:{duration}, type:{type(duration)}")
+                # print(f"publish_time:{publish_time}, type:{type(publish_time)}")
+                # print(f"play_cnt:{play_cnt}, type:{type(play_cnt)}")
+                # print(f"like_cnt:{like_cnt}, type:{type(like_cnt)}")
+                # print(f"share_cnt:{share_cnt}, type:{type(share_cnt)}")
+                # print(f"collection_cnt:{collection_cnt}, type:{type(collection_cnt)}")
+                # print(f"comment_cnt:{comment_cnt}, type:{type(comment_cnt)}")
+                # print(f"crawler_rule:{crawler_rule}, type:{type(crawler_rule)}")
+                # print(f"width:{width}, type:{type(width)}")
+                # print(f"height:{height}, type:{type(height)}\n")
+
+                select_sql = f""" select * from crawler_video where platform="{platform}" and out_video_id="{out_video_id}" """
+                # Common.logger(log_type, crawler).info(f"select_sql:{select_sql}")
+                repeat_video = MysqlHelper.get_values(log_type, crawler, select_sql, env, machine)
+                Common.logger(log_type, crawler).info(f"repeat_video:{repeat_video}")
+
+                repeat_video_id_sql = f""" select * from crawler_video where out_video_id="{out_video_id}" """
+                repeat_video_id = MysqlHelper.get_values(log_type, crawler, repeat_video_id_sql, env, machine)
+                Common.logger(log_type, crawler).info(f"repeat_video_id:{repeat_video_id}")
+
+                if repeat_video is not None and len(repeat_video) != 0:
+                    Common.logger(log_type, crawler).info(f"{video_title} 已存在数据库中\n")
+                elif repeat_video_id is not None and len(repeat_video_id) != 0:
+                    Common.logger(log_type, crawler).info(f"开始更新视频信息\n")
+                    update_sql = f""" UPDATE crawler_video SET
+                                    user_id={user_id},
+                                    out_user_id="{out_user_id}",
+                                    platform="{platform}",
+                                    strategy="{strategy}",
+                                    out_video_id="{out_video_id}",
+                                    video_title="{video_title}",
+                                    cover_url="{cover_url}",
+                                    video_url="{video_url}",
+                                    duration={duration},
+                                    publish_time="{publish_time}",
+                                    play_cnt={play_cnt},
+                                    like_cnt={like_cnt},
+                                    share_cnt={share_cnt},
+                                    comment_cnt={comment_cnt},
+                                    crawler_rule='{crawler_rule}',
+                                    width={width},
+                                    height={height}
+                                    WHERE video_id={video_id}
+                                    """
+                    Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
+                    MysqlHelper.update_values(log_type, crawler, update_sql, env, machine)
+                    Common.logger(log_type, crawler).info('视频信息更新成功!\n')
+                else:
+                    # 视频信息保存数据库
+                    insert_sql = f""" insert into crawler_video(video_id,
+                                    user_id,
+                                    out_user_id,
+                                    platform,
+                                    strategy,
+                                    out_video_id,
+                                    video_title,
+                                    cover_url,
+                                    video_url,
+                                    duration,
+                                    publish_time,
+                                    play_cnt,
+                                    like_cnt,
+                                    share_cnt,
+                                    comment_cnt,
+                                    crawler_rule,
+                                    width,
+                                    height)
+                                    values({video_id},
+                                    {user_id},
+                                    "{out_user_id}",
+                                    "{platform}",
+                                    "{strategy}",
+                                    "{out_video_id}",
+                                    "{video_title}",
+                                    "{cover_url}",
+                                    "{video_url}",
+                                    {duration},
+                                    "{publish_time}",
+                                    {play_cnt},
+                                    {like_cnt},
+                                    {share_cnt},
+                                    {comment_cnt},
+                                    '{crawler_rule}',
+                                    {width},
+                                    {height}) """
+                    Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                    MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
+                    Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+
+    @classmethod
+    def get_sheet(cls):
+        sheet = Feishu.get_values_batch("insert", "kuaishou", "fYdA8F")
+        print(sheet)
+
+
+if __name__ == "__main__":
+    Insert.insert_video_from_feishu_to_mysql("insert-prod", "kuaishou", "prod", "local")
+    # Insert.get_sheet()
+    pass
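
insert_videos.py above builds every SELECT, UPDATE and INSERT by f-string interpolation, so a title containing a double quote breaks the statement. The same three-way decision (skip when platform + out_video_id exists, update when only out_video_id exists, otherwise insert) can be written with parameterized queries; the sketch below assumes a plain pymysql connection rather than the project's MysqlHelper wrapper, and trims the column list for brevity:

import pymysql

def upsert_video(conn: pymysql.connections.Connection, v: dict) -> None:
    """Skip, update or insert one crawled video in crawler_video."""
    with conn.cursor() as cur:
        cur.execute(
            "select video_id from crawler_video where platform=%s and out_video_id=%s",
            (v["platform"], v["out_video_id"]),
        )
        if cur.fetchone():
            return  # already stored for this platform, nothing to do
        cur.execute(
            "select video_id from crawler_video where out_video_id=%s",
            (v["out_video_id"],),
        )
        if cur.fetchone():
            cur.execute(
                "update crawler_video set video_title=%s, play_cnt=%s, like_cnt=%s,"
                " share_cnt=%s, comment_cnt=%s where out_video_id=%s",
                (v["video_title"], v["play_cnt"], v["like_cnt"],
                 v["share_cnt"], v["comment_cnt"], v["out_video_id"]),
            )
        else:
            cur.execute(
                "insert into crawler_video (video_id, platform, out_video_id, video_title)"
                " values (%s, %s, %s, %s)",
                (v["video_id"], v["platform"], v["out_video_id"], v["video_title"]),
            )
    conn.commit()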

+ 466 - 54
kuaishou/kuaishou_follow/kuaishou_follow_pc.py

@@ -2,23 +2,75 @@
 # @Author: wangkun
 # @Time: 2023/2/24
 import os
+import random
+import shutil
 import sys
-
+import time
 import requests
 import json
+
 sys.path.append(os.getcwd())
 from common.common import Common
 from common.feishu import Feishu
 from common.users import Users
+from common.db import MysqlHelper
+from common.publish import Publish
 
 
 class Follow:
-    # 翻页参数
-    pcursor = ""
-
     platform = "快手"
     tag = "快手爬虫,定向爬虫策略"
 
+    @classmethod
+    def get_rule(cls, log_type, crawler, index):
+        try:
+            while True:
+                rule_sheet = Feishu.get_values_batch(log_type, crawler, "3iqG4z")
+                if rule_sheet is None:
+                    Common.logger(log_type, crawler).warning("rule_sheet is None! 10秒后重新获取")
+                    time.sleep(10)
+                    continue
+                if index == 1:
+                    rule_dict = {
+                        "play_cnt": f"{rule_sheet[1][1]}{rule_sheet[1][2]}",
+                        "video_width": f"{rule_sheet[2][1]}{rule_sheet[2][2]}",
+                        "video_height": f"{rule_sheet[3][1]}{rule_sheet[3][2]}",
+                        "like_cnt": f"{rule_sheet[4][1]}{rule_sheet[4][2]}",
+                        "duration": f"{rule_sheet[5][1]}{rule_sheet[5][2]}",
+                        "download_cnt": f"{rule_sheet[6][1]}{rule_sheet[6][2]}",
+                        "publish_time": f"{rule_sheet[7][1]}{rule_sheet[7][2]}",
+                    }
+                    # for k, v in rule_dict.items():
+                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    return rule_dict
+                elif index == 2:
+                    rule_dict = {
+                        "play_cnt": f"{rule_sheet[9][1]}{rule_sheet[9][2]}",
+                        "video_width": f"{rule_sheet[10][1]}{rule_sheet[10][2]}",
+                        "video_height": f"{rule_sheet[11][1]}{rule_sheet[11][2]}",
+                        "like_cnt": f"{rule_sheet[12][1]}{rule_sheet[12][2]}",
+                        "duration": f"{rule_sheet[13][1]}{rule_sheet[13][2]}",
+                        "download_cnt": f"{rule_sheet[14][1]}{rule_sheet[14][2]}",
+                        "publish_time": f"{rule_sheet[15][1]}{rule_sheet[15][2]}",
+                    }
+                    # for k, v in rule_dict.items():
+                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
+                    return rule_dict
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
+
+    @classmethod
+    def download_rule(cls, video_dict, rule_dict):
+        if eval(f"{video_dict['play_cnt']}{rule_dict['play_cnt']}") is True\
+                and eval(f"{video_dict['video_width']}{rule_dict['video_width']}") is True\
+                and eval(f"{video_dict['video_height']}{rule_dict['video_height']}") is True\
+                and eval(f"{video_dict['like_cnt']}{rule_dict['like_cnt']}") is True\
+                and eval(f"{video_dict['duration']}{rule_dict['duration']}") is True\
+                and eval(f"{video_dict['publish_time']}{rule_dict['publish_time']}") is True:
+            return True
+        else:
+            return False
+
     # 过滤词库
     @classmethod
     def filter_words(cls, log_type, crawler):
@@ -39,6 +91,26 @@ class Follow:
         except Exception as e:
             Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')
 
+    # 万能标题
+    @classmethod
+    def random_title(cls, log_type, crawler):
+        try:
+            while True:
+                random_title_sheet = Feishu.get_values_batch(log_type, crawler, '0DiyXe')
+                if random_title_sheet is None:
+                    Common.logger(log_type, crawler).warning(f"random_title_sheet:{random_title_sheet} 10秒钟后重试")
+                    time.sleep(10)
+                    continue
+                random_title_list = []
+                for x in random_title_sheet:
+                    for y in x:
+                        if y is None:
+                            pass
+                        else:
+                            random_title_list.append(y)
+                return random.choice(random_title_list)
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'random_title:{e}\n')
+
     # 获取站外用户信息
     @classmethod
     def get_out_user_info(cls, log_type, crawler, out_uid):
@@ -115,7 +187,7 @@ class Follow:
         except Exception as e:
             Common.logger(log_type, crawler).error(f"get_out_user_info:{e}\n")
 
-    # 获取用户信息(字典格式). 注意:部分 user_id 字符类型是 int / str
+    # 获取用户信息列表
     @classmethod
     def get_user_list(cls, log_type, crawler, sheetid, env, machine):
         try:
@@ -126,7 +198,7 @@ class Follow:
                     continue
                 our_user_list = []
                 # for i in range(1, len(user_sheet)):
-                for i in range(1, 3):
+                for i in range(1, 2):
                     out_uid = user_sheet[i][2]
                     user_name = user_sheet[i][3]
                     our_uid = user_sheet[i][6]
@@ -171,59 +243,399 @@ class Follow:
         except Exception as e:
             Common.logger(log_type, crawler).error(f'get_user_list:{e}\n')
 
+    # 处理视频标题
     @classmethod
-    def get_videoList(cls, log_type, crawler, out_uid):
-        url = "https://www.kuaishou.com/graphql"
-        payload = json.dumps({
-            "operationName": "visionProfilePhotoList",
-            "variables": {
-                "userId": out_uid,
-                "pcursor": cls.pcursor,
-                "page": "profile"
-            },
-            "query": "fragment photoContent on PhotoEntity {\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  __typename\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n  visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n    result\n    llsid\n    webPageArea\n    feeds {\n      ...feedContent\n      __typename\n    }\n    hostName\n    pcursor\n    __typename\n  }\n}\n"
-        })
-        headers = {
-            'Cookie': 'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId=1268646616; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABOLgYYcIJ5ilxU46Jc-HLWThY8sppX3V0htC_KhSGOzAjP2hAOdegzfkZGAxS5rf6rCBS487FkxfYzLkV__I6b1lK16rDjvv94Kkoo4z7mgf8y8rFgWoqrp81JAWTtx00y-wrc1XXPf9RAVQoET70wWaeNG2r5bxtZEiNwpK_zPi0ZdUo0BW13dFKfVssAy2xKYh0UlJ8VSd_vBvyMKSxVBoSf061Kc3w5Nem7YdpVBmH39ceIiBpiGioLzbZqlHiSbwkH_LhUhNXz3o7LITj098KUytk2CgFMAE; kuaishou.server.web_ph=f1033957981996a7d50e849a9ded4cf4adff; kpn=KUAISHOU_VISION',
-            'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
-            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
-            'content-type': 'application/json',
-            # 'accept': '*/*',
-            # 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
-            # 'Cache-Control': 'no-cache',
-            # 'Connection': 'keep-alive',
-            # 'Origin': 'https://www.kuaishou.com',
-            # 'Pragma': 'no-cache',
-            # 'Sec-Fetch-Dest': 'empty',
-            # 'Sec-Fetch-Mode': 'cors',
-            # 'Sec-Fetch-Site': 'same-origin',
-            # 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
-            # 'sec-ch-ua-mobile': '?0',
-            # 'sec-ch-ua-platform': '"macOS"'
-        }
-        response = requests.post(url=url, headers=headers, data=payload)
-        if response.status_code != 200:
-            Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.text}\n")
-            return
-        elif 'data' not in response.json():
-            Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
-            return
-        elif 'visionProfilePhotoList' not in response.json()['data']:
-            Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
-            return
-        elif 'feeds' not in response.json()['data']['visionProfilePhotoList']:
-            Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']['visionProfilePhotoList']}\n")
-            return
-        elif len(response.json()['data']['visionProfilePhotoList']['feeds']) == 0:
-            Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
-            return
+    def video_title(cls, log_type, crawler, title):
+        title_split1 = title.split(" #")
+        if title_split1[0] != "":
+            title1 = title_split1[0]
+        else:
+            title1 = title_split1[-1]
+
+        title_split2 = title1.split(" #")
+        if title_split2[0] != "":
+            title2 = title_split2[0]
+        else:
+            title2 = title_split2[-1]
+
+        title_split3 = title2.split("@")
+        if title_split3[0] != "":
+            title3 = title_split3[0]
         else:
-            feeds = response.json()['data']['visionProfilePhotoList']['feeds']
+            title3 = title_split3[-1]
+
+        video_title = title3.strip().replace("\n", "") \
+                          .replace("/", "").replace("快手", "").replace(" ", "") \
+                          .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
+                          .replace("#", "").replace(".", "。").replace("\\", "") \
+                          .replace(":", "").replace("*", "").replace("?", "") \
+                          .replace("?", "").replace('"', "").replace("<", "") \
+                          .replace(">", "").replace("|", "").replace("@", "")[:40]
+        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
+            return cls.random_title(log_type, crawler)
+        else:
+            return video_title
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine):
+        download_cnt_1, download_cnt_2 = 0, 0
+        pcursor = ""
+
+        while True:
+            rule_dict_1 = cls.get_rule(log_type, crawler, 1)
+            rule_dict_2 = cls.get_rule(log_type, crawler, 2)
+            if rule_dict_1 is None or rule_dict_2 is None:
+                Common.logger(log_type, crawler).warning(f"rule_dict is None, 10秒后重试")
+                time.sleep(10)
+            else:
+                break
+
+        while True:
+            if download_cnt_1 >= int(rule_dict_1['download_cnt'].replace("=", "").replace("<", "").replace(">", "")) and download_cnt_2 >= int(rule_dict_2['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
+                Common.logger(log_type, crawler).info(f"规则1已下载{download_cnt_1}条视频,规则2已下载{download_cnt_2}条视频\n")
+                return
+            url = "https://www.kuaishou.com/graphql"
+            payload = json.dumps({
+                "operationName": "visionProfilePhotoList",
+                "variables": {
+                    "userId": out_uid,
+                    "pcursor": pcursor,
+                    "page": "profile"
+                },
+                "query": "fragment photoContent on PhotoEntity {\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  __typename\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n  visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n    result\n    llsid\n    webPageArea\n    feeds {\n      ...feedContent\n      __typename\n    }\n    hostName\n    pcursor\n    __typename\n  }\n}\n"
+            })
+            headers = {
+                'Cookie': 'kpf=PC_WEB; clientid=3; did=web_e2901e1c5a13c60af81ba88bc7a3ee24; userId=1268646616; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABOLgYYcIJ5ilxU46Jc-HLWThY8sppX3V0htC_KhSGOzAjP2hAOdegzfkZGAxS5rf6rCBS487FkxfYzLkV__I6b1lK16rDjvv94Kkoo4z7mgf8y8rFgWoqrp81JAWTtx00y-wrc1XXPf9RAVQoET70wWaeNG2r5bxtZEiNwpK_zPi0ZdUo0BW13dFKfVssAy2xKYh0UlJ8VSd_vBvyMKSxVBoSf061Kc3w5Nem7YdpVBmH39ceIiBpiGioLzbZqlHiSbwkH_LhUhNXz3o7LITj098KUytk2CgFMAE; kuaishou.server.web_ph=f1033957981996a7d50e849a9ded4cf4adff; kpn=KUAISHOU_VISION',
+                'Referer': f'https://www.kuaishou.com/profile/{out_uid}',
+                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.41',
+                'content-type': 'application/json',
+                # 'accept': '*/*',
+                # 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
+                # 'Cache-Control': 'no-cache',
+                # 'Connection': 'keep-alive',
+                # 'Origin': 'https://www.kuaishou.com',
+                # 'Pragma': 'no-cache',
+                # 'Sec-Fetch-Dest': 'empty',
+                # 'Sec-Fetch-Mode': 'cors',
+                # 'Sec-Fetch-Site': 'same-origin',
+                # 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+                # 'sec-ch-ua-mobile': '?0',
+                # 'sec-ch-ua-platform': '"macOS"'
+            }
+            response = requests.post(url=url, headers=headers, data=payload)
+            if response.status_code != 200:
+                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.text}\n")
+                return
+            elif 'data' not in response.json():
+                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
+                return
+            elif 'visionProfilePhotoList' not in response.json()['data']:
+                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
+                return
+            elif 'feeds' not in response.json()['data']['visionProfilePhotoList']:
+                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']['visionProfilePhotoList']}\n")
+                return
+            elif len(response.json()['data']['visionProfilePhotoList']['feeds']) == 0:
+                Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
+                return
+            else:
+                feeds = response.json()['data']['visionProfilePhotoList']['feeds']
+                pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
+                for i in range(len(feeds)):
+                    if 'photo' not in feeds[i]:
+                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]}\n")
+                        break
+
+                    # video_title
+                    if 'caption' not in feeds[i]['photo']:
+                        video_title = cls.random_title(log_type, crawler)
+                    elif feeds[i]['photo']['caption'].strip() == "":
+                        video_title = cls.random_title(log_type, crawler)
+                    else:
+                        video_title = cls.video_title(log_type, crawler, feeds[i]['photo']['caption'])
+
+                    if 'videoResource' not in feeds[i]['photo'] \
+                            and 'manifest' not in feeds[i]['photo']\
+                            and 'manifestH265'not in feeds[i]['photo']:
+                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
+                        break
+                    videoResource = feeds[i]['photo']['videoResource']
+
+                    if 'h264' not in videoResource and 'hevc' not in videoResource:
+                        Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
+                        break
+
+                    # video_id
+                    if 'h264' in videoResource and 'videoId' in videoResource['h264']:
+                        video_id = videoResource['h264']['videoId']
+                    elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
+                        video_id = videoResource['hevc']['videoId']
+                    else:
+                        video_id = ""
+
+                    # play_cnt
+                    if 'viewCount' not in feeds[i]['photo']:
+                        play_cnt = 0
+                    else:
+                        play_cnt = int(feeds[i]['photo']['viewCount'])
+
+                    # like_cnt
+                    if 'realLikeCount' not in feeds[i]['photo']:
+                        like_cnt = 0
+                    else:
+                        like_cnt = feeds[i]['photo']['realLikeCount']
+
+                    # publish_time
+                    if 'timestamp' not in feeds[i]['photo']:
+                        publish_time_stamp = 0
+                        publish_time_str = ''
+                        publish_time = 0
+                    else:
+                        publish_time_stamp = int(int(feeds[i]['photo']['timestamp'])/1000)
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                        publish_time = int((int(time.time()) - publish_time_stamp) / (3600*24))
+
+                    # duration
+                    if 'duration' not in feeds[i]['photo']:
+                        duration = 0
+                    else:
+                        duration = int(int(feeds[i]['photo']['duration'])/100)
+
+                    # video_width / video_height / video_url
+                    mapping = {}
+                    for item in ['width', 'height', 'url']:
+                        try:
+                            val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
+                        except Exception:
+                            # fall back to hevc when the h264 entry is missing; use '' if both fail
+                            try:
+                                val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
+                            except Exception:
+                                val = ''
+                        mapping[item] = val
+                    video_width = int(mapping['width']) if mapping['width'] != '' else 0
+                    video_height = int(mapping['height']) if mapping['height'] != '' else 0
+                    video_url = mapping['url']
+
+                    # cover_url
+                    if 'coverUrl' not in feeds[i]['photo']:
+                        cover_url = ""
+                    else:
+                        cover_url = feeds[i]['photo']['coverUrl']
+
+                    # user_name / avatar_url
+                    try:
+                        user_name = feeds[i]['author']['name']
+                        avatar_url = feeds[i]['author']['headerUrl']
+                    except Exception:
+                        user_name = ''
+                        avatar_url = ''
+
+                    video_dict = {'video_title': video_title,
+                                  'video_id': video_id,
+                                  'play_cnt': play_cnt,
+                                  'comment_cnt': 0,
+                                  'like_cnt': like_cnt,
+                                  'share_cnt': 0,
+                                  'video_width': video_width,
+                                  'video_height': video_height,
+                                  'duration': duration,
+                                  'publish_time': publish_time,
+                                  'publish_time_stamp': publish_time_stamp,
+                                  'publish_time_str': publish_time_str,
+                                  'user_name': user_name,
+                                  'user_id': out_uid,
+                                  'avatar_url': avatar_url,
+                                  'cover_url': cover_url,
+                                  'video_url': video_url,
+                                  'session': f"kuaishou{int(time.time())}"}
+
+                    rule_1 = cls.download_rule(video_dict, rule_dict_1)
+                    Common.logger(log_type, crawler).info(f"video_title:{video_title}")
+                    Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
+
+                    Common.logger(log_type, crawler).info(f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
+                    Common.logger(log_type, crawler).info(f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
+                    Common.logger(log_type, crawler).info(f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
+                    Common.logger(log_type, crawler).info(f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
+                    Common.logger(log_type, crawler).info(f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
+                    Common.logger(log_type, crawler).info(f"publish_time_str:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
+                    Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")
+
+                    rule_2 = cls.download_rule(video_dict, rule_dict_2)
+                    Common.logger(log_type, crawler).info(f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
+                    Common.logger(log_type, crawler).info(f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
+                    Common.logger(log_type, crawler).info(f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
+                    Common.logger(log_type, crawler).info(f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
+                    Common.logger(log_type, crawler).info(f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
+                    Common.logger(log_type, crawler).info(f"publish_time_str:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
+                    Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
+
+                    if video_title == "" or video_url == "":
+                        Common.logger(log_type, crawler).info("无效视频\n")
+                        break
+                    elif rule_1 is True:
+                        if download_cnt_1 < int(rule_dict_1['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
+                            cls.download_publish(log_type=log_type,
+                                                 crawler=crawler,
+                                                 strategy=strategy,
+                                                 video_dict=video_dict,
+                                                 rule_dict=rule_dict_1,
+                                                 our_uid=our_uid,
+                                                 oss_endpoint=oss_endpoint,
+                                                 env=env,
+                                                 machine=machine)
+                            download_cnt_1 += 1
+                    elif rule_2 is True:
+                        if download_cnt_2 < int(rule_dict_2['download_cnt'].replace("=", "").replace("<", "").replace(">", "")):
+                            cls.download_publish(log_type=log_type,
+                                                 crawler=crawler,
+                                                 strategy=strategy,
+                                                 video_dict=video_dict,
+                                                 rule_dict=rule_dict_2,
+                                                 our_uid=our_uid,
+                                                 oss_endpoint=oss_endpoint,
+                                                 env=env,
+                                                 machine=machine)
+                            download_cnt_2 += 1
+                if pcursor == "no_more":
+                    Common.logger(log_type, crawler).info("已经到底了,没有更多内容了\n")
+                    return
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env, machine):
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
+        return len(repeat_video)
+
+    @classmethod
+    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
+        if cls.repeat_video(log_type, crawler, video_dict['video_id'], env, machine) != 0:
+            Common.logger(log_type, crawler).info('视频已下载\n')
+        elif video_dict['video_id'] in [x for y in Feishu.get_values_batch(log_type, crawler, "3cd128") for x in y]:
+            Common.logger(log_type, crawler).info('视频已下载\n')
+        elif any(word in video_dict['video_title'] for word in cls.filter_words(log_type, crawler) if word):
+            Common.logger(log_type, crawler).info('标题已中过滤词\n')
+        else:
+            # 下载封面
+            Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
+            # 下载视频
+            Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
+            # 保存视频信息至txt
+            Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+            # 上传视频
+            Common.logger(log_type, crawler).info("开始上传视频...")
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy=strategy,
+                                                      our_uid=our_uid,
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
+            if env == 'dev':
+                our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+            else:
+                our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+            Common.logger(log_type, crawler).info("视频上传完成")
+
+            if our_video_id is None:
+                Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id} 删除该视频文件夹")
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+                return
+
+            # 视频信息保存数据库
+            insert_sql = f""" insert into crawler_video(video_id,
+                                                    user_id,
+                                                    out_user_id,
+                                                    platform,
+                                                    strategy,
+                                                    out_video_id,
+                                                    video_title,
+                                                    cover_url,
+                                                    video_url,
+                                                    duration,
+                                                    publish_time,
+                                                    play_cnt,
+                                                    crawler_rule,
+                                                    width,
+                                                    height)
+                                                    values({our_video_id},
+                                                    {our_uid},
+                                                    "{video_dict['user_id']}",
+                                                    "{cls.platform}",
+                                                    "定向爬虫策略",
+                                                    "{video_dict['video_id']}",
+                                                    "{video_dict['video_title']}",
+                                                    "{video_dict['cover_url']}",
+                                                    "{video_dict['video_url']}",
+                                                    {int(video_dict['duration'])},
+                                                    "{video_dict['publish_time_str']}",
+                                                    {int(video_dict['play_cnt'])},
+                                                    '{json.dumps(rule_dict)}',
+                                                    {int(video_dict['video_width'])},
+                                                    {int(video_dict['video_height'])}) """
+            Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+            MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
+            Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+
+            # 视频写入飞书
+            Feishu.insert_columns(log_type, 'kuaishou', "fYdA8F", "ROWS", 1, 2)
+            upload_time = int(time.time())
+            values = [[our_uid,
+                       time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                       "定向榜",
+                       str(video_dict['video_id']),
+                       video_dict['video_title'],
+                       our_video_link,
+                       video_dict['play_cnt'],
+                       video_dict['comment_cnt'],
+                       video_dict['like_cnt'],
+                       video_dict['share_cnt'],
+                       video_dict['duration'],
+                       f"{video_dict['video_width']}*{video_dict['video_height']}",
+                       video_dict['publish_time_str'],
+                       video_dict['user_name'],
+                       video_dict['user_id'],
+                       video_dict['avatar_url'],
+                       video_dict['cover_url'],
+                       video_dict['video_url']]]
+            time.sleep(1)
+            Feishu.update_values(log_type, 'kuaishou', "fYdA8F", "E2:Z2", values)
+            Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+
+    @classmethod
+    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
+        user_list = cls.get_user_list(log_type=log_type, crawler=crawler, sheetid="bTSzxW", env=env, machine=machine)
+        for user in user_list:
+            out_uid = user["out_uid"]
+            user_name = user["user_name"]
+            our_uid = user["our_uid"]
+            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
+            cls.get_videoList(log_type=log_type,
+                              crawler=crawler,
+                              strategy=strategy,
+                              our_uid=our_uid,
+                              out_uid=out_uid,
+                              oss_endpoint=oss_endpoint,
+                              env=env,
+                              machine=machine)
+            cls.pcursor = ""
+            cls.download_cnt = 0
+            time.sleep(3)
 
 
 if __name__ == "__main__":
-    print(Follow.filter_words("follow", "kuaishou"))
+    # print(Follow.filter_words("follow", "kuaishou"))
+    # print(Follow.random_title("follow", "kuaishou"))
     # Follow.get_user_list("follow", "kuaishou", "2OLxLr", "dev", "local")
-    # Follow.get_videoList("3xgh4ja9be3wcaw")
+    Follow.get_videoList(log_type="follow",
+                         crawler="kuaishou",
+                         strategy="定向爬虫策略",
+                         our_uid="6282431",
+                         out_uid="3xws7ydsnmp5mgq",
+                         oss_endpoint="out",
+                         env="dev",
+                         machine="local")
+    # Follow.get_rule("follow", "kuaishou", 1)
+    # Follow.get_rule("follow", "kuaishou", 2)
 
     pass
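
download_rule in this file concatenates a value from the video with an operator string from the Feishu rule sheet and hands the result to eval, so a malformed cell such as ">=5000; import os" would be executed as Python. A lookup-table version of the same check is sketched below; the "<op><number>" rule format is inferred from get_rule, and the function names are illustrative only:

import operator
import re

# maps the operator prefix of a rule string such as ">=5000" to a comparison function
_OPS = {">=": operator.ge, "<=": operator.le, "==": operator.eq,
        "!=": operator.ne, ">": operator.gt, "<": operator.lt}

def check_rule(value: int, rule: str) -> bool:
    """Return True when `value` satisfies a rule like '>=5000' or '<=60'."""
    match = re.match(r"(>=|<=|==|!=|>|<)\s*(-?\d+)", rule.strip())
    if match is None:
        return False  # unparseable cell: treat as not passing
    op, threshold = match.group(1), int(match.group(2))
    return _OPS[op](value, threshold)

def download_rule(video_dict: dict, rule_dict: dict) -> bool:
    keys = ("play_cnt", "video_width", "video_height",
            "like_cnt", "duration", "publish_time")
    return all(check_rule(int(video_dict[k]), rule_dict[k]) for k in keys)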

BIN
kuaishou/videos/🧨🧨新年的炮竹声再响也没有我给大家拜年的心意响来给大家拜年啦🧧🧧/image.jpg


BIN
kuaishou/videos/🧨🧨新年的炮竹声再响也没有我给大家拜年的心意响来给大家拜年啦🧧🧧/video.mp4


+ 13 - 7
weixinzhishu/weixinzhishu_main/search_key.py

@@ -169,13 +169,19 @@ class Searchkey:
                 cls.start_wechat(log_type, crawler)
                 cls.get_search_key(log_type, crawler)
             else:
-                Common.logger(log_type, crawler).info(f'已获取 search_key,openid:{search_key}')
-                Feishu.insert_columns(log_type, crawler, 'sVL74k', 'ROWS', 1, 2)
-                time.sleep(1)
-                time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time())))
-                Feishu.update_values(log_type, crawler, 'sVL74k', 'A2:Z2', [[time_str, search_key[0], search_key[-1]]])
-                cls.del_search_key_from_feishu(log_type, crawler)
-                Common.logger(log_type, crawler).info(f"search_key:{search_key}写入飞书表成功\n")
+                while True:
+                    try:
+                        Common.logger(log_type, crawler).info(f'已获取 search_key,openid:{search_key}')
+                        Feishu.insert_columns(log_type, crawler, 'sVL74k', 'ROWS', 1, 2)
+                        time.sleep(1)
+                        time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time())))
+                        Feishu.update_values(log_type, crawler, 'sVL74k', 'A2:Z2', [[time_str, search_key[0], search_key[-1]]])
+                        cls.del_search_key_from_feishu(log_type, crawler)
+                        Common.logger(log_type, crawler).info(f"search_key:{search_key}写入飞书表成功\n")
+                        break
+                    except Exception as e:
+                        Common.logger(log_type, crawler).error(f"{e}\n")
+                        time.sleep(1)
                 return
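
The new while True block retries the Feishu write every second until it succeeds, which can spin forever if the sheet stays unreachable. A bounded retry helper with exponential backoff is one alternative; the sketch below is generic and not tied to the project's Feishu wrapper:

import time
from typing import Callable, TypeVar

T = TypeVar("T")

def retry(func: Callable[[], T], attempts: int = 5, delay: float = 1.0) -> T:
    """Call func until it returns, doubling the pause after each failure."""
    for attempt in range(1, attempts + 1):
        try:
            return func()
        except Exception as e:
            if attempt == attempts:
                raise  # out of attempts, surface the last error
            print(f"attempt {attempt} failed: {e}, retrying in {delay:.0f}s")
            time.sleep(delay)
            delay *= 2

# usage, wrapping the write shown above:
# retry(lambda: Feishu.update_values(log_type, crawler, 'sVL74k', 'A2:Z2',
#                                    [[time_str, search_key[0], search_key[-1]]]))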