
add kuaishou recommend-feed scheduler integration

lierqiang 2 years ago
parent commit 1a57a5b299

+ 1 - 1
kuaishou/kuaishou_main/run_kuaishou_follow_scheduling.py

@@ -12,7 +12,7 @@ from common.public import task_fun


 def main(log_type, crawler, task, oss_endpoint, env):
-    # task = task_fun(task)
+    task = task_fun(task)
     try:
         Common.logger(log_type, crawler).info(f'开始抓取 {crawler}视频 定向榜\n')
         KuaiShouFollowScheduling.get_follow_videos(log_type=log_type,

+ 57 - 0
kuaishou/kuaishou_main/run_kuaishou_recommend_scheduling.py

@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# @Author: lierqiang
+# @Time: 2023/4/21
+import argparse
+import os
+import sys
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from kuaishou.kuaishou_recommend.kuaishou_recommend_shceduling import KuaiShouRecommendScheduling
+from common.public import task_fun
+
+
+def main(log_type, crawler, task, oss_endpoint, env):
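+    # task_fun (common.public) is expected to normalize the raw task argument
+    # into a dict with task_dict / rule_dict keys (see the sample payload below)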
+    task = task_fun(task)
+    try:
+        Common.logger(log_type, crawler).info(f'开始抓取 {crawler}视频 推荐榜\n')
+        KuaiShouRecommendScheduling.get_recommend_videos(log_type=log_type,
+                                                         crawler=crawler,
+                                                         task=task,
+                                                         oss_endpoint=oss_endpoint,
+                                                         env=env)
+        Common.del_logs(log_type, crawler)
+        Common.logger(log_type, crawler).info('抓取任务结束\n')
+    except Exception as e:
+        Common.logger(log_type, crawler).info(f"{crawler}视频异常,触发报警:{e}\n")
+        # Feishu.bot(log_type, crawler, f"{e}")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # create the argument parser
+    parser.add_argument('--log_type', default='author')  # default likely carried over from the author runner
+    parser.add_argument('--crawler', default='kuaishou')
+    parser.add_argument('--strategy', default='定向抓取')  # unused by main(); label likely carried over from the follow runner
+    parser.add_argument('--task')
+    parser.add_argument('--oss_endpoint', default='outer')
+    parser.add_argument('--env', default='dev')
+    # parser.add_argument('--machine')
+    args = parser.parse_args()  # parse values (can also be supplied on the command line)
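+    # sample task payload for local testing; values are copied from a dev task
+    # record ("西瓜测试4.21") rather than being meaningful defaults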
+    task = {
+        'task_dict': {'task_id': '17', 'task_name': '西瓜测试4.21', 'source': 'kuaishou', 'start_time': '1682010720000',
+                      'interval': '24', 'mode': 'author',
+                      'rule': {'duration': {'min': 40, 'max': 0}, 'play_cnt': {'min': 4000, 'max': 0},
+                               'period': {'min': 10, 'max': 0}, 'fans_cnt': {'min': 0, 'max': 0},
+                               'videos_cnt': {'min': 0, 'max': 0}, 'like_cnt': {'min': 0, 'max': 0},
+                               'width': {'min': 0, 'max': 0}, 'height': {'min': 0, 'max': 0}},
+                      'spider_name': 'run_dy_author_scheduling', 'machine': 'aliyun', 'status': '0',
+                      'create_time': '1682048632396', 'update_time': '1682048632396', 'operator': ''},
+        'rule_dict': {'duration': {'min': 0, 'max': 0}, 'play_cnt': {'min': 0, 'max': 0},
+                      'period': {'min': 0, 'max': 0}, 'fans_cnt': {'min': 0, 'max': 0}, 'videos_cnt': {'min': 0, 'max': 0},
+                      'like_cnt': {'min': 0, 'max': 0}, 'width': {'min': 0, 'max': 0},
+                      'height': {'min': 0, 'max': 0},'publish_time':{'min':0}}}
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         task=task,
+         oss_endpoint=args.oss_endpoint,
+         env=args.env)

+ 503 - 0
kuaishou/kuaishou_recommend/kuaishou_recommend_shceduling.py

@@ -0,0 +1,503 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/24
+import os
+import random
+import shutil
+import sys
+import time
+import string
+from hashlib import md5
+
+import requests
+import json
+
+import urllib3
+from requests.adapters import HTTPAdapter
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.getuser import getUser
+# from common.db import MysqlHelper
+from common.scheduling_db import MysqlHelper
+from common.publish import Publish
+from common.public import get_user_from_mysql, random_title, get_config_from_mysql
+from common.userAgent import get_random_user_agent
+
+
+class KuaiShouRecommendScheduling:
+    platform = "快手"
+    tag = "快手爬虫,推荐爬虫策略"
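+    # recommend-feed crawler: pulls Kuaishou's visionNewRecoFeed GraphQL feed,
+    # filters each item against the task rule_dict, then downloads and publishes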
+
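+    # NOTE: get_rule / filter_words / get_user_list below read Feishu sheets;
+    # the recommend flow takes its rules from task['rule_dict'] and its users
+    # from MySQL, so these helpers look like carry-overs from the follow spider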
+    @classmethod
+    def get_rule(cls, log_type, crawler):
+        try:
+            rule_sheet = Feishu.get_values_batch(log_type, crawler, "NQ6CZN")
+            rule_dict = {
+                "play_cnt": f"{rule_sheet[0][1]}{rule_sheet[0][2]}",
+                "video_width": f"{rule_sheet[1][1]}{rule_sheet[1][2]}",
+                "video_height": f"{rule_sheet[2][1]}{rule_sheet[2][2]}",
+                "like_cnt": f"{rule_sheet[5][1]}{rule_sheet[5][2]}",
+                "duration": f"{rule_sheet[3][1]}{rule_sheet[3][2]}",
+                "publish_time": f"{rule_sheet[4][1]}{rule_sheet[4][2]}",
+            }
+            return rule_dict
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"get_rule:{e}\n")
+
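+    # download gate: every metric must clear its rule_dict minimum; for size,
+    # either width or height passing is enough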
+    @classmethod
+    def download_rule(cls, video_dict, rule_dict):
+        return (video_dict['like_cnt'] >= rule_dict['like_cnt']['min']
+                and video_dict['publish_time'] >= rule_dict['publish_time']['min']
+                and video_dict['duration'] >= rule_dict['duration']['min']
+                and (video_dict['video_width'] >= rule_dict['width']['min']
+                     or video_dict['video_height'] >= rule_dict['height']['min']))
+
+    # filter-word lexicon
+    @classmethod
+    def filter_words(cls, log_type, crawler):
+        try:
+            while True:
+                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, 'HIKVvs')
+                if filter_words_sheet is None:
+                    Common.logger(log_type, crawler).warning(f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
+                    time.sleep(10)
+                    continue
+                filter_words_list = []
+                for x in filter_words_sheet:
+                    for y in x:
+                        if y is None:
+                            pass
+                        else:
+                            filter_words_list.append(y)
+                return filter_words_list
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')
+
+    # fetch the user-info list
+    @classmethod
+    def get_user_list(cls, log_type, crawler, sheetid, env):
+        try:
+            while True:
+                user_sheet = Feishu.get_values_batch(log_type, crawler, sheetid)
+                if user_sheet is None:
+                    Common.logger(log_type, crawler).warning(f"user_sheet:{user_sheet} 10秒钟后重试")
+                    time.sleep(10)
+                    continue
+                our_user_list = []
+                for i in range(1, len(user_sheet)):
+                    # for i in range(1, 2):
+                    out_uid = user_sheet[i][2]
+                    user_name = user_sheet[i][3]
+                    our_uid = user_sheet[i][6]
+                    our_user_link = user_sheet[i][7]
+                    if out_uid is None or user_name is None:
+                        Common.logger(log_type, crawler).info("空行\n")
+                    else:
+                        Common.logger(log_type, crawler).info(f"正在更新 {user_name} 用户信息\n")
+                        if our_uid is None:
+                            out_user_info = cls.get_out_user_info(log_type, crawler, out_uid)
+                            out_user_dict = {
+                                "out_uid": out_uid,
+                                "user_name": user_name,
+                                "out_avatar_url": out_user_info["out_avatar_url"],
+                                "out_create_time": '',
+                                "out_tag": '',
+                                "out_play_cnt": 0,
+                                "out_fans": out_user_info["out_fans"],
+                                "out_follow": out_user_info["out_follow"],
+                                "out_friend": 0,
+                                "out_like": 0,
+                                "platform": cls.platform,
+                                "tag": cls.tag,
+                            }
+                            our_user_dict = getUser.create_user(log_type=log_type, crawler=crawler,
+                                                                out_user_dict=out_user_dict, env=env)
+                            our_uid = our_user_dict['our_uid']
+                            our_user_link = our_user_dict['our_user_link']
+                            Feishu.update_values(log_type, crawler, sheetid, f'G{i + 1}:H{i + 1}',
+                                                 [[our_uid, our_user_link]])
+                            Common.logger(log_type, crawler).info(f'站内用户信息写入飞书成功!\n')
+                            our_user_list.append(our_user_dict)
+                        else:
+                            our_user_dict = {
+                                'out_uid': out_uid,
+                                'user_name': user_name,
+                                'our_uid': our_uid,
+                                'our_user_link': our_user_link,
+                            }
+                            our_user_list.append(our_user_dict)
+                return our_user_list
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f'get_user_list:{e}\n')
+
+    # normalize the video title: strip hashtag/@ tails and unsafe characters, cap at 40 chars
+    @classmethod
+    def video_title(cls, log_type, crawler, env, title):
+        title_split1 = title.split(" #")
+        if title_split1[0] != "":
+            title1 = title_split1[0]
+        else:
+            title1 = title_split1[-1]
+
+        title_split2 = title1.split(" #")
+        if title_split2[0] != "":
+            title2 = title_split2[0]
+        else:
+            title2 = title_split2[-1]
+
+        title_split3 = title2.split("@")
+        if title_split3[0] != "":
+            title3 = title_split3[0]
+        else:
+            title3 = title_split3[-1]
+
+        video_title = title3.strip().replace("\n", "") \
+                          .replace("/", "").replace("快手", "").replace(" ", "") \
+                          .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
+                          .replace("#", "").replace(".", "。").replace("\\", "") \
+                          .replace(":", "").replace("*", "").replace("?", "") \
+                          .replace("?", "").replace('"', "").replace("<", "") \
+                          .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
+        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
+            return random_title(log_type, crawler, env, text='title')
+        else:
+            return video_title
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, strategy, task, our_uid, oss_endpoint, env):
+        rule_dict_1 = task['rule_dict']
+        for i in range(100):
+            url = "https://www.kuaishou.com/graphql"
+
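+            # anonymous PC-web recommend feed (GraphQL operation visionNewRecoFeed);
+            # the query string appears to mirror the one issued by the kuaishou.com
+            # web client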
+            payload = json.dumps({
+                "operationName": "visionNewRecoFeed",
+                "variables": {
+                    "dailyFirstPage": False
+                },
+                "query": "fragment photoContent on PhotoEntity {\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  __typename\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nfragment photoResult on PhotoResult {\n  result\n  llsid\n  expTag\n  serverExpTag\n  pcursor\n  feeds {\n    ...feedContent\n    __typename\n  }\n  webPageArea\n  __typename\n}\n\nquery visionNewRecoFeed($semKeyword: String, $semCrowd: String, $utmSource: String, $utmMedium: String, $utmCampaign: String, $dailyFirstPage: Boolean) {\n  visionNewRecoFeed(semKeyword: $semKeyword, semCrowd: $semCrowd, utmSource: $utmSource, utmMedium: $utmMedium, utmCampaign: $utmCampaign, dailyFirstPage: $dailyFirstPage) {\n    ...photoResult\n    __typename\n  }\n}\n"
+            })
+            s = string.ascii_lowercase
+            r = random.choice(s)
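+            # splice a random lowercase letter into the web `did` cookie so
+            # successive requests don't present an identical device id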
+
+            headers = {
+                'Accept-Language': 'zh-CN,zh;q=0.9',
+                'Connection': 'keep-alive',
+                'Cookie': 'kpf=PC_WEB; clientid=3; did=web_aba004b1780f4d7174d0a2ff42da1f{r}7; kpn=KUAISHOU_VISION;'.format(
+                    r=r),
+                'Origin': 'https://www.kuaishou.com',
+                'Referer': 'https://www.kuaishou.com/new-reco',
+                'Sec-Fetch-Dest': 'empty',
+                'Sec-Fetch-Mode': 'cors',
+                'Sec-Fetch-Site': 'same-origin',
+                'User-Agent': get_random_user_agent('pc'),
+                'accept': '*/*',
+                'content-type': 'application/json',
+                'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
+                'sec-ch-ua-mobile': '?0',
+                'sec-ch-ua-platform': '"macOS"'
+            }
+
+            try:
+                urllib3.disable_warnings()
+                s = requests.session()
+                # retry up to 3 times
+                s.mount('http://', HTTPAdapter(max_retries=3))
+                s.mount('https://', HTTPAdapter(max_retries=3))
+                response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False,
+                                  timeout=10)
+                response.close()
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"get_videoList:{e}\n")
+                continue
+            # Common.logger(log_type, crawler).info(f"get_videoList:{response.text}\n")
+            if response.status_code != 200:
+                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.text}\n")
+                continue
+            elif 'data' not in response.json():
+                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
+                continue
+            elif 'visionNewRecoFeed' not in response.json()['data']:
+                Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
+                continue
+            elif 'feeds' not in response.json()['data']['visionNewRecoFeed']:
+                Common.logger(log_type, crawler).warning(
+                    f"get_videoList_response:{response.json()['data']['visionNewRecoFeed']}\n")
+                continue
+            elif len(response.json()['data']['visionNewRecoFeed']['feeds']) == 0:
+                Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
+                continue
+            else:
+                feeds = response.json()['data']['visionNewRecoFeed']['feeds']
+                for i in range(len(feeds)):
+                    if 'photo' not in feeds[i]:
+                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]}\n")
+                        continue
+
+                    # video_title
+                    if 'caption' not in feeds[i]['photo']:
+                        video_title = random_title(log_type, crawler, env, text='title')
+
+                    elif feeds[i]['photo']['caption'].strip() == "":
+                        video_title = random_title(log_type, crawler, env, text='title')
+                    else:
+                        video_title = cls.video_title(log_type, crawler, env, feeds[i]['photo']['caption'])
+
+                    # videoResource is the only source consumed below, so require it
+                    if 'videoResource' not in feeds[i]['photo']:
+                        Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
+                        continue
+                    videoResource = feeds[i]['photo']['videoResource']
+
+                    if 'h264' not in videoResource and 'hevc' not in videoResource:
+                        Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
+                        continue
+
+                    # video_id
+                    if 'h264' in videoResource and 'videoId' in videoResource['h264']:
+                        video_id = videoResource['h264']['videoId']
+                    elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
+                        video_id = videoResource['hevc']['videoId']
+                    else:
+                        video_id = ""
+
+                    # play_cnt
+                    if 'viewCount' not in feeds[i]['photo']:
+                        play_cnt = 0
+                    else:
+                        play_cnt = int(feeds[i]['photo']['viewCount'])
+
+                    # like_cnt
+                    if 'realLikeCount' not in feeds[i]['photo']:
+                        like_cnt = 0
+                    else:
+                        like_cnt = feeds[i]['photo']['realLikeCount']
+
+                    # publish_time
+                    if 'timestamp' not in feeds[i]['photo']:
+                        publish_time_stamp = 0
+                        publish_time_str = ''
+                        publish_time = 0
+                    else:
+                        publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
+                        publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                        publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))
+
+                    # duration
+                    if 'duration' not in feeds[i]['photo']:
+                        duration = 0
+                    else:
+                        duration = int(int(feeds[i]['photo']['duration']) / 1000)
+
+                    # video_width / video_height: prefer the h264 rendition,
+                    # fall back to hevc, else leave empty
+                    mapping = {}
+                    for item in ['width', 'height']:
+                        try:
+                            val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
+                        except Exception:
+                            try:
+                                val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
+                            except Exception:
+                                val = ''
+                        mapping[item] = val
+                    video_width = int(mapping['width']) if mapping['width'] != '' else 0
+                    video_height = int(mapping['height']) if mapping['height'] != '' else 0
+                    # cover_url
+                    if 'coverUrl' not in feeds[i]['photo']:
+                        cover_url = ""
+                    else:
+                        cover_url = feeds[i]['photo']['coverUrl']
+
+                    # user_name / avatar_url
+                    try:
+                        user_name = feeds[i]['author']['name']
+                        avatar_url = feeds[i]['author']['headerUrl']
+                        user_id = feeds[i]['author']['id']
+                    except Exception:
+                        user_name = ''
+                        avatar_url = ''
+                        user_id = ''
+                    video_url = feeds[i]['photo'].get('photoUrl', '')
+                    video_dict = {'video_title': video_title,
+                                  'video_id': video_id,
+                                  'play_cnt': play_cnt,
+                                  'comment_cnt': 0,
+                                  'like_cnt': like_cnt,
+                                  'share_cnt': 0,
+                                  'video_width': video_width,
+                                  'video_height': video_height,
+                                  'duration': duration,
+                                  'publish_time': publish_time,
+                                  'publish_time_stamp': publish_time_stamp,
+                                  'publish_time_str': publish_time_str,
+                                  'user_name': user_name,
+                                  'user_id': user_id,
+                                  'avatar_url': avatar_url,
+                                  'cover_url': cover_url,
+                                  'video_url': video_url,
+                                  'session': f"kuaishou{int(time.time())}"}
+
+                    if cls.download_rule(video_dict, rule_dict_1):
+                        cls.download_publish(log_type=log_type,
+                                             crawler=crawler,
+                                             strategy=strategy,
+                                             video_dict=video_dict,
+                                             rule_dict=rule_dict_1,
+                                             our_uid=our_uid,
+                                             oss_endpoint=oss_endpoint,
+                                             env=env,
+                                             )
+
+                    else:
+                        Common.logger(log_type, crawler).info("不满足下载规则\n")
+
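+    # dedup: a video counts as a repeat if its out_video_id was already crawled
+    # on this platform, or the same title + publish_time combination was recorded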
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, video_title, publish_time, env):
+        sql = f""" select * from crawler_video where (platform="{cls.platform}" and out_video_id="{video_id}") or (platform="{cls.platform}" and video_title="{video_title}" and publish_time="{publish_time}") """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env):
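+        # pipeline: title filter -> dedup check -> download video + cover ->
+        # upload via Publish -> insert into MySQL -> mirror a row to Feishu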
+        try:
+            filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
+            for filter_word in filter_words:
+                if filter_word in video_dict['video_title']:
+                    Common.logger(log_type, crawler).info(f"标题已中过滤词:{video_dict['video_title']}\n")
+                    return
+            download_finished = False
+            if cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'],
+                                video_dict['publish_time_str'], env) != 0:
+                Common.logger(log_type, crawler).info('视频已下载\n')
+            else:
+                # download the video file
+                Common.download_method(log_type=log_type, crawler=crawler, text='video',
+                                       title=video_dict['video_title'], url=video_dict['video_url'])
+                md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                    # remove the video folder
+                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                    return
+                # download the cover image
+                Common.download_method(log_type=log_type, crawler=crawler, text='cover',
+                                       title=video_dict['video_title'], url=video_dict['cover_url'])
+                # save video metadata to a local txt file
+                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+                # upload the video
+                Common.logger(log_type, crawler).info("开始上传视频...")
+                our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                          crawler=crawler,
+                                                          strategy=strategy,
+                                                          our_uid=our_uid,
+                                                          env=env,
+                                                          oss_endpoint=oss_endpoint)
+                if env == 'dev':
+                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                else:
+                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+                Common.logger(log_type, crawler).info("视频上传完成")
+
+                if our_video_id is None:
+                    Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id} 删除该视频文件夹")
+                    # remove the video folder (it is created under the md5 of the title)
+                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                    return download_finished
+
+                # persist video metadata to MySQL
+                insert_sql = f""" insert into crawler_video(video_id,
+                                                        user_id,
+                                                        out_user_id,
+                                                        platform,
+                                                        strategy,
+                                                        out_video_id,
+                                                        video_title,
+                                                        cover_url,
+                                                        video_url,
+                                                        duration,
+                                                        publish_time,
+                                                        play_cnt,
+                                                        crawler_rule,
+                                                        width,
+                                                        height)
+                                                        values({our_video_id},
+                                                        {our_uid},
+                                                        "{video_dict['user_id']}",
+                                                        "{cls.platform}",
+                                                        "{strategy}",
+                                                        "{video_dict['video_id']}",
+                                                        "{video_dict['video_title']}",
+                                                        "{video_dict['cover_url']}",
+                                                        "{video_dict['video_url']}",
+                                                        {int(video_dict['duration'])},
+                                                        "{video_dict['publish_time_str']}",
+                                                        {int(video_dict['play_cnt'])},
+                                                        '{json.dumps(rule_dict)}',
+                                                        {int(video_dict['video_width'])},
+                                                        {int(video_dict['video_height'])}) """
+                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+                Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+
+                # mirror the video row to a Feishu sheet
+                Feishu.insert_columns(log_type, 'kuaishou', "Aps2BI", "ROWS", 1, 2)
+                upload_time = int(time.time())
+                values = [[our_video_id,
+                           time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                           strategy,
+                           str(video_dict['video_id']),
+                           video_dict['video_title'],
+                           our_video_link,
+                           video_dict['play_cnt'],
+                           video_dict['comment_cnt'],
+                           video_dict['like_cnt'],
+                           video_dict['share_cnt'],
+                           video_dict['duration'],
+                           f"{video_dict['video_width']}*{video_dict['video_height']}",
+                           video_dict['publish_time_str'],
+                           video_dict['user_name'],
+                           video_dict['user_id'],
+                           video_dict['avatar_url'],
+                           video_dict['cover_url'],
+                           video_dict['video_url']]]
+                time.sleep(1)
+                Feishu.update_values(log_type, 'kuaishou', "Aps2BI", "E2:Z2", values)
+                Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+                download_finished = True
+            return download_finished
+        except Exception as e:
+            Common.logger(log_type, crawler).error(f"download_publish:{e}\n")
+
+    @classmethod
+    def get_recommend_videos(cls, log_type, crawler, task, oss_endpoint, env):
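+        # one recommend-feed pass per user configured in MySQL; each user's
+        # crawled videos are published under that user's our_uid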
+        user_list = get_user_from_mysql(log_type, crawler, crawler, env)
+        strategy = '推荐抓取策略'
+        for user in user_list:
+            spider_link = user["link"]
+            out_uid = spider_link
+            user_name = user["nick_name"]
+            our_uid = user["uid"]
+            Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
+            cls.get_videoList(log_type=log_type,
+                              crawler=crawler,
+                              strategy=strategy,
+                              task=task,
+                              our_uid=our_uid,
+                              oss_endpoint=oss_endpoint,
+                              env=env)
+
+
+if __name__ == "__main__":
+    # minimal all-zero rule_dict for a local smoke test, mirroring the payload
+    # in run_kuaishou_recommend_scheduling.py (get_videoList reads task['rule_dict'])
+    task = {'rule_dict': {'duration': {'min': 0, 'max': 0}, 'play_cnt': {'min': 0, 'max': 0},
+                          'period': {'min': 0, 'max': 0}, 'fans_cnt': {'min': 0, 'max': 0},
+                          'videos_cnt': {'min': 0, 'max': 0}, 'like_cnt': {'min': 0, 'max': 0},
+                          'width': {'min': 0, 'max': 0}, 'height': {'min': 0, 'max': 0},
+                          'publish_time': {'min': 0}}}
+    KuaiShouRecommendScheduling.get_recommend_videos('recommend', 'kuaishou', task, 'outer', 'prod')