wangkun committed 1 year ago
commit 05265057a5

+ 57 - 18
common/public.py

@@ -2,16 +2,17 @@
 # @Author: wangkun
 # @Time: 2023/3/27
 from mq_http_sdk.mq_client import *
-# from mq_http_sdk.mq_consumer import *
 from mq_http_sdk.mq_exception import MQExceptionBase
-import os, sys
+import os, sys, jieba
 import time
 import random
 import difflib
 sys.path.append(os.getcwd())
 from common.common import Common
+from common.feishu import Feishu
 from common.scheduling_db import MysqlHelper
 # from common import Common
+# from feishu import Feishu
 # from scheduling_db import MysqlHelper
 
 
@@ -201,21 +202,59 @@ def download_rule(log_type, crawler, video_dict, rule_dict):
     return True
 
 
+def get_word_score(log_type, crawler, score_sheet, word):
+    """Return the weight score of a single word from the Feishu score sheet, or 0 if the word is not listed."""
+    while True:
+        score_sheet = Feishu.get_values_batch(log_type, crawler, score_sheet)
+        if score_sheet is None:
+            time.sleep(1)
+            continue
+        for i in range(1, len(score_sheet)):
+            if word == score_sheet[i][0]:
+                word_score = score_sheet[i][8]
+                return word_score
+        # Word not found in the first column of the sheet
+        return 0
+
+
+def get_title_score(log_type, crawler, stop_sheet, score_sheet, title):
+    """Segment the title with jieba, drop stop words, and sum the per-word scores from the Feishu score sheet."""
+    # Fetch the stop-word list
+    while True:
+        stop_word_list = []
+        stop_word_sheet = Feishu.get_values_batch(log_type, crawler, stop_sheet)
+        if stop_word_sheet is None:
+            time.sleep(1)
+            continue
+        for x in stop_word_sheet:
+            for y in x:
+                if y is not None:
+                    stop_word_list.append(y)
+        break
+
+    # Segment the title into words
+    cut_list = jieba.lcut(title)
+
+    # Build the word list, skipping spaces and stop words
+    cut_word_list = []
+    for cut_item in cut_list:
+        if cut_item == " ":
+            continue
+        if cut_item in stop_word_list:
+            continue
+        cut_word_list.append(cut_item)
+
+    # Look up the weight score of each word
+    score_list = []
+    for word in cut_word_list:
+        word_score = get_word_score(log_type, crawler, score_sheet, word)
+        score_list.append(word_score)
+
+    # Total weight score of the title
+    title_score = sum(score_list)
+    return title_score
+
+
 if __name__ == "__main__":
-    # print(filter_word('public', 'xiaoniangao', '小年糕', 'prod'))
-    # print(get_config_from_mysql('test', 'gongzhonghao', 'prod', 'filter'))
-    # print(filter_word('test', 'gongzhonghao', '公众号', 'prod'))
-    # task_str = "[('task_id','11')," \
-    #            "('task_name','小年糕小时榜')," \
-    #            "('source','xiaoniangao')," \
-    #            "('start_time','1681834560000')," \
-    #            "('interval','1'),('mode','hour')," \
-    #            "('rule','[{'duration':{'min':40,'max':0}},{'playCnt':{'min':4000,'max':0}},{'period':{'min':10,'max':0}},{'fans':{'min':0,'max':0}},{'videos':{'min':0,'max':0}},{'like':{'min':0,'max':0}},{'videoWidth':{'min':0,'max':0}},{'videoHeight':{'min':0,'max':0}}]')," \
-    #            "('spider_name','')," \
-    #            "('machine','')," \
-    #            "('status','0')," \
-    #            "('create_time','1681889875288')," \
-    #            "('update_time','1681889904908')," \
-    #            "('operator','王坤')]"
-    # print(task(task_str))
+    print(get_title_score("recommend", "kuaishou", "16QspO", "0usaDk", '像梦一场'))
     pass
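
Note for reviewers: a minimal sketch of what the new title-scoring path computes, with in-memory dicts standing in for the Feishu stop-word sheet ("16QspO") and score sheet ("0usaDk"). The names stop_words, word_scores and title_score below are illustrative only and not part of this commit:

    import jieba

    stop_words = {"的", "了"}               # stand-in for the stop-word sheet
    word_scores = {"梦": 0.4, "一场": 0.2}  # stand-in for column 9 of the score sheet

    def title_score(title):
        words = [w for w in jieba.lcut(title) if w != " " and w not in stop_words]
        # Words missing from the score table contribute 0, mirroring get_word_score()
        return sum(word_scores.get(w, 0) for w in words)

    print(title_score("像梦一场"))  # e.g. 0.6, if jieba splits the title into 像 / 梦 / 一场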

+ 23 - 0
kuaishou/kuaishou_main/run_ks_recommend_dev.py

@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/6/9
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.common import Common
+from kuaishou.kuaishou_recommend.kuaishou_recommend_cut_title import KuaiShouRecommendScheduling
+
+
+def kuaishou_recommend_main(log_type, crawler, env):
+    Common.logger(log_type, crawler).info("开始抓取快手推荐\n")
+    KuaiShouRecommendScheduling.get_videoList(log_type=log_type,
+                                              crawler=crawler,
+                                              our_uid=6267140,
+                                              rule_dict={"play_cnt":{"min":100000,"max":0},"like_cnt":{"min":80000,"max":0},"duration":{"min":50,"max":0},"period":{"min":30,"max":30}},
+                                              env=env)
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info("抓取一轮结束\n")
+
+
+if __name__ == "__main__":
+    kuaishou_recommend_main("recommend", "kuaishou", "dev")

+ 0 - 43
kuaishou/kuaishou_main/run_kuaishou_author_scheduling.py

@@ -1,43 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/5/25
-import argparse
-import os
-import sys
-sys.path.append(os.getcwd())
-from common.common import Common
-from common.public import task_fun
-from common.scheduling_db import MysqlHelper
-from kuaishou.kuaishou_author.kuaishou_author_scheduling import KuaishouauthorScheduling
-
-
-def main(log_type, crawler, task, env):
-    task_dict = task_fun(task)['task_dict']
-    rule_dict = task_fun(task)['rule_dict']
-    task_id = task_dict['task_id']
-    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
-    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
-    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
-    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
-    # Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
-    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]}\n')
-    KuaishouauthorScheduling.get_author_videos(log_type=log_type,
-                                               crawler=crawler,
-                                               rule_dict=rule_dict,
-                                               user_list=user_list,
-                                               env=env)
-    Common.del_logs(log_type, crawler)
-    Common.logger(log_type, crawler).info('抓取任务结束\n')
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', default='recommend')  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler', default='kuaishou')  ## 添加参数
-    parser.add_argument('--task')  ## 添加参数
-    parser.add_argument('--env', default='prod')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
-    main(log_type=args.log_type,
-         crawler=args.crawler,
-         task=args.task,
-         env=args.env)

+ 0 - 49
kuaishou/kuaishou_main/run_kuaishou_follow.py

@@ -1,49 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/2/27
-import argparse
-import os
-import sys
-import time
-
-# import time
-
-sys.path.append(os.getcwd())
-from common.common import Common
-# from common.feishu import Feishu
-from kuaishou.kuaishou_follow.kuaishou_follow import KuaiShouFollow
-
-
-def main(log_type, crawler, strategy, oss_endpoint, env, machine):
-    try:
-        Common.logger(log_type, crawler).info('开始抓取 快手 定向榜\n')
-        KuaiShouFollow.get_follow_videos(log_type=log_type,
-                                         crawler=crawler,
-                                         strategy=strategy,
-                                         oss_endpoint=oss_endpoint,
-                                         env=env,
-                                         machine=machine)
-        Common.del_logs(log_type, crawler)
-        Common.logger(log_type, crawler).info(f'抓取完一轮,休眠60秒\n')
-
-    except Exception as e:
-        Common.logger(log_type, crawler).info(f"快手定向榜异常,触发报警:{e}\n")
-        # Feishu.bot(log_type, crawler, f"快手定向榜异常,触发报警:{e}")
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', default='follow', type=str)  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler', default='kuaishou')  ## 添加参数
-    parser.add_argument('--strategy', default='定向抓取')  ## 添加参数
-    parser.add_argument('--oss_endpoint', default='inner')  ## 添加参数
-    parser.add_argument('--env', default='prod')  ## 添加参数
-    parser.add_argument('--machine', default='aliyun')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
-    # print(args)
-    main(log_type=args.log_type,
-         crawler=args.crawler,
-         strategy=args.strategy,
-         oss_endpoint=args.oss_endpoint,
-         env=args.env,
-         machine=args.machine)

+ 0 - 48
kuaishou/kuaishou_main/run_kuaishou_recommend.py

@@ -1,48 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: wangkun
-# @Time: 2023/2/27
-import argparse
-import os
-import sys
-
-# import time
-
-sys.path.append(os.getcwd())
-from common.common import Common
-# from common.feishu import Feishu
-from kuaishou.kuaishou_recommend.recommend_kuaishou import KuaiShouRecommend
-
-
-def main(log_type, crawler, strategy, oss_endpoint, env, machine):
-    try:
-        Common.logger(log_type, crawler).info('开始抓取 快手 推荐榜\n')
-        KuaiShouRecommend.get_videoList(log_type=log_type,
-                                         crawler=crawler,
-                                         strategy=strategy,
-                                         our_uid=55440319,
-                                         oss_endpoint=oss_endpoint,
-                                         env=env,
-                                         machine=machine)
-        Common.del_logs(log_type, crawler)
-        Common.logger(log_type, crawler).info('抓取完一轮\n')
-    except Exception as e:
-        Common.logger(log_type, crawler).info(f"快手推荐榜异常,触发报警:{e}\n")
-        # Feishu.bot(log_type, crawler, f"快手定向榜异常,触发报警:{e}")
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', default='follow', type=str)  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler', default='kuaishou')  ## 添加参数
-    parser.add_argument('--strategy', default='推荐抓取')  ## 添加参数
-    parser.add_argument('--oss_endpoint', default='inner')  ## 添加参数
-    parser.add_argument('--env', default='prod')  ## 添加参数
-    parser.add_argument('--machine', default='aliyun')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
-    # print(args)
-    main(log_type=args.log_type,
-         crawler=args.crawler,
-         strategy=args.strategy,
-         oss_endpoint=args.oss_endpoint,
-         env=args.env,
-         machine=args.machine)

+ 0 - 48
kuaishou/kuaishou_main/run_kuaishou_recommend_scheduling.py

@@ -1,48 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Author: lierqiang
-# @Time: 2023/4/21
-import argparse
-import os
-import random
-import sys
-sys.path.append(os.getcwd())
-from common.common import Common
-from common.public import task_fun
-from common.scheduling_db import MysqlHelper
-from kuaishou.kuaishou_recommend.kuaishou_recommend_shceduling import KuaiShouRecommendScheduling
-
-
-def main(log_type, crawler, task, env):
-    task_dict = task_fun(task)['task_dict']
-    rule_dict = task_fun(task)['rule_dict']
-    task_id = task_dict['task_id']
-    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
-    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
-    our_uid_list = []
-    for user in user_list:
-        our_uid_list.append(user["uid"])
-    our_uid = random.choice(our_uid_list)
-    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
-    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
-    Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
-    Common.logger(log_type, crawler).info('开始抓取 快手推荐榜\n')
-    KuaiShouRecommendScheduling.get_videoList(log_type=log_type,
-                                              crawler=crawler,
-                                              rule_dict=rule_dict,
-                                              our_uid=our_uid,
-                                              env=env)
-    Common.del_logs(log_type, crawler)
-    Common.logger(log_type, crawler).info('抓取任务结束\n')
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
-    parser.add_argument('--log_type', default='recommend')  ## 添加参数,注明参数类型
-    parser.add_argument('--crawler', default='kuaishou')  ## 添加参数
-    parser.add_argument('--task')  ## 添加参数
-    parser.add_argument('--env', default='prod')  ## 添加参数
-    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
-    main(log_type=args.log_type,
-         crawler=args.crawler,
-         task=args.task,
-         env=args.env)

+ 305 - 0
kuaishou/kuaishou_recommend/kuaishou_recommend_cut_title.py

@@ -0,0 +1,305 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/2/24
+import os
+import random
+import shutil
+import sys
+import time
+import string
+from hashlib import md5
+import requests
+import json
+import urllib3
+from requests.adapters import HTTPAdapter
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.scheduling_db import MysqlHelper
+from common.publish import Publish
+from common.public import random_title, get_config_from_mysql, download_rule, get_title_score
+from common.userAgent import get_random_user_agent
+
+
+class KuaiShouRecommendScheduling:
+    platform = "快手"
+
+    # Clean up the video title
+    @classmethod
+    def video_title(cls, log_type, crawler, env, title):
+        title_split1 = title.split(" #")
+        if title_split1[0] != "":
+            title1 = title_split1[0]
+        else:
+            title1 = title_split1[-1]
+
+        title_split2 = title1.split(" #")
+        if title_split2[0] != "":
+            title2 = title_split2[0]
+        else:
+            title2 = title_split2[-1]
+
+        title_split3 = title2.split("@")
+        if title_split3[0] != "":
+            title3 = title_split3[0]
+        else:
+            title3 = title_split3[-1]
+
+        video_title = title3.strip().replace("\n", "") \
+                          .replace("/", "").replace("快手", "").replace(" ", "") \
+                          .replace(" ", "").replace("&NBSP", "").replace("\r", "") \
+                          .replace("#", "").replace(".", "。").replace("\\", "") \
+                          .replace(":", "").replace("*", "").replace("?", "") \
+                          .replace("?", "").replace('"', "").replace("<", "") \
+                          .replace(">", "").replace("|", "").replace("@", "").replace('"', '').replace("'", '')[:40]
+        if video_title.replace(" ", "") == "" or video_title == "。。。" or video_title == "...":
+            return random_title(log_type, crawler, env, text='title')
+        else:
+            return video_title
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
+        for page in range(1, 101):
+            try:
+                Common.logger(log_type, crawler).info(f"正在抓取第{page}页")
+                url = "https://www.kuaishou.com/graphql"
+                payload = json.dumps({
+                    "operationName": "visionNewRecoFeed",
+                    "variables": {
+                        "dailyFirstPage": False
+                    },
+                    "query": "fragment photoContent on PhotoEntity {\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  __typename\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nfragment photoResult on PhotoResult {\n  result\n  llsid\n  expTag\n  serverExpTag\n  pcursor\n  feeds {\n    ...feedContent\n    __typename\n  }\n  webPageArea\n  __typename\n}\n\nquery visionNewRecoFeed($semKeyword: String, $semCrowd: String, $utmSource: String, $utmMedium: String, $utmCampaign: String, $dailyFirstPage: Boolean) {\n  visionNewRecoFeed(semKeyword: $semKeyword, semCrowd: $semCrowd, utmSource: $utmSource, utmMedium: $utmMedium, utmCampaign: $utmCampaign, dailyFirstPage: $dailyFirstPage) {\n    ...photoResult\n    __typename\n  }\n}\n"
+                })
+                s = string.ascii_lowercase
+                r = random.choice(s)
+                headers = {
+                    'Accept-Language': 'zh-CN,zh;q=0.9',
+                    'Connection': 'keep-alive',
+                    'Cookie': 'kpf=PC_WEB; clientid=3; did=web_7cdc486ebd1aba220455a7781d6ae5b5{r}7; kpn=KUAISHOU_VISION;'.format(
+                        r=r),
+                    'Origin': 'https://www.kuaishou.com',
+                    'Referer': 'https://www.kuaishou.com/new-reco',
+                    'Sec-Fetch-Dest': 'empty',
+                    'Sec-Fetch-Mode': 'cors',
+                    'Sec-Fetch-Site': 'same-origin',
+                    'User-Agent': get_random_user_agent('pc'),
+                    'accept': '*/*',
+                    'content-type': 'application/json',
+                    'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
+                    'sec-ch-ua-mobile': '?0',
+                    'sec-ch-ua-platform': '"macOS"'
+                }
+                urllib3.disable_warnings()
+                s = requests.session()
+                # max_retries=3: retry the request up to 3 times
+                s.mount('http://', HTTPAdapter(max_retries=3))
+                s.mount('https://', HTTPAdapter(max_retries=3))
+                response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False, timeout=10)
+                # Common.logger(log_type, crawler).info(f"response:{response.text}")
+                response.close()
+                if response.status_code != 200:
+                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.status_code}, {response.text}\n")
+                    continue
+                elif 'data' not in response.json():
+                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()}\n")
+                    continue
+                elif 'visionNewRecoFeed' not in response.json()['data']:
+                    Common.logger(log_type, crawler).warning(f"get_videoList_response:{response.json()['data']}\n")
+                    continue
+                elif 'feeds' not in response.json()['data']['visionNewRecoFeed']:
+                    Common.logger(log_type, crawler).warning(
+                        f"get_videoList_response:{response.json()['data']['visionNewRecoFeed']}\n")
+                    continue
+                elif len(response.json()['data']['visionNewRecoFeed']['feeds']) == 0:
+                    Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
+                    continue
+                else:
+                    feeds = response.json()['data']['visionNewRecoFeed']['feeds']
+                    for i in range(len(feeds)):
+                        try:
+                            video_title = feeds[i].get("photo", {}).get("caption", random_title(log_type, crawler, env, text='title'))
+                            video_title = cls.video_title(log_type, crawler, env, video_title)
+                            try:
+                                video_id = feeds[i].get("photo", {}).get("videoResource").get("h264", {}).get("videoId", "")
+                                video_width = feeds[i].get("photo", {}).get("videoResource").get("h264", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("width", 0)
+                                video_height = feeds[i].get("photo", {}).get("videoResource").get("h264", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("height", 0)
+                            except KeyError:
+                                video_id = feeds[i].get("photo", {}).get("videoResource").get("hevc", {}).get("videoId", "")
+                                video_width = feeds[i].get("photo", {}).get("videoResource").get("hevc", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("width", 0)
+                                video_height = feeds[i].get("photo", {}).get("videoResource").get("hevc", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("height", 0)
+                            publish_time_stamp = int(int(feeds[i].get('photo', {}).get('timestamp', 0)) / 1000)
+                            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+
+                            video_dict = {'video_title': video_title,
+                                          'video_id': video_id,
+                                          'play_cnt': int(feeds[i].get('photo', {}).get('viewCount', 0)),
+                                          'like_cnt': int(feeds[i].get('photo', {}).get('realLikeCount', 0)),
+                                          'comment_cnt': 0,
+                                          'share_cnt': 0,
+                                          'video_width': video_width,
+                                          'video_height': video_height,
+                                          'duration': int(int(feeds[i].get('photo', {}).get('duration', 0)) / 1000),
+                                          'publish_time_stamp': publish_time_stamp,
+                                          'publish_time_str': publish_time_str,
+                                          'user_name': feeds[i].get('author', {}).get('name', ""),
+                                          'user_id': feeds[i].get('author', {}).get('id', ""),
+                                          'avatar_url': feeds[i].get('author', {}).get('headerUrl', ""),
+                                          'cover_url': feeds[i].get('photo', {}).get('coverUrl', ""),
+                                          'video_url': feeds[i].get('photo', {}).get('photoUrl', ""),
+                                          'session': f"kuaishou-{int(time.time())}"}
+                            for k, v in video_dict.items():
+                                Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                            if video_dict["video_id"] == '' or video_dict["cover_url"] == '' or video_dict["video_url"] == '':
+                                Common.logger(log_type, crawler).info('无效视频\n')
+                            elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                                Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                            elif any(str(word) if str(word) in video_dict["video_title"] else False
+                                     for word in get_config_from_mysql(log_type=log_type,
+                                                                       source=crawler,
+                                                                       env=env,
+                                                                       text="filter",
+                                                                       action="")) is True:
+                                Common.logger(log_type, crawler).info('已中过滤词\n')
+                            elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                                Common.logger(log_type, crawler).info('视频已下载\n')
+                            else:
+                                title_score = get_title_score(log_type, crawler, "16QspO", "0usaDk", video_title)
+                                if title_score <= 0.3:
+                                    Common.logger(log_type, crawler).info(f"权重分:{title_score}<=0.3\n")
+                                    continue
+                                cls.download_publish(log_type=log_type,
+                                                     crawler=crawler,
+                                                     our_uid=our_uid,
+                                                     video_dict=video_dict,
+                                                     rule_dict=rule_dict,
+                                                     title_score=title_score,
+                                                     env=env)
+                        except Exception as e:
+                            Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"抓取第{page}页时异常:{e}\n")
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}" """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def download_publish(cls, log_type, crawler, our_uid, video_dict, rule_dict, title_score, env):
+        # Download the video
+        Common.download_method(log_type=log_type, crawler=crawler, text='video',
+                               title=video_dict['video_title'], url=video_dict['video_url'])
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # Delete the video folder
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+        except FileNotFoundError:
+            # Delete the video folder
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
+            return
+        # Download the cover image
+        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
+        # Save the video info to a txt file
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # Upload the video
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        if env == "dev":
+            oss_endpoint = "out"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy="推荐抓取策略",
+                                                      our_uid=our_uid,
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        else:
+            oss_endpoint = "inner"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy="推荐抓取策略",
+                                                      our_uid=our_uid,
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
+
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+
+        if our_video_id is None:
+            try:
+                # Delete the video folder
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
+
+        # Save the video record to the database
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                user_id,
+                                                out_user_id,
+                                                platform,
+                                                strategy,
+                                                out_video_id,
+                                                video_title,
+                                                cover_url,
+                                                video_url,
+                                                duration,
+                                                publish_time,
+                                                play_cnt,
+                                                crawler_rule,
+                                                width,
+                                                height)
+                                                values({our_video_id},
+                                                {our_uid},
+                                                "{video_dict['user_id']}",
+                                                "{cls.platform}",
+                                                "推荐抓取策略",
+                                                "{video_dict['video_id']}",
+                                                "{video_dict['video_title']}",
+                                                "{video_dict['cover_url']}",
+                                                "{video_dict['video_url']}",
+                                                {int(video_dict['duration'])},
+                                                "{video_dict['publish_time_str']}",
+                                                {int(video_dict['play_cnt'])},
+                                                '{json.dumps(rule_dict)}',
+                                                {int(video_dict['video_width'])},
+                                                {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action="")
+        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
+
+        # Write the video record to the Feishu sheet
+        Feishu.insert_columns(log_type, crawler, "Aps2BI", "ROWS", 1, 2)
+        upload_time = int(time.time())
+        values = [[title_score,
+                   our_video_id,
+                   time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                   "推荐抓取策略",
+                   str(video_dict['video_id']),
+                   video_dict['video_title'],
+                   our_video_link,
+                   video_dict['play_cnt'],
+                   video_dict['comment_cnt'],
+                   video_dict['like_cnt'],
+                   video_dict['share_cnt'],
+                   video_dict['duration'],
+                   f"{video_dict['video_width']}*{video_dict['video_height']}",
+                   video_dict['publish_time_str'],
+                   video_dict['user_name'],
+                   video_dict['user_id'],
+                   video_dict['avatar_url'],
+                   video_dict['cover_url'],
+                   video_dict['video_url']]]
+        time.sleep(0.5)
+        Feishu.update_values(log_type, crawler, "Aps2BI", "D2:Z2", values)
+        Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+
+
+if __name__ == "__main__":
+    print(get_config_from_mysql("recommend", "kuaishou", "prod", "filter"))
+    pass
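
A side note on the except KeyError fallback in get_videoList above: when a feed has no "h264" entry, .get("h264", {}).get("adaptationSet", {}) returns the empty-dict default, and indexing it with [0] is a dict key lookup that raises KeyError, which is what switches parsing over to the "hevc" fields. A toy illustration (resource is a made-up stand-in for the real videoResource payload):

    resource = {"hevc": {"videoId": "abc"}}  # hypothetical videoResource with no "h264" entry
    try:
        resource.get("h264", {}).get("adaptationSet", {})[0]  # {}[0] is a key lookup -> KeyError
    except KeyError:
        print("falling back to the hevc fields")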

+ 2 - 0
requirements.txt

@@ -26,3 +26,5 @@ requests==2.27.1
 selenium==4.9.1
 # pip3 install urllib3
 urllib3==1.26.9
+# pip3 install jieba
+jieba==0.42.1
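
jieba backs the new get_title_score() segmentation in common/public.py; a quick smoke test of the pinned version (the exact split depends on jieba's bundled dictionary):

    import jieba
    print(jieba.lcut("像梦一场"))  # e.g. ['像', '梦', '一场']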

+ 5 - 4
weixinzhishu/weixinzhishu_key/search_key_mac.py

@@ -13,7 +13,7 @@
     3.2 python ./weixinzhishu/weixinzhishu_main/search_key_mac.py
 4. 每 10 秒获取最新search_key,写入飞书: https://w42nne6hzg.feishu.cn/sheets/shtcnqhMRUGunIfGnGXMOBYiy4K?sheet=sVL74k
 """
-
+import datetime
 import json
 import os
 import sys
@@ -221,12 +221,13 @@ class SearchKey:
     @classmethod
     def main(cls, log_type, crawler):
         while True:
-            try:
+            if 11 <= datetime.datetime.now().hour <= 14:
                 cls.write_wechat_key(log_type, crawler)
                 Common.logger(log_type, crawler).info('休眠10秒\n')
                 time.sleep(10)
-            except Exception as e:
-                Common.logger(log_type, crawler).error(f"{e}\n")
+            else:
+                Common.logger(log_type, crawler).info("休眠中,获取 search_key 的时间段为: 11:00:00 - 14:59:59")
+                time.sleep(60)
 
 
 if __name__ == "__main__":