add kuaishou_author

wangkun committed 1 year ago
Parent commit: 0347cc0439
5 changed files with 289 additions and 359 deletions
  1. README.MD (+1, -0)
  2. kuaishou/kuaishou_author/kuaishou_author.py (+228, -345)
  3. kuaishou/kuaishou_main/run_kuaishou_author_scheduling.py (+43, -0)
  4. kuaishou/logs/__init__.py (+3, -0)
  5. main/process.sh (+14, -14)

+ 1 - 0
README.MD

@@ -245,4 +245,5 @@ ps aux | grep run_kuaishou | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_gongzhonghao | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_shipinhao | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep Appium.app | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep kuaishou | grep -v grep | awk '{print $2}' | xargs kill -9
 ```

+ 228 - 345
kuaishou/kuaishou_author/kuaishou_author.py

@@ -13,17 +13,22 @@ from requests.adapters import HTTPAdapter
 sys.path.append(os.getcwd())
 from common.common import Common
 from common.feishu import Feishu
-from common.getuser import getUser
 from common.scheduling_db import MysqlHelper
 from common.publish import Publish
-from common.public import random_title, get_config_from_mysql
-from common.public import get_user_from_mysql
+from common.public import random_title, get_config_from_mysql, download_rule
 
 
 class KuaishouauthorScheduling:
     platform = "快手"
+    download_cnt = 0
+
+    @classmethod
+    def videos_cnt(cls, rule_dict):
+        videos_cnt = rule_dict.get("videos_cnt", {}).get("min", 0)
+        if videos_cnt == 0:
+            videos_cnt = 1000
+        return videos_cnt
 
-    # 处理视频标题
     @classmethod
     def video_title(cls, log_type, crawler, env, title):
         title_split1 = title.split(" #")
@@ -69,377 +74,255 @@ class KuaishouauthorScheduling:
                     "update_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(config["update_time"] / 1000))),
                     "operator": config["operator"].strip()
                 }
-                for k, v in cookie_dict.items():
-                    print(f"{k}:{type(v)}, {v}")
+                # for k, v in cookie_dict.items():
+                #     print(f"{k}:{type(v)}, {v}")
                 return cookie_dict
 
     @classmethod
-    def get_videoList(cls, log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine):
-        download_cnt_1, download_cnt_2 = 0, 0
-        rule_dict_1 = cls.get_rule(log_type, crawler, 1)
-        rule_dict_2 = cls.get_rule(log_type, crawler, 2)
-        if rule_dict_1 is None or rule_dict_2 is None:
-            Common.logger(log_type, crawler).warning(f"rule_dict is None")
-            return
-
-        url = "https://www.kuaishou.com/graphql"
-        payload = json.dumps({
-            "operationName": "visionProfilePhotoList",
-            "variables": {
-                "userId": out_uid,
-                "pcursor": "",
-                "page": "profile"
-            },
-            "query": "fragment photoContent on PhotoEntity {\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  commentCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  __typename\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n  visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n    result\n    llsid\n    webPageArea\n    feeds {\n      ...feedContent\n      __typename\n    }\n    hostName\n    pcursor\n    __typename\n  }\n}\n"
-        })
-        headers = {
-            'Accept': '*/*',
-            'Content-Type': 'application/json',
-            'Origin': 'https://www.kuaishou.com',
-            'Cookie': f'kpf=PC_WEB; clientid=3; did={cls.get_(log_type, crawler)}; kpn=KUAISHOU_VISION',
-            'Content-Length': '1260',
-            'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
-            'Host': 'www.kuaishou.com',
-            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6.1 Safari/605.1.15',
-            'Referer': 'https://www.kuaishou.com/profile/{}'.format(out_uid),
-            'Accept-Encoding': 'gzip, deflate, br',
-            'Connection': 'keep-alive'
-        }
-        response = requests.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(),
-                                 verify=False, timeout=10)
-        try:
+    def get_videoList(cls, log_type, crawler, user_dict, rule_dict, env):
+        pcursor = ""
+        while True:
+            url = "https://www.kuaishou.com/graphql"
+            payload = json.dumps({
+                "operationName": "visionProfilePhotoList",
+                "variables": {
+                    "userId": user_dict["link"].replace("https://www.kuaishou.com/profile/", ""),
+                    "pcursor": pcursor,
+                    "page": "profile"
+                },
+                "query": "fragment photoContent on PhotoEntity {\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  commentCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  __typename\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n  visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n    result\n    llsid\n    webPageArea\n    feeds {\n      ...feedContent\n      __typename\n    }\n    hostName\n    pcursor\n    __typename\n  }\n}\n"
+            })
+            headers = {
+                'Accept': '*/*',
+                'Content-Type': 'application/json',
+                'Origin': 'https://www.kuaishou.com',
+                'Cookie': cls.get_cookie(log_type, crawler, env)["cookie"],
+                'Content-Length': '1260',
+                'Accept-Language': 'zh-CN,zh-Hans;q=0.9',
+                'Host': 'www.kuaishou.com',
+                'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6.1 Safari/605.1.15',
+                'Referer': f'https://www.kuaishou.com/profile/{user_dict["link"].replace("https://www.kuaishou.com/profile/", "")}',
+                'Accept-Encoding': 'gzip, deflate, br',
+                'Connection': 'keep-alive'
+            }
+            urllib3.disable_warnings()
+            s = requests.session()
+            # max_retries=3 重试3次
+            s.mount('http://', HTTPAdapter(max_retries=3))
+            s.mount('https://', HTTPAdapter(max_retries=3))
+            response = s.post(url=url, headers=headers, data=payload, proxies=Common.tunnel_proxies(), verify=False, timeout=10)
+            response.close()
+            # Common.logger(log_type, crawler).info(f"response:{response.text}\n")
+            if response.status_code != 200:
+                Common.logger(log_type, crawler).warning(f"response:{response.text}\n")
+                return
+            elif "data" not in response.json():
+                Common.logger(log_type, crawler).warning(f"response:{response.json()}\n")
+                return
+            elif "visionProfilePhotoList" not in response.json()["data"]:
+                Common.logger(log_type, crawler).warning(f"response:{response.json()}\n")
+                return
+            elif "feeds" not in response.json()["data"]["visionProfilePhotoList"]:
+                Common.logger(log_type, crawler).warning(f"response:{response.json()}\n")
+                return
+            elif len(response.json()["data"]["visionProfilePhotoList"]["feeds"]) == 0:
+                Common.logger(log_type, crawler).warning(f"没有更多视频啦 ~\n")
+                return
+            pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
             feeds = response.json()['data']['visionProfilePhotoList']['feeds']
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f"get_videoList:{e},response:{response.text}")
-            return
-        if not feeds:
-            Common.logger(log_type, crawler).info("没有更多视频啦 ~\n")
-            return
-        # pcursor = response.json()['data']['visionProfilePhotoList']['pcursor']
-        # Common.logger(log_type, crawler).info(f"feeds0: {feeds}\n")
-        for i in range(len(feeds)):
-            try:
-                # video_title
-                if 'caption' not in feeds[i]['photo']:
-                    video_title = random_title(log_type, crawler, env, text='title')
-                elif feeds[i]['photo']['caption'].strip() == "":
-                    video_title = random_title(log_type, crawler, env, text='title')
-                else:
-                    video_title = cls.video_title(log_type, crawler, env, feeds[i]['photo']['caption'])
-
-                if 'videoResource' not in feeds[i]['photo'] \
-                        and 'manifest' not in feeds[i]['photo'] \
-                        and 'manifestH265' not in feeds[i]['photo']:
-                    Common.logger(log_type, crawler).warning(f"get_videoList:{feeds[i]['photo']}\n")
-                    break
-                videoResource = feeds[i]['photo']['videoResource']
-
-                if 'h264' not in videoResource and 'hevc' not in videoResource:
-                    Common.logger(log_type, crawler).warning(f"get_videoList:{videoResource}\n")
-                    break
-
-                # video_id
-                if 'h264' in videoResource and 'videoId' in videoResource['h264']:
-                    video_id = videoResource['h264']['videoId']
-                elif 'hevc' in videoResource and 'videoId' in videoResource['hevc']:
-                    video_id = videoResource['hevc']['videoId']
-                else:
-                    video_id = ""
-
-                # play_cnt
-                if 'viewCount' not in feeds[i]['photo']:
-                    play_cnt = 0
-                else:
-                    play_cnt = int(feeds[i]['photo']['viewCount'])
-
-                # like_cnt
-                if 'realLikeCount' not in feeds[i]['photo']:
-                    like_cnt = 0
-                else:
-                    like_cnt = feeds[i]['photo']['realLikeCount']
-
-                # publish_time
-                if 'timestamp' not in feeds[i]['photo']:
-                    publish_time_stamp = 0
-                    publish_time_str = ''
-                    publish_time = 0
-                else:
-                    publish_time_stamp = int(int(feeds[i]['photo']['timestamp']) / 1000)
-                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
-                    publish_time = int((int(time.time()) - publish_time_stamp) / (3600 * 24))
-
-                # duration
-                if 'duration' not in feeds[i]['photo']:
-                    duration = 0
-                else:
-                    duration = int(int(feeds[i]['photo']['duration']) / 1000)
-
-                # video_width / video_height / video_url
-                mapping = {}
-                for item in ['width', 'height']:
+            for i in range(len(feeds)):
+                try:
+                    if cls.download_cnt >= cls.videos_cnt(rule_dict):
+                    # if cls.download_cnt >= 2:
+                        Common.logger(log_type, crawler).info(f"已下载视频数:{cls.download_cnt}\n")
+                        return
+                    video_title = feeds[i].get("photo", {}).get("caption", random_title(log_type, crawler, env, text='title'))
+                    video_title = cls.video_title(log_type, crawler, env, video_title)
                     try:
-                        val = str(videoResource['h264']['adaptationSet'][0]['representation'][0][item])
-                    except:
-                        val = str(videoResource['hevc']['adaptationSet'][0]['representation'][0][item])
-                    mapping[item] = val
-                video_width = int(mapping['width']) if mapping['width'] else 0
-                video_height = int(mapping['height']) if mapping['height'] else 0
-                # cover_url
-                if 'coverUrl' not in feeds[i]['photo']:
-                    cover_url = ""
-                else:
-                    cover_url = feeds[i]['photo']['coverUrl']
-
-                # user_name / avatar_url
-                user_name = feeds[i]['author']['name']
-                avatar_url = feeds[i]['author']['headerUrl']
-
-                video_url = feeds[i]['photo']['photoUrl']
-                video_dict = {'video_title': video_title,
-                              'video_id': video_id,
-                              'play_cnt': play_cnt,
-                              'comment_cnt': 0,
-                              'like_cnt': like_cnt,
-                              'share_cnt': 0,
-                              'video_width': video_width,
-                              'video_height': video_height,
-                              'duration': duration,
-                              'publish_time': publish_time,
-                              'publish_time_stamp': publish_time_stamp,
-                              'publish_time_str': publish_time_str,
-                              'user_name': user_name,
-                              'user_id': out_uid,
-                              'avatar_url': avatar_url,
-                              'cover_url': cover_url,
-                              'video_url': video_url,
-                              'session': f"kuaishou{int(time.time())}"}
-
-                rule_1 = cls.download_rule(video_dict, rule_dict_1)
-                Common.logger(log_type, crawler).info(f"video_title:{video_title}")
-                Common.logger(log_type, crawler).info(f"video_id:{video_id}\n")
-
-                Common.logger(log_type, crawler).info(
-                    f"play_cnt:{video_dict['play_cnt']}{rule_dict_1['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_1['play_cnt']))}")
-                Common.logger(log_type, crawler).info(
-                    f"like_cnt:{video_dict['like_cnt']}{rule_dict_1['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_1['like_cnt']))}")
-                Common.logger(log_type, crawler).info(
-                    f"video_width:{video_dict['video_width']}{rule_dict_1['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_1['video_width']))}")
-                Common.logger(log_type, crawler).info(
-                    f"video_height:{video_dict['video_height']}{rule_dict_1['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_1['video_height']))}")
-                Common.logger(log_type, crawler).info(
-                    f"duration:{video_dict['duration']}{rule_dict_1['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_1['duration']))}")
-                Common.logger(log_type, crawler).info(
-                    f"publish_time:{video_dict['publish_time']}{rule_dict_1['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_1['publish_time']))}")
-                Common.logger(log_type, crawler).info(f"rule_1:{rule_1}\n")
-
-                rule_2 = cls.download_rule(video_dict, rule_dict_2)
-                Common.logger(log_type, crawler).info(
-                    f"play_cnt:{video_dict['play_cnt']}{rule_dict_2['play_cnt']}, {eval(str(video_dict['play_cnt']) + str(rule_dict_2['play_cnt']))}")
-                Common.logger(log_type, crawler).info(
-                    f"like_cnt:{video_dict['like_cnt']}{rule_dict_2['like_cnt']}, {eval(str(video_dict['like_cnt']) + str(rule_dict_2['like_cnt']))}")
-                Common.logger(log_type, crawler).info(
-                    f"video_width:{video_dict['video_width']}{rule_dict_2['video_width']}, {eval(str(video_dict['video_width']) + str(rule_dict_2['video_width']))}")
-                Common.logger(log_type, crawler).info(
-                    f"video_height:{video_dict['video_height']}{rule_dict_2['video_height']}, {eval(str(video_dict['video_height']) + str(rule_dict_2['video_height']))}")
-                Common.logger(log_type, crawler).info(
-                    f"duration:{video_dict['duration']}{rule_dict_2['duration']}, {eval(str(video_dict['duration']) + str(rule_dict_2['duration']))}")
-                Common.logger(log_type, crawler).info(
-                    f"publish_time:{video_dict['publish_time']}{rule_dict_2['publish_time']}, {eval(str(video_dict['publish_time']) + str(rule_dict_2['publish_time']))}")
-                Common.logger(log_type, crawler).info(f"rule_2:{rule_2}\n")
+                        video_id = feeds[i].get("photo", {}).get("videoResource").get("h264", {}).get("videoId", "")
+                        video_width = feeds[i].get("photo", {}).get("videoResource").get("h264", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("width", 0)
+                        video_height = feeds[i].get("photo", {}).get("videoResource").get("h264", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("height", 0)
+                    except KeyError:
+                        video_id = feeds[i].get("photo", {}).get("videoResource").get("hevc", {}).get("videoId", "")
+                        video_width = feeds[i].get("photo", {}).get("videoResource").get("hevc", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("width", 0)
+                        video_height = feeds[i].get("photo", {}).get("videoResource").get("hevc", {}).get("adaptationSet", {})[0].get("representation", {})[0].get("height", 0)
+                    publish_time_stamp = int(int(feeds[i].get('photo', {}).get('timestamp', 0)) / 1000)
+                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
 
-                if video_title == "" or video_url == "":
-                    Common.logger(log_type, crawler).info("无效视频\n")
-                    continue
-                elif rule_1 is True:
-                    if download_cnt_1 < int(
-                            rule_dict_1['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">",
-                                                                                                          "")[
-                                -1]):
+                    video_dict = {'video_title': video_title,
+                                  'video_id': video_id,
+                                  'play_cnt': int(feeds[i].get('photo', {}).get('viewCount', 0)),
+                                  'like_cnt': int(feeds[i].get('photo', {}).get('realLikeCount', 0)),
+                                  'comment_cnt': 0,
+                                  'share_cnt': 0,
+                                  'video_width': video_width,
+                                  'video_height': video_height,
+                                  'duration': int(int(feeds[i].get('photo', {}).get('duration', 0)) / 1000),
+                                  'publish_time_stamp': publish_time_stamp,
+                                  'publish_time_str': publish_time_str,
+                                  'user_name': feeds[i].get('author', {}).get('name', ""),
+                                  'user_id': feeds[i].get('author', {}).get('id', ""),
+                                  'avatar_url': feeds[i].get('author', {}).get('headerUrl', ""),
+                                  'cover_url': feeds[i].get('photo', {}).get('coverUrl', ""),
+                                  'video_url': feeds[i].get('photo', {}).get('photoUrl', ""),
+                                  'session': f"kuaishou-{int(time.time())}"}
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                    if video_dict["video_id"] == '' or video_dict["cover_url"] == '' or video_dict["video_url"] == '':
+                        Common.logger(log_type, crawler).info('无效视频\n')
+                    elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                    elif any(str(word) if str(word) in video_dict["video_title"] else False
+                             for word in get_config_from_mysql(log_type=log_type,
+                                                               source=crawler,
+                                                               env=env,
+                                                               text="filter",
+                                                               action="")) is True:
+                        Common.logger(log_type, crawler).info('已中过滤词\n')
+                    elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                        Common.logger(log_type, crawler).info('视频已下载\n')
+                    else:
                         cls.download_publish(log_type=log_type,
                                              crawler=crawler,
-                                             strategy=strategy,
+                                             user_dict=user_dict,
                                              video_dict=video_dict,
-                                             rule_dict=rule_dict_1,
-                                             our_uid=our_uid,
-                                             oss_endpoint=oss_endpoint,
-                                             env=env,
-                                             machine=machine)
-                        # if download_finished is True:
-                        #     download_cnt_1 += 1
-                elif rule_2 is True:
-                    if download_cnt_2 < int(
-                            rule_dict_2['download_cnt'].replace("=", "")[-1].replace("<", "")[-1].replace(">",
-                                                                                                          "")[
-                                -1]):
-                        cls.download_publish(log_type=log_type,
-                                             crawler=crawler,
-                                             strategy=strategy,
-                                             video_dict=video_dict,
-                                             rule_dict=rule_dict_2,
-                                             our_uid=our_uid,
-                                             oss_endpoint=oss_endpoint,
-                                             env=env,
-                                             machine=machine)
-                        # if download_finished is True:
-                        #     download_cnt_2 += 1
-                else:
-                    Common.logger(log_type, crawler).info("不满足下载规则\n")
-                    # Common.logger(log_type, crawler).info(f"feeds: {feeds}\n")
-            except Exception as e:
-                Common.logger(log_type, crawler).warning(f"抓取单条视频异常:{e}\n")
-
-            # if pcursor == "no_more":
-            #     Common.logger(log_type, crawler).info(f"作者,{out_uid},已经到底了,没有更多内容了\n")
-            #     return
-            # cls.get_videoList(log_type, crawler, strategy, our_uid, out_uid, oss_endpoint, env, machine,
-            #               pcursor=pcursor)
-            # time.sleep(random.randint(1, 3))
+                                             rule_dict=rule_dict,
+                                             env=env)
+                except Exception as e:
+                    Common.logger(log_type, crawler).warning(f"抓取单条视频异常:{e}\n")
 
     @classmethod
-    def repeat_video(cls, log_type, crawler, video_id, video_title, publish_time, env, machine):
-        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}" or (platform="{cls.platform}" and video_title="{video_title}" and publish_time="{publish_time}") """
-        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env, machine)
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}" """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
         return len(repeat_video)
 
     @classmethod
-    def download_publish(cls, log_type, crawler, strategy, video_dict, rule_dict, our_uid, oss_endpoint, env, machine):
-        filter_words = get_config_from_mysql(log_type, crawler, env, text='filter')
-        for filter_word in filter_words:
-            if filter_word in video_dict['video_title']:
-                Common.logger(log_type, crawler).info('标题已中过滤词:{}\n', video_dict['video_title'])
-                return
-        download_finished = False
-        if cls.repeat_video(log_type, crawler, video_dict['video_id'], video_dict['video_title'],
-                            video_dict['publish_time_str'], env, machine) != 0:
-            Common.logger(log_type, crawler).info('视频已下载\n')
-        else:
-            # 下载视频
-            Common.download_method(log_type=log_type, crawler=crawler, text='video',
-                                   title=video_dict['video_title'], url=video_dict['video_url'])
-            md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-            try:
-                if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
-                    # 删除视频文件夹
-                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                    Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
-                    return
-            except FileNotFoundError:
+    def download_publish(cls, log_type, crawler, user_dict, rule_dict, video_dict, env):
+        # 下载视频
+        Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
                 # 删除视频文件夹
                 shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                Common.logger(log_type, crawler).info("未发现视频文件,删除成功\n")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
                 return
-
-            # 下载封面
-            Common.download_method(log_type=log_type, crawler=crawler, text='cover',
-                                   title=video_dict['video_title'], url=video_dict['cover_url'])
-            # 保存视频信息至txt
-            Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
-
-            # 上传视频
-            Common.logger(log_type, crawler).info("开始上传视频...")
+        except FileNotFoundError:
+            # 删除视频文件夹
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
+            return
+        # 下载封面
+        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
+        # 保存视频信息至txt
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # 上传视频
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        if env == "dev":
+            oss_endpoint = "out"
             our_video_id = Publish.upload_and_publish(log_type=log_type,
                                                       crawler=crawler,
-                                                      strategy=strategy,
-                                                      our_uid=our_uid,
+                                                      strategy="定向抓取策略",
+                                                      our_uid=user_dict["uid"],
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        else:
+            oss_endpoint = "inner"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy="定向抓取策略",
+                                                      our_uid=user_dict["uid"],
                                                       env=env,
                                                       oss_endpoint=oss_endpoint)
-            if env == 'dev':
-                our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-            else:
-                our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-            Common.logger(log_type, crawler).info("视频上传完成")
 
-            if our_video_id is None:
-                try:
-                    Common.logger(log_type, crawler).warning(f"our_video_id:{our_video_id} 删除该视频文件夹")
-                    # 删除视频文件夹
-                    shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                    return download_finished
-                except FileNotFoundError:
-                    return download_finished
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
 
-            # 视频信息保存数据库
-            insert_sql = f""" insert into crawler_video(video_id,
-                                                    user_id,
-                                                    out_user_id,
-                                                    platform,
-                                                    strategy,
-                                                    out_video_id,
-                                                    video_title,
-                                                    cover_url,
-                                                    video_url,
-                                                    duration,
-                                                    publish_time,
-                                                    play_cnt,
-                                                    crawler_rule,
-                                                    width,
-                                                    height)
-                                                    values({our_video_id},
-                                                    {our_uid},
-                                                    "{video_dict['user_id']}",
-                                                    "{cls.platform}",
-                                                    "定向爬虫策略",
-                                                    "{video_dict['video_id']}",
-                                                    "{video_dict['video_title']}",
-                                                    "{video_dict['cover_url']}",
-                                                    "{video_dict['video_url']}",
-                                                    {int(video_dict['duration'])},
-                                                    "{video_dict['publish_time_str']}",
-                                                    {int(video_dict['play_cnt'])},
-                                                    '{json.dumps(rule_dict)}',
-                                                    {int(video_dict['video_width'])},
-                                                    {int(video_dict['video_height'])}) """
-            Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-            MysqlHelper.update_values(log_type, crawler, insert_sql, env, machine)
-            Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+        if our_video_id is None:
+            try:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
 
-            # 视频写入飞书
-            Feishu.insert_columns(log_type, 'kuaishou', "fYdA8F", "ROWS", 1, 2)
-            upload_time = int(time.time())
-            values = [[our_video_id,
-                       time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
-                       "定向榜",
-                       str(video_dict['video_id']),
-                       video_dict['video_title'],
-                       our_video_link,
-                       video_dict['play_cnt'],
-                       video_dict['comment_cnt'],
-                       video_dict['like_cnt'],
-                       video_dict['share_cnt'],
-                       video_dict['duration'],
-                       f"{video_dict['video_width']}*{video_dict['video_height']}",
-                       video_dict['publish_time_str'],
-                       video_dict['user_name'],
-                       video_dict['user_id'],
-                       video_dict['avatar_url'],
-                       video_dict['cover_url'],
-                       video_dict['video_url']]]
-            time.sleep(1)
-            Feishu.update_values(log_type, 'kuaishou', "fYdA8F", "E2:Z2", values)
-            Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
-            download_finished = True
-        return download_finished
+        # 视频信息保存数据库
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                user_id,
+                                                out_user_id,
+                                                platform,
+                                                strategy,
+                                                out_video_id,
+                                                video_title,
+                                                cover_url,
+                                                video_url,
+                                                duration,
+                                                publish_time,
+                                                play_cnt,
+                                                crawler_rule,
+                                                width,
+                                                height)
+                                                values({our_video_id},
+                                                {user_dict["uid"]},
+                                                "{video_dict['user_id']}",
+                                                "{cls.platform}",
+                                                "定向爬虫策略",
+                                                "{video_dict['video_id']}",
+                                                "{video_dict['video_title']}",
+                                                "{video_dict['cover_url']}",
+                                                "{video_dict['video_url']}",
+                                                {int(video_dict['duration'])},
+                                                "{video_dict['publish_time_str']}",
+                                                {int(video_dict['play_cnt'])},
+                                                '{json.dumps(rule_dict)}',
+                                                {int(video_dict['video_width'])},
+                                                {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action="")
+        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
+
+        # 视频写入飞书
+        Feishu.insert_columns(log_type, crawler, "fYdA8F", "ROWS", 1, 2)
+        upload_time = int(time.time())
+        values = [[our_video_id,
+                   time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                   "定向榜",
+                   str(video_dict['video_id']),
+                   video_dict['video_title'],
+                   our_video_link,
+                   video_dict['play_cnt'],
+                   video_dict['comment_cnt'],
+                   video_dict['like_cnt'],
+                   video_dict['share_cnt'],
+                   video_dict['duration'],
+                   f"{video_dict['video_width']}*{video_dict['video_height']}",
+                   video_dict['publish_time_str'],
+                   video_dict['user_name'],
+                   video_dict['user_id'],
+                   video_dict['avatar_url'],
+                   video_dict['cover_url'],
+                   video_dict['video_url']]]
+        time.sleep(1)
+        Feishu.update_values(log_type, crawler, "fYdA8F", "E2:Z2", values)
+        Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+        cls.download_cnt += 1
 
     @classmethod
-    def get_follow_videos(cls, log_type, crawler, strategy, oss_endpoint, env, machine):
-        user_list = get_user_from_mysql(log_type, crawler, crawler, env)
-        for user in user_list:
+    def get_author_videos(cls, log_type, crawler, user_list, rule_dict, env):
+        for user_dict in user_list:
             try:
-                spider_link = user["link"]
-                out_uid = spider_link.split('/')[-1]
-                user_name = user["nick_name"]
-                our_uid = user["uid"]
-                Common.logger(log_type, crawler).info(f"开始抓取 {user_name} 用户主页视频\n")
+                Common.logger(log_type, crawler).info(f"开始抓取 {user_dict['nick_name']} 主页视频")
+                cls.download_cnt = 0
                 cls.get_videoList(log_type=log_type,
                                   crawler=crawler,
-                                  strategy=strategy,
-                                  our_uid=our_uid,
-                                  out_uid=out_uid,
-                                  oss_endpoint=oss_endpoint,
-                                  env=env,
-                                  machine=machine)
+                                  user_dict=user_dict,
+                                  rule_dict=rule_dict,
+                                  env=env)
             except Exception as e:
-                Common.logger(log_type, crawler).warning(f"抓取用户{user}时异常:{e}\n")
+                Common.logger(log_type, crawler).warning(f"抓取用户{user_dict['nick_name']}主页视频时异常:{e}\n")
 
 
 if __name__ == "__main__":
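
For reference, a minimal standalone sketch of the per-author download cap this diff introduces: `rule_dict` is expected to carry `{"videos_cnt": {"min": N}}`, a missing or zero value falls back to 1000, and the class-level `download_cnt` is reset before each author is crawled. The `DownloadCap` class and `crawl_author` helper below are illustrative only, not code from the repo.

```python
# Illustrative sketch of the per-author cap added in kuaishou_author.py:
# a class-level counter is reset for every author and checked against
# rule_dict["videos_cnt"]["min"], where 0 means "no explicit cap" (falls back to 1000).

class DownloadCap:
    download_cnt = 0  # shared counter, reset before each author is crawled

    @classmethod
    def videos_cnt(cls, rule_dict):
        cap = rule_dict.get("videos_cnt", {}).get("min", 0)
        return cap if cap != 0 else 1000

    @classmethod
    def crawl_author(cls, rule_dict, feeds):
        cls.download_cnt = 0  # mirrors the reset in get_author_videos()
        for _feed in feeds:
            if cls.download_cnt >= cls.videos_cnt(rule_dict):
                break  # cap reached for this author
            # download_publish(...) runs here in the real crawler
            cls.download_cnt += 1
        return cls.download_cnt


if __name__ == "__main__":
    feeds = [{"photo": {}}] * 5
    print(DownloadCap.crawl_author({"videos_cnt": {"min": 3}}, feeds))  # -> 3
    print(DownloadCap.crawl_author({}, feeds))                          # -> 5
```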

+ 43 - 0
kuaishou/kuaishou_main/run_kuaishou_author_scheduling.py

@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/5/25
+import argparse
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.public import task_fun
+from common.scheduling_db import MysqlHelper
+from kuaishou.kuaishou_author.kuaishou_author import KuaishouauthorScheduling
+
+
+def main(log_type, crawler, task, env):
+    task_dict = task_fun(task)['task_dict']
+    rule_dict = task_fun(task)['rule_dict']
+    task_id = task_dict['task_id']
+    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
+    # Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
+    Common.logger(log_type, crawler).info(f'开始抓取 {task_dict["task_name"]}\n')
+    KuaishouauthorScheduling.get_author_videos(log_type=log_type,
+                                               crawler=crawler,
+                                               rule_dict=rule_dict,
+                                               user_list=user_list,
+                                               env=env)
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info('抓取任务结束\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  ## 新建参数解释器对象
+    parser.add_argument('--log_type', default='recommend')  ## 添加参数,注明参数类型
+    parser.add_argument('--crawler', default='kuaishou')  ## 添加参数
+    parser.add_argument('--task')  ## 添加参数
+    parser.add_argument('--env', default='prod')  ## 添加参数
+    args = parser.parse_args()  ### 参数赋值,也可以通过终端赋值
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         task=args.task,
+         env=args.env)
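
The new runner reads `--task` through `task_fun()` and expects both a `task_dict` (with `task_id` and `task_name`) and a `rule_dict`. A hypothetical payload is sketched below; the exact schema returned by `task_fun()` is not shown in this commit, so everything other than the `task_id`, `task_name`, and `videos_cnt` keys is an assumption.

```python
# Hypothetical --task payload for run_kuaishou_author_scheduling.py.
# Only task_id / task_name (read in main) and videos_cnt (read by the crawler)
# are grounded in this commit; the JSON-string format itself is assumed.
import json

task = json.dumps({
    "task_dict": {"task_id": 1, "task_name": "快手账号定向抓取"},
    "rule_dict": {"videos_cnt": {"min": 10}},
}, ensure_ascii=False)

# Roughly equivalent command line (assumed invocation):
#   python3 kuaishou/kuaishou_main/run_kuaishou_author_scheduling.py \
#       --log_type="author" --crawler="kuaishou" --task="$TASK_JSON" --env="prod"
```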

+ 3 - 0
kuaishou/logs/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/5/25

+ 14 - 14
main/process.sh

@@ -99,20 +99,20 @@ else
 fi
 
 
-# 快手定向爬虫策略
-echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 快手定向爬虫策略 进程状态" >> ${log_path}
-ps -ef | grep "run_kuaishou_follow.py" | grep -v "grep"
-if [ "$?" -eq 1 ];then
-  echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
-  if [ ${env} = "dev" ];then
-    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="author" --crawler="kuaishou" --env="dev" kuaishou/logs/nohup-follow.log
-  else
-    cd ${piaoquan_crawler_dir} && /usr/bin/sh ./main/main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="author" --crawler="kuaishou" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" kuaishou/logs/nohup-follow.log
-  fi
-  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
-else
-  echo "$(date "+%Y-%m-%d %H:%M:%S") 快手定向爬虫策略 进程状态正常" >> ${log_path}
-fi
+## 快手定向爬虫策略
+#echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 快手定向爬虫策略 进程状态" >> ${log_path}
+#ps -ef | grep "run_kuaishou_follow.py" | grep -v "grep"
+#if [ "$?" -eq 1 ];then
+#  echo "$(date "+%Y-%m-%d_%H:%M:%S") 异常停止,正在重启!" >> ${log_path}
+#  if [ ${env} = "dev" ];then
+#    cd ${piaoquan_crawler_dir} && sh main/scheduling_main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="author" --crawler="kuaishou" --env="dev" kuaishou/logs/nohup-follow.log
+#  else
+#    cd ${piaoquan_crawler_dir} && /usr/bin/sh ./main/main.sh ./kuaishou/kuaishou_main/run_kuaishou_follow.py --log_type="author" --crawler="kuaishou" --strategy="定向爬虫策略" --oss_endpoint="inner" --env="prod" --machine="aliyun" kuaishou/logs/nohup-follow.log
+#  fi
+#  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启完成!" >> ${log_path}
+#else
+#  echo "$(date "+%Y-%m-%d %H:%M:%S") 快手定向爬虫策略 进程状态正常" >> ${log_path}
+#fi
 
 ## 快手推荐爬虫策略
 #echo "$(date "+%Y-%m-%d %H:%M:%S") 正在监测 快手推荐爬虫策略 进程状态" >> ${log_path}