wangkun 2 years ago
parent
commit
56818a5cb6
1 changed file with 107 additions and 107 deletions

+ 107 - 107
main/kanyikan_recommend.py

@@ -50,115 +50,115 @@ class Kanyikanrecommend:
         while True:
             for page in range(1, 101):
                 Common.logger(log_type).info(f"Crawling page {page}")
-                try:
-                    session = Common.get_session(log_type)
-                    if session is None:
-                        time.sleep(1)
-                        continue
-                    url = 'https://search.weixin.qq.com/cgi-bin/recwxa/recwxavideolist?'
-                    header = {
-                        "Connection": "keep-alive",
-                        "content-type": "application/json",
-                        "Accept-Encoding": "gzip,compress,br,deflate",
-                        "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) "
-                                      "AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.18(0x18001236) "
-                                      "NetType/WIFI Language/zh_CN",
-                        "Referer": "https://servicewechat.com/wxbb9a805eb4f9533c/234/page-frame.html",
-                    }
-                    params = {
-                        'session': session,
-                        "offset": 0,
-                        "wxaVersion": "3.9.2",
-                        "count": "10",
-                        "channelid": "208",
-                        "scene": '310',
-                        "subscene": '1089',
-                        "clientVersion": '8.0.18',
-                        "sharesearchid": '0',
-                        "nettype": 'wifi',
-                        "switchprofile": "0",
-                        "switchnewuser": "0",
+                # try:
+                session = Common.get_session(log_type)
+                if session is None:
+                    time.sleep(1)
+                    continue
+                url = 'https://search.weixin.qq.com/cgi-bin/recwxa/recwxavideolist?'
+                header = {
+                    "Connection": "keep-alive",
+                    "content-type": "application/json",
+                    "Accept-Encoding": "gzip,compress,br,deflate",
+                    "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) "
+                                  "AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.18(0x18001236) "
+                                  "NetType/WIFI Language/zh_CN",
+                    "Referer": "https://servicewechat.com/wxbb9a805eb4f9533c/234/page-frame.html",
+                }
+                params = {
+                    'session': session,
+                    "offset": 0,
+                    "wxaVersion": "3.9.2",
+                    "count": "10",
+                    "channelid": "208",
+                    "scene": '310',
+                    "subscene": '1089',
+                    "clientVersion": '8.0.18',
+                    "sharesearchid": '0',
+                    "nettype": 'wifi',
+                    "switchprofile": "0",
+                    "switchnewuser": "0",
+                }
+                urllib3.disable_warnings()
+                response = requests.get(url=url, headers=header, params=params, proxies=proxies, verify=False)
+                if "data" not in response.text:
+                    Common.logger(log_type).info("Session expired while fetching the video list; sleeping for a random 31-40 seconds")
+                    # If the response is empty, sleep for a random 31-40 seconds
+                    time.sleep(random.randint(31, 40))
+                    continue
+                elif "items" not in response.json()["data"]:
+                    Common.logger(log_type).info(f"get_feeds:{response.json()}, sleeping for a random 1-3 minutes")
+                    # If the response is empty, sleep for a random 1-3 minutes
+                    time.sleep(random.randint(60, 180))
+                    continue
+                feeds = response.json().get("data", {}).get("items", "")
+                if feeds == "":
+                    Common.logger(log_type).info(f"feeds:{feeds}")
+                    time.sleep(random.randint(31, 40))
+                    continue
+                for i in range(len(feeds)):
+                    # try:
+                    video_title = feeds[i].get("title", "").strip().replace("\n", "") \
+                        .replace("/", "").replace("\\", "").replace("\r", "") \
+                        .replace(":", "").replace("*", "").replace("?", "") \
+                        .replace("?", "").replace('"', "").replace("<", "") \
+                        .replace(">", "").replace("|", "").replace(" ", "") \
+                        .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
+                        .replace("'", "").replace("#", "").replace("Merge", "")
+                    publish_time_stamp = feeds[i].get("date", 0)
+                    publish_time_str = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(publish_time_stamp))
+                    # Get the playback URL
+                    if "videoInfo" not in feeds[i]:
+                        video_url = ""
+                    elif "mpInfo" in feeds[i]["videoInfo"]["videoCdnInfo"]:
+                        if len(feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"]) > 2:
+                            video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][2]["url"]
+                        else:
+                            video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][0]["url"]
+                    elif "ctnInfo" in feeds[i]["videoInfo"]["videoCdnInfo"]:
+                        video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["ctnInfo"]["urlInfo"][0]["url"]
+                    else:
+                        video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["urlInfo"][0]["url"]
+                    video_dict = {
+                        "video_title": video_title,
+                        "video_id": feeds[i].get("videoId", ""),
+                        "play_cnt": feeds[i].get("playCount", 0),
+                        "like_cnt": feeds[i].get("liked_cnt", 0),
+                        "comment_cnt": feeds[i].get("comment_cnt", 0),
+                        "share_cnt": feeds[i].get("shared_cnt", 0),
+                        "duration": feeds[i].get("mediaDuration", 0),
+                        "video_width": feeds[i].get("short_video_info", {}).get("width", 0),
+                        "video_height": feeds[i].get("short_video_info", {}).get("height", 0),
+                        "publish_time_stamp": publish_time_stamp,
+                        "publish_time_str": publish_time_str,
+                        "user_name": feeds[i].get("source", "").strip().replace("\n", ""),
+                        "user_id": feeds[i].get("openid", ""),
+                        "avatar_url": feeds[i].get("bizIcon", ""),
+                        "cover_url": feeds[i].get("thumbUrl", ""),
+                        "video_url": video_url,
+                        "session": session,
                     }
-                    urllib3.disable_warnings()
-                    response = requests.get(url=url, headers=header, params=params, proxies=proxies, verify=False)
-                    if "data" not in response.text:
-                        Common.logger(log_type).info("Session expired while fetching the video list; sleeping for a random 31-40 seconds")
-                        # If the response is empty, sleep for a random 31-40 seconds
-                        time.sleep(random.randint(31, 40))
-                        continue
-                    elif "items" not in response.json()["data"]:
-                        Common.logger(log_type).info(f"get_feeds:{response.json()}, sleeping for a random 1-3 minutes")
-                        # If the response is empty, sleep for a random 1-3 minutes
-                        time.sleep(random.randint(60, 180))
-                        continue
-                    feeds = response.json().get("data", {}).get("items", "")
-                    if feeds == "":
-                        Common.logger(log_type).info(f"feeds:{feeds}")
-                        time.sleep(random.randint(31, 40))
-                        continue
-                    for i in range(len(feeds)):
-                        try:
-                            video_title = feeds[i].get("title", "").strip().replace("\n", "") \
-                                .replace("/", "").replace("\\", "").replace("\r", "") \
-                                .replace(":", "").replace("*", "").replace("?", "") \
-                                .replace("?", "").replace('"', "").replace("<", "") \
-                                .replace(">", "").replace("|", "").replace(" ", "") \
-                                .replace("&NBSP", "").replace(".", "。").replace(" ", "") \
-                                .replace("'", "").replace("#", "").replace("Merge", "")
-                            publish_time_stamp = feeds[i].get("date", 0)
-                            publish_time_str = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(publish_time_stamp))
-                            # Get the playback URL
-                            if "videoInfo" not in feeds[i]:
-                                video_url = ""
-                            elif "mpInfo" in feeds[i]["videoInfo"]["videoCdnInfo"]:
-                                if len(feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"]) > 2:
-                                    video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][2]["url"]
-                                else:
-                                    video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["mpInfo"]["urlInfo"][0]["url"]
-                            elif "ctnInfo" in feeds[i]["videoInfo"]["videoCdnInfo"]:
-                                video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["ctnInfo"]["urlInfo"][0]["url"]
-                            else:
-                                video_url = feeds[i]["videoInfo"]["videoCdnInfo"]["urlInfo"][0]["url"]
-                            video_dict = {
-                                "video_title": video_title,
-                                "video_id": feeds[i].get("videoId", ""),
-                                "play_cnt": feeds[i].get("playCount", 0),
-                                "like_cnt": feeds[i].get("liked_cnt", 0),
-                                "comment_cnt": feeds[i].get("comment_cnt", 0),
-                                "share_cnt": feeds[i].get("shared_cnt", 0),
-                                "duration": feeds[i].get("mediaDuration", 0),
-                                "video_width": feeds[i].get("short_video_info", {}).get("width", 0),
-                                "video_height": feeds[i].get("short_video_info", {}).get("height", 0),
-                                "publish_time_stamp": publish_time_stamp,
-                                "publish_time_str": publish_time_str,
-                                "user_name": feeds[i].get("source", "").strip().replace("\n", ""),
-                                "user_id": feeds[i].get("openid", ""),
-                                "avatar_url": feeds[i].get("bizIcon", ""),
-                                "cover_url": feeds[i].get("thumbUrl", ""),
-                                "video_url": video_url,
-                                "session": session,
-                            }
-                            for k, v in video_dict.items():
-                                Common.logger(log_type).info(f"{k}:{v}")
+                    for k, v in video_dict.items():
+                        Common.logger(log_type).info(f"{k}:{v}")
 
-                            if video_dict["video_id"] == "" \
-                                    or video_dict["video_title"] == ""\
-                                    or video_dict["video_url"] == "":
-                                Common.logger(log_type).info("Invalid video\n")
-                            elif cls.download_rule(video_dict) is False:
-                                Common.logger(log_type).info("Does not meet the crawl rules\n")
-                            elif any(str(word) if str(word) in video_title else False for word in cls.get_filter_word(log_type, crawler)) is True:
-                                Common.logger(log_type).info("Video title hit a filter word\n")
-                            elif video_dict["video_id"] in [j for i in Feishu.get_values_batch(log_type, crawler, "ho98Ov") for j in i]:
-                                Common.logger(log_type).info("Video already downloaded\n")
-                            elif video_dict["video_id"] in [j for i in Feishu.get_values_batch(log_type, crawler, "20ce0c") for j in i]:
-                                Common.logger(log_type).info("Video already downloaded\n")
-                            else:
-                                cls.download_publish(log_type, crawler, video_dict, env)
-                        except Exception as e:
-                            Common.logger(log_type).error(f"Exception while crawling a single video: {e}\n")
-                except Exception as e:
-                    Common.logger(log_type).error(f"Exception while crawling page {page}: {e}\n")
+                    if video_dict["video_id"] == "" \
+                            or video_dict["video_title"] == ""\
+                            or video_dict["video_url"] == "":
+                        Common.logger(log_type).info("Invalid video\n")
+                    elif cls.download_rule(video_dict) is False:
+                        Common.logger(log_type).info("Does not meet the crawl rules\n")
+                    elif any(str(word) if str(word) in video_title else False for word in cls.get_filter_word(log_type, crawler)) is True:
+                        Common.logger(log_type).info("Video title hit a filter word\n")
+                    elif video_dict["video_id"] in [j for i in Feishu.get_values_batch(log_type, crawler, "ho98Ov") for j in i]:
+                        Common.logger(log_type).info("Video already downloaded\n")
+                    elif video_dict["video_id"] in [j for i in Feishu.get_values_batch(log_type, crawler, "20ce0c") for j in i]:
+                        Common.logger(log_type).info("Video already downloaded\n")
+                    else:
+                        cls.download_publish(log_type, crawler, video_dict, env)
+                #         except Exception as e:
+                #             Common.logger(log_type).error(f"Exception while crawling a single video: {e}\n")
+                # except Exception as e:
+                #     Common.logger(log_type).error(f"Exception while crawling page {page}: {e}\n")
 
     @classmethod
     def download_publish(cls, log_type, crawler, video_dict, env):
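
For reference, the playback-URL selection in the hunk above walks videoInfo.videoCdnInfo with a fixed precedence: mpInfo first (taking the third rendition when more than two are offered, else the first), then ctnInfo, then a top-level urlInfo. A minimal standalone sketch of that precedence, where extract_video_url and feed are hypothetical names standing in for the inline logic and one element of feeds:

    # Sketch only: mirrors the URL precedence in kanyikan_recommend.py; not part of the commit.
    def extract_video_url(feed: dict) -> str:
        if "videoInfo" not in feed:
            return ""  # no playable source attached to this feed item
        cdn_info = feed["videoInfo"]["videoCdnInfo"]
        if "mpInfo" in cdn_info:
            url_info = cdn_info["mpInfo"]["urlInfo"]
            # With more than two renditions, the crawler prefers the third one.
            if len(url_info) > 2:
                return url_info[2]["url"]
            return url_info[0]["url"]
        if "ctnInfo" in cdn_info:
            return cdn_info["ctnInfo"]["urlInfo"][0]["url"]
        return cdn_info["urlInfo"][0]["url"]

    # Example with fabricated data: three mpInfo renditions resolve to the third URL.
    feed = {"videoInfo": {"videoCdnInfo": {"mpInfo": {"urlInfo": [
        {"url": "u0"}, {"url": "u1"}, {"url": "u2"}]}}}}
    assert extract_video_url(feed) == "u2"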