Bläddra i källkod

公众号降低频率,每次成功发送消息休眠1~3秒

罗俊辉 1 år sedan
förälder
incheckning
d71d390645
1 ändrad fil med 2 tillägg och 133 borttagningar
  1. 2 133
      gongzhonghao/gongzhonghao_author/gongzhonghao_author.py

+ 2 - 133
gongzhonghao/gongzhonghao_author/gongzhonghao_author.py

@@ -4,6 +4,7 @@
 import datetime
 import datetime
 import json
 import json
 import os
 import os
+import random
 import shutil
 import shutil
 import sys
 import sys
 import time
 import time
@@ -322,12 +323,6 @@ class GongzhonghaoAuthor:
                             Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
                             Common.logger(log_type, crawler).info(f'标题相似度>=80%:{video_dict["video_title"]}\n')
                             Common.logging(log_type, crawler, env, f'标题相似度>=80%:{video_dict["video_title"]}\n')
                             Common.logging(log_type, crawler, env, f'标题相似度>=80%:{video_dict["video_title"]}\n')
                         else:
                         else:
-                            # cls.download_publish(log_type=log_type,
-                            #                      crawler=crawler,
-                            #                      video_dict=video_dict,
-                            #                      rule_dict=rule_dict,
-                            #                      # user_dict=user_dict,
-                            #                      env=env)
                             video_dict["out_user_id"] = video_dict["user_id"]
                             video_dict["out_user_id"] = video_dict["user_id"]
                             video_dict["platform"] = crawler
                             video_dict["platform"] = crawler
                             video_dict["strategy"] = log_type
                             video_dict["strategy"] = log_type
@@ -338,6 +333,7 @@ class GongzhonghaoAuthor:
                             video_dict["user_id"] = user_dict["uid"]  # 站内 UID?爬虫获取不到了(随机发布到原 5 个账号中)
                             video_dict["user_id"] = user_dict["uid"]  # 站内 UID?爬虫获取不到了(随机发布到原 5 个账号中)
                             video_dict["publish_time"] = video_dict["publish_time_str"]
                             video_dict["publish_time"] = video_dict["publish_time_str"]
                             mq.send_msg(video_dict)
                             mq.send_msg(video_dict)
+                            time.sleep(random.randint(1, 3))
                     except Exception as e:
                     except Exception as e:
                         Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                         Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
                         Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
                         Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
@@ -353,133 +349,6 @@ class GongzhonghaoAuthor:
         return len(repeat_video)
         return len(repeat_video)
 
 
     # 下载/上传
     # 下载/上传
-    @classmethod
-    def download_publish(cls, log_type, crawler, video_dict, rule_dict, env):
-        # 下载视频
-        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_dict["video_title"], url=video_dict["video_url"])
-        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
-        try:
-            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
-                # 删除视频文件夹
-                shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
-                Common.logging(log_type, crawler, env, "视频size=0,删除成功\n")
-                return
-        except FileNotFoundError:
-            # 删除视频文件夹
-            shutil.rmtree(f"./{crawler}/videos/{md_title}")
-            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
-            Common.logging(log_type, crawler, env, "视频文件不存在,删除文件夹成功\n")
-            return
-        # 获取视频时长
-        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
-        video_dict["video_width"] = ffmpeg_dict["width"]
-        video_dict["video_height"] = ffmpeg_dict["height"]
-        video_dict["duration"] = ffmpeg_dict["duration"]
-        Common.logger(log_type, crawler).info(f'video_width:{video_dict["video_width"]}')
-        Common.logging(log_type, crawler, env, f'video_width:{video_dict["video_width"]}')
-        Common.logger(log_type, crawler).info(f'video_height:{video_dict["video_height"]}')
-        Common.logging(log_type, crawler, env, f'video_height:{video_dict["video_height"]}')
-        Common.logger(log_type, crawler).info(f'duration:{video_dict["duration"]}')
-        Common.logging(log_type, crawler, env, f'duration:{video_dict["duration"]}')
-        if download_rule(log_type, crawler, video_dict, rule_dict) is False:
-            shutil.rmtree(f"./{crawler}/videos/{md_title}")
-            Common.logger(log_type, crawler).info("不满足抓取规则,删除成功\n")
-            Common.logging(log_type, crawler, env, "不满足抓取规则,删除成功\n")
-            return
-        # 下载封面
-        Common.download_method(log_type=log_type, crawler=crawler, text="cover",
-                               title=video_dict["video_title"], url=video_dict["cover_url"])
-        # 保存视频信息至 "./videos/{video_title}/info.txt"
-        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
-
-        # 上传视频
-        Common.logger(log_type, crawler).info("开始上传视频...")
-        Common.logging(log_type, crawler, env, "开始上传视频...")
-        strategy = "定向爬虫策略"
-        if env == 'prod':
-            oss_endpoint = "inner"
-            our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                      crawler=crawler,
-                                                      strategy=strategy,
-                                                      our_uid="follow",
-                                                      oss_endpoint=oss_endpoint,
-                                                      env=env)
-            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
-        else:
-            oss_endpoint = "out"
-            our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                      crawler=crawler,
-                                                      strategy=strategy,
-                                                      our_uid="follow",
-                                                      oss_endpoint=oss_endpoint,
-                                                      env=env)
-            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{str(our_video_id)}/info"
-
-        if our_video_id is None:
-            try:
-                # 删除视频文件夹
-                shutil.rmtree(f"./{crawler}/videos/{md_title}")
-                return
-            except FileNotFoundError:
-                return
-
-        insert_sql = f""" insert into crawler_video(video_id,
-                                                    out_user_id,
-                                                    platform,
-                                                    strategy,
-                                                    out_video_id,
-                                                    video_title,
-                                                    cover_url,
-                                                    video_url,
-                                                    duration,
-                                                    publish_time,
-                                                    play_cnt,
-                                                    crawler_rule,
-                                                    width,
-                                                    height)
-                                                    values({our_video_id},
-                                                    "{video_dict['user_id']}",
-                                                    "{cls.platform}",
-                                                    "定向爬虫策略",
-                                                    "{video_dict['video_id']}",
-                                                    "{video_dict['video_title']}",
-                                                    "{video_dict['cover_url']}",
-                                                    "{video_dict['video_url']}",
-                                                    {int(video_dict['duration'])},
-                                                    "{video_dict['publish_time_str']}",
-                                                    {int(video_dict['play_cnt'])},
-                                                    '{json.dumps(rule_dict)}',
-                                                    {int(video_dict['video_width'])},
-                                                    {int(video_dict['video_height'])}) """
-        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-        Common.logging(log_type, crawler, env, f"insert_sql:{insert_sql}")
-        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
-        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
-        Common.logging(log_type, crawler, env, '视频信息写入数据库成功')
-
-        # 视频写入飞书
-        Feishu.insert_columns(log_type, crawler, "47e39d", "ROWS", 1, 2)
-        # 视频ID工作表,首行写入数据
-        upload_time = int(time.time())
-        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
-                   "用户主页",
-                   video_dict['video_title'],
-                   video_dict['video_id'],
-                   our_video_link,
-                   int(video_dict['duration']),
-                   f"{video_dict['video_width']}*{video_dict['video_height']}",
-                   video_dict['publish_time_str'],
-                   video_dict['user_name'],
-                   video_dict['user_id'],
-                   video_dict['avatar_url'],
-                   video_dict['cover_url'],
-                   video_dict['article_url'],
-                   video_dict['video_url']]]
-        time.sleep(0.5)
-        Feishu.update_values(log_type, crawler, "47e39d", "F2:Z2", values)
-        Common.logger(log_type, crawler).info('视频下载/上传成功\n')
-        Common.logging(log_type, crawler, env, '视频下载/上传成功\n')
 
 
     @classmethod
     @classmethod
     def get_all_videos(cls, log_type, crawler, task_dict, token_index, rule_dict, user_list, env):
     def get_all_videos(cls, log_type, crawler, task_dict, token_index, rule_dict, user_list, env):