wangkun 2 年之前
父节点
当前提交
b8e5e00915
共有 1 个文件被更改,包括 100 次插入和 100 次删除
  1. 100 100
      xiaoniangao/xiaoniangao_follow/xiaoniangao_follow.py

+ 100 - 100
xiaoniangao/xiaoniangao_follow/xiaoniangao_follow.py

@@ -485,108 +485,108 @@ class XiaoniangaoFollow:
     # 下载/上传
     @classmethod
     def download_publish(cls, log_type, crawler, strategy, video_dict, oss_endpoint, env):
-        try:
-            if cls.download_rule(video_dict) is False:
-                Common.logger(log_type, crawler).info("不满足基础门槛\n")
-            elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
-                Common.logger(log_type, crawler).info('视频已下载\n')
-            elif any(str(word) if str(word) in video_dict['video_title'] else False for word in filter_word(log_type, crawler, "小年糕", env)) is True:
-                Common.logger(log_type, crawler).info("视频已中过滤词\n")
+        # try:
+        if cls.download_rule(video_dict) is False:
+            Common.logger(log_type, crawler).info("不满足基础门槛\n")
+        elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+            Common.logger(log_type, crawler).info('视频已下载\n')
+        elif any(str(word) if str(word) in video_dict['video_title'] else False for word in filter_word(log_type, crawler, "小年糕", env)) is True:
+            Common.logger(log_type, crawler).info("视频已中过滤词\n")
+        else:
+            # 下载封面
+            Common.download_method(log_type=log_type, crawler=crawler, text="cover",
+                                   title=video_dict["video_title"], url=video_dict["cover_url"])
+            # 下载视频
+            Common.download_method(log_type=log_type, crawler=crawler, text="video",
+                                   title=video_dict["video_title"], url=video_dict["video_url"])
+            # 保存视频信息至 "./videos/{download_video_title}/info.txt"
+            Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+            # 上传视频
+            Common.logger(log_type, crawler).info("开始上传视频...")
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy=strategy,
+                                                      our_uid="follow",
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
+            if env == "dev":
+                our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
             else:
-                # 下载封面
-                Common.download_method(log_type=log_type, crawler=crawler, text="cover",
-                                       title=video_dict["video_title"], url=video_dict["cover_url"])
-                # 下载视频
-                Common.download_method(log_type=log_type, crawler=crawler, text="video",
-                                       title=video_dict["video_title"], url=video_dict["video_url"])
-                # 保存视频信息至 "./videos/{download_video_title}/info.txt"
-                Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
-
-                # 上传视频
-                Common.logger(log_type, crawler).info("开始上传视频...")
-                our_video_id = Publish.upload_and_publish(log_type=log_type,
-                                                          crawler=crawler,
-                                                          strategy=strategy,
-                                                          our_uid="follow",
-                                                          env=env,
-                                                          oss_endpoint=oss_endpoint)
-                if env == "dev":
-                    our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-                else:
-                    our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
-                Common.logger(log_type, crawler).info("视频上传完成")
-
-                if our_video_id is None:
-                    # 删除视频文件夹
-                    shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
-                    return
+                our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+            Common.logger(log_type, crawler).info("视频上传完成")
+
+            if our_video_id is None:
+                # 删除视频文件夹
+                shutil.rmtree(f"./{crawler}/videos/{video_dict['video_title']}")
+                return
+
+            # 视频信息保存数据库
+            rule_dict = {
+                "duration": {"min": 40, "max": 100000000},
+                "play_cnt": {"min": 500}
+            }
+
+            insert_sql = f""" insert into crawler_video(video_id,
+                                            out_user_id,
+                                            platform,
+                                            strategy,
+                                            out_video_id,
+                                            video_title,
+                                            cover_url,
+                                            video_url,
+                                            duration,
+                                            publish_time,
+                                            play_cnt,
+                                            crawler_rule,
+                                            width,
+                                            height)
+                                            values({our_video_id},
+                                            "{video_dict['profile_id']}",
+                                            "{cls.platform}",
+                                            "定向爬虫策略",
+                                            "{video_dict['video_id']}",
+                                            "{video_dict['video_title']}",
+                                            "{video_dict['cover_url']}",
+                                            "{video_dict['video_url']}",
+                                            {int(video_dict['duration'])},
+                                            "{video_dict['publish_time_str']}",
+                                            {int(video_dict['play_cnt'])},
+                                            '{json.dumps(rule_dict)}',
+                                            {int(video_dict['video_width'])},
+                                            {int(video_dict['video_height'])}) """
+            Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+            MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+            Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+
+            # 视频写入飞书
+            Feishu.insert_columns(log_type, crawler, "Wu0CeL", "ROWS", 1, 2)
+            # 视频ID工作表,首行写入数据
+            upload_time = int(time.time())
+            values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                       "用户主页",
+                       str(video_dict['video_id']),
+                       str(video_dict['video_title']),
+                       our_video_link,
+                       video_dict['play_cnt'],
+                       video_dict['comment_cnt'],
+                       video_dict['like_cnt'],
+                       video_dict['share_cnt'],
+                       video_dict['duration'],
+                       f"{video_dict['video_width']}*{video_dict['video_height']}",
+                       str(video_dict['publish_time_str']),
+                       str(video_dict['user_name']),
+                       str(video_dict['profile_id']),
+                       str(video_dict['profile_mid']),
+                       str(video_dict['avatar_url']),
+                       str(video_dict['cover_url']),
+                       str(video_dict['video_url'])]]
+            time.sleep(1)
+            Feishu.update_values(log_type, crawler, "Wu0CeL", "F2:Z2", values)
+            Common.logger(log_type, crawler).info('视频信息写入飞书成功\n')
 
-                # 视频信息保存数据库
-                rule_dict = {
-                    "duration": {"min": 40, "max": 100000000},
-                    "play_cnt": {"min": 500}
-                }
-
-                insert_sql = f""" insert into crawler_video(video_id,
-                                                out_user_id,
-                                                platform,
-                                                strategy,
-                                                out_video_id,
-                                                video_title,
-                                                cover_url,
-                                                video_url,
-                                                duration,
-                                                publish_time,
-                                                play_cnt,
-                                                crawler_rule,
-                                                width,
-                                                height)
-                                                values({our_video_id},
-                                                "{video_dict['profile_id']}",
-                                                "{cls.platform}",
-                                                "定向爬虫策略",
-                                                "{video_dict['video_id']}",
-                                                "{video_dict['video_title']}",
-                                                "{video_dict['cover_url']}",
-                                                "{video_dict['video_url']}",
-                                                {int(video_dict['duration'])},
-                                                "{video_dict['publish_time_str']}",
-                                                {int(video_dict['play_cnt'])},
-                                                '{json.dumps(rule_dict)}',
-                                                {int(video_dict['video_width'])},
-                                                {int(video_dict['video_height'])}) """
-                Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
-                MysqlHelper.update_values(log_type, crawler, insert_sql, env)
-                Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
-
-                # 视频写入飞书
-                Feishu.insert_columns(log_type, crawler, "Wu0CeL", "ROWS", 1, 2)
-                # 视频ID工作表,首行写入数据
-                upload_time = int(time.time())
-                values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
-                           "用户主页",
-                           str(video_dict['video_id']),
-                           str(video_dict['video_title']),
-                           our_video_link,
-                           video_dict['play_cnt'],
-                           video_dict['comment_cnt'],
-                           video_dict['like_cnt'],
-                           video_dict['share_cnt'],
-                           video_dict['duration'],
-                           f"{video_dict['video_width']}*{video_dict['video_height']}",
-                           str(video_dict['publish_time_str']),
-                           str(video_dict['user_name']),
-                           str(video_dict['profile_id']),
-                           str(video_dict['profile_mid']),
-                           str(video_dict['avatar_url']),
-                           str(video_dict['cover_url']),
-                           str(video_dict['video_url'])]]
-                time.sleep(1)
-                Feishu.update_values(log_type, crawler, "Wu0CeL", "F2:Z2", values)
-                Common.logger(log_type, crawler).info('视频信息写入飞书成功\n')
-
-        except Exception as e:
-            Common.logger(log_type, crawler).error("下载/上传异常:{}", e)
+        # except Exception as e:
+        #     Common.logger(log_type, crawler).error("下载/上传异常:{}", e)
 
     # 获取所有关注列表的用户视频
     @classmethod