
add benshanzhufu_scheduling

wangkun 1 year ago
commit 48d4eaea72

+ 1 - 0
README.MD

@@ -239,4 +239,5 @@ ps aux | grep gongzhonghao | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep xiaoniangao | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_xigua_search | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_suisuiniannianyingfuqi | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep run_benshanzhufu | grep -v grep | awk '{print $2}' | xargs kill -9
 ```

+ 48 - 0
benshanzhufu/benshanzhufu_main/run_benshanzhufu_recommend_scheduling.py

@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13
+import argparse
+import os
+import random
+import sys
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.public import task_fun
+from common.scheduling_db import MysqlHelper
+from benshanzhufu.benshanzhufu_recommend.benshanzhufu_recommend_scheduling import BenshanzhufuRecommend
+
+
+def main(log_type, crawler, task, env):
+    task_dict = task_fun(task)['task_dict']
+    rule_dict = task_fun(task)['rule_dict']
+    task_id = task_dict['task_id']
+    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+    our_uid_list = []
+    for user in user_list:
+        our_uid_list.append(user["uid"])
+    our_uid = random.choice(our_uid_list)
+    Common.logger(log_type, crawler).info(f"调度任务:{task_dict}")
+    Common.logger(log_type, crawler).info(f"抓取规则:{rule_dict}")
+    Common.logger(log_type, crawler).info(f"用户列表:{user_list}\n")
+    Common.logger(log_type, crawler).info('开始抓取 本山祝福小程序\n')
+    BenshanzhufuRecommend.get_videoList(log_type=log_type,
+                                        crawler=crawler,
+                                        our_uid=our_uid,
+                                        rule_dict=rule_dict,
+                                        env=env)
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info('抓取完一轮\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # create the argument parser
+    parser.add_argument('--log_type', type=str)  # add the argument and declare its type
+    parser.add_argument('--crawler')  # add the argument
+    parser.add_argument('--task')  # add the argument
+    parser.add_argument('--env')  # add the argument
+    args = parser.parse_args()  # parse the values passed on the command line
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         task=args.task,
+         env=args.env)

+ 249 - 0
benshanzhufu/benshanzhufu_recommend/benshanzhufu_recommend_scheduling.py

@@ -0,0 +1,249 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13
+import json
+import os
+import shutil
+import sys
+import time
+from hashlib import md5
+from urllib import parse
+import requests
+import urllib3
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from common.feishu import Feishu
+from common.publish import Publish
+from common.public import get_config_from_mysql, download_rule
+proxies = {"http": None, "https": None}
+
+
+class BenshanzhufuRecommend:
+    platform = "本山祝福"
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="{cls.platform}" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    # fetch videos from the recommend feed
+    @classmethod
+    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
+        # pagination parameters
+        visitor_key = ""
+        page = 1
+        while True:
+            # try:
+            now = int(time.time() * 1000)
+            url = "https://bszf.wentingyou.cn/index.php/v111/index/index?parameter="
+            header = {
+                "content-time": str(now),
+                "chatKey": "wx0fb8149da961d3b0",
+                "cache-time": str(now),
+                "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) "
+                              "AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 "
+                              "MicroMessenger/8.0.20(0x1800142d) NetType/WIFI Language/zh_CN",
+                "Referer": "https://servicewechat.com/wx0fb8149da961d3b0/2/page-frame.html"
+            }
+            parameter = {
+                "page": page,
+                "ini_id": visitor_key
+            }
+            params = parse.quote(json.dumps(parameter))
+            url = url + str(params)
+            urllib3.disable_warnings()
+            r = requests.get(headers=header, url=url, proxies=proxies, verify=False)
+            if r.status_code != 200:
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.text}\n")
+                return
+            elif r.json()['message'] != "list success":
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.json()}\n")
+                return
+            elif "data" not in r.json():
+                Common.logger(log_type, crawler).warning(f"get_videoList:{r.status_code}, {r.json()}\n")
+                return
+            elif len(r.json()['data']["list"]) == 0:
+                Common.logger(log_type, crawler).info(f"没有更多数据了~ {r.json()}\n")
+                return
+            else:
+                # advance to the next page
+                visitor_key = r.json()["data"]["visitor_key"]
+                page += 1
+                feeds = r.json()["data"]["list"]
+                for i in range(len(feeds)):
+                    # try:
+                    publish_time_stamp = feeds[i].get("update_time", 0)
+                    publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+                    video_url = feeds[i].get("video_url", "")
+                    if ".mp4" not in video_url:
+                        video_url = ""
+
+                    video_dict = {
+                        'video_title': feeds[i].get("title", "").replace(" ", "").replace("'", "").replace('"', ""),
+                        'video_id': str(feeds[i].get("nid", "")),
+                        'play_cnt': 0,
+                        'comment_cnt': feeds[i].get("commentCount", 0),
+                        'like_cnt': 0,
+                        'share_cnt': 0,
+                        'publish_time_stamp': publish_time_stamp,
+                        'publish_time_str': publish_time_str,
+                        'user_name': "本山祝福",
+                        'user_id': "benshanzhufu",
+                        'avatar_url': feeds[i].get("video_cover", ""),
+                        'cover_url': feeds[i].get("video_cover", ""),
+                        'video_url': video_url,
+                        'session': f"benshanzhufu-{int(time.time())}"
+                    }
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                    # filter out invalid videos
+                    if video_dict["video_id"] == "" or video_dict["cover_url"] == "" or video_dict["video_url"] == "":
+                        Common.logger(log_type, crawler).info("无效视频\n")
+                    elif download_rule(log_type=log_type, crawler=crawler, video_dict=video_dict, rule_dict=rule_dict) is False:
+                        Common.logger(log_type, crawler).info("不满足抓取规则\n")
+                    elif any(str(word) and str(word) in video_dict["video_title"]
+                             for word in get_config_from_mysql(log_type=log_type,
+                                                               source=crawler,
+                                                               env=env,
+                                                               text="filter",
+                                                               action="")):
+                        Common.logger(log_type, crawler).info('已中过滤词\n')
+                    elif cls.repeat_video(log_type, crawler, video_dict["video_id"], env) != 0:
+                        Common.logger(log_type, crawler).info('视频已下载\n')
+                    else:
+                        cls.download_publish(log_type=log_type,
+                                             crawler=crawler,
+                                             our_uid=our_uid,
+                                             video_dict=video_dict,
+                                             rule_dict=rule_dict,
+                                             env=env)
+                        # except Exception as e:
+                        #     Common.logger(log_type, crawler).info(f"抓取单条视频异常:{e}\n")
+            # except Exception as e:
+            #     Common.logger(log_type, crawler).error(f"抓取第{page}页时异常:{e}\n")
+
+    # download / upload
+    @classmethod
+    def download_publish(cls, log_type, crawler, our_uid, video_dict, rule_dict, env):
+        # download the video
+        Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # delete the video folder
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+        except FileNotFoundError:
+            # delete the video folder
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
+            return
+
+        # probe the downloaded file; the folder name is the md5 of the title, matching the size check above
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{md_title}/video.mp4")
+        video_dict["duration"] = ffmpeg_dict["duration"]
+        video_dict["video_width"] = ffmpeg_dict["width"]
+        video_dict["video_height"] = ffmpeg_dict["height"]
+
+        # download the cover image
+        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
+        # save the video info to a local txt file
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        # upload the video
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        if env == "dev":
+            oss_endpoint = "out"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy="推荐榜爬虫策略",
+                                                      our_uid=our_uid,
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        else:
+            oss_endpoint = "inner"
+            our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                      crawler=crawler,
+                                                      strategy="推荐榜爬虫策略",
+                                                      our_uid=our_uid,
+                                                      env=env,
+                                                      oss_endpoint=oss_endpoint)
+
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+
+        if our_video_id is None:
+            try:
+                # delete the video folder
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
+
+        # save the video info to the database
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                out_user_id,
+                                                platform,
+                                                strategy,
+                                                out_video_id,
+                                                video_title,
+                                                cover_url,
+                                                video_url,
+                                                duration,
+                                                publish_time,
+                                                play_cnt,
+                                                crawler_rule,
+                                                width,
+                                                height)
+                                                values({our_video_id},
+                                                "{video_dict['user_id']}",
+                                                "{cls.platform}",
+                                                "推荐榜爬虫策略",
+                                                "{video_dict['video_id']}",
+                                                "{video_dict['video_title']}",
+                                                "{video_dict['cover_url']}",
+                                                "{video_dict['video_url']}",
+                                                {int(video_dict['duration'])},
+                                                "{video_dict['publish_time_str']}",
+                                                {int(video_dict['play_cnt'])},
+                                                '{json.dumps(rule_dict)}',
+                                                {int(video_dict['video_width'])},
+                                                {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action='')
+        Common.logger(log_type, crawler).info('视频信息写入数据库成功')
+
+        # write the video info to the Feishu sheet
+        Feishu.insert_columns(log_type, crawler, "440018", "ROWS", 1, 2)
+        upload_time = int(time.time())
+        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                   "推荐榜爬虫策略",
+                   video_dict['video_id'],
+                   video_dict['video_title'],
+                   our_video_link,
+                   video_dict['play_cnt'],
+                   video_dict['comment_cnt'],
+                   video_dict['like_cnt'],
+                   video_dict['share_cnt'],
+                   video_dict['duration'],
+                   f"{video_dict['video_width']}*{video_dict['video_height']}",
+                   video_dict['publish_time_str'],
+                   video_dict['user_name'],
+                   video_dict['user_id'],
+                   video_dict['avatar_url'],
+                   video_dict['cover_url'],
+                   video_dict['video_url']]]
+        time.sleep(0.5)
+        Feishu.update_values(log_type, crawler, "440018", "E2:Z2", values)
+        Common.logger(log_type, crawler).info(f"视频信息已保存至云文档\n")
+
+
+if __name__ == "__main__":
+    print(get_config_from_mysql("recommend", "benshanzhufu", "dev", "filter"))
+    pass

+ 12 - 7
common/public.py

@@ -104,14 +104,19 @@ def download_rule(log_type, crawler, video_dict, rule_dict):
     # 比较结果,输出:True / False
     for video_key, video_value in video_dict.items():
         for rule_key, rule_value in rule_dict.items():
-            if video_key == rule_key:
-                result = rule_value["min"] <= video_value <= rule_value["max"]
-                # print(f'{video_key}: {rule_value["min"]} <= {video_value} <= {rule_value["max"]},{result}')
+            if video_key == rule_key == "period":
+                result = 0 <= int(video_value) <= int(rule_value["min"])
+                Common.logger(log_type, crawler).info(f'{video_key}: 0 <= {video_value} <= {rule_value["min"]}, {result}')
+            elif video_key == rule_key:
+                result = int(rule_value["min"]) <= int(video_value) <= int(rule_value["max"])
                 Common.logger(log_type, crawler).info(f'{video_key}: {rule_value["min"]} <= {video_value} <= {rule_value["max"]},{result}')
-                if result is False:
-                    return False
-                else:
-                    continue
+            else:
+                result = True
+
+            if result is False:
+                return False
+            else:
+                continue
     return True
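
To make the new behavior concrete, here is a minimal sketch with toy values (my own illustration, not code from this commit), assuming that `video_dict["period"]` holds the video's age in days: the "period" rule keeps videos no older than its `min`, while every other matching key is checked against an ordinary min/max range.

```python
# Toy illustration of the updated comparison in download_rule; all values are made up.
video_dict = {"period": 5, "duration": 55}                        # 5 days old, 55 s long
rule_dict = {"period": {"min": 10, "max": 0},                     # keep videos at most 10 days old
             "duration": {"min": 40, "max": 600}}                 # ordinary min/max range

for key, value in video_dict.items():
    rule = rule_dict[key]
    if key == "period":
        ok = 0 <= int(value) <= int(rule["min"])                  # 0 <= 5 <= 10 -> True
    else:
        ok = int(rule["min"]) <= int(value) <= int(rule["max"])   # 40 <= 55 <= 600 -> True
    print(key, ok)
```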
 
 

BIN
suisuiniannianyingfuqi/.DS_Store


+ 19 - 14
suisuiniannianyingfuqi/suisuiniannianyingfuqi_main/demo.py

@@ -26,22 +26,23 @@ class Demo:
             "play_cnt": 1000,
             "share_cnt": 1000,
             "duration": 55,
+            "period": 5,
             "publish_time_stamp": 1683648000,  # 2023-05-10 00:00:00
             "video_url": "www.baidu.com"
         }
         rule_dict = {
-             # "play_cnt": {"min": 0, "max": 0},
-             # "fans_cnt": {"min": 0, "max": 0},
-             # "videos_cnt": {"min": 0, "max": 0},
-             # "like_cnt": {"min": 0, "max": 0},
-             # "video_width": {"min": 0, "max": 0},
-             # "video_height": {"min": 0, "max": 0},
-             # "duration": {"min": 0, "max": 0},
-             # "share_cnt": {"min": 0, "max": 0},
-             # "comment_cnt": {"min": 0, "max": 0},
-             # "favorite_cnt": {"min": 0, "max": 0},
-             # "period": {"min": 10, "max": 0},
-             # "publish_time": {"min": 1673734400000, "max": 0}
+             "play_cnt": {"min": 0, "max": 0},
+             "fans_cnt": {"min": 0, "max": 0},
+             "videos_cnt": {"min": 0, "max": 0},
+             "like_cnt": {"min": 0, "max": 0},
+             "video_width": {"min": 0, "max": 0},
+             "video_height": {"min": 0, "max": 0},
+             "duration": {"min": 0, "max": 0},
+             "share_cnt": {"min": 0, "max": 0},
+             "comment_cnt": {"min": 0, "max": 0},
+             "favorite_cnt": {"min": 0, "max": 0},
+             "period": {"min": 10, "max": 0},
+             "publish_time": {"min": 1673734400000, "max": 0}
         }
 
         # 格式化 video_dict:publish_time_stamp
@@ -54,6 +55,10 @@ class Demo:
         for rule_value in rule_dict.values():
             if rule_value["max"] == 0:
                 rule_value["max"] = 999999999999999
+        rule_dict["period"]["max"] = rule_dict["period"]["min"]
+        rule_dict["period"]["min"] = 0
+        for k, v in rule_dict.items():
+            print(f"{k}:{v}")
         # 格式化 rule_dict 有的 key,video_dict 中没有的问题
         for rule_key in rule_dict.keys():
             if rule_key not in video_dict.keys():
@@ -127,7 +132,7 @@ class Demo:
 
 if __name__ == "__main__":
     # Demo.get_user("demo", "suisuiniannianyingfuqi", "dev")
-    # print(Demo.test_dict())
+    print(Demo.test_dict())
     # print(500 <= 1000 <= 100000000)
-    Demo.save_video_info()
+    # Demo.save_video_info()
     pass