wangkun 1 year ago
parent
commit
9b1f863b33

+ 1 - 0
README.MD

@@ -238,4 +238,5 @@ ps aux | grep search_key_mac | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep gongzhonghao | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep xiaoniangao | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_xigua_search | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep run_suisuiniannianyingfuqi | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
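
All of these kill commands repeat the same `ps aux | grep <keyword> | grep -v grep | awk '{print $2}' | xargs kill -9` pattern. A minimal Python helper that factors the keyword out (a sketch only, not part of the repo):

```python
# Sketch: kill every process whose command line contains `keyword`,
# mirroring the ps | grep | awk | xargs kill -9 one-liners above.
import os
import signal
import subprocess

def kill_crawler(keyword: str) -> None:
    ps_output = subprocess.run(["ps", "aux"], capture_output=True, text=True).stdout
    for line in ps_output.splitlines():
        if keyword in line and "grep" not in line:
            pid = int(line.split()[1])  # the second ps aux column is the PID
            os.kill(pid, signal.SIGKILL)

kill_crawler("run_suisuiniannianyingfuqi")
```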

+ 7 - 7
common/publish.py

@@ -172,12 +172,12 @@ class Publish:
         :param our_uid: upload to the specified on-site UID
         :return: uid
         """
-        if env == 'dev':
-            uids_dev = [6267140, 6267141]
-            return random.choice(uids_dev)
+        # if env == 'dev':
+        #     uids_dev = [6267140, 6267141]
+        #     return random.choice(uids_dev)
 
         # Xiaoniangao
-        elif crawler == 'xiaoniangao' and env == 'prod' and strategy == '定向爬虫策略':
+        if crawler == 'xiaoniangao' and env == 'prod' and strategy == '定向爬虫策略':
             uids_prod_xiaoniangao_follow = [50322210, 50322211, 50322212, 50322213, 50322214, 50322215,
                                             50322216, 50322217, 50322218, 50322219, 50322220, 50322221, 50322236, 50322237]
             return random.choice(uids_prod_xiaoniangao_follow)
@@ -200,9 +200,9 @@ class Publish:
             uids_prod_benshanzhufu_recommend = [20631262, 20631263, 20631264, 20631265, 20631266, 20631267, 20631268, 20631269, 20631271, 20631272]
             return random.choice(uids_prod_benshanzhufu_recommend)
 
-        elif crawler == 'suisuiniannianyingfuqi' and env == 'prod' and strategy == '推荐榜爬虫策略':
-            uids_prod_suisuiniannianyingfuqi_recommend = [26117547, 26117548, 26117549, 26117550, 26117551]
-            return random.choice(uids_prod_suisuiniannianyingfuqi_recommend)
+        # elif crawler == 'suisuiniannianyingfuqi' and env == 'prod' and strategy == '推荐榜爬虫策略':
+        #     uids_prod_suisuiniannianyingfuqi_recommend = [26117547, 26117548, 26117549, 26117550, 26117551]
+        #     return random.choice(uids_prod_suisuiniannianyingfuqi_recommend)
 
         elif crawler == 'ganggangdouchuan' and env == 'prod' and strategy == '推荐榜爬虫策略':
             uids_prod_ganggangdouchuan_recommend = [26117661, 26117662, 26117663]
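
The dev short-circuit and the suisuiniannianyingfuqi branch are commented out because the new scheduling runner (added below) now draws the target UID from the crawler_user_v3 table instead of these hard-coded lists. A minimal sketch of that lookup, mirroring the runner's logic (the helper name is illustrative):

```python
# Sketch: DB-driven UID selection replacing the hard-coded uid lists;
# mirrors run_suisuiniannianyingfuqi_recommend_scheduling.py.
import random
from common.scheduling_db import MysqlHelper

def pick_our_uid(log_type, crawler, task_id, env):
    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
    return random.choice([user["uid"] for user in user_list])
```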

+ 7 - 7
scheduling/scheduling_v3/crawler_scheduling_v3.py

@@ -88,12 +88,12 @@ class SchedulingV3:
         mode = task['mode']
         source = task['source']
         spider_name = task['spider_name']
-        if env == "aliyun":
-            oss_endpoint = "inner"
-        elif env == "hk":
-            oss_endpoint = "hk"
-        else:
-            oss_endpoint = "out"
+        # if env == "aliyun":
+        #     oss_endpoint = "inner"
+        # elif env == "hk":
+        #     oss_endpoint = "hk"
+        # else:
+        #     oss_endpoint = "out"
 
         # Schedule the task in the production environment
         Common.logger(log_type, crawler).info(f"开始调度任务")
@@ -113,7 +113,7 @@ class SchedulingV3:
             ('operator', str(task['operator']))
         ]
         task_str = str(task_str).replace(' ', '').replace('"', "'").replace("\/", "").replace("/", "")
-        cmd = f"""sh scheduling/scheduling_v3/scheduling_v3.sh {source}/{source}_main/{spider_name}.py --log_type="{mode}" --crawler="{source}" --task="{task_str}" --oss_endpoint="{oss_endpoint}" --env="{env}" {source}/logs/{source}-{mode}-scheduling.log """
+        cmd = f"""sh scheduling/scheduling_v3/scheduling_v3.sh {source}/{source}_main/{spider_name}.py --log_type="{mode}" --crawler="{source}" --task="{task_str}" --env="{env}" {source}/logs/{source}-{mode}-scheduling.log """
         Common.logger(log_type, crawler).info(f"cmd:{cmd}")
         os.system(cmd)
         Common.logger(log_type, crawler).info(f"调度任务结束")

+ 6 - 6
scheduling/scheduling_v3/scheduling_v3.sh

@@ -10,16 +10,16 @@ crawler_dir=$1  # Crawler entry path, e.g.: ./youtube/youtube_main/run_youtube_f
 log_type=$2     # Log naming format, e.g.: follow, which generates 2023-02-08-follow.log under youtube/logs/
 crawler=$3      # Which crawler, e.g.: youtube / kanyikan / weixinzhishu
 task=$4         # Crawler task
-oss_endpoint=$5 # OSS gateway, internal: inner / external: out / Hong Kong: hk
-env=$6          # Runtime environment, production: prod / test: dev
-nohup_dir=$7    # nohup log path, e.g.: ./youtube/nohup.log
+#oss_endpoint=$5 # OSS gateway, internal: inner / external: out / Hong Kong: hk
+env=$5          # Runtime environment, production: prod / test: dev
+nohup_dir=$6    # nohup log path, e.g.: ./youtube/nohup.log
 
 echo "$(date "+%Y-%m-%d %H:%M:%S") 开始调度爬虫任务"
 echo "crawler_dir:"${crawler_dir}
 echo "log_type:"${log_type}
 echo "crawler:"${crawler}
 echo "task:"${task}
-echo "oss_endpoint:"${oss_endpoint}
+#echo "oss_endpoint:"${oss_endpoint}
 echo "env:"${env}
 echo "nohup_dir:"${nohup_dir}
 
@@ -65,13 +65,13 @@ elif [ ${crawler} = "--crawler=shipinhao" ];then
   fi
 else
   echo "$(date "+%Y-%m-%d %H:%M:%S") 正在更新代码..."
-#  cd ${piaoquan_crawler_dir} && git pull origin master --force && rm -f ${piaoquan_crawler_dir}main/nohup.log && rm -f ${piaoquan_crawler_dir}${nohup_dir}
+  cd ${piaoquan_crawler_dir} && git pull origin master --force && rm -f ${piaoquan_crawler_dir}main/nohup.log && rm -f ${piaoquan_crawler_dir}${nohup_dir}
   echo "$(date "+%Y-%m-%d %H:%M:%S") 代码更新完成!"
 fi
 
 echo "$(date "+%Y-%m-%d %H:%M:%S") 正在重启服务..."
 cd ${piaoquan_crawler_dir}
-nohup ${python} -u ${crawler_dir} ${log_type} ${crawler} ${task} ${oss_endpoint} ${env} >> ${nohup_dir} 2>&1 &
+nohup ${python} -u ${crawler_dir} ${log_type} ${crawler} ${task} ${env} >> ${nohup_dir} 2>&1 &
 echo "$(date "+%Y-%m-%d %H:%M:%S") 服务重启完毕!"
 
 exit 0
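
With oss_endpoint removed, the script now takes six positional arguments and env / nohup_dir shift up to $5 / $6. A hypothetical invocation matching the new order (paths illustrative, task payload elided):

```python
# Sketch: invoke the updated scheduling_v3.sh with its six-argument signature.
import subprocess

subprocess.run([
    "sh", "scheduling/scheduling_v3/scheduling_v3.sh",
    "suisuiniannianyingfuqi/suisuiniannianyingfuqi_main/run_suisuiniannianyingfuqi_recommend_scheduling.py",  # $1 crawler_dir
    "--log_type=recommend",                   # $2 log_type
    "--crawler=suisuiniannianyingfuqi",       # $3 crawler
    "--task={...}",                           # $4 task (payload elided)
    "--env=dev",                              # $5 env (previously $6)
    "suisuiniannianyingfuqi/logs/nohup.log",  # $6 nohup_dir (previously $7)
], check=True)
```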

+ 72 - 0
suisuiniannianyingfuqi/suisuiniannianyingfuqi_main/demo.py

@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/5/15
+import random
+
+from common.scheduling_db import MysqlHelper
+
+
+class Demo:
+    @classmethod
+    def get_user(cls, log_type, crawler, env):
+        select_user_sql = f"""select * from crawler_user_v3 where task_id=36"""
+        user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+        print(user_list)
+        our_uid_list = []
+        for user in user_list:
+            our_uid_list.append(user["uid"])
+
+        print(our_uid_list)
+        our_uid = random.choice(our_uid_list)
+        print(our_uid)
+
+    @classmethod
+    def test_dict(cls):
+        video_dict = {
+            "play_cnt": 1000,
+            "share_cnt": 1000,
+            "duration": 55,
+            "video_url": "www.baidu.com"
+        }
+        rule_dict = {"play_cnt": {"min": 0, "max": 0},
+                     "fans_cnt": {"min": 0, "max": 0},
+                     "videos_cnt": {"min": 0, "max": 0},
+                     "like_cnt": {"min": 0, "max": 0},
+                     "video_width": {"min": 0, "max": 0},
+                     "video_height": {"min": 0, "max": 0},
+                     "duration": {"min": 0, "max": 0},
+                     "share_cnt": {"min": 0, "max": 0},
+                     "comment_cnt": {"min": 0, "max": 0},
+                     "favorite_cnt": {"min": 0, "max": 0},
+                     "period": {"min": 1, "max": 0}}
+        # Normalize rule_dict: a max of 0 means "no upper bound"
+        for rule_value in rule_dict.values():
+            if rule_value["max"] == 0:
+                rule_value["max"] = 999999999999999
+        # Backfill keys that exist in rule_dict but are missing from video_dict
+        for rule_key in rule_dict.keys():
+            if rule_key not in video_dict.keys():
+                video_dict[rule_key] = int(rule_dict[rule_key]["max"] / 2)
+        # Compare and report the result: True / False
+        for video_key, video_value in video_dict.items():
+            for rule_key, rule_value in rule_dict.items():
+                if video_key == rule_key:
+                    result = rule_value["min"] <= video_value <= rule_value["max"]
+                    print(f'{type(video_key)}: {type(rule_value["min"])} <= {type(video_value)} <= {type(rule_value["max"])}')
+                    print(f'{video_key}: {rule_value["min"]} <= {video_value} <= {rule_value["max"]}')
+                    print(f"result:{type(result)}, {result}")
+                    print("=========================\n")
+
+                    if not result:
+                        return False
+
+        return True
+
+if __name__ == "__main__":
+    # Demo.get_user("demo", "suisuiniannianyingfuqi", "dev")
+    print(Demo.test_dict())
+    # print(500 <= 1000 <= 100000000)
+    pass
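
The nested loops in test_dict compare every video_dict key against every rule_dict key just to find the matching pairs. Minus the debug prints, the same check collapses to a single pass over rule_dict; a minimal equivalent sketch:

```python
# Sketch: single-pass equivalent of Demo.test_dict's comparison logic.
def passes_rules(video_dict: dict, rule_dict: dict) -> bool:
    for key, bounds in rule_dict.items():
        max_value = bounds["max"] or 999999999999999   # max == 0 means "no upper bound"
        value = video_dict.get(key, int(max_value / 2))  # backfill keys missing from video_dict
        if not bounds["min"] <= value <= max_value:
            return False
    return True
```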

+ 49 - 0
suisuiniannianyingfuqi/suisuiniannianyingfuqi_main/run_suisuiniannianyingfuqi_recommend_scheduling.py

@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13
+import argparse
+import os
+import random
+import sys
+sys.path.append(os.getcwd())
+from common.public import task_fun
+from common.common import Common
+from common.scheduling_db import MysqlHelper
+from suisuiniannianyingfuqi.suisuiniannianyingfuqi_recommend.suisuiniannianyingfuqi_recommend_scheduling import SuisuiniannianyingfuqiRecommendScheduling
+
+
+def main(log_type, crawler, task, env):
+    task_dict = task_fun(task)['task_dict']
+    rule_dict = task_fun(task)['rule_dict']
+    task_id = task_dict['task_id']
+    select_user_sql = f"""select * from crawler_user_v3 where task_id={task_id}"""
+    user_list = MysqlHelper.get_values(log_type, crawler, select_user_sql, env, action="")
+    our_uid_list = []
+    for user in user_list:
+        our_uid_list.append(user["uid"])
+    our_uid = random.choice(our_uid_list)
+    Common.logger(log_type, crawler).info(f"调度任务:\n{task_dict}")
+    Common.logger(log_type, crawler).info(f"抓取规则:\n{rule_dict}")
+    Common.logger(log_type, crawler).info(f"用户列表:\n{user_list}")
+    Common.logger(log_type, crawler).info('开始抓取 岁岁年年迎福气小程序\n')
+    SuisuiniannianyingfuqiRecommendScheduling.get_videoList(log_type=log_type,
+                                                            crawler=crawler,
+                                                            our_uid=our_uid,
+                                                            rule_dict=rule_dict,
+                                                            env=env)
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info('抓取完一轮,休眠 1 分钟\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # create the argument parser
+    parser.add_argument('--log_type', type=str)  # add an argument with an explicit type
+    parser.add_argument('--crawler')  # add an argument
+    parser.add_argument('--task')  # add an argument
+    # parser.add_argument('--oss_endpoint')  # add an argument
+    parser.add_argument('--env')  # add an argument
+    args = parser.parse_args()  # parse the values supplied on the command line
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         task=args.task,
+         env=args.env)

+ 228 - 0
suisuiniannianyingfuqi/suisuiniannianyingfuqi_recommend/suisuiniannianyingfuqi_recommend_scheduling.py

@@ -0,0 +1,228 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/13
+import json
+import os
+import shutil
+import sys
+import time
+from hashlib import md5
+import requests
+import urllib3
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.publish import Publish
+from common.scheduling_db import MysqlHelper
+
+
+class SuisuiniannianyingfuqiRecommendScheduling:
+    platform = "岁岁年年迎福气"
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="岁岁年年迎福气" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def get_videoList(cls, log_type, crawler, our_uid, rule_dict, env):
+        page = 1
+        while True:
+            try:
+                url = 'https://www.jzkksp.com/index/home/get_home_list.html'
+                headers = {
+                    'content-type': 'application/x-www-form-urlencoded',
+                    'Accept-Encoding': 'gzip,compress,br,deflate',
+                    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_3_1 like Mac OS X) '
+                                  'AppleWebKit/605.1.15 (KHTML, like Gecko) '
+                                  'Mobile/15E148 MicroMessenger/8.0.25(0x1800192b) NetType/WIFI Language/zh_CN',
+                    'Referer': 'https://servicewechat.com/wxd4c54f60812f6f36/1/page-frame.html',
+                }
+                data = {
+                    'token': '851ae159fd33f955bf433e7c47a4a298',
+                    'time': '1667905857000',
+                    'str_data': 'uT551tU8',
+                    'page': str(page),
+                    'limit': '10',
+                    'appid': 'wxd4c54f60812f6f36',
+                    'version': '1.4.1',
+                    'openid': 'oDAjy5SCFe7Ml3PNgiow3ncozL1o'
+                }
+                urllib3.disable_warnings()
+                response = requests.post(url=url, headers=headers, data=data, verify=False)
+                page += 1
+                if response.status_code != 200:
+                    Common.logger(log_type, crawler).warning(f'get_videoList:{response.status_code}, {response.text}\n')
+                    return
+                elif 'data' not in response.json():
+                    Common.logger(log_type, crawler).warning(f'get_videoList:{response.status_code}, {response.json()}\n')
+                    return
+                elif len(response.json()['data']['video_list']['data']) == 0:
+                    Common.logger(log_type, crawler).info(f'没有更多数据啦~ {response.json()}\n')
+                    return
+                else:
+                    feeds = response.json()['data']['video_list']['data']
+                    for i in range(len(feeds)):
+                        try:
+                            video_title = feeds[i].get('title', "").replace("'", "").replace('"', '')
+                            video_id = str(feeds[i].get('id', ''))
+                            play_cnt = feeds[i].get('browse', 0)
+                            comment_cnt = 0
+                            like_cnt = 0
+                            share_cnt = 0
+                            publish_time_str = feeds[i].get('createtime', '')
+                            publish_time_stamp = int(time.mktime(time.strptime(publish_time_str, "%Y-%m-%d")))
+                            user_name = "岁岁年年迎福气"
+                            user_id = "suisuiniannianyingfuqi"
+                            cover_url = feeds[i].get('thumb', '')
+                            video_url = feeds[i].get('url', '')
+
+                            video_dict = {'video_title': video_title,
+                                          'video_id': video_id,
+                                          'play_cnt': play_cnt,
+                                          'comment_cnt': comment_cnt,
+                                          'like_cnt': like_cnt,
+                                          'share_cnt': share_cnt,
+                                          'publish_time_stamp': publish_time_stamp,
+                                          'publish_time_str': publish_time_str,
+                                          'user_name': user_name,
+                                          'user_id': user_id,
+                                          'avatar_url': cover_url,
+                                          'cover_url': cover_url,
+                                          'video_url': video_url,
+                                          'session': f"suisuiniannianyingfuqi-{int(time.time())}"}
+                            for k, v in video_dict.items():
+                                Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                            if video_id == '' or video_title == '' or cover_url == '' or video_url == '':
+                                Common.logger(log_type, crawler).info('无效视频\n')
+                            elif cls.repeat_video(log_type, crawler, video_id, env) != 0:
+                                Common.logger(log_type, crawler).info('视频已下载\n')
+                            else:
+                                cls.download_publish(log_type=log_type,
+                                                     crawler=crawler,
+                                                     our_uid=our_uid,
+                                                     video_dict=video_dict,
+                                                     rule_dict=rule_dict,
+                                                     env=env)
+                        except Exception as e:
+                            Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+            except Exception as e:
+                Common.logger(log_type, crawler).error(f"抓取第{page}页时异常:{e}\n")
+
+    # Download / upload
+    @classmethod
+    def download_publish(cls, log_type, crawler, our_uid, video_dict, rule_dict, env):
+        # Download the video
+        Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'], url=video_dict['video_url'])
+
+        md_title = md5(video_dict['video_title'].encode('utf8')).hexdigest()
+        try:
+            if os.path.getsize(f"./{crawler}/videos/{md_title}/video.mp4") == 0:
+                # Remove the video folder
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                Common.logger(log_type, crawler).info("视频size=0,删除成功\n")
+                return
+        except FileNotFoundError:
+            # Remove the video folder
+            shutil.rmtree(f"./{crawler}/videos/{md_title}")
+            Common.logger(log_type, crawler).info("视频文件不存在,删除文件夹成功\n")
+            return
+
+        ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{md_title}/video.mp4")
+        video_dict["duration"] = ffmpeg_dict["duration"]
+        video_dict["video_width"] = ffmpeg_dict["width"]
+        video_dict["video_height"] = ffmpeg_dict["height"]
+
+        # Download the cover image
+        Common.download_method(log_type=log_type, crawler=crawler, text='cover', title=video_dict['video_title'], url=video_dict['cover_url'])
+        # Save the video info to a txt file
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_dict)
+
+        if env == "dev":
+            oss_endpoint = "out"
+        else:
+            oss_endpoint = "inner"
+        # Upload the video
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                  crawler=crawler,
+                                                  strategy="推荐榜爬虫策略",
+                                                  our_uid=our_uid,
+                                                  env=env,
+                                                  oss_endpoint=oss_endpoint)
+        if env == 'dev':
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        else:
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        Common.logger(log_type, crawler).info("视频上传完成")
+
+        if our_video_id is None:
+            try:
+                # Remove the video folder
+                shutil.rmtree(f"./{crawler}/videos/{md_title}")
+                return
+            except FileNotFoundError:
+                return
+
+        # Write the video record to the Feishu sheet
+        Feishu.insert_columns(log_type, crawler, "290bae", "ROWS", 1, 2)
+        upload_time = int(time.time())
+        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                   "推荐榜爬虫策略",
+                   video_dict['video_title'],
+                   video_dict['video_id'],
+                   our_video_link,
+                   video_dict['play_cnt'],
+                   video_dict['duration'],
+                   f"{video_dict['video_width']}*{video_dict['video_height']}",
+                   video_dict['cover_url'],
+                   video_dict['video_url']]]
+        time.sleep(0.5)
+        Feishu.update_values(log_type, crawler, "290bae", "F2:Z2", values)
+        Common.logger(log_type, crawler).info(f"视频已保存至云文档\n")
+
+        # Persist the video info to the database
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                out_user_id,
+                                                platform,
+                                                strategy,
+                                                out_video_id,
+                                                video_title,
+                                                cover_url,
+                                                video_url,
+                                                duration,
+                                                publish_time,
+                                                play_cnt,
+                                                crawler_rule,
+                                                width,
+                                                height)
+                                                values({our_video_id},
+                                                "{video_dict['user_id']}",
+                                                "{cls.platform}",
+                                                "推荐榜爬虫策略",
+                                                "{video_dict['video_id']}",
+                                                "{video_dict['video_title']}",
+                                                "{video_dict['cover_url']}",
+                                                "{video_dict['video_url']}",
+                                                {int(video_dict['duration'])},
+                                                "{video_dict['publish_time_str']}",
+                                                {int(video_dict['play_cnt'])},
+                                                '{json.dumps(rule_dict)}',
+                                                {int(video_dict['video_width'])},
+                                                {int(video_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env, action='')
+        Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+
+
+if __name__ == '__main__':
+    SuisuiniannianyingfuqiRecommendScheduling.get_videoList(log_type='recommend',
+                                                            crawler='suisuiniannianyingfuqi',
+                                                            our_uid=6267140,
+                                                            rule_dict={},
+                                                            env='dev')
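
The INSERT above splices titles and URLs into the SQL via an f-string, which is why get_videoList strips quote characters from titles up front. A quoting-safe sketch using pymysql-style parameter binding (an assumption about the underlying driver; MysqlHelper itself only accepts a finished SQL string):

```python
# Sketch: parameterized insert via a pymysql-style cursor, so titles and
# URLs need no manual quote-stripping; assumes an open connection `conn`.
import json

def insert_video(conn, our_video_id, video_dict, rule_dict, platform):
    sql = """insert into crawler_video(video_id, out_user_id, platform, strategy,
             out_video_id, video_title, cover_url, video_url, duration,
             publish_time, play_cnt, crawler_rule, width, height)
             values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
    with conn.cursor() as cursor:
        cursor.execute(sql, (
            our_video_id, video_dict["user_id"], platform, "推荐榜爬虫策略",
            video_dict["video_id"], video_dict["video_title"],
            video_dict["cover_url"], video_dict["video_url"],
            int(video_dict["duration"]), video_dict["publish_time_str"],
            int(video_dict["play_cnt"]), json.dumps(rule_dict),
            int(video_dict["video_width"]), int(video_dict["video_height"])))
    conn.commit()
```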

BIN
suisuiniannianyingfuqi/videos/.DS_Store


+ 0 - 21
xigua/xigua_recommend/xigua_recommend.py

@@ -588,27 +588,6 @@ class XiguaRecommend:
         except Exception as e:
             Common.logger(log_type, crawler).error(f'get_video_url:{e}\n')
 
-    # Filter-word list
-    @classmethod
-    def filter_words(cls, log_type, crawler):
-        try:
-            while True:
-                filter_words_sheet = Feishu.get_values_batch(log_type, crawler, 'KGB4Hc')
-                if filter_words_sheet is None:
-                    Common.logger(log_type, crawler).warning(
-                        f"filter_words_sheet:{filter_words_sheet} 10秒钟后重试")
-                    continue
-                filter_words_list = []
-                for x in filter_words_sheet:
-                    for y in x:
-                        if y is None:
-                            pass
-                        else:
-                            filter_words_list.append(y)
-                return filter_words_list
-        except Exception as e:
-            Common.logger(log_type, crawler).error(f'filter_words异常:{e}\n')
-
     @classmethod
     def repeat_video(cls, log_type, crawler, video_id, env):
         sql = f""" select * from crawler_video where platform="西瓜视频" and out_video_id="{video_id}"; """

+ 1 - 0
zhiqingtiantiankan/zhiqingtiantiankan_recommend/zhiqingtiantiankan_recommend.py

@@ -149,6 +149,7 @@ class ZhiqingtiantiankanRecommend:
     def check_to_applet(cls, log_type, crawler, driver: WebDriver):
         while True:
             webview = driver.contexts
+            Common.logger(log_type, crawler).info(f"webview:{webview}")
             driver.switch_to.context(webview[1])
             windowHandles = driver.window_handles
             for handle in windowHandles:
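
The new log line prints driver.contexts just before the code indexes webview[1] unconditionally, which raises IndexError whenever the WEBVIEW context has not attached yet. A guarded sketch (an assumption, not part of this commit) using the same Appium calls:

```python
# Sketch: wait for a WEBVIEW context to appear before switching, instead of
# assuming driver.contexts already holds two entries.
import time

def switch_to_webview(driver, log, timeout=30):
    deadline = time.time() + timeout
    while time.time() < deadline:
        contexts = driver.contexts
        log.info(f"webview:{contexts}")
        webviews = [c for c in contexts if "WEBVIEW" in c]
        if webviews:
            driver.switch_to.context(webviews[0])
            return True
        time.sleep(1)
    return False
```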