Browse source

update scheduling

wangkun 2 years ago
parent
commit
251bb396b3

+ 4 - 1
README.MD

@@ -197,19 +197,22 @@ ps aux | grep run_suisuiniannianyingfuqi
 ps aux | grep run_suisuiniannianyingfuqi | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
 
-#### Offline crawlers: 刚刚都传 / 吉祥幸福 / 知青天天看 / 众妙音信
+#### Offline crawlers: 刚刚都传 / 吉祥幸福 / 知青天天看 / 众妙音信 / wechat_search_key
 ```commandline
 MacAir device, crontab scheduled job
 * * * * * /bin/sh /Users/piaoquan/Desktop/piaoquan_crawler/main/process_offline.sh "prod"
 Offline debugging
 sh /Users/wangkun/Desktop/crawler/piaoquan_crawler/main/process_offline.sh "dev"
+cd /Users/piaoquan/Desktop/piaoquan_crawler/ && nohup python3 -u weixinzhishu/weixinzhishu_key/search_key_mac.py >> weixinzhishu/logs/nohup-search-key.log 2>&1 &
 Check the processes
 ps aux | grep run_ganggangdouchuan
 ps aux | grep run_jixiangxingfu
 ps aux | grep run_zhongmiaoyinxin
 ps aux | grep run_zhiqingtiantiankan
+ps aux | grep search_key_mac
 ps aux | grep run_ganggangdouchuan | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_jixiangxingfu | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_zhongmiaoyinxin | grep -v grep | awk '{print $2}' | xargs kill -9
 ps aux | grep run_zhiqingtiantiankan | grep -v grep | awk '{print $2}' | xargs kill -9
+ps aux | grep search_key_mac | grep -v grep | awk '{print $2}' | xargs kill -9
 ```
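The `ps aux | grep … | xargs kill -9` chains above are easy to mistype. A minimal Python sketch of the same pattern (a hypothetical helper, not part of this repo):

```python
# Hypothetical helper mirroring the `ps aux | grep NAME | grep -v grep | awk '{print $2}' | xargs kill -9` chains above.
import os
import signal
import subprocess

def kill_by_keyword(keyword: str) -> None:
    """SIGKILL every process whose `ps aux` line contains `keyword`."""
    ps_output = subprocess.run(["ps", "aux"], capture_output=True, text=True).stdout
    for line in ps_output.splitlines():
        if keyword in line and "grep" not in line:
            pid = int(line.split()[1])  # PID is the second column of `ps aux`
            os.kill(pid, signal.SIGKILL)

# e.g. kill_by_keyword("search_key_mac")
```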

+ 0 - 21
main/scheduling_main.sh

@@ -33,11 +33,6 @@ echo "$(date "+%Y-%m-%d %H:%M:%S") 更新环境变量..."
 cd ~ && source ${profile_path}
 echo "$(date "+%Y-%m-%d %H:%M:%S") 更新环境变量完成!"
 
-#echo "$(date "+%Y-%m-%d %H:%M:%S") 正在杀进程..."
-#grep_str=run_${crawler##*=}
-#ps aux | grep ${grep_str} | grep Python | grep -v grep | awk '{print $2}' | xargs kill -9
-#echo "$(date "+%Y-%m-%d %H:%M:%S") 进程已杀死!"
-
 if [ ${env} = "--env=hk" ];then
   echo "升级yt-dlp"
   pip3 install yt-dlp -U
@@ -47,22 +42,6 @@ else
   echo "$(date "+%Y-%m-%d %H:%M:%S") 代码更新完成!"
 fi
 
-if [ ${env} = "--env=hk" ];then
-  echo "无需重启Appium及adb服务"
-elif [ ${env} = "--env=prod" ];then
-  echo "无需重启Appium及adb服务"
-else
-  echo "$(date "+%Y-%m-%d %H:%M:%S") 正在重启Appium..."
-  ps aux | grep Appium.app | grep -v grep | awk '{print $2}' | xargs kill -9
-  nohup ${node_path} /Applications/Appium.app/Contents/Resources/app/node_modules/appium/build/lib/main.js >>./nohup.log 2>&1 &
-  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启Appium完毕!"
-
-  echo "$(date "+%Y-%m-%d %H:%M:%S") 正在重启adb..."
-  adb kill-server
-  adb start-server
-  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启adb完毕!"
-fi
-
 echo "$(date "+%Y-%m-%d %H:%M:%S") 正在重启服务..."
 cd ${piaoquan_crawler_dir}
 nohup ${python} -u ${crawler_dir} ${log_type} ${crawler} ${env} >>${nohup_dir} 2>&1 &
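The restart step above detaches the crawler with `nohup … &` and appends stdout/stderr to a log file. A rough Python equivalent of that launch pattern, with placeholder paths (a sketch, assuming a POSIX host):

```python
# Sketch of `nohup ${python} -u ${crawler_dir} ... >> ${nohup_dir} 2>&1 &` done from Python.
import subprocess

def start_detached(python: str, crawler_dir: str, args: list, log_path: str) -> None:
    with open(log_path, "a") as log_file:
        subprocess.Popen(
            [python, "-u", crawler_dir, *args],
            stdout=log_file,           # >> nohup log
            stderr=subprocess.STDOUT,  # 2>&1
            start_new_session=True,    # detach from the parent shell, like nohup ... &
        )
```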

+ 3 - 0
scheduling/scheduling_v3/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/19

+ 143 - 0
scheduling/scheduling_v3/crawler_scheduling_v3.py

@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/2
+import os
+import sys
+import time
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.scheduling_db import MysqlHelper, RedisHelper
+
+
+class SchedulingV3:
+    # Read the task table
+    @classmethod
+    def get_task(cls, log_type, crawler, env):
+        get_sql = """ select * from crawler_task_v3; """
+        all_task_list = MysqlHelper.get_values(log_type=log_type, crawler=crawler, sql=get_sql, env=env, action='')
+        pre_task_list = []
+        for task in all_task_list:
+            if int(time.time()*1000) >= task["start_time"]:
+                pre_task_list.append(task)
+        return pre_task_list
+
+    # Update the next start time; called whenever the task with this task_id is dispatched
+    @classmethod
+    def update_task(cls, log_type, crawler, task_id, start_time, interval_piaoquan, env):
+        if interval_piaoquan > 0:
+            new_start_time = start_time + int(interval_piaoquan)*60*1000
+            update_sql = f""" UPDATE crawler_task_v3 SET start_time={new_start_time} WHERE id={task_id} """
+            MysqlHelper.update_values(log_type, crawler, update_sql, env)
+            Common.logger(log_type, crawler).info(f"更新任务下次启动时间成功:{new_start_time}\n")
+
+    # Resource allocation / assembly
+    @classmethod
+    def write_redis(cls, log_type, crawler, env):
+        pre_task_list = cls.get_task(log_type=log_type, crawler=crawler, env=env)
+        if len(pre_task_list) == 0:
+            Common.logger(log_type, crawler).info("暂无新任务\n")
+        else:
+            for pre_task in pre_task_list:
+                # The machine field only distinguishes overseas crawlers from domestic ones; it carries no other meaning
+                machine = pre_task.get('machine', 'dev')
+                next_time = pre_task['start_time']
+                interval_piaoquan = pre_task['interval']
+                task_id = pre_task['id']
+                if machine == "hk":
+                    task_key = 'crawler_config_task_queue:hk'
+                elif machine == "aliyun":
+                    task_key = 'crawler_config_task_queue:aliyun'
+                else:
+                    task_key = 'crawler_config_task_queue:dev'
+                # Push the task onto the Redis queue
+                RedisHelper.redis_push(env, task_key, str(pre_task))
+                Common.logger(log_type, crawler).info(f"写入Redis成功:{str(pre_task)}")
+                if int(time.time()*1000) >= next_time:
+                    cls.update_task(log_type, crawler, task_id, next_time, interval_piaoquan, env)
+
+    @classmethod
+    def get_redis(cls, log_type, crawler, env):
+        if env == 'hk':
+            task_key = 'crawler_config_task_queue:hk'
+        elif env == 'prod':
+            task_key = 'crawler_config_task_queue:aliyun'
+        else:
+            task_key = 'crawler_config_task_queue:dev'
+
+        redis_data = RedisHelper.redis_pop(env, task_key)
+        if redis_data is None or len(redis_data) == 0:
+            Common.logger(log_type, crawler).info("Redis为空,程序退出")
+            return
+        else:
+            # redis_data is bytes; tasks are pushed as str(dict), so eval() rebuilds the dict
+            task = eval(str(redis_data, encoding="utf8"))
+            return task
+
+    @classmethod
+    def scheduling_task(cls, log_type, crawler, env):
+        task = cls.get_redis(log_type, crawler, env)
+        if not task:
+            Common.logger(log_type, crawler).info("Redis为空,程序退出")
+            return
+        Common.logger(log_type, crawler).info(f"已获取调度任务:{type(task)}, {task}")
+        mode = task['mode']
+        source = task['source']
+        spider_name = f"run_{source}_{mode}_scheduling"
+        if env == "aliyun":
+            oss_endpoint = "inner"
+        elif env == "hk":
+            oss_endpoint = "hk"
+        else:
+            oss_endpoint = "out"
+
+        # Production environment: dispatch the task
+        Common.logger(log_type, crawler).info(f"开始调度任务:{task}\n")
+        rule_dict = eval(task['rule'])  # rule is stored as a dict-literal string
+        task_str = [
+            ('task_id', str(task['id'])),
+            ('task_name', str(task['task_name'])),
+            ('source', str(task['source'])),
+            ('start_time', str(task['start_time'])),
+            ('interval', str(task['interval'])),
+            ('mode', str(task['mode'])),
+            ('duration_min', rule_dict['duration']['min']),
+            ('duration_max', rule_dict['duration']['max']),
+            ('play_cnt_min', rule_dict['playCnt']['min']),
+            ('play_cnt_max', rule_dict['playCnt']['max']),
+            ('publish_day_min', rule_dict['period']['min']),
+            ('publish_day_max', rule_dict['period']['max']),
+            ('fans_min', rule_dict['fans']['min']),
+            ('fans_max', rule_dict['fans']['max']),
+            ('videos_min', rule_dict['videos']['min']),
+            ('videos_max', rule_dict['videos']['max']),
+            ('video_like_min', rule_dict['like']['min']),
+            ('video_like_max', rule_dict['like']['max']),
+            ('video_width_min', rule_dict['videoWidth']['min']),
+            ('video_width_max', rule_dict['videoWidth']['max']),
+            ('video_height_min', rule_dict['videoHeight']['min']),
+            ('video_height_max', rule_dict['videoHeight']['max']),
+            ('spider_name', str(task['spider_name'])),
+            ('machine', str(task['machine'])),
+            ('status', str(task['status'])),
+            ('create_time', str(task['create_time'])),
+            ('update_time', str(task['update_time'])),
+            ('operator', str(task['operator']))
+        ]
+        task_str = str(task_str).replace(' ', '')
+        cmd = f"""sh scheduling/scheduling_v3/scheduling_v3.sh {source}/{source}_main/{spider_name}.py --log_type="{mode}" --crawler="{source}" --task="{task_str}" --oss_endpoint="{oss_endpoint}" --env="{env}" >>{source}/logs/{source}-scheduling.log """
+        Common.logger(log_type, crawler).info(f"cmd:{cmd}\n")
+        os.system(cmd)
+
+
+if __name__ == "__main__":
+    # print(SchedulingV3.get_task("scheduling", "scheduling", "dev"))
+    # SchedulingV3.update_task("scheduling", "scheduling", 8, 1681833600000, 1, "dev")
+    # SchedulingV3.write_redis("scheduling", "scheduling", "dev")
+    # print(SchedulingV3.get_redis("scheduling", "scheduling", "dev"))
+    SchedulingV3.scheduling_task("scheduling", "scheduling", "dev")
+    pass
+
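`write_redis` serializes each task with `str()` and `get_redis` rebuilds it with `eval()`, which only round-trips cleanly when every field has a literal `repr`. A sketch of the same queue round-trip over JSON instead (assuming the task rows are JSON-serializable; non-serializable values such as datetimes would need the `default=str` shown):

```python
# Sketch: JSON round-trip through the same RedisHelper queue used above.
import json

def push_task(env, task_key, task: dict) -> None:
    RedisHelper.redis_push(env, task_key, json.dumps(task, default=str))

def pop_task(env, task_key):
    raw = RedisHelper.redis_pop(env, task_key)
    return None if raw is None else json.loads(raw)
```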

+ 34 - 0
scheduling/scheduling_v3/run_scheduling_task_v3.py

@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/7
+import argparse
+import os
+import sys
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from scheduling.scheduling_v3.crawler_scheduling_v3 import SchedulingV3
+
+
+class SchedulingTask:
+    @classmethod
+    def scheduling_task(cls, log_type, crawler, env):
+        Common.logger(log_type, crawler).info("开始调度爬虫任务")
+        SchedulingV3.scheduling_task(log_type, crawler, env)
+        Common.logger(log_type, crawler).info("爬虫任务调度完成")
+        Common.del_logs(log_type, crawler)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # create the argument parser
+    parser.add_argument('--log_type', default='follow', type=str)  # add an argument and declare its type
+    parser.add_argument('--crawler', default='youtube')  # add an argument
+    parser.add_argument('--env', default='dev')  # add an argument
+    # parser.add_argument('--machine')
+    args = parser.parse_args()  # parse the arguments (they can also be supplied from the terminal)
+    # print(args)
+    SchedulingTask.scheduling_task(
+        log_type=args.log_type,
+        crawler=args.crawler,
+        env=args.env,
+    )

+ 31 - 0
scheduling/scheduling_v3/run_write_task_v3.py

@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/7
+import argparse
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.common import Common
+from scheduling.scheduling_v3.crawler_scheduling_v3 import SchedulingV3
+
+
+class WriteTask:
+    @classmethod
+    def write_task(cls, log_type, crawler, env):
+        Common.logger(log_type, crawler).info("开始读取爬虫任务,写入Redis")
+        SchedulingV3.write_redis(log_type=log_type, crawler=crawler, env=env)
+        Common.logger(log_type, crawler).info("写入Redis完成")
+        Common.del_logs(log_type, crawler)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # create the argument parser
+    parser.add_argument('--log_type', default='follow', type=str)  # add an argument and declare its type
+    parser.add_argument('--crawler', default='youtube')  # add an argument
+    parser.add_argument('--env', default='dev')  # add an argument
+    args = parser.parse_args()  # parse the arguments (they can also be supplied from the terminal)
+    WriteTask.write_task(
+        log_type=args.log_type,
+        crawler=args.crawler,
+        env=args.env,
+    )
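For local testing, the write step and the dispatch step above can be chained directly instead of running as separate crontab jobs (a sketch; in production they stay separate):

```python
# Sketch: run one write/dispatch cycle in-process (dev environment).
from scheduling.scheduling_v3.crawler_scheduling_v3 import SchedulingV3

SchedulingV3.write_redis("scheduling", "scheduling", "dev")      # task table -> Redis queue
SchedulingV3.scheduling_task("scheduling", "scheduling", "dev")  # Redis queue -> spawn spider
```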

+ 1 - 22
scheduling/scheduling_main/scheduling.sh → scheduling/scheduling_v3/scheduling_v3.sh

@@ -33,11 +33,6 @@ echo "$(date "+%Y-%m-%d %H:%M:%S") 更新环境变量..."
 cd ~ && source ${profile_path}
 echo "$(date "+%Y-%m-%d %H:%M:%S") 更新环境变量完成!"
 
-#echo "$(date "+%Y-%m-%d %H:%M:%S") 正在杀进程..."
-#grep_str=run_${crawler##*=}
-#ps aux | grep ${grep_str} | grep Python | grep -v grep | awk '{print $2}' | xargs kill -9
-#echo "$(date "+%Y-%m-%d %H:%M:%S") 进程已杀死!"
-
 if [ ${env} = "--env=hk" ];then
   echo "升级yt-dlp"
   pip3 install yt-dlp -U
@@ -47,25 +42,9 @@ else
   echo "$(date "+%Y-%m-%d %H:%M:%S") 代码更新完成!"
 fi
 
-if [ ${env} = "--env=hk" ];then
-  echo "无需重启Appium及adb服务"
-elif [ ${env} = "--env=prod" ];then
-  echo "无需重启Appium及adb服务"
-else
-  echo "$(date "+%Y-%m-%d %H:%M:%S") 正在重启Appium..."
-  ps aux | grep Appium.app | grep -v grep | awk '{print $2}' | xargs kill -9
-  nohup ${node_path} /Applications/Appium.app/Contents/Resources/app/node_modules/appium/build/lib/main.js >>./nohup.log 2>&1 &
-  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启Appium完毕!"
-
-  echo "$(date "+%Y-%m-%d %H:%M:%S") 正在重启adb..."
-  adb kill-server
-  adb start-server
-  echo "$(date "+%Y-%m-%d %H:%M:%S") 重启adb完毕!"
-fi
-
 echo "$(date "+%Y-%m-%d %H:%M:%S") 正在重启服务..."
 cd ${piaoquan_crawler_dir}
-nohup ${python} -u ${crawler_dir} ${log_type} ${crawler} ${task} ${oss_endpoint} ${env} >>${nohup_dir} 2>&1 &
+nohup ${python} -u ${crawler_dir} ${log_type} ${crawler} ${task} ${env} >>${nohup_dir} 2>&1 &
 echo "$(date "+%Y-%m-%d %H:%M:%S") 服务重启完毕!"
 
 exit 0

BIN
weixinzhishu/logs/.DS_Store


+ 3 - 0
weixinzhishu/logs/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/18

+ 3 - 0
weixinzhishu/weixinzhishu_chlsfiles/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/4/18

+ 1 - 0
weixinzhishu/weixinzhishu_chlsfiles/charles202304182058.chlsj

@@ -0,0 +1 @@
+[]

+ 1 - 0
weixinzhishu/weixinzhishu_chlsfiles/charles202304182059.chlsj

@@ -0,0 +1 @@
+[]

The diff is too large
+ 0 - 0
weixinzhishu/weixinzhishu_chlsfiles/charles202304182101.chlsj


The diff is too large
+ 71 - 0
xiaoniangao/xiaoniangao_follow/xiaoniangao_follow_scheduling.py


+ 679 - 0
xiaoniangao/xiaoniangao_hour/xiaoniangao_hour_scheduling.py

@@ -0,0 +1,679 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/15
+import datetime
+import json
+import os
+import random
+import shutil
+import sys
+import time
+import requests
+import urllib3
+
+sys.path.append(os.getcwd())
+from common.common import Common
+from common.feishu import Feishu
+from common.publish import Publish
+from common.scheduling_db import MysqlHelper
+from common.public import filter_word
+
+proxies = {"http": None, "https": None}
+
+
+class XiaoniangaoHour:
+    platform = "小年糕"
+
+    # Generate uid and token
+    @classmethod
+    def get_uid_token(cls):
+        words = "abcdefghijklmnopqrstuvwxyz0123456789"
+        uid = f"""{"".join(random.sample(words, 8))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 4))}-{"".join(random.sample(words, 12))}"""
+        token = "".join(random.sample(words, 32))
+        uid_token_dict = {
+            "uid": uid,
+            "token": token
+        }
+        return uid_token_dict
+
+    # Baseline threshold rules
+    @staticmethod
+    def download_rule(video_dict):
+        """
+        Basic rules a video must meet before download
+        :param video_dict: video info, as a dict
+        :return: True if every rule passes, otherwise False
+        """
+        return (int(float(video_dict["duration"])) >= 40  # duration
+                and (int(video_dict["video_width"]) >= 0 or int(video_dict["video_height"]) >= 0)  # width or height
+                and int(video_dict["play_cnt"]) >= 4000  # play count
+                and int(video_dict["like_cnt"]) >= 0  # like count
+                and int(video_dict["share_cnt"]) >= 0  # share count
+                and int(time.time()) - int(video_dict["publish_time_stamp"]) <= 3600 * 24 * 10)  # published within 10 days
+
+    # Emojis and decorative symbols
+    @classmethod
+    def get_expression(cls):
+        # emoji list
+        expression_list = ['📍', '⭕️', '🔥', '📣', '🎈', '⚡', '🔔', '🚩', '💢', '💎', '👉', '💓', '❗️', '🔴', '🔺', '♦️', '♥️', '👉',
+                           '👈', '🏆', '❤️\u200d🔥']
+        # symbol list
+        char_list = ['...', '~~']
+        return expression_list, char_list
+
+    @classmethod
+    def repeat_video(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_video where platform="小年糕" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    @classmethod
+    def repeat_hour(cls, log_type, crawler, video_id, env):
+        sql = f""" select * from crawler_xiaoniangao_hour where platform="小年糕" and out_video_id="{video_id}"; """
+        repeat_video = MysqlHelper.get_values(log_type, crawler, sql, env)
+        return len(repeat_video)
+
+    # Fetch the recommended feed list
+    @classmethod
+    def get_videoList(cls, log_type, crawler, env):
+        # try:
+        uid_token_dict = cls.get_uid_token()
+        url = "https://kapi.xiaoniangao.cn/trends/get_recommend_trends"
+        headers = {
+            # "x-b3-traceid": cls.hour_x_b3_traceid,
+            "x-b3-traceid": '1c403a4aa72e3c',
+            # "X-Token-Id": cls.hour_x_token_id,
+            "X-Token-Id": 'ab619e96d801f1567388629260aa68ec-1202200806',
+            # "uid": cls.hour_uid,
+            "uid": uid_token_dict['uid'],
+            "content-type": "application/json",
+            "Accept-Encoding": "gzip,compress,br,deflate",
+            "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
+                          ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
+                          'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
+            # "Referer": cls.hour_referer
+            "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/624/page-frame.html'
+        }
+        data = {
+            "log_params": {
+                "page": "discover_rec",
+                "common": {
+                    "brand": "iPhone",
+                    "device": "iPhone 11",
+                    "os": "iOS 14.7.1",
+                    "weixinver": "8.0.20",
+                    "srcver": "2.24.2",
+                    "net": "wifi",
+                    "scene": 1089
+                }
+            },
+            "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!750x500r/crop/750x500/interlace/1/format/jpg",
+            "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail/!80x80r/crop/80x80/interlace/1/format/jpg",
+            "share_width": 625,
+            "share_height": 500,
+            "ext": {
+                "fmid": 0,
+                "items": {}
+            },
+            "app": "xng",
+            "rec_scene": "discover_rec",
+            "log_common_params": {
+                "e": [{
+                    "data": {
+                        "page": "discoverIndexPage",
+                        "topic": "recommend"
+                    },
+                    "ab": {}
+                }],
+                "ext": {
+                    "brand": "iPhone",
+                    "device": "iPhone 11",
+                    "os": "iOS 14.7.1",
+                    "weixinver": "8.0.20",
+                    "srcver": "2.24.3",
+                    "net": "wifi",
+                    "scene": "1089"
+                },
+                "pj": "1",
+                "pf": "2",
+                "session_id": "7bcce313-b57d-4305-8d14-6ebd9a1bad29"
+            },
+            "refresh": False,
+            "token": uid_token_dict["token"],
+            "uid": uid_token_dict["uid"],
+            "proj": "ma",
+            "wx_ver": "8.0.20",
+            "code_ver": "3.62.0"
+        }
+        urllib3.disable_warnings()
+        r = requests.post(url=url, headers=headers, json=data, proxies=proxies, verify=False)
+        if 'data' not in r.text or r.status_code != 200:
+            Common.logger(log_type, crawler).warning(f"get_videoList:{r.text}\n")
+            return
+        elif "data" not in r.json():
+            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()}\n")
+            return
+        elif "list" not in r.json()["data"]:
+            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']}\n")
+            return
+        elif len(r.json()['data']['list']) == 0:
+            Common.logger(log_type, crawler).warning(f"get_videoList:{r.json()['data']['list']}\n")
+            return
+        else:
+            # video feed data
+            feeds = r.json()["data"]["list"]
+            for i in range(len(feeds)):
+                # Title: an emoji is randomly added at the start or end, or a symbol is appended
+                if "title" in feeds[i]:
+                    before_video_title = feeds[i]["title"].strip().replace("\n", "") \
+                        .replace("#表情", "").replace("#符号", "") \
+                        .replace("/", "").replace("\r", "").replace("#", "") \
+                        .replace(".", "。").replace("\\", "").replace("&NBSP", "") \
+                        .replace(":", "").replace("*", "").replace("?", "") \
+                        .replace("?", "").replace('"', "").replace("<", "") \
+                        .replace(">", "").replace("|", "").replace(" ", "") \
+                        .replace("'", "")
+
+                    expression_list, char_list = cls.get_expression()
+                    # pick one emoji at random
+                    expression = random.choice(expression_list)
+                    # candidate titles: [emoji + title, title + emoji]
+                    expression_title_list = [expression + before_video_title, before_video_title + expression]
+                    # randomly pick one of them
+                    title_list1 = random.choice(expression_title_list)
+                    # candidate title: original title + symbol
+                    title_list2 = before_video_title + random.choice(char_list)
+                    # final title: pick between the emoji variant and the symbol variant
+                    video_title = random.choice([title_list2, title_list1])
+                else:
+                    video_title = 0
+
+                # video ID
+                video_id = feeds[i].get("vid", 0)
+                # play count
+                video_play_cnt = feeds[i].get("play_pv", 0)
+                # like count
+                video_like_cnt = feeds[i]["favor"]["total"] if "favor" in feeds[i] else 0
+                # comment count
+                video_comment_cnt = feeds[i].get("comment_count", 0)
+                # share count
+                video_share_cnt = feeds[i].get("share", 0)
+                # duration (ms -> s)
+                video_duration = int(feeds[i]["du"] / 1000) if "du" in feeds[i] else 0
+                # width and height (both keys must be present)
+                if "w" in feeds[i] and "h" in feeds[i]:
+                    video_width = feeds[i]["w"]
+                    video_height = feeds[i]["h"]
+                else:
+                    video_width = 0
+                    video_height = 0
+                # publish time
+                video_send_time = feeds[i].get("t", 0)
+                publish_time_stamp = int(int(video_send_time) / 1000)
+                publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+
+                # user name / avatar
+                if "user" in feeds[i]:
+                    user_name = feeds[i]["user"]["nick"].strip().replace("\n", "") \
+                        .replace("/", "").replace("快手", "").replace(" ", "") \
+                        .replace(" ", "").replace("&NBSP", "").replace("\r", "")
+                    head_url = feeds[i]["user"]["hurl"]
+                else:
+                    user_name = 0
+                    head_url = 0
+
+                # user ID
+                profile_id = feeds[i]["id"]
+
+                # user mid
+                profile_mid = feeds[i]["user"]["mid"]
+
+                # cover URL
+                cover_url = feeds[i].get("url", 0)
+
+                # video playback URL
+                video_url = feeds[i].get("v_url", 0)
+
+                video_dict = {
+                    "video_title": video_title,
+                    "video_id": video_id,
+                    "duration": video_duration,
+                    "play_cnt": video_play_cnt,
+                    "like_cnt": video_like_cnt,
+                    "comment_cnt": video_comment_cnt,
+                    "share_cnt": video_share_cnt,
+                    "user_name": user_name,
+                    "publish_time_stamp": publish_time_stamp,
+                    "publish_time_str": publish_time_str,
+                    "video_width": video_width,
+                    "video_height": video_height,
+                    "avatar_url": head_url,
+                    "profile_id": profile_id,
+                    "profile_mid": profile_mid,
+                    "cover_url": cover_url,
+                    "video_url": video_url,
+                    "session": f"xiaoniangao-hour-{int(time.time())}"
+                }
+                for k, v in video_dict.items():
+                    Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                # Filter out invalid videos
+                if video_title == 0 or video_id == 0 or video_duration == 0 \
+                        or video_send_time == 0 or user_name == 0 or head_url == 0 \
+                        or cover_url == 0 or video_url == 0:
+                    Common.logger(log_type, crawler).warning("无效视频\n")
+                # Filter by the baseline threshold rules
+                elif cls.download_rule(video_dict) is False:
+                    Common.logger(log_type, crawler).info("不满足基础门槛规则\n")
+                elif cls.repeat_video(log_type, crawler, video_dict['video_id'], env) != 0:
+                    Common.logger(log_type, crawler).info('视频已下载\n')
+                # Filter by the sensitive-word list
+                elif any(str(word) in video_title for word in filter_word(log_type, crawler, "小年糕", env)):
+                    Common.logger(log_type, crawler).info("视频已中过滤词\n")
+                    time.sleep(1)
+                else:
+                    # Write into the hourly feeds table (crawler_xiaoniangao_hour)
+                    insert_sql = f""" insert into crawler_xiaoniangao_hour(profile_id,
+                    profile_mid,
+                    platform,
+                    out_video_id,
+                    video_title,
+                    user_name,
+                    cover_url,
+                    video_url,
+                    duration,
+                    publish_time,
+                    play_cnt,
+                    crawler_time_stamp,
+                    crawler_time)
+                    values({profile_id},
+                    {profile_mid},
+                    "{cls.platform}",
+                    "{video_id}",
+                    "{video_title}",
+                    "{user_name}",
+                    "{cover_url}",
+                    "{video_url}",
+                    {video_duration},
+                    "{publish_time_str}",
+                    {video_play_cnt},
+                    {int(time.time())},
+                    "{time.strftime("%Y-%y-%d %H:%M:%S", time.localtime(int(time.time())))}"
+                    )"""
+                    Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+                    MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+                    Common.logger(log_type, crawler).info('视频信息插入数据库成功!\n')
+        # except Exception as e:
+        #     Common.logger(log_type, crawler).error(f"get_videoList:{e}\n")
+
+    @classmethod
+    def get_video_info(cls, log_type, crawler, p_id, p_mid, v_title, v_id):
+        # try:
+        uid_token_dict = cls.get_uid_token()
+        url = "https://kapi.xiaoniangao.cn/profile/get_profile_by_id"
+        headers = {
+            # "x-b3-traceid": cls.hour_x_b3_traceid,
+            "x-b3-traceid": '1c403a4aa72e3c',
+            # "X-Token-Id": cls.hour_x_token_id,
+            "X-Token-Id": 'ab619e96d801f1567388629260aa68ec-1202200806',
+            "uid": uid_token_dict['uid'],
+            "content-type": "application/json",
+            "Accept-Encoding": "gzip,compress,br,deflate",
+            "User-Agent": 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X)'
+                          ' AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 '
+                          'MicroMessenger/8.0.20(0x18001432) NetType/WIFI Language/zh_CN',
+            # "Referer": cls.hour_referer
+            "Referer": 'https://servicewechat.com/wxd7911e4c177690e4/624/page-frame.html'
+        }
+        data = {
+            "play_src": "1",
+            "profile_id": int(p_id),
+            "profile_mid": int(p_mid),
+            "qs": "imageMogr2/gravity/center/rotate/$/thumbnail/"
+                  "!400x400r/crop/400x400/interlace/1/format/jpg",
+            "h_qs": "imageMogr2/gravity/center/rotate/$/thumbnail"
+                    "/!80x80r/crop/80x80/interlace/1/format/jpg",
+            "share_width": 625,
+            "share_height": 500,
+            "no_comments": True,
+            "no_follow": True,
+            "vid": v_id,
+            "hot_l1_comment": True,
+            # "token": cls.hour_token,
+            "token": uid_token_dict['token'],
+            # "uid": cls.hour_uid,
+            "uid": uid_token_dict['uid'],
+            "proj": "ma",
+            "wx_ver": "8.0.20",
+            "code_ver": "3.62.0",
+            "log_common_params": {
+                "e": [{
+                    "data": {
+                        "page": "dynamicSharePage"
+                    }
+                }],
+                "ext": {
+                    "brand": "iPhone",
+                    "device": "iPhone 11",
+                    "os": "iOS 14.7.1",
+                    "weixinver": "8.0.20",
+                    "srcver": "2.24.3",
+                    "net": "wifi",
+                    "scene": "1089"
+                },
+                "pj": "1",
+                "pf": "2",
+                "session_id": "7bcce313-b57d-4305-8d14-6ebd9a1bad29"
+            }
+        }
+        urllib3.disable_warnings()
+        r = requests.post(headers=headers, url=url, json=data, proxies=proxies, verify=False)
+        if r.status_code != 200 or 'data' not in r.text:
+            Common.logger(log_type, crawler).warning(f"get_videoInfo:{r.text}\n")
+        else:
+            hour_play_cnt = r.json()["data"]["play_pv"]
+            hour_cover_url = r.json()["data"]["url"]
+            hour_video_url = r.json()["data"]["v_url"]
+            hour_video_duration = r.json()["data"]["du"]
+            hour_video_comment_cnt = r.json()["data"]["comment_count"]
+            hour_video_like_cnt = r.json()["data"]["favor"]["total"]
+            hour_video_share_cnt = r.json()["data"]["share"]
+            hour_video_width = r.json()["data"]["w"]
+            hour_video_height = r.json()["data"]["h"]
+            hour_video_send_time = r.json()["data"]["t"]
+            publish_time_stamp = int(int(hour_video_send_time) / 1000)
+            publish_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp))
+            hour_user_name = r.json()["data"]["user"]["nick"]
+            hour_head_url = r.json()["data"]["user"]["hurl"]
+            video_info_dict = {
+                "video_id": v_id,
+                "video_title": v_title,
+                "duration": hour_video_duration,
+                "play_cnt": hour_play_cnt,
+                "like_cnt": hour_video_like_cnt,
+                "comment_cnt": hour_video_comment_cnt,
+                "share_cnt": hour_video_share_cnt,
+                "user_name": hour_user_name,
+                "publish_time_stamp": publish_time_stamp,
+                "publish_time_str": publish_time_str,
+                "video_width": hour_video_width,
+                "video_height": hour_video_height,
+                "avatar_url": hour_head_url,
+                "profile_id": p_id,
+                "profile_mid": p_mid,
+                "cover_url": hour_cover_url,
+                "video_url": hour_video_url,
+                "session": f"xiaoniangao-hour-{int(time.time())}"
+            }
+            return video_info_dict
+
+        # except Exception as e:
+        #     Common.logger(log_type, crawler).error(f"download_video:{e}\n")
+
+    # Update the hourly-rank data
+    @classmethod
+    def update_videoList(cls, log_type, crawler, strategy, oss_endpoint, env):
+        """
+        Update the hourly-rank data
+        """
+        # try:
+        three_days_ago = (datetime.date.today() + datetime.timedelta(days=-3)).strftime("%Y-%m-%d %H:%M:%S")
+        update_time_stamp = int(time.mktime(time.strptime(three_days_ago, "%Y-%m-%d %H:%M:%S")))
+        select_sql = f""" select * from crawler_xiaoniangao_hour where crawler_time_stamp >= {update_time_stamp} GROUP BY out_video_id """
+        update_video_list = MysqlHelper.get_values(log_type, crawler, select_sql, env)
+        if len(update_video_list) == 0:
+            Common.logger(log_type, crawler).info("暂无需要更新的小时榜数据\n")
+            return
+        for update_video_info in update_video_list:
+            profile_id = update_video_info["profile_id"]
+            profile_mid = update_video_info["profile_mid"]
+            video_title = update_video_info["video_title"]
+            video_id = update_video_info["out_video_id"]
+            # Refresh the play count at 10:00 / 15:00 / 20:00 (first 10 minutes of the hour)
+            hour_column_map = {10: "ten_play_cnt", 15: "fifteen_play_cnt", 20: "twenty_play_cnt"}
+            now = datetime.datetime.now()
+            if now.hour in hour_column_map and now.minute <= 10:
+                video_info_dict = cls.get_video_info(log_type=log_type,
+                                                     crawler=crawler,
+                                                     p_id=profile_id,
+                                                     p_mid=profile_mid,
+                                                     v_title=video_title,
+                                                     v_id=video_id)
+                column = hour_column_map[now.hour]
+                play_cnt = video_info_dict['play_cnt']
+                Common.logger(log_type, crawler).info(f"{column}:{play_cnt}")
+                update_sql = f""" update crawler_xiaoniangao_hour set {column}={play_cnt} WHERE out_video_id="{video_id}"; """
+                # Common.logger(log_type, crawler).info(f"update_sql:{update_sql}")
+                MysqlHelper.update_values(log_type, crawler, update_sql, env)
+                cls.download_publish(log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint,
+                                     env)
+        # except Exception as e:
+        #     Common.logger(log_type, crawler).error(f"update_videoList:{e}\n")
+
+    @classmethod
+    def download(cls, log_type, crawler, video_info_dict, strategy, oss_endpoint, env):
+        # Download the cover
+        Common.download_method(log_type=log_type, crawler=crawler, text="cover", title=video_info_dict["video_title"],
+                               url=video_info_dict["cover_url"])
+        # Download the video
+        Common.download_method(log_type=log_type, crawler=crawler, text="video", title=video_info_dict["video_title"],
+                               url=video_info_dict["video_url"])
+        # Save video info to "./videos/{download_video_title}/info.txt"
+        Common.save_video_info(log_type=log_type, crawler=crawler, video_dict=video_info_dict)
+
+        # Upload the video
+        Common.logger(log_type, crawler).info("开始上传视频...")
+        our_video_id = Publish.upload_and_publish(log_type=log_type,
+                                                  crawler=crawler,
+                                                  strategy=strategy,
+                                                  our_uid="hour",
+                                                  env=env,
+                                                  oss_endpoint=oss_endpoint)
+        if env == "dev":
+            our_video_link = f"https://testadmin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        else:
+            our_video_link = f"https://admin.piaoquantv.com/cms/post-detail/{our_video_id}/info"
+        Common.logger(log_type, crawler).info("视频上传完成")
+
+        if our_video_id is None:
+            # Delete the video folder
+            shutil.rmtree(f"./{crawler}/videos/{video_info_dict['video_title']}")
+            return
+
+        # Save the video info to the database
+        rule_dict = {
+            "duration": {"min": 40},
+            "play_cnt": {"min": 4000},
+            "publish_day": {"min": 10}
+        }
+
+        insert_sql = f""" insert into crawler_video(video_id,
+                                                        out_user_id,
+                                                        platform,
+                                                        strategy,
+                                                        out_video_id,
+                                                        video_title,
+                                                        cover_url,
+                                                        video_url,
+                                                        duration,
+                                                        publish_time,
+                                                        play_cnt,
+                                                        crawler_rule,
+                                                        width,
+                                                        height)
+                                                        values({our_video_id},
+                                                        "{video_info_dict['profile_id']}",
+                                                        "{cls.platform}",
+                                                        "小时榜爬虫策略",
+                                                        "{video_info_dict['video_id']}",
+                                                        "{video_info_dict['video_title']}",
+                                                        "{video_info_dict['cover_url']}",
+                                                        "{video_info_dict['video_url']}",
+                                                        {int(video_info_dict['duration'])},
+                                                        "{video_info_dict['publish_time_str']}",
+                                                        {int(video_info_dict['play_cnt'])},
+                                                        '{json.dumps(rule_dict)}',
+                                                        {int(video_info_dict['video_width'])},
+                                                        {int(video_info_dict['video_height'])}) """
+        Common.logger(log_type, crawler).info(f"insert_sql:{insert_sql}")
+        MysqlHelper.update_values(log_type, crawler, insert_sql, env)
+        Common.logger(log_type, crawler).info('视频信息插入数据库成功!')
+
+        # Write the video to Feishu
+        Feishu.insert_columns(log_type, crawler, "yatRv2", "ROWS", 1, 2)
+        # Write the data into the first row of the video-ID sheet
+        upload_time = int(time.time())
+        values = [[time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(upload_time)),
+                   "小时级上升榜",
+                   str(video_info_dict['video_id']),
+                   str(video_info_dict['video_title']),
+                   our_video_link,
+                   video_info_dict['play_cnt'],
+                   video_info_dict['comment_cnt'],
+                   video_info_dict['like_cnt'],
+                   video_info_dict['share_cnt'],
+                   video_info_dict['duration'],
+                   f"{video_info_dict['video_width']}*{video_info_dict['video_height']}",
+                   str(video_info_dict['publish_time_str'].replace("-", "/")),
+                   str(video_info_dict['user_name']),
+                   str(video_info_dict['profile_id']),
+                   str(video_info_dict['profile_mid']),
+                   str(video_info_dict['avatar_url']),
+                   str(video_info_dict['cover_url']),
+                   str(video_info_dict['video_url'])]]
+        time.sleep(1)
+        Feishu.update_values(log_type, crawler, "yatRv2", "F2:Z2", values)
+        Common.logger(log_type, crawler).info('视频信息写入飞书成功\n')
+
+    # Download / upload
+    @classmethod
+    def download_publish(cls, log_type, crawler, video_info_dict, update_video_info, strategy, oss_endpoint, env):
+        # try:
+        if cls.repeat_video(log_type, crawler, video_info_dict["video_id"], env) != 0:
+            Common.logger(log_type, crawler).info('视频已下载\n')
+        # Play count >= 50000: download directly
+        elif int(video_info_dict["play_cnt"]) >= 50000:
+            Common.logger(log_type, crawler).info(
+                f"播放量:{video_info_dict['play_cnt']} >= 50000,满足下载规则,开始下载视频")
+            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
+
+        # Rising-rank logic: any single window rises >= 5000, or two windows each rise >= 2000
+        elif int(update_video_info['ten_play_cnt']) >= 5000 or int(
+                update_video_info['fifteen_play_cnt']) >= 5000 or int(update_video_info['twenty_play_cnt']) >= 5000:
+            Common.logger(log_type, crawler).info(
+                f"10:00 or 15:00 or 20:00 数据上升量:{int(update_video_info['ten_play_cnt'])} or {int(update_video_info['fifteen_play_cnt'])} or {int(update_video_info['twenty_play_cnt'])} >= 5000")
+            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
+            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
+
+        elif int(update_video_info['ten_play_cnt']) >= 2000 and int(update_video_info['fifteen_play_cnt']) >= 2000:
+            Common.logger(log_type, crawler).info(
+                f"10:00 and 15:00 数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['fifteen_play_cnt'])} >= 2000")
+            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
+            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
+
+        elif int(update_video_info['fifteen_play_cnt']) >= 2000 and int(update_video_info['twenty_play_cnt']) >= 2000:
+            Common.logger(log_type, crawler).info(
+                f"15:00 and 20:00 数据上升量:{int(update_video_info['fifteen_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 2000")
+            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
+            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
+
+        elif int(update_video_info['ten_play_cnt']) >= 2000 and int(update_video_info['twenty_play_cnt']) >= 2000:
+            Common.logger(log_type, crawler).info(
+                f"今日10:00 / 20:00数据上升量:{int(update_video_info['ten_play_cnt'])} and {int(update_video_info['twenty_play_cnt'])} >= 2000")
+            Common.logger(log_type, crawler).info("满足下载规则,开始下载视频")
+            cls.download(log_type, crawler, video_info_dict, strategy, oss_endpoint, env)
+
+        else:
+            Common.logger(log_type, crawler).info("上升量不满足下载规则")
+    # except Exception as e:
+    #     Common.logger(log_type, crawler).error(f"download_publish:{e}\n")
+
+
+if __name__ == "__main__":
+    print(XiaoniangaoHour.get_expression())
+    # print(XiaoniangaoHour.get_uid_token())
+    # XiaoniangaoHour.get_videoList("test", "xiaoniangao", "dev")
+    # XiaoniangaoHour.update_videoList("test", "xiaoniangao", "小时榜爬虫策略", "out", "dev")
+
+    pass
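The branches in `download_publish` encode one rule: download when the absolute play count passes 50000, when any window rose by at least 5000, or when any two windows each rose by at least 2000. Distilled into a single predicate (a readability sketch, not code from this commit):

```python
# Sketch of the rising-rank rule in download_publish above.
def should_download(ten: int, fifteen: int, twenty: int, play_cnt: int) -> bool:
    if play_cnt >= 50000:                      # hot enough to download outright
        return True
    if max(ten, fifteen, twenty) >= 5000:      # one window rose by >= 5000
        return True
    pairs = [(ten, fifteen), (fifteen, twenty), (ten, twenty)]
    return any(a >= 2000 and b >= 2000 for a, b in pairs)  # two windows each >= 2000
```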

+ 35 - 0
xiaoniangao/xiaoniangao_main/run_xiaoniangao_follow_scheduling.py

@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+# @Author: wangkun
+# @Time: 2023/3/13
+import argparse
+import os
+import sys
+sys.path.append(os.getcwd())
+from common.common import Common
+from xiaoniangao.xiaoniangao_follow.xiaoniangao_follow import XiaoniangaoFollow
+
+
+def main(log_type, crawler, env):
+    if env == "dev":
+        oss_endpoint = "out"
+    else:
+        oss_endpoint = "inner"
+    Common.logger(log_type, crawler).info('开始抓取 小年糕 定向榜\n')
+    XiaoniangaoFollow.get_follow_videos(log_type=log_type,
+                                        crawler=crawler,
+                                        strategy="定向爬虫策略",
+                                        oss_endpoint=oss_endpoint,
+                                        env=env)
+    Common.del_logs(log_type, crawler)
+    Common.logger(log_type, crawler).info('抓取完一轮\n')
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()  # create the argument parser
+    parser.add_argument('--log_type', type=str)  # add an argument and declare its type
+    parser.add_argument('--crawler')  # add an argument
+    parser.add_argument('--env')  # add an argument
+    args = parser.parse_args()  # parse the arguments (they can also be supplied from the terminal)
+    main(log_type=args.log_type,
+         crawler=args.crawler,
+         env=args.env)
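A quick way to exercise this runner locally (a sketch; argument values are examples only, and the repo's environment must already be set up):

```python
# Hypothetical local invocation of run_xiaoniangao_follow_scheduling.py (dev environment).
import subprocess

subprocess.run([
    "python3", "xiaoniangao/xiaoniangao_main/run_xiaoniangao_follow_scheduling.py",
    "--log_type", "follow",
    "--crawler", "xiaoniangao",
    "--env", "dev",
], check=True)
```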

Some files were not shown because too many files were changed